| content | origin | type |
|---|---|---|
| stringlengths 0–1.05M | stringclasses 2 values | stringclasses 2 values |
import unittest
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
class MonolithicTest(unittest.TestCase):
def _steps(self):
for name in dir(self):
if name.startswith("step"):
yield name, getattr(self, name)
def assertBrowserTitle(self, expected):
self.assertIn(expected, self.browser.title.lower())
def assertCurrentUrl(self, expected):
self.assertIn(expected, self.browser.current_url.lower())
def assertElementText(self, expected, element):
self.assertIn(expected.lower(), element.text.lower())
def clearInputField(self, element):
while element.get_attribute('value') != '':
element.send_keys(Keys.BACKSPACE)
def xpath_string_escape(self, input_str):
""" creates a concatenation of alternately-quoted strings that is always a valid XPath expression """
parts = input_str.split("'")
return "concat('" + "', \"'\" , '".join(parts) + "', '')"
def login(self):
self.browser.get(self.domain+'login')
self.email = self.browser.find_element_by_name("email")
self.password = self.browser.find_element_by_name("password")
self.signin = self.browser.find_element_by_xpath(
"//button[.='Sign In']")
self.email.send_keys(self.correct_email)
self.password.send_keys(self.correct_password)
self.signin.click()
time.sleep(5)
self.assertCurrentUrl(self.domain+'admin/dashboard')
def logout(self):
iconbutton = self.browser.find_element_by_xpath(
'//button[@class="MuiButtonBase-root MuiIconButton-root MuiIconButton-colorInherit"]')
iconbutton.click()
time.sleep(1)
logoutbutton = self.browser.find_element_by_xpath(
'//li[@role="menuitem"][@tabindex="0"]')
logoutbutton.click()
time.sleep(2)
self.assertCurrentUrl(self.domain+'login')
def assertPanelLocked(self, button_id, is_locked):
locked = True
button = self.browser.find_element_by_id(button_id)
button.click()
time.sleep(1)
self.browser.switch_to.frame(
self.browser.find_element_by_id('content-iframe')
)
        try:
            # If this lookup raises NoSuchElementException, the "unverified"
            # notice is absent and the panel is unlocked.
            self.browser.find_element_by_id("unverified_text")
        except NoSuchElementException:
            locked = False
self.browser.switch_to.default_content()
self.assertEqual(is_locked, locked)
def test_steps(self):
print()
for name, step in self._steps():
try:
test_name = " ".join(name.split('_')[2:])
print("Running test: {}".format(test_name))
step()
time.sleep(1)
            except Exception as e:
                self.fail("step '{}' failed ({}: {})".format(
                    name, type(e).__name__, e))
def setUp(self):
# for linux
# self.browser = webdriver.Chrome()
# for Windows specify the path
self.browser = webdriver.Chrome('C:/chromedriver.exe')
self.domain = 'https://openinventoryorg.github.io/web-frontend/#/'
# change email, password to valid email,pwds of the system
self.correct_email = 'openinventorysystem@gmail.com'
self.correct_password = 'password'
self.browser.maximize_window()
time.sleep(1)
self.addCleanup(self.browser.quit)
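if __name__ == '__main__':
    # Standard unittest entry point so the monolithic suite can be run directly.
    unittest.main()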
|
nilq/baby-python
|
python
|
import json
from .measurementGroup import MeasurementGroup
from .measurementItem import MeasurementItem
from .codeSequences import CodeSequence
class MeasurementReport(object):
"""
    Data structure plus convenience methods to create measurement reports following
the required format to be processed by the DCMQI tid1500writer tool (using the
JSON export of this).
"""
def __init__(self, seriesNumber, compositeContext, dicomSourceFileList, timePoint,
seriesDescription = "Measurements", procedureReported = None):
self.SeriesDescription = str(seriesDescription)
self.SeriesNumber = str(seriesNumber)
self.InstanceNumber = "1"
self.compositeContext = [compositeContext]
self.imageLibrary = dicomSourceFileList
self.observerContext = {
"ObserverType": "PERSON",
"PersonObserverName": "Reader01"
}
if procedureReported:
self.procedureReported = procedureReported
self.VerificationFlag = "VERIFIED"
self.CompletionFlag = "COMPLETE"
self.activitySession = "1"
self.timePoint = str(timePoint)
self.Measurements = []
def addMeasurementGroup(self, measurementGroup):
self.Measurements.append(measurementGroup)
def exportToJson(self, fileName):
with open(fileName, 'w') as fp:
json.dump(self._getAsDict(), fp, indent = 2)
def getJsonStr(self):
return json.dumps(self._getAsDict(), indent = 2)
def _getAsDict(self):
# This is a bit of a hack to get the "@schema" in there, didn't figure out how to
# do this otherwise with json.dumps. If this wasn't needed I could just dump
# the json directly with my custom encoder.
jsonStr = json.dumps(self, indent = 2, cls = self._MyJSONEncoder)
tempDict = json.loads(jsonStr)
outDict = {}
outDict["@schema"] = "https://raw.githubusercontent.com/qiicr/dcmqi/master/doc/schemas/sr-tid1500-schema.json#"
outDict.update(tempDict)
return outDict
    # Inner private class to define a custom JSON encoder for serializing MeasurementReport
class _MyJSONEncoder(json.JSONEncoder):
def default(self, obj):
            if isinstance(obj, (MeasurementReport, MeasurementGroup,
                                MeasurementItem, CodeSequence)):
                return obj.__dict__
            else:
                return super(MeasurementReport._MyJSONEncoder, self).default(obj)
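# A minimal usage sketch (argument values are hypothetical; see the
# MeasurementGroup class for its real constructor signature):
#   report = MeasurementReport(seriesNumber=100, compositeContext="sr.dcm",
#                              dicomSourceFileList=["ct_0.dcm"], timePoint=1)
#   report.addMeasurementGroup(some_measurement_group)
#   report.exportToJson("measurements.json")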
|
nilq/baby-python
|
python
|
from setuptools import setup, find_packages
setup(
name='pyhindsight',
packages=find_packages(),
include_package_data=True,
scripts=['hindsight.py', 'hindsight_gui.py'],
version='2.0.4',
description='Internet history forensics for Google Chrome/Chromium',
url='https://github.com/obsidianforensics/hindsight',
author='Ryan Benson',
author_email='ryan@obsidianforensics.com',
license='Apache',
keywords=['chrome', 'forensics'],
classifiers=[],
install_requires=[
'keyring>=9.0',
'pytz>=2016.4',
'pycryptodomex>=3.4.3',
'xlsxwriter>=0.8.4',
# 'pypiwin32>=219',
'bottle>=0.12.9'
]
)
|
nilq/baby-python
|
python
|
from bs4 import BeautifulSoup
import requests
import re
# function to get all the policy urls from a website
def collect_url_links(url_link) -> list:
url_list = []
pattern = re.compile(r'^http')
source = requests.get(url_link).text
soup = BeautifulSoup(source, 'lxml')
a_tag = soup.find_all("a") # Gives you the list of all the a tags
for i in a_tag:
if i.text in ["Privacy", "Terms", "Privacy Policy", "Terms of Service"]:
url = i["href"]
url_list.append(url)
    # Resolve relative links: assumes url_link ends with '/' and each relative
    # href starts with '/', so the leading slash is dropped before joining.
    for i in range(len(url_list)):
        if not pattern.match(url_list[i]):
            url_list[i] = url_link + url_list[i][1:]
return url_list
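# Example (hypothetical site; requires network access):
#   links = collect_url_links('https://example.com/')
#   print(links)  # e.g. ['https://example.com/privacy', ...]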
|
nilq/baby-python
|
python
|
def diamond(n):
"""Display a diamond made of *.
Args:
n: (int) Amount of *s in the middle row.
Returns:
Diamond shaped text. None if input n is invalid.
"""
if n <= 0 or n % 2 == 0:
return None
offset = int((n - 1)/2)
# for i in range(offset + 1):
# shape = shape + " "*(offset - i) + "*"*(1 + i*2) + "\n"
shape = [(" "*(offset - i) + "*"*(1 + i*2) + "\n") for i in range(offset + 1)]
shape = shape + shape[-2::-1]
return ''.join(shape)
print(diamond(29))
print('    *\n   ***\n  *****\n *******\n*********\n *******\n  *****\n'
      '   ***\n    *\n')
# __*__
# _***_
# *****
# _***_
# __*__
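# Sanity check: diamond(9) reproduces the nine-row literal printed above.
assert diamond(9) == ('    *\n   ***\n  *****\n *******\n*********\n'
                      ' *******\n  *****\n   ***\n    *\n')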
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 04 20:05:13 2015
Translation of octave code for CSAPS.
@author: Kevin
"""
import numpy as np
import scipy as sp
from scipy import interpolate
from scipy.sparse import linalg
def csaps(x, y, p, xi=[], w=[]):
# sort the inputs by ordering of x
ii = np.argsort(x)
x = np.array(x)
y = np.array(y)
x = x.take(ii)
y = y.take(ii)
h = np.diff(x)
n = np.size(x)
    w = np.asarray(w)
    if np.size(w) == 0:
        w = np.ones([n, 1])
R = sp.sparse.spdiags(np.array([h[0:-1],
2.*(h[0:-1] + h[1:]),
h[1:]]), [-1, 0, 1], n-2, n-2)
QT = sp.sparse.spdiags(np.array([1. / h[0:-1],
-(1. / h[0:-1] + 1. / h[1:]),
1. / h[1:]]), [0, -1, -2], n, n-2).transpose()
# solve for the scaled second derivatives u and
# for the function values a at the knots (if p = 1, a = y)
v = 6*(1-p)*QT.dot(sp.sparse.spdiags(1. / w.flatten(), 0, len(w), len(w))).dot(QT.T) + p*R
u = linalg.spsolve(v, QT.dot(y))
a = y - 6*(1-p)*sp.sparse.spdiags(1. / w.flatten(), 0, len(w), len(w)).dot(QT.T).dot(u)
# derivatives at all but the last knot for the piecewise cubic spline
aa = a[0:-1]
cc = np.zeros(y.shape)
cc[1:n-1] = 6 * p * u
dd = np.diff(cc) / h
cc = cc[0:-1]
bb = np.diff(a) / h - cc / 2 * h - dd / 6 * h ** 2
# shape coefficients and create piece-wise polynomial
coefs = np.concatenate((dd.reshape((1, dd.size)) / 6,
cc.reshape((1, cc.size)) / 2,
bb.reshape((1, bb.size)),
aa.reshape((1, aa.size))))
    ret = interpolate.PPoly(coefs, x)
# check if we should evaluate the smoothing spline
xi = np.array(xi)
if xi.size != 0:
ret = ret(xi)
return ret
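if __name__ == '__main__':
    # Smoke test on synthetic data (values here are arbitrary; p close to 1
    # favors fitting the data, p close to 0 favors smoothness).
    x = np.linspace(0, 2 * np.pi, 30)
    y = np.sin(x) + 0.1 * np.random.randn(30)
    xi = np.linspace(0, 2 * np.pi, 200)
    yi = csaps(x, y, 0.5, xi)
    print(yi.shape)  # (200,)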
|
nilq/baby-python
|
python
|
# PyArmor-protected module: the original source is a single
# __pyarmor__(__name__, __file__, <encrypted payload>, 2) bootstrap call whose
# multi-kilobyte byte-string payload is obfuscated bytecode, not recoverable
# Python source.
|
nilq/baby-python
|
python
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import types
from recipe_engine.config import (
config_item_context, ConfigGroup, Single, Static)
from recipe_engine.config_types import Path
from . import api as syzygy_api
def BaseConfig(CHECKOUT_PATH, **dummy_kwargs):
return ConfigGroup(
CHECKOUT_PATH = Static(CHECKOUT_PATH),
official_build = Single(bool, empty_val=False, required=False),
unittests_gypi = Single(Path, required=False),
version_file = Single(Path, required=False),
)
config_ctx = config_item_context(BaseConfig)
@config_ctx(is_root=True)
def BASE(dummy_c):
pass
@config_ctx()
def syzygy(c):
c.official_build = False
c.unittests_gypi = c.CHECKOUT_PATH.join('syzygy', 'unittests.gypi')
c.version_file = c.CHECKOUT_PATH.join('syzygy', 'SYZYGY_VERSION')
@config_ctx(includes=['syzygy'])
def syzygy_x64(dummy_c):
pass
@config_ctx()
def syzygy_official(c):
c.official_build = True
c.unittests_gypi = c.CHECKOUT_PATH.join('syzygy', 'unittests.gypi')
c.version_file = c.CHECKOUT_PATH.join('syzygy', 'SYZYGY_VERSION')
@config_ctx()
def kasko_official(c):
c.official_build = True
c.unittests_gypi = c.CHECKOUT_PATH.join('syzygy', 'kasko', 'unittests.gypi')
c.version_file = c.CHECKOUT_PATH.join('syzygy', 'kasko', 'VERSION')
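# Note: syzygy_x64 reuses the 'syzygy' settings via includes, while the
# *_official contexts set official_build=True and point at their own
# unittests.gypi/version files rather than including 'syzygy'.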
|
nilq/baby-python
|
python
|
"""Test entry point"""
import aiohttp
import pyoctoprintapi
import argparse
import asyncio
import logging
from types import MappingProxyType
LOGGER = logging.getLogger(__name__)
async def main(host, user, port, use_ssl):
"""Main function."""
LOGGER.info("Starting octoprint")
async with aiohttp.ClientSession(cookie_jar=aiohttp.CookieJar(unsafe=True)) as websession:
websession._default_headers = MappingProxyType({}) # type: ignore
client = pyoctoprintapi.OctoprintClient(host, websession, port, use_ssl, "/")
api_key = await client.request_app_key("testapp", user, 60)
client.set_api_key(api_key)
printer_info = await client.get_printer_info()
job_info = await client.get_job_info()
server_info = await client.get_server_info()
tracking_info = await client.get_tracking_info()
discovery_info = await client.get_discovery_info()
camera_info = await client.get_webcam_info()
await websession.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("host", type=str)
parser.add_argument("user", type=str)
parser.add_argument("-p", "--port", type=int, default=80)
parser.add_argument("-s", "--ssl", type=bool, default=False)
parser.add_argument("-d", "--debug", type=bool, default=False)
args = parser.parse_args()
LOG_LEVEL = logging.INFO
if args.debug:
LOG_LEVEL = logging.DEBUG
logging.basicConfig(format="%(message)s", level=LOG_LEVEL)
try:
asyncio.run(
main(args.host, args.user, args.port, args.ssl)
)
except KeyboardInterrupt:
pass
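# Example invocation (host and user are placeholders; the script name is assumed):
#   python test_entry.py octopi.local admin -p 5000 --debug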
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of CERN Search.
# Copyright (C) 2018-2021 CERN.
#
# Citadel Search is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Signal Receivers."""
from flask import current_app
from invenio_files_rest.models import ObjectVersion
from cern_search_rest_api.modules.cernsearch.api import CernSearchRecord
from cern_search_rest_api.modules.cernsearch.files import (
delete_all_record_files,
delete_file_instance,
delete_previous_record_file_if_exists,
delete_record_file,
persist_file_content,
record_from_object_version,
)
from cern_search_rest_api.modules.cernsearch.indexer import CernSearchRecordIndexer
from cern_search_rest_api.modules.cernsearch.tasks import process_file_async
def file_uploaded_listener(obj: ObjectVersion = None):
"""Process file function calls file processor async."""
current_app.logger.debug("File uploaded listener: %s", str(obj))
delete_previous_record_file_if_exists(obj)
process_file_async.delay(str(obj.bucket_id), obj.key)
def file_processed_listener(app, processor_id, file: ObjectVersion, data):
"""Finish file processing.
1. Persist extracted content
2. Index extracted content
3. Delete record file.
"""
current_app.logger.debug("File processed listener: %s with processor %s", str(file), processor_id)
file_content = __extract_content(data)
if current_app.debug:
for key in file_content:
if key == "content":
current_app.logger.debug("File processed listener: has content %s ", bool(file_content[key]))
else:
current_app.logger.debug("File processed listener: %s - %s ", key, file_content[key])
record = record_from_object_version(file)
persist_file_content(record, file_content, file.basename)
CernSearchRecordIndexer().index(record)
# delete real file from filesystem only after indexing successfully
delete_file_instance(file)
def file_deleted_listener(obj: ObjectVersion = None):
"""File deleted through api calls: cleanup files and reindex."""
current_app.logger.debug("File deleted listener: %s", str(obj))
record = record_from_object_version(obj)
delete_record_file(record, obj)
CernSearchRecordIndexer().index(record)
def record_deleted_listener(sender, record: CernSearchRecord, *args, **kwargs):
"""Record deleted through api calls: cleanup files."""
current_app.logger.debug("File deleted listener: %s", str(record))
delete_all_record_files(record)
def __extract_content(data: dict):
return data
|
nilq/baby-python
|
python
|
from pyrosetta import *
from roseasy.movers import constraint
def insert_alas(pose, position, length, insert_after=True, reset_fold_tree=True, fold_tree_root=1):
    '''Insert a poly-ALA peptide before or after a given position.
Set the fold tree to have a cutpoint before or after inserted residues.
Author: XingJie Pan
'''
assert(1 <= position <= pose.size())
# Set the fold tree with a single cutpoint
def sub_fold_tree_add_edges_no_jump(ft, root, start, stop):
        '''Add edges to a sub-fold-tree that does not have
        any jumps.'''
if start < root:
ft.add_edge(root, start, -1)
if stop > root:
ft.add_edge(root, stop, -1)
if reset_fold_tree:
cutpoint = position if insert_after else position - 1
ft = rosetta.core.kinematics.FoldTree()
if fold_tree_root <= cutpoint and cutpoint < pose.size():
sub_root = pose.size()
ft.add_edge(fold_tree_root, sub_root, 1)
sub_fold_tree_add_edges_no_jump(ft, sub_root, cutpoint + 1, pose.size())
sub_fold_tree_add_edges_no_jump(ft, fold_tree_root, 1, cutpoint)
elif fold_tree_root > cutpoint and cutpoint > 0:
sub_root = 1
ft.add_edge(fold_tree_root, sub_root, 1)
sub_fold_tree_add_edges_no_jump(ft, sub_root, 1, cutpoint)
sub_fold_tree_add_edges_no_jump(ft, fold_tree_root, cutpoint + 1, pose.size())
else:
sub_fold_tree_add_edges_no_jump(ft, fold_tree_root, 1, pose.size())
pose.fold_tree(ft)
# Append the residues
residue_type_set = pose.residue_type_set_for_pose()
new_rsd = rosetta.core.conformation.ResidueFactory.create_residue( residue_type_set.name_map("ALA") )
for i in range(length):
if insert_after:
pose.conformation().safely_append_polymer_residue_after_seqpos(new_rsd, position + i, True)
pose.set_omega(position + i, 180)
else:
pose.conformation().safely_prepend_polymer_residue_before_seqpos(new_rsd, position, True)
pose.set_omega(position, 180)
if insert_after:
rosetta.core.conformation.idealize_position(position + length, pose.conformation())
if position + length + 1 <= pose.size():
rosetta.core.conformation.idealize_position(position + length + 1, pose.conformation())
else:
if position - 1 > 0:
rosetta.core.conformation.idealize_position(position - 1, pose.conformation())
rosetta.core.conformation.idealize_position(position, pose.conformation())
def mutate_residues(pose, res_list, aa_list, protein_only=True):
'''Mutate a list of residues. The list of AAs could
either be 1 letter code or 3 letter code.
Author: XingJie Pan
'''
aa_name_map = {'A':'ALA', 'P':'PRO', 'V':'VAL', 'L':'LEU', 'I':'ILE', 'M':'MET',
'F':'PHE', 'Y':'TYR', 'W':'TRP', 'S':'SER', 'T':'THR', 'C':'CYS',
'K':'LYS', 'R':'ARG', 'H':'HIS', 'D':'ASP', 'E':'GLU', 'N':'ASN',
'Q':'GLN', 'G':'GLY'}
mutater = rosetta.protocols.simple_moves.MutateResidue()
for i in range(len(res_list)):
if protein_only and (not pose.residue(res_list[i]).is_protein()):
continue
name = aa_list[i] if len(aa_list[i]) == 3 else aa_name_map[aa_list[i]]
mutater.set_res_name(name)
mutater.set_target(res_list[i])
mutater.apply(pose)
def add_aas(pose, position, sequence, pdbnum=False, chain='A'):
if pdbnum:
position = pose.pdb_info().pdb2pose(chain, position)
insert_alas(pose, position, len(sequence))
close_helix_by_minimization(pose, position, position +
len(sequence) + 2, position + 1, position + len(sequence) +
1)
mutate_residues(pose, list(range(position + 1, position + 1 +
len(sequence))), list(sequence), True)
def close_helix_by_minimization(pose, movable_region_start, movable_region_end, helix_start, helix_end):
'''Close a gap inside a helix by minimization.
Return true if the gap could be closed.
'''
# Make a clone of poly ALA pose for minimization
#simple_pose_moves.mutate_pose_to_single_AA(pose, 'ALA')
rosetta.core.pose.correctly_add_cutpoint_variants(pose)
# Set hydrogen bond constraints for the linkers and helix
linker_residues = list(range(movable_region_start, helix_start + 1)) + list(range(helix_end, movable_region_end + 1))
linker_hbonds = find_bb_hbonds_involving_residues(pose, linker_residues)
pose.constraint_set().clear()
helix_hbs = [(i + 4, i) for i in range(helix_start, helix_end - 3)]
constraint.add_constraints_to_pose(pose, constraint.get_bb_hbond_constraint(linker_hbonds + helix_hbs))
# Set score function
sfxn = rosetta.core.scoring.get_score_function()
sfxn.set_weight(rosetta.core.scoring.base_pair_constraint, 1) #H-bond constraint
# Set movemap
mm = rosetta.core.kinematics.MoveMap()
for i in range(movable_region_start, movable_region_end + 1):
mm.set_bb(i, True)
# Set the minimization mover
min_opts = rosetta.core.optimization.MinimizerOptions( "lbfgs_armijo_nonmonotone", 0.01, True )
min_mover = rosetta.protocols.minimization_packing.MinMover()
min_mover.movemap(mm)
min_mover.min_options(min_opts)
# Close the chain
for chainbreak_weight in [0.5, 1, 5, 10]:
sfxn.set_weight(rosetta.core.scoring.chainbreak, chainbreak_weight)
min_mover.score_function(sfxn)
min_mover.apply(pose)
chainbreak_energy = pose.energies().total_energies()[rosetta.core.scoring.chainbreak]
if chainbreak_energy > 0.2:
return False
# Minimize without constraints
sfxn.set_weight(rosetta.core.scoring.base_pair_constraint, 0)
min_mover.score_function(sfxn)
min_mover.apply(pose)
return True
def find_bb_hbonds_involving_residues(pose, residues):
'''Find backbone hbonds involving a given set of residues.
An Hbond is defined as (donor_res, acceptor_res).
Ignore the terminal residues.
'''
hbset = rosetta.core.scoring.hbonds.HBondSet(pose, bb_only=True)
hbonds = []
for i in range(1, hbset.nhbonds() + 1):
acc = hbset.hbond(i).acc_res()
don = hbset.hbond(i).don_res()
# Ignore terminal residues
if acc in [1, pose.size()] or don in [1, pose.size()]:
continue
if acc in residues or don in residues:
hbonds.append((don, acc))
return hbonds
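# Example sketch (requires a licensed PyRosetta build; 'input.pdb' is a
# placeholder structure):
#   init()
#   pose = pose_from_pdb('input.pdb')
#   add_aas(pose, 20, 'AAG')  # insert Ala-Ala-Gly after residue 20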
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
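# Read four integers a b c d from stdin; print the Manhattan distance
# between (a, b) and (c, d), plus one.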
a, b, c, d = map(int, open(0).read().split())
print(abs(a-c) + abs(b-d) + 1)
|
nilq/baby-python
|
python
|
"""
Generate matched synthetic lesions dataset
Authors: Chris Foulon & Michel Thiebaut de Scotten
"""
import os
import argparse
import random
import numpy as np
import json
import csv
import nibabel as nib
import nilearn
from nilearn.masking import compute_multi_background_mask, intersect_masks
from nilearn.image import threshold_img
from sklearn.cluster import KMeans
# input: /data/Chris/lesionsFormated
def create_coverage_mask(image_path_list):
nii_list = []
for f in image_path_list:
if not os.path.isfile(f):
raise ValueError('{} is not an existing file'.format(f))
        nii_list.append(nib.load(f))
return compute_multi_background_mask(nii_list, threshold=0, connected=False, n_jobs=-1)
def create_lesion_set(coverage_mask, roi_size, output_path=None):
mask_coord = np.where(coverage_mask.get_fdata())
mask_coord = [(mask_coord[0][i], mask_coord[1][i], mask_coord[2][i]) for i, _ in enumerate(mask_coord[0])]
k = int(np.floor(len(mask_coord) / roi_size))
if k == 0:
return None
print('Running KMeans with k = {}'.format(k))
kmeans = KMeans(k).fit(mask_coord)
kmeans_labels_img = kmeans.labels_
new_data = np.zeros(coverage_mask.shape, int)
for ind, c in enumerate(mask_coord):
# KMeans labels start at 0, to avoid the first cluster to be in the 0 background of the image we add 1
new_data[c] = kmeans_labels_img[ind] + 1
new_nii = nib.Nifti1Image(new_data, coverage_mask.affine)
if output_path is not None and output_path != '':
nib.save(new_nii, output_path)
return new_nii
def split_labels(labels_img, output_folder=None):
if not isinstance(labels_img, nib.Nifti1Image):
raise TypeError('labels_img must be an instance of nibabel.Nifti1Image')
data = labels_img.get_fdata()
affine = labels_img.affine
o_max = np.amax(data)
label_img_list = []
if output_folder is not None:
if not os.path.exists(output_folder):
os.mkdir(output_folder)
for i in np.arange(1, o_max + 1):
label = np.array(np.where(data == i))
mask = np.zeros(data.shape)
mask[label[0, ], label[1, ], label[2, ]] = i
nii_label = nib.Nifti1Image(mask, affine)
label_img_list.append(nii_label)
if output_folder is not None:
path = os.path.join(output_folder, 'label_{}.nii.gz'.format(str(i)))
nib.save(nii_label, path)
return label_img_list
def print_imgs_avg_size(list_img):
sizes = []
for img in list_img:
sizes.append(len(np.where(img.get_fdata())[0]))
print('Mean size of the images: {}'.format(np.mean(sizes)))
def main():
parser = argparse.ArgumentParser(description='Generate matched synthetic lesions dataset')
paths_group = parser.add_mutually_exclusive_group(required=True)
paths_group.add_argument('-p', '--input_path', type=str, help='Root folder of the lesion dataset')
    paths_group.add_argument('-li', '--input_list', type=str, help='Text file containing the list of lesion files')
paths_group.add_argument('-m', '--mask', type=str, help='region where the synthetic lesions will be generated')
    parser.add_argument('-o', '--output', type=str, required=True, help='output folder')
    parser.add_argument('-fwhm', '--smoothing_param', type=int, default=12,
                        help='fwhm parameter to nilearn smooth_img function')
parser.add_argument('-thr', '--smoothing_threshold', type=float, default=0.5,
help='Threshold applied on the smoothing')
# parser.add_argument('-v', '--verbose', default='info', choices=['none', 'info', 'debug'], nargs='?', const='info',
# type=str, help='print info or debugging messages [default is "info"] ')
args = parser.parse_args()
args.output = os.path.abspath(args.output)
if args.mask is not None:
args.mask = os.path.abspath(args.mask)
if not os.path.exists(args.mask):
raise ValueError('The mask {} does not exist'.format(args.mask))
coverage_mask = nib.load(args.mask)
else:
if args.input_path is not None:
les_list = [os.path.join(args.input_path, f) for f in os.listdir(args.input_path)]
else:
if not os.path.exists(args.input_list):
raise ValueError(args.input_list + ' does not exist.')
if args.input_list.endswith('.csv'):
with open(args.input_list, 'r') as csv_file:
les_list = []
for row in csv.reader(csv_file):
if len(row) > 1:
les_list += [r for r in row]
else:
les_list.append(row[0])
else:
# default delimiter is ' ', it might need to be changed
les_list = np.loadtxt(args.input_list, dtype=str, delimiter=' ')
les_list = [os.path.abspath(f) for f in les_list]
coverage_mask = create_coverage_mask(les_list)
nib.save(coverage_mask, os.path.join(args.output, 'coverage_mask.nii.gz'))
thr = args.smoothing_threshold
# match +-10% size random in the pool
# iterate on sizes from the list in master.sh
roi_size_list = ['300000', '200000', '120000', '110000', '100000', '90000', '80000', '70000', '60000', '50000',
'40000', '30000', '20000', '10000', '9000', '8000', '7000', '6000', '5000', '4000', '3000', '2000',
'1000', '900', '800', '700', '600', '500', '400', '300', '200', '100', '35000', '25000', '15000']
# just for testing
# roi_size_list = ['3000', '4000', '5000']
# roi_size_list = [6998, 4275, 2300, 11945, 96, 5322, 5604, 8229, 6334, 3765, 8225, 449, 10305, 1755, 753, 2378,
# 2834, 4726, 24041,10119, 8366, 24358, 5175, 8380, 2592, 3298, 3946, 11453, 7328, 3073, 5104,
# 1065, 2532, 4849, 5930, 27200, 304]
synth_lesion_size_dict = {}
for s in roi_size_list:
print('Running the KMeans with ROIsize = {}'.format(s))
labels_img = create_lesion_set(coverage_mask, int(s), os.path.join(args.output, 'labels_{}.nii.gz'.format(s)))
if labels_img is None:
print('cluster size too big compared to the mask')
continue
label_img_list = split_labels(labels_img)
smoothed_label_list = [nilearn.image.smooth_img(label_img, args.smoothing_param)
for label_img in label_img_list]
smoothed_thr_label_list = [threshold_img(nii, thr) for nii in smoothed_label_list]
smoothed_thr_binarized_label_list = [nilearn.image.math_img('img > {}'.format(thr), img=img)
for img in smoothed_thr_label_list]
smoothed_thr_binarized_masked_label_list = [intersect_masks([nii, coverage_mask], 1, True)
for nii in smoothed_thr_binarized_label_list]
print_imgs_avg_size(smoothed_thr_binarized_masked_label_list)
for lesion in smoothed_thr_binarized_masked_label_list:
lesion_size = len(np.where(lesion.get_fdata())[0])
if lesion_size not in synth_lesion_size_dict:
file_name = 'synth_les_{}.nii.gz'.format(lesion_size)
file_path = os.path.join(args.output, file_name)
synth_lesion_size_dict[lesion_size] = [file_path]
else:
file_name = 'synth_les_{}_{}.nii.gz'.format(lesion_size, len(synth_lesion_size_dict[lesion_size]))
file_path = os.path.join(args.output, file_name)
synth_lesion_size_dict[lesion_size].append(file_path)
nib.save(lesion, file_path)
with open(os.path.join(args.output, '__lesion_dict.json'), 'w+') as out_file:
json.dump(synth_lesion_size_dict, out_file, indent=4)
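# Example invocations (hypothetical script/file names; a sketch, not part of the original):
#   python generate_synthetic_lesions.py -m brain_mask.nii.gz -o ./synth_out -fwhm 12 -thr 0.5
#   python generate_synthetic_lesions.py -p ./lesion_dataset -o ./synth_out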
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Mar 18th 10:58:37 2016
train a continuous-time sequential model
@author: hongyuan
"""
import pickle
import time
import numpy
import theano
from theano import sandbox
import theano.tensor as tensor
import os
import sys
#import scipy.io
from collections import defaultdict
from theano.tensor.shared_randomstreams import RandomStreams
import modules.utils as utils
import modules.models as models
import modules.optimizers as optimizers
import modules.controllers as controllers
import modules.data_processers as data_processers
import run_models
import datetime
dtype=theano.config.floatX
#
import argparse
__author__ = 'Hongyuan Mei'
def main():
parser = argparse.ArgumentParser(
        description='Training model ... '
)
#
parser.add_argument(
'-m', '--Model', required=True,
choices = ['hawkes', 'hawkesinhib', 'conttime'],
help='Which model to train? hawkes (SE-MPP)? hawkesinhib (D-SM-MPP)? conttime (N-SM-MPP)?'
)
parser.add_argument(
'-fd', '--FileData', required=True,
help='Path of the dataset (e.g. ./data/data_hawkes/)'
)
#
parser.add_argument(
        '-tr', '--TrainRatio', #required=False,
        default = 1.0, type = float,
        help='Fraction of the training data to use (in (0, 1])'
    )
#
parser.add_argument(
'-cl2', '--CoefL2', #required=False,
default = 0.0, type = float,
help='Coefficient of L2 norm'
)
#
parser.add_argument(
'-d', '--DimLSTM', #required=False,
default = 64, type = int,
help='Dimension of LSTM model '
)
parser.add_argument(
'-s', '--Seed', #required=False,
default = 12345, type = int,
help='Seed of random state'
)
#
parser.add_argument(
'-fp', '--FilePretrain', required=False,
help='File of pretrained model (e.g. ./tracks/track_PID=XX_TIME=YY/model.pkl)'
)
parser.add_argument(
'-tp', '--TrackPeriod', #required=False,
default = 1000, type = int,
help='Track period of training'
)
parser.add_argument(
'-me', '--MaxEpoch', #required=False,
default = 50, type = int,
help='Max epoch number of training'
)
parser.add_argument(
'-sb', '--SizeBatch', #required=False,
default = 10, type = int,
help='Size of mini-batch'
)
parser.add_argument(
'-op', '--Optimizer', #required=False,
default = 'adam', type = str,
choices = ['adam', 'sgd'],
help='Optimizer of training'
)
parser.add_argument(
'-mt', '--MultipleTrain', #required=False,
default = 1, type = int,
help='Multiple of events to sample (integral) for training'
)
parser.add_argument(
'-md', '--MultipleDev', #required=False,
default = 10, type = int,
help='Multiple of events to sample (integral) for dev'
)
parser.add_argument(
'-wt', '--WhatTrack', #required=False,
default = 'loss', type = str,
choices = ['loss', 'rmse', 'rate'],
        help='What to track for early stopping?'
)
parser.add_argument(
'-ls', '--LossType', #required=False,
default = 'loglikehood', type = str,
choices = ['loglikehood', 'prediction'],
        help='Which loss to optimize?'
)
parser.add_argument(
'-lr', '--LearnRate', #required=False,
default = 1e-3, type = float,
help='What learning rate to use ?'
)
parser.add_argument(
'-pp', '--PartialPredict', #required=False,
default = 0, type = int,
choices = [0, 1],
        help='Whether to only predict part of the stream? 0--False, 1--True'
)
parser.add_argument(
'-ps', '--PruneStream', #required=False,
default = 0, type = int,
        help='Prune stream? Give me the index ! 0 is nothing to prune. Note : index specifies a COMBINATION of event types by its binary coding (e.g. 0--00000, 1--00001, 31--11111 where 1 means this type is pruned)!'
)
parser.add_argument(
'-ds', '--DevIncludedSetting',#required=False,
default = 0, type = int,
choices = [0,1],
help='Alternative setting (fix tuned hyper-params, train on combo of train and dev, then test)? 0--False, 1--True Note: in our project, this is ONLY used to compare prev work on MIMIC, SO and Financial datasets'
)
parser.add_argument(
'-pf', '--PredictFirst', #required=False,
default = 1, type = int,
choices = [0,1],
help='Predict the first event ? 0--False, 1--True Note: in our project, this is False ONLY on MIMIC, SO and Financial datasets'
)
parser.add_argument(
'-pl', '--PredictLambda', #required=False,
default = 0, type = int,
choices = [0,1],
help='Predict Lambda (intensity) ? 0--False, 1--True Note: this is used ONLY in intensity evaluation'
)
    '''
    In the dev-included setting, the model is trained on the entire training set and evaluated on the test set after training, i.e., there is no dev/validation set.
    We only use this setting when comparing with prior work on their datasets.
    Otherwise, we use a dev/validation set to tune params and early stop, and only evaluate on the test set after the model is fixed.
    '''
#
#
args = parser.parse_args()
#
#
args.TrainRatio = numpy.float32(args.TrainRatio)
assert(args.TrainRatio > 0.0 and args.TrainRatio <= 1.0)
#
args.CoefL2 = numpy.float32(args.CoefL2)
assert(args.CoefL2 >= 0.0)
args.DimLSTM = numpy.int32(args.DimLSTM)
args.Seed = numpy.int32(args.Seed)
args.TrackPeriod = numpy.int32(args.TrackPeriod)
args.MaxEpoch = numpy.int32(args.MaxEpoch)
args.SizeBatch = numpy.int32(args.SizeBatch)
args.MultipleTrain = numpy.int32(args.MultipleTrain)
args.MultipleDev = numpy.int32(args.MultipleDev)
#
if args.LossType == 'prediction':
assert(args.WhatTrack == 'rmse' or args.WhatTrack == 'rate')
else:
assert(args.WhatTrack == 'loss')
#
args.LearnRate = numpy.float32(args.LearnRate)
assert(args.LearnRate > 0.0)
#
if args.PartialPredict == 0:
args.PartialPredict = False
else:
args.PartialPredict = True
#
args.PruneStream = numpy.int32(args.PruneStream)
#
if args.DevIncludedSetting == 0:
args.DevIncludedSetting = False
else:
args.DevIncludedSetting = True
#
if args.PredictFirst == 0:
args.PredictFirst = False
else:
args.PredictFirst = True
#
if args.PredictLambda == 0:
args.PredictLambda = False
else:
args.PredictLambda = True
#
#
id_process = os.getpid()
time_current = datetime.datetime.now().isoformat()
#
flag_1 = (
args.Model == 'hawkes' or args.Model == 'hawkesinhib' or args.Model == 'neural' or args.Model == 'neuralgeneral' or args.Model == 'neuraladapt' or args.Model == 'neuraltime' or args.Model == 'neuralgeneraltime' or args.Model == 'neuraladapttime'
)
flag_2 = (
args.Model == 'nanmodel'
)
flag_3 = (
args.Model == 'neuraladapttimescale' or args.Model == 'hawkesinhibscale' or args.Model == 'neuralreduce' or args.Model == 'conttime'
)
#
# conttime is the one with continuous time LSTM
#
assert(flag_1 or flag_2 or flag_3)
# we stop using neuralsimple
# +time means we encode time using neural networks
#
tag_model = '_PID='+str(id_process)+'_TIME='+time_current
#
#file_log = os.path.abspath(
# './logs/log' + tag_model + '.txt'
#)
#path_save = os.path.abspath(
# './models/models' + tag_model + '/'
#)
if 'meme' in args.FileData:
tag_track = '_meme'
elif 'retweet' in args.FileData:
tag_track = '_retweet'
elif 'mimic' in args.FileData:
tag_track = '_mimic'
elif '_so' in args.FileData:
tag_track = '_so'
elif '_bookorder' in args.FileData:
tag_track = '_bookorder'
elif '_missing' in args.FileData:
tag_track = '_missing'
else:
tag_track = ''
#
path_track = './tracks'+ tag_track +'/track' + tag_model + '/'
file_log = os.path.abspath(
path_track + 'log.txt'
)
#path_save = os.path.abspath(
# path_track + 'models/'
#)
path_save = path_track
#
command_mkdir = 'mkdir -p ' + os.path.abspath(
path_track
)
os.system(command_mkdir)
#
#
## show values ##
print ("PID is : %s" % str(id_process) )
print ("TIME is : %s" % time_current )
print ("Seed is : %s" % str(args.Seed) )
#
print ("Model is : %s" % args.Model )
print ("CoefL2 is : %s" % str(args.CoefL2) )
print ("FileData is : %s" % args.FileData )
print ("TrainRatio is : %s" % str(args.TrainRatio) )
if 'neural' in args.Model or 'nanmodel' in args.Model:
print ("DimLSTM is : %s" % str(args.DimLSTM) )
print ("FilePretrain is : %s" % args.FilePretrain)
print ("TrackPeriod is : %s" % str(args.TrackPeriod) )
print ("MaxEpoch is : %s" % str(args.MaxEpoch) )
print ("SizeBatch is : %s" % str(args.SizeBatch) )
print ("Optimizer is : %s" % args.Optimizer)
print ("LossType is : %s" % args.LossType)
print ("WhatTrack is : %s" % args.WhatTrack)
print ("LearnRate is : %s" % args.LearnRate)
print ("PartialPredict is : %s" % args.PartialPredict)
print ("PruneStream is : %s" % str(args.PruneStream) )
print ("Dev Included Setting is: %s" % args.DevIncludedSetting )
print ("PredictFirst is: %s" % args.PredictFirst )
print ("PredictLambda is: %s" % args.PredictLambda )
#
flag_show_1 = (
args.Model == 'hawkesinhib' or args.Model == 'neural' or args.Model == 'neuralgeneral' or args.Model == 'neuraladapt' or args.Model == 'neuralsimple' or args.Model == 'neuraltime' or args.Model == 'neuralgeneraltime' or args.Model == 'neuraladapttime'
)
flag_show_2 = (
args.Model == 'hawkesinhibscale' or args.Model == 'neuraladapttimescale' or args.Model == 'neuralreduce' or args.Model == 'conttime'
)
#
    if (flag_show_1 or flag_show_2):
print ("Multiple for training is : %s" % args.MultipleTrain)
print ("Multiple for dev is : %s" % args.MultipleDev)
#
dict_args = {
'PID': id_process,
'TIME': time_current,
'Seed': args.Seed,
#
'Model': args.Model,
'CoefL2': args.CoefL2,
'FileData': args.FileData,
'TrainRatio': args.TrainRatio,
'DimLSTM': args.DimLSTM,
'FilePretrain': args.FilePretrain,
'TrackPeriod': args.TrackPeriod,
'MaxEpoch': args.MaxEpoch,
'SizeBatch': args.SizeBatch,
'Optimizer': args.Optimizer,
'MultipleTrain': args.MultipleTrain,
'MultipleDev': args.MultipleDev,
'LossType': args.LossType,
'WhatTrack': args.WhatTrack,
'LearnRate': args.LearnRate,
'PartialPredict': args.PartialPredict,
'PruneStream': args.PruneStream,
'DevIncludedSetting': args.DevIncludedSetting,
'PredictLambda': args.PredictLambda
}
#
input_train = {
'model': args.Model,
'seed_random': args.Seed,
'path_rawdata': args.FileData,
'ratio_train': args.TrainRatio,
'path_pre_train': args.FilePretrain,
'track_period': args.TrackPeriod,
'max_epoch': args.MaxEpoch,
'size_batch': args.SizeBatch,
'dim_model': args.DimLSTM,
'optimizer': args.Optimizer,
'save_file_path': path_save,
'log_file': file_log,
'args': dict_args,
'coef_l2': args.CoefL2,
'what_to_track': args.WhatTrack,
'loss_type': args.LossType,
'learn_rate': args.LearnRate,
'partial_predict': args.PartialPredict,
'prune_stream': args.PruneStream,
'di_setting': args.DevIncludedSetting,
'predict_lambda': args.PredictLambda
}
#
if '_so' in args.FileData or '_mimic' in args.FileData or '_bookorder' in args.FileData:
input_train['predict_first'] = False
else:
if args.PredictFirst:
input_train['predict_first'] = True
else:
input_train['predict_first'] = False
#
#
flag_multiple_1 = (
args.Model == 'hawkesinhib' or args.Model == 'neural' or args.Model == 'neuralgeneral' or args.Model == 'neuraladapt' or args.Model == 'neuralsimple' or args.Model == 'neuraltime' or args.Model == 'neuralgeneraltime' or args.Model == 'neuraladapttime'
)
flag_multiple_2 = (
args.Model == 'hawkesinhibscale' or args.Model == 'neuraladapttimescale' or args.Model == 'neuralreduce' or args.Model == 'conttime'
)
#
if (flag_multiple_1 or flag_multiple_2):
input_train['multiple_sample_for_train'] = numpy.int32(
args.MultipleTrain
)
input_train['multiple_sample_for_dev'] = numpy.int32(
args.MultipleDev
)
#
if args.Model == 'hawkes':
run_models.train_hawkes_ctsm(input_train)
elif args.Model == 'hawkesinhib' or args.Model == 'hawkesinhibscale':
run_models.train_hawkesinhib_ctsm(input_train)
elif args.Model == 'neural':
run_models.train_neural_hawkes_ctsm(input_train)
elif args.Model == 'neuralgeneral':
run_models.train_generalized_neural_hawkes_ctsm(
input_train, tag_neural_type = 'general'
)
elif args.Model == 'neuraladapt':
run_models.train_generalized_neural_hawkes_ctsm(
input_train, tag_neural_type = 'adaptive'
)
elif args.Model == 'neuralsimple':
run_models.train_generalized_neural_hawkes_ctsm(
input_train, tag_neural_type = 'simple'
)
elif args.Model == 'neuraltime':
run_models.train_neural_hawkes_ctsm_time(
input_train
)
elif args.Model == 'neuralgeneraltime':
run_models.train_generalized_neural_hawkes_ctsm_time(
input_train, tag_neural_type = 'general'
)
elif args.Model == 'neuraladapttime' or args.Model == 'neuraladapttimescale' or args.Model == 'neuralreduce' or args.Model == 'conttime':
if args.DevIncludedSetting:
run_models.train_generalized_neural_hawkes_ctsm_time_DevIncludedSetting(
input_train, tag_neural_type = 'adaptive'
)
else:
run_models.train_generalized_neural_hawkes_ctsm_time(
input_train, tag_neural_type = 'adaptive'
)
else:
print("Model not implemented yet !!! ")
#
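# Example invocation (hypothetical data path; a sketch, not part of the original):
#   python train_models.py -m conttime -fd ./data/data_hawkes/ -d 64 -sb 10 -me 50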
if __name__ == "__main__": main()
|
nilq/baby-python
|
python
|
from os import getenv
from rockset import Client, Q, F
rs = Client(api_key=getenv('ROCKSET_SECRET'), api_server='api.rs2.usw2.rockset.com')
def after_req(response):
    # Read the current hit count for the 'News' document, then write it back
    # incremented by one. Note this read-modify-write is not atomic, so
    # concurrent requests can lose updates.
    cnt = rs.sql(
        Q('NewsArchivesHits').where(F['_id'] == 'News').select('count')
    )[0]['count']
    rs.Collection.retrieve('NewsArchivesHits').add_docs(
        [
            {
                '_id': 'News',
                'count': cnt + 1
            }
        ]
    )
    return response
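# A minimal registration sketch, assuming a Flask app object `app` exists
# elsewhere (not shown in the original snippet):
#   app.after_request(after_req)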
|
nilq/baby-python
|
python
|
from datetime import datetime
from pydantic import BaseModel
from pydantic import Field
class TodoCreate(BaseModel):
title: str = Field(..., min_length=4, max_length=50, example="My first task")
class Todo(TodoCreate):
id: int = Field(...)
is_done: bool = Field(default=False)
    created_at: datetime = Field(default_factory=datetime.now)
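# A quick usage sketch (assumption: not part of the original module):
#   payload = TodoCreate(title="Write the docs")
#   todo = Todo(id=1, **payload.dict())
#   print(todo.created_at)  # evaluated per instance via default_factory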
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
'''
Copyright 2018, VDMS
Licensed under the terms of the BSD 2-clause license. See LICENSE file for terms.
/custdashboard/modify endpoint. Modifies the set of audits shown on a custom dashboard.
```swagger-yaml
/custdashboard/modify/{dash_id}/ :
get:
description: |
Modifies a custom audit by either adding, removing or setting an audit_id
or list of audit_ids
responses:
200:
description: OK
tags:
- dashboard
parameters:
- name: dash_id
in: path
description: |
Dashboard ID of the dashboard you wish to modify
schema:
type: string
required: true
- name: modifyorder
in: query
      description: |
        A dict that tells the system what it should do. Contains one or two keys:
        "add" with an audit_id or list of audit_ids to be added and/or "remove"
        with an audit_id or list of audit_ids to be removed. This is parsed
        by ast.literal_eval.
schema:
type: string
required: true
```
'''
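# Example request (hypothetical dashboard/audit IDs; a sketch, not part of the original):
#   GET /custdashboard/modify/12/?modifyorder={"add": [3, 4], "remove": 7}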
from flask import current_app, Blueprint, g, request, jsonify, send_from_directory
import json
import ast
import time
import os
import hashlib
import re
import requests
custdashboard_modify = Blueprint('api2_custdashboard_modify', __name__)
@custdashboard_modify.route("/custdashboard/modify", methods=['GET', 'POST'])
@custdashboard_modify.route("/custdashboard/modify/", methods=['GET', 'POST'])
@custdashboard_modify.route("/custdashboard/modify/<int:dash_id>", methods=['GET', 'POST'])
@custdashboard_modify.route("/custdashboard/modify/<int:dash_id>/", methods=['GET', 'POST'])
def api2_custdashboard_create(dash_id=None, modifyorder=None):
meta_dict = dict()
request_data = dict()
links_dict = dict()
error_dict = dict()
do_query = True
argument_error = False
api_error = False
where_clauses = list()
do_remove = False
do_add = False
remove_ids = list()
add_ids = list()
username = g.USERNAME
# Grab Audits and CustomDashboards From API to help validate.
audit_list_endpoint = g.HTTPENDPOINT + "/v2/auditlist/"
custdash_list_endpoint = g.HTTPENDPOINT + "/v2/custdashboard/list/"
valid_custdash_ids = list()
valid_audit_ids = list()
try:
audit_list_content = requests.get(audit_list_endpoint).content
custdash_list_content = requests.get(custdash_list_endpoint).content
except Exception as e:
error_dict["Error Getting Endpoint"] = "Error getting endpoint: " + \
str(e)
api_error = True
else:
try:
audit_list_content_string = audit_list_content.decode("utf-8")
custdash_list_content_string = custdash_list_content.decode(
"utf-8")
audit_list_content_object = json.loads(audit_list_content_string)
custdash_list_content_object = json.loads(
custdash_list_content_string)
except Exception as e:
api_error = True
error_dict["api_read_error"] = "Trouble reading data from endpoints. " + \
str(e)
else:
# Let's generate lists validation lists
valid_audit_ids = [id["attributes"]["audit_id"]
for id in audit_list_content_object["data"]]
valid_custdash_ids = [id["attributes"]["custdashboardid"]
for id in custdash_list_content_object["data"]]
if "dash_id" in request.args:
try:
dash_id = ast.literal_eval(request.args["dash_id"])
except Exception as e:
argument_error = True
error_dict["dash_id_parse_fail"] = "Failed to Parse Dash_id"
if type(dash_id) is int and dash_id in valid_custdash_ids and api_error == False:
# Valid dashboard id
pass
else:
argument_error = True
error_dict["dash_id_incorrect"] = "Either not a valid dash_id or not an integer"
if "modifyorder" not in request.args:
argument_error = True
error_dict["arg_error"] = "Need an order to modify with."
else:
try:
modifyorder = ast.literal_eval(request.args["modifyorder"])
except Exception as e:
argument_error = True
error_dict["modify_order_parse_fail"] = "Unabel to Parse Modify Order, it \
ast.literal_eval parsable?"
else:
if type(modifyorder) is not dict:
argument_error = True
error_dict["modify_order_bad_type"] = "Modify Order not parsed as \
dict"
else:
# Now testkeys
if "add" in modifyorder.keys() or "remove" in modifyorder.keys():
# Have at least one "proper" order
if "add" in modifyorder.keys():
# Do add stuff
if type(modifyorder["add"]) is list:
possible_id_list = [id for id in modifyorder["add"] if type(
id) is int and id > 0 and id in valid_audit_ids]
if len(possible_id_list) > 0:
# There are IDs
do_add = True
add_ids.extend(possible_id_list)
if type(modifyorder["add"]) is int:
if modifyorder["add"] > 0 and modifyorder["add"] in valid_audit_ids:
do_add = True
add_ids.extend(modifyorder["add"])
if "remove" in modifyorder.keys():
if type(modifyorder["remove"]) is list:
possible_id_list = [id for id in modifyorder["remove"] if type(
id) is int and id > 0 and id in valid_audit_ids]
if len(possible_id_list) > 0:
# There are IDs
do_remove = True
remove_ids.extend(possible_id_list)
elif type(modifyorder["remove"]) is int:
if modifyorder["remove"] > 0 and modifyorder["remove"] in valid_audit_ids:
do_remove = True
                            remove_ids.append(modifyorder["remove"])
if do_remove == False and do_add == False:
# None Came out right
argument_error = True
error_dict["incorrect_modify_order"] = "No modifies were accepted."
else:
# Order keys not given
argument_error = True
error_dict["order_dictionary_incorrect"] = True
meta_dict["version"] = 2
meta_dict["name"] = "Jellyfish API Version 2 Custdashboard Create "
meta_dict["status"] = "In Progress"
meta_dict["NOW"] = g.NOW
links_dict["parent"] = g.config_items["v2api"]["preroot"] + \
g.config_items["v2api"]["root"] + "/sapi"
requesttype = "custdashboard_modify"
remove_query = "delete from custdashboardmembers where fk_custdashboardid = %s and fk_audits_id = %s "
add_query = "replace into custdashboardmembers ( fk_custdashboardid, fk_audits_id ) VALUES ( %s , %s ) "
thathappened = dict()
if do_query and argument_error == False and api_error == False:
dash_modified = False
if do_add == True:
# Add all the items
thathappened["added"] = list()
for add_id in add_ids:
            # I want to add this ID
this_arg_list = [dash_id, add_id]
g.cur.execute(add_query, this_arg_list)
id_added = g.cur.lastrowid
thathappened["added"].append(id_added)
dash_modified = True
if do_remove == True:
thathappened["removed"] = remove_ids
for remove_id in remove_ids:
# I want to Remove these IDs
this_arg_list = [dash_id, remove_id]
g.cur.execute(remove_query, this_arg_list)
dash_modified = True
request_data["dash_id"] = dash_id
else:
dash_modified = False
if dash_modified == True:
response_dict = dict()
response_dict["meta"] = meta_dict
response_dict["data"] = thathappened
response_dict["links"] = links_dict
return jsonify(**response_dict)
else:
response_dict = dict()
response_dict["meta"] = meta_dict
response_dict["errors"] = error_dict
response_dict["links"] = links_dict
return jsonify(**response_dict)
|
nilq/baby-python
|
python
|
import click
import os
from click.exceptions import ClickException
from .dashboard import read_har_json, plot_har
@click.command()
@click.argument('path', type=click.Path(exists=True))
def plot(path):
"""
Plot HTTP Archive format Timings
:param path: Path containing HAR specs in json files
"""
data = []
har_files = [file for file in os.listdir(path) if file.endswith('.json')]
if not har_files:
        raise ClickException('No JSON files to process in the given path')
click.echo('***** Processing har files *****')
for har_file in har_files:
data.append(read_har_json(os.path.join(path, har_file), har_file))
plot_har(data)
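# Usage sketch (hypothetical file name; a sketch, not part of the original):
#   python har_plot.py ./har_specs/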
if __name__ == '__main__':
plot()
|
nilq/baby-python
|
python
|
import FWCore.ParameterSet.Config as cms
DQMStore = cms.Service("DQMStore",
enableMultiThread = cms.untracked.bool(True),
saveByLumi = cms.untracked.bool(False),
trackME = cms.untracked.string(''),
verbose = cms.untracked.int32(0)
)
|
nilq/baby-python
|
python
|
import sys
import math
import numpy as np
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
class QFT:
"""
Class which generates the circuit to perform the Quantum Fourier
Transform (or its inverse) as described in Mike & Ike Chapter 5.
(Michael A Nielsen and Isaac L Chuang. Quantum computation and quantum
information (10th anniv. version), 2010.)
For another example see Figure 1 of Daniel E Browne 2007 New J. Phys. 9 146
A QFT or iQFT circuit can be generated with a given instance of the
QFT class by calling the gen_circuit() method.
Attributes
----------
width : int
number of qubits
inverse : bool
Set to true to generate the inverse quantum fourier transform
kvals : bool
optional parameter that will change the angle of the controlled
rotations so that when the circuit is printed it will display
the same k values that are shown in Mike & Ike Chpt 5, Fig 5.1
(NOTE: the generated circuit will no longer be valid! This is
for visualization purposes only.)
barriers : bool
should barriers be included in the generated circuit
measure : bool
should a classical register & measurement be added to the circuit
regname : str
optional string to name the quantum and classical registers. This
allows for the easy concatenation of multiple QuantumCircuits.
qr : QuantumRegister
Qiskit QuantumRegister holding all of the quantum bits
cr : ClassicalRegister
Qiskit ClassicalRegister holding all of the classical bits
circ : QuantumCircuit
        Qiskit QuantumCircuit that represents the QFT circuit
"""
def __init__(self, width, approximation_degree, inverse=False, kvals=False, barriers=True,
measure=False, regname=None):
# number of qubits
self.nq = width
self.approximation_degree = approximation_degree
# set flags for circuit generation
self.inverse = inverse
self.kvals = kvals
self.barriers = barriers
self.measure = measure
# create a QuantumCircuit object
if regname is None:
self.qr = QuantumRegister(self.nq)
self.cr = ClassicalRegister(self.nq)
else:
self.qr = QuantumRegister(self.nq, name=regname)
self.cr = ClassicalRegister(self.nq, name='c'+regname)
# Have the option to include measurement if desired
if self.measure:
self.circ = QuantumCircuit(self.qr,self.cr)
else:
self.circ = QuantumCircuit(self.qr)
def inv_qft(self):
"""
Implement the inverse QFT on self.circ
j ranges from nq-1 -> 0
k ranges from nq-1 -> j+1
For each j qubit, a controlled cu1 gate is applied with target=j,
control=k (for each k).
cu1 = 1 0
0 e^(-2pi*i / 2^(k-j+1))
"""
for j in range(self.nq-1,-1,-1):
for k in range(self.nq-1,j,-1):
if self.kvals:
self.circ.cu1(-1*(k-j+1), self.qr[k], self.qr[j])
else:
self.circ.cu1(-1 * (2*np.pi) / (2**(k-j+1)),
self.qr[k],
self.qr[j])
self.circ.h(self.qr[j])
if self.barriers:
self.circ.barrier()
def reg_qft(self):
"""
Implement the QFT on self.circ
j ranges from 0 -> nq-1
k ranges from j+1 -> nq-1
For each j qubit, a controlled cu1 gate is applied with target=j,
control=k (for each k).
cu1 = 1 0
0 e^(2pi*i / 2^(k-j+1))
"""
for j in range(self.nq):
self.circ.h(self.qr[j])
for k in range(j+1,self.nq):
if self.kvals:
self.circ.cu1(k-j+1, self.qr[k], self.qr[j])
else:
if k-j+1<=self.approximation_degree:
self.circ.cu1((2*np.pi)/(2**(k-j+1)),self.qr[k],self.qr[j])
if self.barriers:
self.circ.barrier()
def gen_circuit(self):
"""
Create a circuit implementing the UCCSD ansatz
Given the number of qubits and parameters, construct the
ansatz as given in Whitfield et al.
Returns
-------
QuantumCircuit
QuantumCircuit object of size nq with no ClassicalRegister and
no measurements
"""
if self.inverse:
self.inv_qft()
else:
self.reg_qft()
if self.measure:
self.circ.barrier()
self.circ.measure(self.qr,self.cr)
return self.circ
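# A minimal usage sketch (assumes a qiskit version that still provides cu1;
# not part of the original module):
#   qft = QFT(width=4, approximation_degree=4)
#   circ = qft.gen_circuit()
#   print(circ)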
|
nilq/baby-python
|
python
|
from breezycreate2 import _Create2
import time
# A simple melody that plays every time the bot is connected to.
MELODY = [('C4',11,0.3),
('C4',11,0.3),
('C4',11,0.3),
('C4',32,0.7),
('G4',32,0.7),
('F4',11,0.3),
('E4',11,0.3),
('D4',11,0.3),
('C5',64,1.2),
('G4',40,0.7),
('F4',11,0.3),
('E4',11,0.3),
('D4',11,0.3),
('C5',64,1.2),
('G4',40,0.7),
('F4',11,0.3),
('E4',11,0.3),
('F4',11,0.3),
('D4',64,2) ]
class SillyRobot:
def __init__(self):
self.robot = _Create2('COM3', 115200) # Connect to the bot through the serial connection
self.robot.full() # Put the robot in full mode
self.robot.digit_led_ascii('R4D4') # Displays the string on the robot's display
for triple in MELODY: # Play a simple melody
self.robot.play_note(triple[0], triple[1])
time.sleep(triple[2])
def close(self):
"""Closes connection to robot"""
self.robot.digit_led_ascii(" ") # Clears the led display
self.robot.destroy()
def move(self, speed, sleep):
"""Sends move command to robot to move forward or backward
Speed: -500 to 500
sleep: How long the robot should move"""
self.robot.drive(speed, 0) # Moves the robot forward at the specified speed
time.sleep(sleep) # Sleep while the robot moves
self.robot.drive(0, 0) # Stops the robot
def turn(self, speed, direction, sleep):
"""Sends move command to robot to run
speed: 0 to 500
dir: -1(CW) to 1(CCW)
sleep: How long the robot should turn
"""
if speed < 0: # If the speed input is below 0
speed = abs(speed)
self.robot.drive(speed, direction) # Have the robot turn a certain direction at a certain speed
time.sleep(sleep) # Sleep while the robot turns
self.robot.drive(0, 0) # Stop the robot
def enable_motors(self, main_speed, side_speed, vacuum_speed):
"""Turns the motors on in the rear of the robot
main_speed: Main Brush, -127 to 127, Positive spins inward
side_speed: Side Brush, -127 to 127, Positive speeds spin counterclockwise
vacuum_speed: Vacuum, 0 to 127, No Negative speeds allowed
"""
if vacuum_speed < 0:
vacuum_speed = abs(vacuum_speed)
self.robot.motors_pwm(main_speed, side_speed, vacuum_speed)
def disable_motors(self):
"""Turns the motors off in the rear of the robot"""
self.robot.motors_pwm(0, 0, 0)
def set_led(self, display_string):
"""Sets the robots led display
Must be 4 characters long
Space is represented by ' '"""
self.robot.digit_led_ascii(display_string)
def test_move(bot):
bot.move(100, 2) # Move the bot forward
bot.turn(100, -1, 1) # Turn the bot clockwise
bot.move(-100, 2) # Move the bot backwards
def test_motors(bot):
bot.enable_motors(127, 127, 127) # Turn the motors on
time.sleep(2) # Sleep for 2 seconds
bot.disable_motors() # Turn motors off
def main():
robot = SillyRobot() # Create a new robot
selection = 'n' # Sentinel variable
while selection != 'q':
print("Select an option\n" # Display a small menu
"1: Test Movement\n"
"2: Test Motors\n"
"Q: Quit")
selection = input() # Get input
print(selection)
if selection == '1': # Move the robot
test_move(robot)
elif selection == '2': # Enable the motors
test_motors(robot)
elif selection == 'q' or selection == 'Q': # Quit
selection = selection.lower()
print("Thanks for trying out the robot!")
else: # Invalid input
print("Invalid input. Please try again.")
robot.close() # Close the connection
if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
"""
This network is built on top of the SNGAN network implementation from: https://github.com/MingtaoGuo/sngan_projection_TensorFlow.git
"""
from explainer.ops import *
from tensorflow.contrib.layers import flatten
import pdb
def get_embedding_size():
return [64, 64, 4]
class Generator_Encoder_Decoder:
def __init__(self, name='GAN'):
self.name = name
def __call__(self, inputs, y, nums_class, num_channel=3):
with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
# input: [n, 64, 64, 3]
# Encoder
print("Encoder-Decoder")
print(inputs)
inputs = relu(conditional_batchnorm(inputs, "GBN1"))
embedding = conv("Gconv1", inputs, k_size=3, nums_out=4, strides=1) # [n, 64, 64, 4]
print(':', embedding)
inputs = relu(conditional_batchnorm(embedding, "GBN"))
inputs = conv("Gconv", inputs, k_size=3, nums_out=num_channel, strides=1) # [n, 64, 64, 3]
print(':', inputs)
return tf.nn.tanh(inputs), embedding
def var_list(self):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.name)
class Discriminator_Ordinal:
def __init__(self, name):
self.name = name
def __call__(self, inputs, y, nums_class, update_collection=None):
with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
# input: [n, 64, 64, 3]
print(inputs)
inputs = relu(conditional_batchnorm(inputs, "DBN1"))
inputs = conv("Dconv1", inputs, k_size=3, nums_out=4, strides=1) # [n, 64, 64, 4]
print(inputs)
inputs = global_sum_pooling(inputs) # [n, 4]
inputs = dense("Ddense", inputs, 1, update_collection, is_sn=False) # [n, 1]
return inputs
def var_list(self):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.name)
# class Generator_Encoder_Decoder:
# def __init__(self, name='GAN'):
# self.name = name
#
# def __call__(self, inputs, y, nums_class, num_channel=3):
# with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
# # input: [n, 64, 64, 3]
# # Encoder
# print("Encoder-Decoder")
# print(inputs)
# inputs = relu(conditional_batchnorm(inputs, "BN1"))
# embedding = conv("conv1", inputs, k_size=3, nums_out=64, strides=1) # [n, 64, 64, 64]
#
# print(':', embedding)
#
# inputs = relu(conditional_batchnorm(embedding, "BN"))
# inputs = conv("conv", inputs, k_size=3, nums_out=num_channel, strides=1) # [n, 64, 64, 3]
# print(':', inputs)
# return tf.nn.tanh(inputs), embedding
#
# def var_list(self):
# return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.name)
# class Discriminator_Ordinal:
# def __init__(self, name):
# self.name = name
#
# def __call__(self, inputs, y, nums_class, update_collection=None):
# with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
# # input: [n, 64, 64, 3]
# print(inputs)
# inputs = D_FirstResblock("ResBlock1", inputs, 64, update_collection, is_down=True) # [n, 32, 32, 64]
# print(inputs)
# inputs = relu(inputs)
# print(inputs) # [n, 4, 4, 512]
# inputs = global_sum_pooling(inputs) # [n, 1024]
# for i in range(0, nums_class - 1):
# if i == 0:
# temp = Inner_product(inputs, y[:, i + 1], 2, update_collection) # [n, 1024]
# else:
# temp = temp + Inner_product(inputs, y[:, i + 1], 2, update_collection) # [n, 1024]
# inputs = dense("dense", inputs, 1, update_collection, is_sn=True) # [n, 1]
# inputs = temp + inputs
# return inputs
#
# def var_list(self):
# return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.name)
class Discriminator_Contrastive:
# Compares two images and determines which "knob" has been shifted
def __init__(self, name='disentangler'):
self.name = name
def __call__(self, inputs, num_dims):
with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
# input: [n, 64, 64, 6]
print(inputs)
inputs = D_FirstResblock("ResBlock1", inputs, 64, None, is_down=True) # [n, 32, 32, 64]
print(inputs)
inputs = D_Resblock("ResBlock2", inputs, 128, None, is_down=True) # [n, 16, 16, 128]
print(inputs)
inputs = relu(inputs)
print(inputs) # [n, 16, 16, 128]
inputs = global_sum_pooling(inputs) # [n, 128]
print(inputs)
inputs = dense("dense", inputs, num_dims, None, is_sn=True) # [n, num_dims]
return inputs
def var_list(self):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.name)
# CSVAE modules
# One simple implementation with swiss roll data: https://github.com/kareenaaahuang/am207_final_project
# CSVAE architecture: Trying to replicate the architecture used in https://arxiv.org/abs/1812.06190
# "Our architectures consist of convolutional layers with ReLu activations which roughly follow that found in https://arxiv.org/abs/1512.09300."
# Here is the information found in Table 1 of "Autoencoding beyond pixels using a learned similarity metric" https://arxiv.org/abs/1512.09300
# Encoder
# 5×5 64 conv. ↓, BNorm, ReLU
# 5×5 128 conv. ↓, BNorm, ReLU
# 5×5 256 conv. ↓, BNorm, ReLU
# 2048 fully-connected, BNorm, ReLU
# Dec
# 8·8·256 fully-connected, BNorm, ReLU
# 5×5 256 conv. ↑, BNorm, ReLU
# 5×5 128 conv. ↑, BNorm, ReLU
# 5×5 32 conv. ↑, BNorm, ReLU
# 5×5 3 conv., tanh
# Discriminator [This is not applicable to our implementation, because we are not using a GAN]
# 5×5 32 conv., ReLU
# 5×5 128 conv. ↓, BNorm, ReLU
# 5×5 256 conv. ↓, BNorm, ReLU
# 5×5 256 conv. ↓, BNorm, ReLU
# 512 fully-connected, BNorm, ReLU
# 1 fully-connected, sigmoid
# Architectures for the three networks that comprise VAE/GAN.
# ↓ and ↑ represent down- and upsampling respectively.
# BNorm denotes batch normalization (Ioffe & Szegedy, 2015).
# When batch normalization is applied to convolutional layers, per-channel normalization is used.
# implementation found here https://github.com/andersbll/autoencoding_beyond_pixels
class EncoderZ:
"""
This class transforms the images into a vector in the latent space, Z.
Example:
Input dimension: [n, 64, 64, 3] images
Output dimension: num_dims (z_dim in the latent space)
"""
def __init__(self, name='encoder_z'):
self.name = name
def __call__(self, inputs, num_dims):
with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
# input: [n, 64, 64, 3]
print(self.name)
print(inputs)
inputs = Encoder_Block("Encoder-ConvBlock3", inputs, 64) # [n, 32, 32, 64]
print(':', inputs)
inputs = Encoder_Block("Encoder-ConvBlock2", inputs, 128) # [n, 16, 16, 128]
print(':', inputs)
inputs = Encoder_Block("Encoder-ConvBlock1", inputs, 256) # [n, 8, 8, 256]
print(':', inputs)
inputs = global_sum_pooling(inputs) # [n, 256]
print(':', inputs)
inputs = dense("dense1", inputs, 2048) # [n, 2048]
inputs = relu(inputs)
print(':', inputs)
inputs = dense("dense", inputs, 2 * num_dims) # [n, 2*num_dims] 2 refers to mu and logvar
inputs = relu(inputs)
print(':', inputs)
mu = inputs[:, 0:num_dims]
logvar = inputs[:, num_dims:]
samples = tf.random_normal(shape=tf.shape(mu), mean=mu, stddev=tf.exp(0.5 * logvar))
return mu, logvar, samples
def var_list(self):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.name)
class EncoderW:
"""
This class transforms the images and labels into a vector in the latent space, W.
Example:
Input dimension: [n, 64, 64, 3] images , [n, 1] labels
Output dimension: num_dims (w_dim in the latent space)
"""
def __init__(self, name='encoder_w'):
self.name = name
def __call__(self, inputs, labels, num_dims):
with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
# inputs: [n, 64, 64, 3], labels: [n, 1]
print(self.name)
print(inputs)
inputs = Encoder_Block("Encoder-ConvBlock3", inputs, 64) # [n, 32, 32, 64]
print(':', inputs)
inputs = Encoder_Block("Encoder-ConvBlock2", inputs, 128) # [n, 16, 16, 128]
print(':', inputs)
inputs = Encoder_Block("Encoder-ConvBlock1", inputs, 256) # [n, 8, 8, 256]
print(':', inputs)
inputs = global_sum_pooling(inputs) # [n, 256]
print(':', inputs)
inputs = tf.concat([inputs, tf.cast(tf.expand_dims(labels, -1), dtype=tf.float32)], axis=-1) # [n, 257]
inputs = dense('dense2', inputs, 128) # [n, 128]
inputs = relu(inputs)
print(':', inputs)
inputs = dense('dense1', inputs, 64) # [n, 64]
inputs = relu(inputs)
print(':', inputs)
inputs = dense("dense", inputs, 2 * num_dims) # [n, 2*num_dims] 2 refers to mu and logvar
inputs = relu(inputs)
print(':', inputs)
mu = inputs[:, 0:num_dims]
logvar = inputs[:, num_dims:]
samples = tf.random_normal(shape=tf.shape(mu), mean=mu, stddev=tf.exp(0.5 * logvar))
return mu, logvar, samples
def var_list(self):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.name)
class DecoderX:
"""
This class transforms an embedding into reconstructed images.
Example:
Input dimension: z_dim (latent dims from Z) + w_dim (latent dims from W)
Output dimension: [n, 64, 64, 3] original image data
"""
def __init__(self, name='decoder_x'):
self.name = name
def __call__(self, inputs):
with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
# input: [n, z_dim+w_dim]
print(self.name)
inputs = relu(inputs)
inputs = dense('dense1', inputs, 8*8*256)
inputs = tf.reshape(inputs, [-1, 8, 8, 256])
inputs = Decoder_Block("Decoder-ConvBlock1", inputs, 256) # [n, 16, 16, 256]
print(':', inputs)
inputs = Decoder_Block("Decoder-ConvBlock2", inputs, 128) # [n, 32, 32, 128]
print(':', inputs)
inputs = Decoder_Block("Decoder-ConvBlock3", inputs, 32) # [n, 64, 64, 32]
print(':', inputs)
inputs = conv("conv4", inputs, 3, 5, 1) # [n, 64, 64, 3]
inputs = tanh(inputs)
print(':', inputs)
return inputs
def var_list(self):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.name)
class DecoderY:
"""
This class transforms an embedding into reconstructed labels.
Example:
Input dimension: z_dim (latent dims from Z)
Output dimension: [n, nums_class] labels
"""
def __init__(self, name='decoder_y'):
self.name = name
def __call__(self, inputs, nums_class):
with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):
# input: [n, z_dim]
print(self.name)
inputs = relu(inputs)
inputs = dense('dense1', inputs, 8*8*256)
inputs = tf.reshape(inputs, [-1, 8, 8, 256])
inputs = Decoder_Block("Decoder-ConvBlock1", inputs, 256) # [n, 16, 16, 256]
print(':', inputs)
inputs = Decoder_Block("Decoder-ConvBlock2", inputs, 128) # [n, 32, 32, 128]
print(':', inputs)
inputs = Decoder_Block("Decoder-ConvBlock3", inputs, 32) # [n, 64, 64, 32]
print(':', inputs)
inputs = global_sum_pooling(inputs) # [n, 32]
print(':', inputs)
inputs = dense("dense2", inputs, nums_class) # [n, nums_class]
inputs = softmax(inputs)
print(':', inputs)
return inputs
def var_list(self):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.name)
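# A minimal wiring sketch for the CSVAE pieces above (assumes TF1 graph mode and
# placeholders `images` [n, 64, 64, 3] and `labels` [n]; the latent dimensions
# are illustrative and not part of the original module):
#   z_mu, z_logvar, z = EncoderZ()(images, num_dims=16)
#   w_mu, w_logvar, w = EncoderW()(images, labels, num_dims=4)
#   x_recon = DecoderX()(tf.concat([z, w], axis=-1))
#   y_pred = DecoderY()(z, nums_class=2)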
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from Speak import JLing_Speak
import sys
if __name__ == '__main__':
print('''
********************************************************
*         JLing - Chinese voice dialogue robot         *
*        (c) 2019 周定坤 <zhoudk@ccitrobot.com>         *
********************************************************
 To exit, press the Ctrl-C key combination.
''')
try:
mybot = JLing_Speak()
mybot.Speak()
    except KeyboardInterrupt:
        sys.exit(0)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-02-17 16:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('events', '0020_image_name'),
]
operations = [
migrations.AlterUniqueTogether(
name='eventlink',
unique_together=set([('name', 'event', 'language', 'link')]),
),
]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import os
import sys
# build_path is mandatory, build_all is optional.
if len(sys.argv) < 2:
print "usage: %s [build_path> [build_all]" % sys.argv[0]
sys.exit(1)
# Build all is by default False.
build_all = False
if len(sys.argv) == 3 and sys.argv[2] == 'build_all':
build_all = True
build_path = sys.argv[1]
cmd = 'git clone https://github.com/daiwei89/hotbox_third_party %s' \
% build_path
print(cmd)
os.system(cmd)
if build_all:
cmd = 'cd %s; make -j third_party_core' % (build_path)
else:
cmd = 'cd %s; make -j third_party_special' % (build_path)
print(cmd)
os.system(cmd)
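# Example (hypothetical script name; a sketch, not part of the original):
#   python build_hotbox.py /tmp/hotbox_third_party build_all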
|
nilq/baby-python
|
python
|
from django.shortcuts import get_object_or_404
from rest_framework import generics
from mangacache.models import Chapter, Manga, Author
from mangacache.serializers import AuthorSerializer, MangaSerializer, ChapterSerializer
class AuthorList(generics.ListCreateAPIView):
queryset = Author.objects.all()
serializer_class = AuthorSerializer
class AuthorDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Author.objects.all()
serializer_class = AuthorSerializer
lookup_field = 'name'
class MangaList(generics.ListCreateAPIView):
queryset = Manga.objects.all()
serializer_class = MangaSerializer
    # permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
class MangaDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Manga.objects.all()
serializer_class = MangaSerializer
lookup_field = 'name'
class ChapterList(generics.ListCreateAPIView):
queryset = Chapter.objects.all()
serializer_class = ChapterSerializer
def perform_create(self, serializer):
serializer.save()
class ChapterDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Chapter.objects.all()
serializer_class = ChapterSerializer
def get_object(self):
queryset = self.filter_queryset(self.get_queryset())
filter_kwargs = {
'name': self.kwargs['name'],
'number': self.kwargs['number']
}
obj = get_object_or_404(queryset, **filter_kwargs)
self.check_object_permissions(self.request, obj)
return obj
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Small utility script to simplify generating bindings"""
import argparse
import os
import subprocess
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
SRC_DIR = os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.dirname(SCRIPT_DIR))))
MOJO_SDK = os.path.join(SRC_DIR, 'mojo', 'public')
DART_SDK = os.path.join(SRC_DIR, 'third_party', 'dart-sdk', 'dart-sdk', 'bin')
DART = os.path.join(DART_SDK, 'dart')
PUB = os.path.join(DART_SDK, 'pub')
PACKAGES_DIR = os.path.join(SRC_DIR, 'mojo', 'dart', 'packages')
MOJOM_PACKAGE_DIR = os.path.join(PACKAGES_DIR, 'mojom')
MOJOM_BIN = os.path.join(MOJOM_PACKAGE_DIR, 'bin', 'mojom.dart')
def run(cwd, args):
print 'RUNNING:', ' '.join(args), 'IN:', cwd
subprocess.check_call(args, cwd=cwd)
def main():
parser = argparse.ArgumentParser(
description='Generate source-tree Dart bindings')
parser.add_argument('-f', '--force',
default = False,
help='Always generate all bindings.',
action='store_true')
parser.add_argument('-v', '--verbose',
default = False,
help='Verbose output.',
action='store_true')
args = parser.parse_args()
extra_args = []
if args.force:
extra_args += ['-f']
if args.verbose:
extra_args += ['-v']
run(MOJOM_PACKAGE_DIR, [PUB, 'get'])
run(SRC_DIR, [DART,
MOJOM_BIN,
'gen',
'-m',
MOJO_SDK,
'-r',
SRC_DIR,
'--output',
PACKAGES_DIR] + extra_args)
return 0
if __name__ == '__main__':
sys.exit(main())
|
nilq/baby-python
|
python
|
import argparse
import logging
from http.server import HTTPServer, SimpleHTTPRequestHandler
from socketserver import TCPServer
class LoggingHandler(SimpleHTTPRequestHandler):
def log_message(self, format, *args):
logging.info(format % args)
def webserve():
parser = argparse.ArgumentParser(
description='Serve files from the current directory',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-v', '--verbose', action='store_true',
help='provide verbose output on progress')
parser.add_argument('-p', '--port', type=int, default=2020,
help='port number to use')
parser.add_argument('--logpath', type=str, metavar='PATH',
help='Path where logging output should be written')
args = parser.parse_args()
# Configure logging.
logging.basicConfig(filename=args.logpath, level=logging.INFO if args.verbose else logging.WARNING,
format='%(asctime)s %(levelname)s %(message)s', datefmt='%m/%d/%Y %H:%M:%S')
with TCPServer(('', args.port), LoggingHandler) as httpd:
logging.info('serving at port {0}'.format(args.port))
httpd.serve_forever()
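# Usage sketch (assuming this module is saved as webserve.py; not part of the original):
#   if __name__ == '__main__':
#       webserve()
# Then run: python webserve.py --port 8080 --verbose --logpath server.log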
|
nilq/baby-python
|
python
|
#Based on paper Predicting Protein-Protein Interactions by Combining Various Sequence-Derived Features into the General Form of Chou's Pseudo Amino Acid Composition by Zhao, Ma, and Yin
import os
import sys
#add parent and grandparent to path
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
parentdir = os.path.dirname(parentdir)
sys.path.append(parentdir)
import time
import numpy as np
from ProteinFeaturesHolder import ProteinFeaturesHolder
from GenericModule import GenericModule
from joblib import dump, load
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from GenericSVM import GenericSVM
class Zhao2012SVM(GenericModule):
def __init__(self, hyperParams = None):
GenericModule.__init__(self,hyperParams)
self.PCA = PCA(n_components=67)
self.scaler = StandardScaler()
self.modelType = None
self.featDict = self.hyperParams.get('featDict',{'all':['NMBroto_Zhao_30.tsv', 'Moran_Zhao_30.tsv', 'Geary_Zhao_30.tsv','PSEAAC_Zhao_30.tsv','Grantham_Sequence_Order_30.tsv','Schneider_Sequence_Order_30.tsv','Grantham_Quasi_30.tsv','Schneider_Quasi_30.tsv']})
def genModel(self):
self.model = GenericSVM(self.hyperParams)
def loadFeatureData(self,featureFolder):
super().loadFeatureData(featureFolder)
self.featuresData['all'].data = self.scaler.fit_transform(self.featuresData['all'].data)
self.featuresData['all'].data = self.PCA.fit_transform(self.featuresData['all'].data)
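# A minimal usage sketch (hypothetical feature folder; not part of the original):
#   module = Zhao2012SVM()                # default hyperparameters and featDict
#   module.genModel()
#   module.loadFeatureData('./features')  # standardizes, then PCA-reduces to 67 dims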
|
nilq/baby-python
|
python
|
load("//tools/base/bazel:bazel.bzl", "iml_module")
load("//tools/base/bazel:kotlin.bzl", "kotlin_test")
load("//tools/base/bazel:maven.bzl", "maven_java_library", "maven_pom")
load("//tools/base/bazel:utils.bzl", "fileset", "flat_archive")
load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
load("//tools/base/bazel/sdk:sdk_utils.bzl", "calculate_jar_name_for_sdk_package", "tool_start_script")
platforms = ["win", "linux", "mac"]
def _generate_classpath_jar_impl(ctx):
runtime_jars = depset(transitive = [java_lib[JavaInfo].transitive_runtime_jars for java_lib in [ctx.attr.java_binary]])
jars = [calculate_jar_name_for_sdk_package(jar.short_path) for jar in runtime_jars.to_list()]
mffile = ctx.actions.declare_file(ctx.attr.java_binary.label.name + "-manifest")
ctx.actions.write(output = mffile, content = "Class-Path: \n " + " \n ".join(jars) + " \n")
arguments = ["c", ctx.outputs.classpath_jar.path, "META-INF/MANIFEST.MF=" + mffile.path]
outputs = [ctx.outputs.classpath_jar]
ctx.actions.run(
inputs = [mffile],
outputs = outputs,
arguments = arguments,
executable = ctx.executable._zipper,
)
generate_classpath_jar = rule(
implementation = _generate_classpath_jar_impl,
attrs = {
"java_binary": attr.label(allow_single_file = True, mandatory = True),
"_zipper": attr.label(default = Label("@bazel_tools//tools/zip:zipper"), cfg = "host", executable = True),
"classpath_jar": attr.output(),
},
)
def sdk_java_binary(name, command_name = None, main_class = None, runtime_deps = [], default_jvm_opts = {}, visibility = None):
command_name = command_name if command_name else name
native.java_library(
name = command_name,
runtime_deps = runtime_deps,
visibility = visibility,
)
classpath_jar = command_name + "-classpath.jar"
generate_classpath_jar(java_binary = command_name, name = command_name + "-classpath", classpath_jar = classpath_jar, visibility = ["//visibility:public"])
for platform in platforms:
tool_start_script(
name = name + "_wrapper_" + platform,
platform = platform,
command_name = command_name,
default_jvm_opts = default_jvm_opts.get(platform) or "",
main_class_name = main_class,
classpath_jar = classpath_jar,
visibility = visibility,
)
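# Usage sketch (hypothetical target and deps; not part of the original):
#   sdk_java_binary(
#       name = "avdmanager",
#       main_class = "com.android.sdklib.tool.AvdManagerCli",
#       runtime_deps = ["//tools/base/sdklib:tools.sdklib"],
#       visibility = ["//visibility:public"],
#   )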
def _license_aspect_impl(target, ctx):
    attrs = ctx.rule.attr
    files = []
if "require_license" in attrs.tags:
out = ctx.actions.declare_file(target.notice.name + ".NOTICE", sibling = target.notice.file.files.to_list()[0])
ctx.actions.run_shell(
outputs = [out],
inputs = target.notice.file.files.to_list(),
arguments = [target.notice.file.files.to_list()[0].path, out.path],
command = "cp $1 $2",
)
files = [out]
all_deps = (attrs.deps if hasattr(attrs, "deps") else []) + \
(attrs.runtime_deps if hasattr(attrs, "runtime_deps") else []) + \
(attrs.exports if hasattr(attrs, "exports") else [])
transitive_notices = []
for dep in all_deps:
transitive_notices = transitive_notices + [dep.notices]
return struct(notices = depset(files, transitive = transitive_notices))
license_aspect = aspect(
implementation = _license_aspect_impl,
attr_aspects = ["deps", "runtime_deps", "exports"],
)
def _combine_licenses_impl(ctx):
inputs = depset(transitive = [dep.notices for dep in ctx.attr.deps]).to_list()
ctx.actions.run(
inputs = inputs,
outputs = [ctx.outputs.out],
arguments = [ctx.outputs.out.path] + [f.path for f in inputs],
executable = ctx.executable._combine_notices,
)
combine_licenses = rule(
implementation = _combine_licenses_impl,
attrs = {
"deps": attr.label_list(aspects = [license_aspect]),
"out": attr.output(mandatory = True),
"_combine_notices": attr.label(executable = True, cfg = "host", default = Label("//tools/base/bazel/sdk:combine_notices")),
},
)
def _package_component_impl(ctx):
inputs = []
args = ["c", ctx.outputs.out.path]
for bin in ctx.attr.bins:
file = bin.files.to_list()[0]
args.append("tools/bin/%s=%s" % (file.basename, file.path))
inputs += [file]
runtime_jars = depset(transitive = [java_lib[JavaInfo].transitive_runtime_jars for java_lib in ctx.attr.java_libs])
runtime_jar_names = {}
for jar in runtime_jars.to_list() + [j.files.to_list()[0] for j in ctx.attr.other_libs]:
name = calculate_jar_name_for_sdk_package(jar.short_path)
existing = runtime_jar_names.get(name)
if existing:
fail("Multiple jars have same name for SDK component with the same name! name= " + name + " jars= " + existing.path + " " + jar.path)
runtime_jar_names[name] = jar
args.append("tools/lib/%s=%s" % (name, jar.path))
inputs += [jar]
for other_file, other_location in ctx.attr.others.items():
args.append(other_location + "=" + other_file.files.to_list()[0].path)
inputs += other_file.files.to_list()
ctx.actions.run(
inputs = inputs,
outputs = [ctx.outputs.out],
executable = ctx.executable._zipper,
arguments = args,
progress_message = "Creating archive...",
mnemonic = "archiver",
)
package_component = rule(
implementation = _package_component_impl,
attrs = {
"bins": attr.label_list(),
"classpaths": attr.label_list(),
"java_libs": attr.label_list(),
"other_libs": attr.label_list(allow_files = True),
"others": attr.label_keyed_string_dict(allow_files = True),
"_zipper": attr.label(
default = Label("@bazel_tools//tools/zip:zipper"),
cfg = "host",
executable = True,
),
},
outputs = {"out": "%{name}.zip"},
)
def sdk_package(name, binaries, sourceprops, visibility):
version_file = "//tools/buildSrc/base:version.properties"
native.genrule(
name = "generate_source_props",
srcs = [sourceprops, version_file],
outs = ["source.properties"],
cmd = """
version=$$(sed -n '/^cmdlineToolsVersion/s/.* //p' $(location {version_file}));
sed "s/{{VERSION}}/$$version/" $(location {sourceprops}) > $(location source.properties)
""".format(version_file = version_file, sourceprops = sourceprops),
)
combine_licenses(name = name + "_combined_licenses", out = "NOTICE.txt", deps = binaries)
for platform in platforms:
package_component(
name = "%s_%s" % (name, platform),
bins = [bin + "_wrapper_" + platform for bin in binaries],
java_libs = binaries,
other_libs = [bin + "-classpath.jar" for bin in binaries],
others = {
"source.properties": "tools/source.properties",
name + "_combined_licenses": "tools/NOTICE.txt",
"README.libs": "tools/lib/README",
},
visibility = visibility,
)
native.filegroup(
name = name,
srcs = ["%s_%s.zip" % (name, platform) for platform in platforms],
visibility = visibility,
)
|
nilq/baby-python
|
python
|
import sncosmo
from astropy.cosmology import w0waCDM
import numpy as np
from lsst.sims.catUtils.dust import EBV
import os
from scipy.interpolate import griddata
from sn_tools.sn_telescope import Telescope
from lsst.sims.photUtils import Bandpass, Sed
from astropy import units as u
import pandas as pd
from sn_tools.sn_io import check_get_dir,check_get_file
from sn_tools.sn_utils import LoadGamma
from sn_tools.sn_calcFast import srand
from astropy.table import Table, Column
"""
def SALT2Templates(SALT2Dir='SALT2.Guy10_UV2IR', blue_cutoff=3800.):
for vv in ['salt2_template_0', 'salt2_template_1']:
fName = '{}/{}_orig.dat'.format(SALT2Dir, vv)
data = np.loadtxt(fName, dtype={'names': ('phase', 'wavelength', 'flux'),
'formats': ('f8', 'i4', 'f8')})
print(data)
data['flux'][data['wavelength'] <= blue_cutoff] = 0.0
print(data)
np.savetxt('{}/{}.dat'.format(SALT2Dir, vv),
data, fmt=['%1.2f', '%4d', '%.7e', ])
"""
class Cutoffs:
def __init__(self, x1=-2.0, color=0.2, daymax=0.0,
blue_cutoff=380., redcutoff=800.,
mjdCol='observationStartMJD', filterCol='filter',
exptimeCol='visitExposureTime', nexpCol='numExposures',
m5Col='fiveSigmaDepth',SALT2Dir='',
url_index='https://me.lsst.eu/gris/DESC_SN_pipeline'):
model = 'salt2-extended'
version = '1.0'
self.url_index = url_index
source = sncosmo.get_source(model, version=version)
if SALT2Dir != '':
check_get_dir(url_index,SALT2Dir,SALT2Dir)
self.SALT2Templates(SALT2Dir=SALT2Dir, blue_cutoff=10.*blue_cutoff)
source = sncosmo.SALT2Source(modeldir=SALT2Dir)
dustmap = sncosmo.OD94Dust()
lsstmwebv = EBV.EBVbase()
self.mjdCol = mjdCol
self.filterCol = filterCol
self.exptimeCol=exptimeCol
self.nexpCol =nexpCol
self.m5Col =m5Col
self.x1 = x1
self.color = color
self.daymax = daymax
# SN model
self.SN = sncosmo.Model(source=source,
effects=[dustmap, dustmap],
effect_names=['host', 'mw'],
effect_frames=['rest', 'obs'])
# SN parameters
self.SN.set(t0=daymax)
self.SN.set(c=color)
self.SN.set(x1=x1)
# SN normalisation
# cosmology
H0 = 72.0
Omega_m = 0.3
Omega_l = 0.70
w0 = -1.0
wa = 0.0
self.cosmo = self.cosmology(H0, Omega_m, Omega_l, w0, wa)
# x0 normalisation
self.x0_grid = self.x0(-19.0906)
self.x0_from_grid = griddata((self.x0_grid['x1'], self.x0_grid['color']),
self.x0_grid['x0_norm'], (x1, color), method='nearest')
# wavelength for the model
wave_min = 3000.
wave_max = 11501.
self.wave = np.arange(wave_min, wave_max, 1.)
# telescope
self.telescope = Telescope(airmass=1.2)
lambda_min = dict(zip('grizy', [300., 670., 300., 300., 300.]))
# band registery in sncosmo
for band in 'grizy':
throughput = self.telescope.atmosphere[band]
print(band, lambda_min[band])
idx = throughput.wavelen <= lambda_min[band]
# throughput.sb[idx] = 0.
bandcosmo = sncosmo.Bandpass(
throughput.wavelen, throughput.sb, name='LSST::'+band, wave_unit=u.nm)
sncosmo.registry.register(bandcosmo, force=True)
#load gammas - necessary to estimate flux errors (photometry)
gammaDir = 'reference_files'
gammaName = 'gamma.hdf5'
gammas = LoadGamma('grizy', gammaDir, gammaName, url_index, self.telescope)
self.gamma = gammas.gamma
    def SALT2Templates(self, SALT2Dir='SALT2.Guy10_UV2IR', blue_cutoff=3800.):
        # Zero out template flux below the blue cutoff and write the modified
        # templates back to the SALT2 directory.
        for vv in ['salt2_template_0', 'salt2_template_1']:
            fName = '{}/{}_orig.dat'.format(SALT2Dir, vv)
            data = np.loadtxt(fName, dtype={'names': ('phase', 'wavelength', 'flux'),
                                            'formats': ('f8', 'i4', 'f8')})
            data['flux'][data['wavelength'] <= blue_cutoff] = 0.0
            np.savetxt('{}/{}.dat'.format(SALT2Dir, vv),
                       data, fmt=['%1.2f', '%4d', '%.7e', ])
def x0(self, absMag):
"""
Method to load x0 data
Parameters
---------------
config: dict
parameters to load and (potentially) regenerate x0s
Returns
-----------
"""
# check whether X0_norm file exist or not (and generate it if necessary)
x0normFile = 'reference_files/X0_norm_{}.npy'.format(absMag)
if not os.path.isfile(x0normFile):
# if this file does not exist, grab it from a web server
check_get_file(self.url_index, 'reference_files',
'X0_norm_{}.npy'.format(absMag))
return np.load(x0normFile)
def cosmology(self, H0, Omega_m, Omega_l, w0, wa):
cosmo = w0waCDM(H0=H0,
Om0=Omega_m,
Ode0=Omega_l,
w0=w0, wa=wa)
return cosmo
def set_x0(self, z):
# luminosity distance
lumidist = self.cosmo.luminosity_distance(z).value*1.e3
x0 = self.x0_from_grid / lumidist ** 2
alpha = 0.13
beta = 3.
x0 *= np.power(10., 0.4*(alpha * self.x1 - beta * self.color))
self.SN.set(x0=x0)
    def __call__(self, obs, zrange=np.arange(0.01, 1.2, 0.01), selphase=False):
        prodid = 'Test'
        lc_out = 'LC_{}.hdf5'.format(prodid)
        simu_out = 'Simu_{}.hdf5'.format(prodid)
        meta_names = ['x1', 'color', 'daymax', 'z', 'index_hdf5']
        rmeta = []
        for z in zrange:
            lcdf = self.lc(obs, z, selphase=selphase)
            table_lc = Table.from_pandas(lcdf)
            index_hdf5 = '{}_{}_{}_{}'.format(self.x1, self.color, self.daymax, np.round(z, 2))
            meta_vals = [self.x1, self.color, self.daymax, np.round(z, 2), index_hdf5]
            table_lc.meta = dict(zip(meta_names, meta_vals))
            rmeta += [table_lc.meta]
table_lc.write(lc_out, 'LC_{}'.format(index_hdf5), append=True, compression=True)
Table(rmeta,names=meta_names).write(
simu_out, 'summary', append=True, compression=True)
    def update_meta(self, metadata, metadict):
        metc = dict(metadata)
        if not metc:
            metc = metadict
        else:
            for key in metadict.keys():
                metc[key] += metadict[key]
        return metc
    def lc(self, obs, z, selphase=False):
# no dust
ebvofMW = 0.0
self.SN.set(mwebv=ebvofMW)
# z val
self.SN.set(z=z)
# x0 normalisation
self.set_x0(z)
# Select obs depending on min and max phases
# blue and red cutoffs applied
"""
obs = self.cutoff(obs, self.sn_parameters['daymax'],
self.sn_parameters['z'],
self.sn_parameters['min_rf_phase'],
self.sn_parameters['max_rf_phase'],
self.sn_parameters['blue_cutoff'],
self.sn_parameters['red_cutoff'])
"""
if selphase:
obs = self.selectObsPhase(obs, z)
# Get the fluxes (vs wavelength) for each obs
lcdf = pd.DataFrame(obs)
lcdf[self.filterCol] = 'LSST::'+lcdf[self.filterCol]
lcdf['x1'] = self.x1
lcdf['color'] = self.color
lcdf['daymax'] = self.daymax
lcdf = lcdf.round({'daymax': 2})
fluxes_cosmo = self.SN.bandflux(
lcdf[self.filterCol], lcdf[self.mjdCol], zpsys='ab', zp=2.5*np.log10(3631))
fluxcov_cosmo = self.SN.bandfluxcov(
lcdf[self.filterCol], lcdf[self.mjdCol], zpsys='ab', zp=2.5*np.log10(3631))
cov = np.sqrt(np.diag(fluxcov_cosmo[1]))
lcdf['flux'] = fluxes_cosmo
lcdf['mag'] = -2.5*np.log10(fluxes_cosmo/3631.)
lcdf['variance_model'] = np.diag(fluxcov_cosmo[1])
        lcdf['z'] = z
        # estimate gammas
        lcdf = lcdf.groupby([self.filterCol]).apply(
            lambda x: self.interp_gamma_flux(x)).reset_index()
        lcdf['snr_m5'] = 1./srand(lcdf['gamma'], lcdf['mag'], lcdf[self.m5Col])
# complete the LC
lcdf['magerr'] = (2.5/np.log(10.))/lcdf['snr_m5'] # mag error
lcdf['fluxerr'] = lcdf['flux']/lcdf['snr_m5'] # flux error
lcdf['zp'] = 2.5*np.log10(3631) # zp
lcdf['zpsys'] = 'ab' # zpsys
        lcdf['phase'] = (lcdf[self.mjdCol]-self.daymax)/(1.+z)  # phase
# rename some of the columns
lcdf = lcdf.rename(
columns={self.mjdCol: 'time', self.filterCol: 'band', self.m5Col: 'm5', self.exptimeCol: 'exptime'})
lcdf = lcdf.round({'z': 2})
return lcdf
def interp_gamma_flux(self, grp, gammaName='gamma'):
"""
Method to estimate gamma and mag_to_flux values from interpolation
Parameters
---------------
grp: pandas group
data to process
Returns
----------
original group with two new cols:
gamma: gamma values
flux_e_sec: flux in pe.sec-1
"""
single_exptime = grp[self.exptimeCol]/grp[self.nexpCol]
# gamma interp
filterName = grp.name.split(':')[-1]
grp.loc[:, gammaName] = self.gamma[filterName](
(grp[self.m5Col].values, single_exptime, grp[self.nexpCol]))
return grp
def estimateFluxes(self, wavelength, fluxes, obs, throughput):
wavelength = np.repeat(wavelength[np.newaxis, :], len(fluxes), 0)
SED_time = Sed(wavelen=wavelength, flambda=fluxes)
        nvals = range(len(SED_time.wavelen))
# Arrays of SED, transmissions to estimate integrated fluxes
seds = [Sed(wavelen=SED_time.wavelen[i], flambda=SED_time.flambda[i])
for i in nvals]
transes = np.asarray([throughput[obs[self.filterCol][i]]
for i in nvals])
int_fluxes = np.asarray(
[seds[i].calcFlux(bandpass=transes[i]) for i in nvals])
print(int_fluxes, obs[self.filterCol])
def selectObsPhase(self, obs, z):
obs_sel = None
for b in 'grizy':
idx = obs[self.filterCol] == b
sel = obs[idx]
if len(sel) > 0:
phases = (sel[self.mjdCol]-self.daymax)/(1.+z)
idxa = np.argmin(np.abs(phases))
if obs_sel is None:
obs_sel = np.array(sel[idxa])
else:
obs_sel = np.hstack([obs_sel, np.array(sel[idxa])])
return obs_sel
def plot(self, obs, z):
import matplotlib.pyplot as plt
self.pltDef(plt)
# flux estimates
obs = self.selectObsPhase(obs, z)
obsdf = pd.DataFrame(obs)
obsdf[self.filterCol] = 'LSST::'+obsdf[self.filterCol]
throughput = self.telescope.atmosphere
# z val
self.SN.set(z=z)
# x0 normalisation
self.set_x0(z)
fluxes = 10.*self.SN.flux(obsdf[self.mjdCol], self.wave)
self.estimateFluxes(self.wave/10., fluxes, obs, throughput)
fig, ax = plt.subplots()
fig.suptitle('z = {}'.format(z))
for bb in 'grizy':
ax.plot(
10.*throughput[bb].wavelen, throughput[bb].sb)
axa = ax.twinx()
# axa.plot(self.wave, fluxes[0, :], color='k')
for fflux in fluxes:
idx = fflux > 10e-25
axa.plot(self.wave[idx], fflux[idx], color='k')
axa.fill_between(self.wave[idx], 0., fflux[idx], alpha=0.05)
ax.set_ylim([0., None])
axa.set_ylim([0., None])
ax.set_xlabel('wavelength [nm]')
ax.set_ylabel('sb (0-1)')
axa.set_ylabel('Flux [ergs / s / cm$^2$ / Angstrom]')
plt.show()
def pltDef(self, plt):
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
plt.rcParams['axes.labelsize'] = 12
plt.rcParams['legend.fontsize'] = 12
plt.rcParams['font.size'] = 12
fake_data = 'Fake_DESC.npy'
if not os.path.isfile(fake_data):
# data do not exist -> have to generate them
fake_config = 'input/Fake_cadence/Fake_cadence.yaml'
cmd = 'python run_scripts/fakes/make_fake.py --config {} --output {}'.format(
fake_config, fake_data.split('.')[0])
os.system(cmd)
z = 0.77
"""
lambda_g_min = 6700.
web_path=' https://me.lsst.eu/gris/DESC_SN_pipeline'
check_get_dir(web_path,SALT2Dir,SALT2Dir)
blue_cutoff = lambda_g_min/(1.+z)
blue_cutoff = 3600.
# make the SALT2 model with this cutoff
SALT2Templates(SALT2Dir=SALT2Dir, blue_cutoff=blue_cutoff)
"""
blue_cutoff = 380.
SALT2Dir = 'SALT2.Guy10_UV2IR'
mysimu = Cutoffs(SALT2Dir=SALT2Dir, blue_cutoff=blue_cutoff)
obs = np.load('Fake_DESC.npy')
mysimu.plot(obs, z=0.85)
plt.show()
"""
fluxdf = pd.DataFrame()
for zval in np.arange(0.01,1.2,0.01):
fluxdf = pd.concat((fluxdf,mysimu(obs, zval,selphase=False)))
print(fluxdf)
"""
# save data as two astropy tables: Simu*, LC*
mysimu(obs, selphase=False)
"""
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
fluxdf = fluxdf[fluxdf['flux']>0.]
filtercolors = dict(zip('ugrizy', ['b', 'c', 'g', 'y', 'r', 'm']))
for b in 'grizy':
idx = fluxdf['band'] == 'LSST::{}'.format(b)
sel = fluxdf[idx]
ax.plot(sel['z'],np.sqrt(sel['variance_model'])/sel['flux'],marker='.',color=filtercolors[b],label='{} band'.format(b))
ax.plot(sel['z'],sel['fluxerr']/sel['flux'],marker='.',color=filtercolors[b],ls='--')
ax.legend()
"""
plt.show()
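# Note on the flux-error model used in lc() above: srand is assumed to
# implement the standard LSST random photometric error (Ivezic et al. 2019),
#
#   x = 10 ** (0.4 * (mag - m5))
#   sigma_rand ** 2 = (0.04 - gamma) * x + gamma * x ** 2   # in mag**2
#
# so that snr_m5 = 1 / sigma_rand and fluxerr = flux / snr_m5.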
|
nilq/baby-python
|
python
|
from random import randint
from tools.population_creator import (ImpossibleToCompleteError, Individual,
create_individual)
def mutation(individual: Individual):
"""
:param individual: особь
:return: возвращает мутировавшую особь
Для мутации хромосомы сначала с помощью генератора случайных чисел
выбирается количество генов, которые не будут подвержены мутации.
Это значение лежит в интервале от 60% генов до N - 1 ген. То есть,
как минимум один ген мутирует всегда.
После выбираются сами гены, которые не будут подтвержены мутации.
На основании полученных в прошлом шаге номеров генов, формируется
шаблон хромосомы и функцией create_individual достраивается до
полной хромосомы.
"""
length: int = len(individual)
template: list = [None] * length
no_change_amount = range(randint(int(length * 0.5), int(length * 0.9)))
no_change_index = []
for _ in no_change_amount:
while True:
random_index = randint(0, length - 1)
if random_index not in no_change_index:
no_change_index.append(random_index)
break
no_change_index.sort()
for i in no_change_index:
template[i] = individual[i]
try:
mutant = create_individual(length + 1, template=template)
except ImpossibleToCompleteError:
mutant = None
return mutant
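# Illustration of the template mechanism (hypothetical values; assumes an
# Individual behaves like a list of genes):
#
#   individual        = [3, 1, 4, 2]       # length 4
#   unchanged indices = {0, 2}             # drawn at random above
#   template          = [3, None, 4, None]
#
# create_individual then fills the None slots with valid genes, e.g.
# producing [3, 2, 4, 1]. If the template cannot be completed,
# ImpossibleToCompleteError is raised and mutation() returns None.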
|
nilq/baby-python
|
python
|
import os
from django.conf import settings
from pdf2image import convert_from_path
class PdfRasterizer:
def __init__(self):
self._dpi = settings.PDF_RASTERIZER["dpi"]
self._fmt = settings.PDF_RASTERIZER["format"]
self._thread_count = settings.PDF_RASTERIZER["thread_count"]
def rasterize_pdf(self, subfolder_path):
# Typically, pdf2image will write generated images to a temporary path, after
# which you can manipulate them. By providing 'output_file' and 'output_folder',
# we can skip that second step and make pdf2image write directly to our desired
# output folder, using our desired file name pattern.
return convert_from_path(
os.path.join(subfolder_path, "source.pdf"),
dpi=self._dpi,
fmt=self._fmt,
thread_count=self._thread_count,
output_file="image-",
output_folder=subfolder_path,
)
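# Sketch of the Django settings this class assumes (values illustrative only):
#
#   PDF_RASTERIZER = {
#       "dpi": 200,
#       "format": "jpeg",
#       "thread_count": 4,
#   }
#
# and of typical usage (the path is hypothetical; a 'source.pdf' must exist
# inside it):
#
#   rasterizer = PdfRasterizer()
#   images = rasterizer.rasterize_pdf("/data/documents/abc123")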
|
nilq/baby-python
|
python
|
'''
This downloads the data about which locations Twitter provides
top-10 trending item lists for, and stores the data in the database
'''
from TrendAnalyser import TrendAnalyser
TA = TrendAnalyser()
print(TA._update_woeid_data())
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
tests - helper functions
~~~~~~~~~~~~~~~~~~~~~~~~
test cases for olaf helper function
:copyright: (c) 2015 by Vivek R.
:license: BSD, see LICENSE for more details.
"""
import os
import random
import string
import unittest
from click.testing import CliRunner
import olaf
class TestOlaf(unittest.TestCase):
def setUp(self):
self.runner = CliRunner()
def tearDown(self):
pass
@staticmethod
def get_random_string():
return ''.join(random.choice(
string.ascii_lowercase + string.digits) for _ in range(10))
def test_is_valid_path(self):
random_string = self.get_random_string()
with self.runner.isolated_filesystem():
temp_path = os.path.join(os.getcwd(), random_string)
with self.assertRaises(OSError):
olaf.is_valid_path(temp_path)
os.mkdir(temp_path)
self.assertTrue(olaf.is_valid_path(temp_path))
def test_is_valid_site(self):
with self.runner.isolated_filesystem():
with self.assertRaises(OSError):
olaf.is_valid_site()
open(os.path.join(os.getcwd(), 'config.py'), 'a').close()
self.assertTrue(olaf.is_valid_site())
def test_get_themes_list(self):
with self.runner.isolated_filesystem():
current_path = os.path.join(os.getcwd())
# invalid theme path
self.assertEqual(olaf.get_themes_list(current_path), [])
# create random number of theme folders
random_no = random.randint(1, 20)
for num in range(random_no):
                temp_folder = os.path.join(current_path, self.get_random_string())
os.mkdir(temp_folder)
open(os.path.join(temp_folder, 'temp.txt'), 'a').close()
# check for newly created themes above
self.assertEqual(
len(olaf.get_themes_list(current_path)), random_no)
def test_get_theme_by_name(self):
# valid theme
self.assertIsNotNone(olaf.get_theme_by_name('basic'))
# invalid theme
self.assertIsNone(olaf.get_theme_by_name(self.get_random_string()))
with self.runner.isolated_filesystem():
# create a random theme
random_theme_name = self.get_random_string()
current_path = os.path.join(os.getcwd())
theme_path = os.path.join(current_path, 'themes', random_theme_name)
os.makedirs(theme_path)
open(os.path.join(theme_path, 'temp.txt'), 'a').close()
# check with random theme created above
self.assertIsNotNone(olaf.get_theme_by_name(random_theme_name))
def test_create_project_site(self):
with self.runner.isolated_filesystem():
random_project_name = self.get_random_string()
self.assertTrue(olaf.create_project_site(random_project_name))
files_to_check = ['__init__.py', 'config.py', 'disqus.html',
olaf.contents_dir,
os.path.join(olaf.contents_dir, olaf.posts_dir),
os.path.join(olaf.contents_dir, olaf.pages_dir)]
for f in files_to_check:
path = os.path.join(os.getcwd(), random_project_name, f)
self.assertTrue(os.path.exists(path))
def test_get_default_theme_name(self):
random_theme_name = self.get_random_string()
self.assertEqual(
olaf.get_default_theme_name(random_theme_name), random_theme_name)
self.assertEqual(
olaf.get_default_theme_name(None), olaf.default_theme)
# with self.runner.isolated_filesystem():
# with open(os.path.join(os.getcwd(), 'config.py'), 'w+') as f:
# f.write('SITE={"theme": "' + random_theme_name + '"}')
# self.assertEqual(
# olaf.get_default_theme_name(None), random_theme_name)
|
nilq/baby-python
|
python
|
import random
from timeit import default_timer as timer
from clkhash.key_derivation import generate_key_lists
from clkhash.schema import get_schema_types
from clkhash.bloomfilter import calculate_bloom_filters
from clkhash.randomnames import NameList
from anonlink.entitymatch import *
from anonlink.util import popcount_vector, generate_clks, generate_bitarray
from anonlink.distributed_processing import calculate_filter_similarity
some_filters = generate_clks(10000)
def compute_popcount_speed(n):
"""
Just do as much counting of bits.
"""
clks = [generate_bitarray(1024) for _ in range(n)]
start = timer()
popcounts = popcount_vector(clks)
end = timer()
elapsed_time = end - start
print("{:6d} x 1024 bit popcounts in {:.6f} seconds".format(n, elapsed_time))
speed_in_MiB = n / (1024 * 8 * elapsed_time)
print("Popcount speed: {:.2f} MiB/s".format(speed_in_MiB))
return speed_in_MiB
def print_comparison_header():
print("Size 1 | Size 2 | Comparisons | Compute Time | Million Comparisons per second")
def compute_comparison_speed(n1=100, n2=100):
"""
Using the greedy solver, how fast can hashes be computed using one core.
"""
filters1 = [some_filters[random.randrange(0, 8000)] for _ in range(n1)]
filters2 = [some_filters[random.randrange(2000, 10000)] for _ in range(n2)]
start = timer()
result3 = calculate_mapping_greedy(filters1, filters2)
end = timer()
elapsed_time = end - start
print("{:6d} | {:6d} | {:12d} | {:8.3f}s | {:12.3f}".format(
n1, n2, n1*n2, elapsed_time, (n1*n2)/(1e6*elapsed_time)))
return elapsed_time
def compute_comparison_speed_parallel(n1=100, n2=100):
"""
Using the greedy solver in chunks, how fast can hashes be computed.
"""
filters1 = [some_filters[random.randrange(0, 8000)] for _ in range(n1)]
filters2 = [some_filters[random.randrange(2000, 10000)] for _ in range(n2)]
start = timer()
calculate_filter_similarity(filters1, filters2)
end = timer()
elapsed_time = end - start
print("{:6d} | {:6d} | {:12d} | {:8.3f}s | {:12.3f}".format(
n1, n2, n1*n2, elapsed_time, (n1*n2)/(1e6*elapsed_time)))
return elapsed_time
def compare_python_c(ntotal=10000, nsubset=6000, frac=0.8):
"""Compare results and running time of python and C++ versions.
:param ntotal: Total number of data points to generate
:param nsubset: Number of points for each database
:param frac: Fraction of overlap between subsets
:raises: AssertionError if the results differ
:return: dict with 'c' and 'python' keys with values of the total time taken
for each implementation
"""
nml = NameList(ntotal)
sl1, sl2 = nml.generate_subsets(nsubset, frac)
keys = generate_key_lists(('test1', 'test2'), len(nml.schema))
filters1 = calculate_bloom_filters(sl1, get_schema_types(nml.schema), keys)
filters2 = calculate_bloom_filters(sl2, get_schema_types(nml.schema), keys)
# Pure Python version
start = timer()
result = python_filter_similarity(filters1, filters2)
end = timer()
python_time = end - start
# C++ cffi version
start = timer()
result3 = cffi_filter_similarity_k(filters1, filters2, 1, 0.0)
end = timer()
cffi_time = end - start
assert result == result3, "Results are different between C++ cffi and Python"
# Results are the same
return {
"c": cffi_time,
"python": python_time
}
def benchmark(size, compare):
if compare:
print(compare_python_c(ntotal=1000, nsubset=600))
compute_popcount_speed(100000)
print_comparison_header()
possible_test_sizes = [
1000, 2000, 3000, 4000,
5000, 6000, 7000, 8000, 9000,
10000,
20000, 30000, 40000, 50000, 60000, 70000, 80000, 90000,
1000000,
2000000
]
for test_size in possible_test_sizes:
if test_size <= size:
compute_comparison_speed_parallel(
test_size, test_size
)
print("Single Core:")
compute_comparison_speed(5000, 5000)
if __name__ == '__main__':
benchmark(20000, False)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from conf.config import *
import ssl
import socket
import os
class Tool:
def __init__(self):
self.description = "Get the SSL certificate information"
self.options = {
'domain': {
"value": "",
"required": True,
"description": "The target domain (e.g. google.com)"
},
'sslPort': {
"value": "443",
"required": True,
"description": "Specify SSL Port (e.g. 443)"
}
}
self.output = {
"status": "",
"data": "",
"save": True
}
def run(self):
self._result = {}
self._pair = {}
hostname = self.options["domain"]["value"]
sslp = int(self.options["sslPort"]["value"])
        print('\n' + Y + '[!]' + C + ' SSL Certificate Information : ' + W + '\n')
try:
# check the ssl port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(10)
s.connect((hostname, sslp))
s.close()
# get the ssl certificate
ctx = ssl.create_default_context()
s = socket.socket()
s.settimeout(10)
sslSock = ctx.wrap_socket(s, server_hostname=self.options["domain"]["value"])
try:
sslSock.connect((hostname, sslp))
info = sslSock.getpeercert()
except Exception as e:
info = ssl.get_server_certificate((hostname, sslp))
f = open('{}.pem'.format(hostname), 'w')
f.write(info)
f.close()
cert_dict = ssl._ssl._test_decode_cert('{}.pem'.format(hostname))
info = cert_dict
os.remove('{}.pem'.format(hostname))
# process the ssl info
for k, v in info.items():
if isinstance(v, tuple):
self.unpack(v)
for k, v in self._pair.items():
print(G + '[+]' + C + ' {} : '.format(str(k)) + W + str(v))
self._result.update({str(k): str(v)})
self._pair.clear()
else:
print(G + '[+]' + C + ' {} : '.format(str(k)) + W + str(v))
self._result.update({str(k): str(v)})
print("")
self.output['status'] = "success"
self.output['data'] = self._result
except Exception as e:
print (R + '[-]' + C + ' Error : ' + W + str(e) + '\n')
self.output['status'] = "fail"
self.output['data'] = str(e)
# unpack the tuple
def unpack(self, v):
convert = False
for item in v:
if isinstance(item, tuple):
for subitem in item:
if isinstance(subitem, tuple):
for elem in subitem:
if isinstance(elem, tuple):
self.unpack(elem)
else:
convert = True
if convert == True:
self._pair.update(dict([subitem]))
else:
print(G + '[+]' + C + ' {} : '.format(str(v)) + W + str(item))
self._result.update({str(v): str(item)})
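# Sketch of standalone usage (normally the surrounding framework drives this;
# conf.config must supply the colour constants Y, C, W, G and R):
#
#   tool = Tool()
#   tool.options['domain']['value'] = 'example.com'
#   tool.run()
#   print(tool.output['status'], tool.output['data'])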
|
nilq/baby-python
|
python
|
"""This module is a wrapper for the PuLP library, which is capable of
solving LP/MILP instances by using different kinds of solvers (like Gurobi or CBC).
The wrapper defines custom MILP and LP classes in order to simplify the instantiation of
problems from coefficient vectors and matrices."""
from .solverresult import SolverResult
from .milp import MILP, LP, GurobiMILP
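# For reference, the kind of raw PuLP boilerplate these wrapper classes are
# meant to hide (plain PuLP API shown; the actual MILP/LP constructor
# signatures live in .milp and are not reproduced here):
#
#   import pulp
#   prob = pulp.LpProblem("example", pulp.LpMaximize)
#   x = pulp.LpVariable("x", lowBound=0)
#   y = pulp.LpVariable("y", lowBound=0, cat="Integer")
#   prob += 3 * x + 2 * y        # objective
#   prob += x + y <= 4           # constraint
#   prob.solve()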
|
nilq/baby-python
|
python
|
# Copyright 2022 Masatoshi Suzuki (@singletongue)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import gzip
import json
from unicodedata import normalize
from bs4 import BeautifulSoup
from tqdm import tqdm
SECTIONS_TO_IGNORE = ["脚注", "出典", "参考文献", "関連項目", "外部リンク"]
TAGS_TO_REMOVE = ["table"]
TAGS_TO_EXTRACT = ["p"]
# TAGS_TO_EXTRACT = ["p", "li"]
INNER_TAGS_TO_REMOVE = ["sup"]
def normalize_text(text):
text = normalize("NFKC", text)
text = " ".join(text.split())
text = "".join(char for char in text if char.isprintable())
text = text.strip()
return text
def extract_paragraphs_from_html(html):
soup = BeautifulSoup(html, features="lxml")
section_title = "__LEAD__"
section = soup.find(["section"])
while section:
if section.h2 is not None:
section_title = section.h2.text
for tag in section.find_all(TAGS_TO_REMOVE):
tag.clear()
for tag in section.find_all(TAGS_TO_EXTRACT):
for inner_tag in tag.find_all(INNER_TAGS_TO_REMOVE):
inner_tag.clear()
paragraph_text = normalize_text(tag.text)
yield (section_title, paragraph_text)
section = section.find_next_sibling(["section"])
def main(args):
with gzip.open(args.page_htmls_file, "rt") as f, gzip.open(args.output_file, "wt") as fo:
for line in tqdm(f):
input_item = json.loads(line.rstrip("\n"))
page_id = input_item["pageid"]
rev_id = input_item["revid"]
title = input_item["title"]
html = input_item["html"]
paragraph_index = 0
for (section_title, paragraph_text) in extract_paragraphs_from_html(html):
if section_title in SECTIONS_TO_IGNORE:
continue
if len(paragraph_text) < args.min_paragraph_length:
continue
if len(paragraph_text) > args.max_paragraph_length:
continue
output_item = {
"id": "{}-{}-{}".format(page_id, rev_id, paragraph_index),
"pageid": page_id,
"revid": rev_id,
"paragraph_index": paragraph_index,
"title": title,
"section": section_title,
"text": paragraph_text,
}
print(json.dumps(output_item, ensure_ascii=False), file=fo)
paragraph_index += 1
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--page_htmls_file", type=str, required=True)
parser.add_argument("--output_file", type=str, required=True)
parser.add_argument("--min_paragraph_length", type=int, default=10)
parser.add_argument("--max_paragraph_length", type=int, default=1000)
args = parser.parse_args()
main(args)
|
nilq/baby-python
|
python
|
#!/usr/bin/python2
import sys
import math
import socket
import random
import time
import errno
# flag was put and retrieved successfully - service is up
def service_up():
print("[service is worked] - 101")
exit(101)
# service is reachable (tcp connect works) but the protocol is wrong - could not put/get the flag
def service_corrupt():
print("[service is corrupt] - 102")
exit(102)
# waited the allotted time (e.g. 5 sec) but the service did not reply in time
def service_mumble():
print("[service is mumble] - 103")
exit(103)
# service is not available (maybe blocked port or service is down)
def service_down():
print("[service is down] - 104")
exit(104)
if len(sys.argv) != 5:
print("\nUsage:\n\t" + sys.argv[0] + " <host> (put|check) <flag_id> <flag>\n")
print("Example:\n\t" + sys.argv[0] + " \"127.0.0.1\" put \"abcdifghr\" \"123e4567-e89b-12d3-a456-426655440000\" \n")
print("\n")
exit(0)
host = sys.argv[1]
port = 4441
command = sys.argv[2]
f_id = sys.argv[3]
flag = sys.argv[4]
# test hooks for the jury (normally left commented out):
# force a mumble verdict:
# while True: time.sleep(1)
def put_flag():
global host, port, f_id, flag
# try put
try:
# print("try connect " + host + ":" + str(port))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(1)
s.connect((host, port))
result = s.recv(1024)
# print(result)
s.send("put" + "\n")
result = s.recv(1024)
s.send(f_id + "\n")
result = s.recv(1024)
s.send(flag + "\n")
result = s.recv(1024)
s.close()
except socket.timeout:
service_down()
except socket.error as serr:
if serr.errno == errno.ECONNREFUSED:
service_down()
else:
print(serr)
service_corrupt()
except Exception as e:
print(e)
service_corrupt()
def check_flag():
global host, port, f_id, flag
# try get
flag2 = ""
try:
# print("try connect " + host + ":" + str(port))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(1)
s.connect((host, port))
result = s.recv(1024)
# print(result)
s.send("get\n")
result = s.recv(1024)
s.send(f_id + "\n")
result = s.recv(1024)
flag2 = result.strip()
flag2 = flag2.split("FOUND FLAG: ");
if len(flag2) == 2:
flag2 = flag2[1]
else:
flag2 = ''
s.close()
except socket.timeout:
service_down()
except socket.error as serr:
if serr.errno == errno.ECONNREFUSED:
service_down()
else:
print(serr)
service_corrupt()
except Exception as e:
print(e)
service_corrupt()
if flag != flag2:
service_corrupt()
if command == "put":
put_flag()
check_flag()
service_up()
if command == "check":
check_flag()
service_up()
|
nilq/baby-python
|
python
|
from django.urls import path
from . import views
app_name = 'sqds_officers'
urlpatterns = [
path('<str:api_id>/geotb/', views.GeoTBPlayerView.as_view(), name='geo_tb'),
path('<str:api_id>/sepfarm/', views.SepFarmProgressView.as_view(), name='sep_farm')
]
|
nilq/baby-python
|
python
|
import cv2
def undistort_image(img, mtx, dist):
'''
Undistorts image given a camera matrix and distortion coefficients
'''
undist_img = cv2.undistort(img, mtx, dist, None, mtx)
return undist_img
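# Minimal usage sketch (assumes mtx and dist were obtained beforehand, e.g.
# from cv2.calibrateCamera on chessboard images; the file names are
# illustrative):
#
#   img = cv2.imread('frame.jpg')
#   undistorted = undistort_image(img, mtx, dist)
#   cv2.imwrite('frame_undistorted.jpg', undistorted)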
|
nilq/baby-python
|
python
|
lista_inteiros = [
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
[9, 1, 8, 9, 9, 7, 2, 1, 6, 8],
    [1, 3, 2, 2, 8, 6, 5, 9, 6, 7],
[3, 8, 2, 8, 6, 7, 7, 3, 1, 9],
[4, 8, 8, 8, 5, 1, 10, 3, 1, 7],
[1, 3, 7, 2, 2, 1, 5, 1, 9, 9],
[10, 2, 2, 1, 3, 5, 1, 9, 9],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
]
def encontra_duplicado(parametro):
numeros_checados = set()
primeiro_duplicado = -1
for numero in parametro:
if numero in numeros_checados:
primeiro_duplicado = numero
break
numeros_checados.add(numero)
return primeiro_duplicado
for c in lista_inteiros:
print(c, encontra_duplicado(c))
|
nilq/baby-python
|
python
|
# Copyright 2015-2018 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from ..azure_common import BaseTest, arm_template, cassette_name
from c7n_azure.resources.key_vault import (KeyVaultUpdateAccessPolicyAction, WhiteListFilter,
KeyVaultFirewallRulesFilter,
KeyVaultFirewallBypassFilter)
from c7n_azure.session import Session
from c7n_azure.utils import GraphHelper
from mock import patch, Mock
from msrestazure.azure_exceptions import CloudError
from netaddr import IPSet
from parameterized import parameterized
import pytest
from requests import Response
from c7n.utils import local_session
class KeyVaultTest(BaseTest):
def setUp(self):
super(KeyVaultTest, self).setUp()
def test_key_vault_schema_validate(self):
with self.sign_out_patch():
p = self.load_policy({
'name': 'test-key-vault',
'resource': 'azure.keyvault',
'filters': [
{'type': 'whitelist',
'key': 'test'}
],
'actions': [
{'type': 'update-access-policy',
'operation': 'add',
'access-policies': []}
]
}, validate=True)
self.assertTrue(p)
@arm_template('keyvault.json')
@cassette_name('common')
def test_find_by_name(self):
p = self.load_policy({
'name': 'test-azure-keyvault',
'resource': 'azure.keyvault',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'cckeyvault1*'}],
})
resources = p.run()
self.assertEqual(len(resources), 1)
def test_compare_permissions(self):
p1 = {"keys": ['get'], "secrets": ['get'], "certificates": ['get']}
p2 = {"keys": ['Get', 'List'], "secrets": ['Get', 'List'], "certificates": ['Get', 'List']}
self.assertTrue(WhiteListFilter.compare_permissions(p1, p2))
p1 = {"keys": ['delete']}
p2 = {"keys": ['Get', 'List'], "secrets": ['Get', 'List'], "certificates": ['Get', 'List']}
self.assertFalse(WhiteListFilter.compare_permissions(p1, p2))
p1 = {"secrets": ['delete']}
p2 = {"keys": ['Get', 'List'], "secrets": ['Get', 'List'], "certificates": ['Get', 'List']}
self.assertFalse(WhiteListFilter.compare_permissions(p1, p2))
p1 = {"certificates": ['delete']}
p2 = {"keys": ['Get', 'List'], "secrets": ['Get', 'List'], "certificates": ['Get', 'List']}
self.assertFalse(WhiteListFilter.compare_permissions(p1, p2))
p1 = {}
p2 = {"keys": ['Get', 'List'], "secrets": ['Get', 'List'], "certificates": ['Get', 'List']}
self.assertTrue(WhiteListFilter.compare_permissions(p1, p2))
p1 = {"keys": ['get'], "secrets": ['get'], "certificates": ['get']}
p2 = {}
self.assertFalse(WhiteListFilter.compare_permissions(p1, p2))
# Requires Graph access
@arm_template('keyvault.json')
@pytest.mark.skiplive
def test_whitelist(self):
"""Tests basic whitelist functionality"""
p = self.load_policy({
'name': 'test-key-vault',
'resource': 'azure.keyvault',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'cckeyvault1*'},
{'not': [
{'type': 'whitelist',
'key': 'principalName',
'users': ['account1@sample.com']}
]}
]
})
resources = p.run()
self.assertEqual(len(resources), 1)
@arm_template('keyvault-no-policies.json')
def test_whitelist_zero_access_policies(self):
"""Tests that a keyvault with 0 access policies is processed properly
and doesn't raise an exception.
"""
p = self.load_policy({
'name': 'test-key-vault',
'resource': 'azure.keyvault',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'cckeyvault2*'},
{'not': [
{'type': 'whitelist',
'key': 'principalName',
'users': ['account1@sample.com']}
]}
]
})
resources = p.run()
self.assertEqual(len(resources), 0)
@arm_template('keyvault.json')
@patch.object(GraphHelper, 'get_principal_dictionary')
def test_whitelist_not_authorized(self, get_principal_dictionary):
"""Tests that an exception is thrown when both:
The Microsoft Graph call fails.
This is mocked because it is impractical to have
identities with varying levels of graph access for
live test runs or recordings"""
mock_response = Mock(spec=Response)
mock_response.status_code = 403
mock_response.text = 'forbidden'
get_principal_dictionary.side_effect = CloudError(mock_response)
p = self.load_policy({
'name': 'test-key-vault',
'resource': 'azure.keyvault',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'cckeyvault1*'},
{'not': [
{'type': 'whitelist',
'key': 'principalName',
'users': ['account1@sample.com']}
]}
]
})
with self.assertRaises(CloudError) as e:
p.run()
self.assertEqual(403, e.exception.status_code)
def test_update_access_policy_action(self):
with patch(self._get_key_vault_client_string() + '.update_access_policy')\
as access_policy_action_mock:
p = self.load_policy({
'name': 'test-azure-keyvault',
'resource': 'azure.keyvault',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value_type': 'normalize',
'value': 'cckeyvault1*'}],
'actions': [
{'type': 'update-access-policy',
'operation': 'replace',
'access-policies': [{
'tenant-id': '00000000-0000-0000-0000-000000000000',
'object-id': '11111111-1111-1111-1111-111111111111',
'permissions': {'keys': ['Get']}}]}]
})
p.run()
access_policy_action_mock.assert_called()
def test_transform_access_policies(self):
mock_access_policies = [{"object-id": "mockObjectId",
"tenant-id": "mockTenantId",
"permissions": {"keys": ["Get"]}}]
transformed_access_policies = KeyVaultUpdateAccessPolicyAction._transform_access_policies(
mock_access_policies).get("accessPolicies")[0]
self.assertTrue("objectId" in transformed_access_policies)
self.assertTrue("tenantId" in transformed_access_policies)
self.assertTrue("permissions" in transformed_access_policies)
def _get_key_vault_client_string(self):
client = local_session(Session) \
.client('azure.mgmt.keyvault.KeyVaultManagementClient').vaults
return client.__module__ + '.' + client.__class__.__name__
@arm_template('keyvault.json')
@cassette_name('common')
def test_firewall_rules_include(self):
p = self.load_policy({
'name': 'test-azure-keyvault',
'resource': 'azure.keyvault',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value': 'cckeyvault1*'},
{'type': 'firewall-rules',
'include': ['1.0.0.0']}],
})
resources = p.run()
self.assertEqual(len(resources), 1)
@arm_template('keyvault.json')
@cassette_name('common')
def test_firewall_rules_not_include_all_ranges(self):
p = self.load_policy({
'name': 'test-azure-keyvault',
'resource': 'azure.keyvault',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value': 'cckeyvault1*'},
{'type': 'firewall-rules',
'include': ['1.0.0.0', '127.0.0.1']}],
}, validate=True)
resources = p.run()
self.assertEqual(0, len(resources))
@arm_template('keyvault.json')
@cassette_name('common')
def test_firewall_rules_include_cidr(self):
p = self.load_policy({
'name': 'test-azure-keyvault',
'resource': 'azure.keyvault',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value': 'cckeyvault1*'},
{'type': 'firewall-rules',
'include': ['128.0.0.0/1']}],
}, validate=True)
resources = p.run()
self.assertEqual(1, len(resources))
@arm_template('keyvault.json')
@cassette_name('common')
def test_firewall_rules_not_include_cidr(self):
p = self.load_policy({
'name': 'test-azure-keyvault',
'resource': 'azure.keyvault',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value': 'cckeyvault1*'},
{'type': 'firewall-rules',
'include': ['127.0.0.0/8']}],
}, validate=True)
resources = p.run()
self.assertEqual(0, len(resources))
@arm_template('keyvault.json')
@cassette_name('common')
def test_firewall_rules_equal(self):
p = self.load_policy({
'name': 'test-azure-keyvault',
'resource': 'azure.keyvault',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value': 'cckeyvault1*'},
{'type': 'firewall-rules',
'equal': ['0.0.0.0-126.255.255.255', '128.0.0.0-255.255.255.255']}],
}, validate=True)
resources = p.run()
self.assertEqual(1, len(resources))
@arm_template('keyvault.json')
@cassette_name('common')
def test_firewall_rules_not_equal(self):
p = self.load_policy({
'name': 'test-azure-keyvault',
'resource': 'azure.keyvault',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'glob',
'value': 'cckeyvault1*'},
{'type': 'firewall-rules',
'equal': ['0.0.0.0-126.255.255.255', '128.0.0.0-255.255.255.254']}],
}, validate=True)
resources = p.run()
self.assertEqual(0, len(resources))
@arm_template('keyvault.json')
@cassette_name('common')
def test_firewall_bypass(self):
p = self.load_policy({
'name': 'test-azure-keyvault',
'resource': 'azure.keyvault',
'filters': [
{'type': 'firewall-bypass',
'mode': 'equal',
'list': ['AzureServices']}],
})
resources = p.run()
self.assertEqual(1, len(resources))
class KeyVaultFirewallFilterTest(BaseTest):
def test_query_empty_network_acl(self):
resource = {'properties': {}}
expected = IPSet(['0.0.0.0/0'])
self.assertEqual(expected, self._get_filter()._query_rules(resource))
def test_query_default_action_allow(self):
resource = {'properties': {'networkAcls': {'defaultAction': 'Allow'}}}
expected = IPSet(['0.0.0.0/0'])
self.assertEqual(expected, self._get_filter()._query_rules(resource))
def test_query_default_action_deny(self):
resource = {'properties': {'networkAcls': {'defaultAction': 'Deny',
'ipRules': [{'value': '10.0.0.0/16'},
{'value': '8.8.8.8'}]}}}
expected = IPSet(['8.8.8.8', '10.0.0.0/16'])
self.assertEqual(expected, self._get_filter()._query_rules(resource))
def _get_filter(self, mode='equal'):
data = {mode: ['10.0.0.0/8', '127.0.0.1']}
return KeyVaultFirewallRulesFilter(data, Mock())
class KeyVaultFirewallBypassFilterTest(BaseTest):
scenarios = [
[{}, []],
[{'networkAcls': {'defaultAction': 'Allow', 'bypass': ''}}, ['AzureServices']],
[{'networkAcls': {'defaultAction': 'Deny', 'bypass': ''}}, []],
[{'networkAcls': {'defaultAction': 'Deny', 'bypass': 'AzureServices'}},
['AzureServices']],
]
@parameterized.expand(scenarios)
def test_run(self, properties, expected):
resource = {'properties': properties}
f = KeyVaultFirewallBypassFilter({'mode': 'equal', 'list': []})
self.assertEqual(expected, f._query_bypass(resource))
|
nilq/baby-python
|
python
|
from django.apps import AppConfig
class ScalprumConfig(AppConfig):
name = 'scalprum'
|
nilq/baby-python
|
python
|
from typing import cast
import pytest
from parse import compile
from json import dumps as jsondumps
from behave.model import Table, Row
from grizzly.context import GrizzlyContext
from grizzly.types import RequestMethod, RequestDirection
from grizzly.tasks import TransformerTask, LogMessage, WaitTask
from grizzly.tasks.clients import HttpClientTask
from grizzly.steps import * # pylint: disable=unused-wildcard-import # noqa: F403
from grizzly_extras.transformer import TransformerContentType
from ....fixtures import BehaveFixture
def test_parse_method() -> None:
p = compile(
'value {method:Method} world',
extra_types=dict(
Method=parse_method,
),
)
for method in RequestMethod:
assert p.parse(f'value {method.name} world')['method'] == method
with pytest.raises(ValueError):
p.parse('value asdf world')
def test_parse_direction() -> None:
p = compile(
'value {direction:Direction} world',
extra_types=dict(
Direction=parse_direction,
),
)
for direction in RequestDirection:
assert p.parse(f'value {direction.name} world')['direction'] == direction
with pytest.raises(ValueError):
p.parse('value asdf world')
def test_step_task_request_with_name_to_endpoint_until(behave_fixture: BehaveFixture) -> None:
behave = behave_fixture.context
grizzly = cast(GrizzlyContext, behave.grizzly)
assert len(grizzly.scenario.tasks) == 0
with pytest.raises(AssertionError) as ae:
step_task_request_with_name_to_endpoint_until(behave, RequestMethod.POST, 'test', '/api/test', '$.`this`[?status="ready"]')
assert 'this step is only valid for request methods with direction FROM' in str(ae)
behave.text = 'foo bar'
with pytest.raises(AssertionError) as ae:
step_task_request_with_name_to_endpoint_until(behave, RequestMethod.GET, 'test', '/api/test', '$.`this`[?status="ready"]')
assert 'this step does not have support for step text' in str(ae)
behave.text = None
with pytest.raises(ValueError) as ve:
step_task_request_with_name_to_endpoint_until(behave, RequestMethod.GET, 'test', '/api/test', '$.`this`[?status="ready"]')
assert 'content type must be specified for request' in str(ve)
step_task_request_with_name_to_endpoint_until(behave, RequestMethod.GET, 'test', '/api/test | content_type=json', '$.`this`[?status="ready"]')
assert len(grizzly.scenario.tasks) == 1
rows: List[Row] = []
rows.append(Row(['endpoint'], ['{{ variable }}']))
rows.append(Row(['endpoint'], ['foo']))
rows.append(Row(['endpoint'], ['bar']))
behave.table = Table(['endpoint'], rows=rows)
step_task_request_with_name_to_endpoint_until(behave, RequestMethod.GET, 'test', '/api/{{ endpoint }} | content_type=json', '$.`this`[?status="{{ endpoint }}"]')
assert len(grizzly.scenario.tasks) == 4
tasks = cast(List[UntilRequestTask], grizzly.scenario.tasks)
templates: List[str] = []
assert tasks[-1].request.endpoint == '/api/bar'
assert tasks[-1].condition == '$.`this`[?status="bar"]'
templates += tasks[-1].get_templates()
assert tasks[-2].request.endpoint == '/api/foo'
assert tasks[-2].condition == '$.`this`[?status="foo"]'
templates += tasks[-2].get_templates()
assert tasks[-3].request.endpoint == '/api/{{ variable }}'
assert tasks[-3].condition == '$.`this`[?status="{{ variable }}"]'
templates += tasks[-3].get_templates()
assert len(templates) == 2
assert sorted(templates) == sorted([
'$.`this`[?status="{{ variable }}"]',
'/api/{{ variable }}',
])
@pytest.mark.parametrize('method', RequestDirection.TO.methods)
def test_step_task_request_file_with_name_endpoint(behave_fixture: BehaveFixture, method: RequestMethod) -> None:
behave = behave_fixture.context
step_task_request_file_with_name_endpoint(behave, method, '{}', 'the_name', 'the_container')
@pytest.mark.parametrize('method', RequestDirection.FROM.methods)
def test_step_task_request_file_with_name_endpoint_wrong_direction(behave_fixture: BehaveFixture, method: RequestMethod) -> None:
behave = behave_fixture.context
with pytest.raises(AssertionError) as ae:
step_task_request_file_with_name_endpoint(behave, method, '{}', 'the_name', 'the_container')
assert f'{method.name} is not allowed' in str(ae)
@pytest.mark.parametrize('method', RequestDirection.TO.methods)
def test_step_task_request_file_with_name(behave_fixture: BehaveFixture, method: RequestMethod) -> None:
behave = behave_fixture.context
with pytest.raises(ValueError):
step_task_request_file_with_name(behave, method, '{}', f'{method.name}-test')
step_task_request_file_with_name_endpoint(behave, method, '{}', f'{method.name}-test', f'/api/test/{method.name.lower()}')
step_task_request_file_with_name(behave, method, '{}', f'{method.name}-test')
@pytest.mark.parametrize('method', RequestDirection.FROM.methods)
def test_step_task_request_file_with_name_wrong_direction(behave_fixture: BehaveFixture, method: RequestMethod) -> None:
behave = behave_fixture.context
with pytest.raises(AssertionError) as ae:
# step_request_to_payload_file_with_name_endpoint(behave, method, '{}', f'{method.name}-test', f'/api/test/{method.name.lower()}')
step_task_request_file_with_name(behave, method, '{}', f'{method.name}-test')
assert f'{method.name} is not allowed' in str(ae)
@pytest.mark.parametrize('method', RequestDirection.TO.methods)
def test_step_task_request_text_with_name_to_endpoint_to(behave_fixture: BehaveFixture, method: RequestMethod) -> None:
behave = behave_fixture.context
behave.text = '{}'
step_task_request_text_with_name_to_endpoint(behave, method, 'test-name', RequestDirection.TO, '/api/test')
with pytest.raises(AssertionError) as ae:
step_task_request_text_with_name_to_endpoint(behave, method, 'test-name', RequestDirection.FROM, '/api/test')
assert f'"from endpoint" is not allowed for {method.name}, use "to endpoint"' in str(ae)
@pytest.mark.parametrize('method', RequestDirection.FROM.methods)
def test_step_task_request_text_with_name_to_endpoint_from(behave_fixture: BehaveFixture, method: RequestMethod) -> None:
behave = behave_fixture.context
behave.text = '{}'
with pytest.raises(AssertionError) as ae:
step_task_request_text_with_name_to_endpoint(behave, method, 'test-name', RequestDirection.TO, '/api/test')
assert f'step text is not allowed for {method.name}' in str(ae)
with pytest.raises(AssertionError) as ae:
step_task_request_text_with_name_to_endpoint(behave, method, 'test-name', RequestDirection.FROM, '/api/test')
assert f'step text is not allowed for {method.name}' in str(ae)
@pytest.mark.parametrize('method', RequestDirection.FROM.methods)
def test_step_task_request_text_with_name_to_endpoint_no_text(behave_fixture: BehaveFixture, method: RequestMethod) -> None:
behave = behave_fixture.context
behave.text = None
step_task_request_text_with_name_to_endpoint(behave, method, 'test-name', RequestDirection.FROM, '/api/test')
with pytest.raises(AssertionError) as ae:
step_task_request_text_with_name_to_endpoint(behave, method, 'test-name', RequestDirection.TO, '/api/test')
assert f'"to endpoint" is not allowed for {method.name}, use "from endpoint"' in str(ae)
def test_step_task_request_text_with_name_to_endpoint_no_direction(behave_fixture: BehaveFixture) -> None:
behave = behave_fixture.context
with pytest.raises(AssertionError) as ae:
step_task_request_text_with_name_to_endpoint(behave, 'GET', 'test-name', 'asdf', '/api/test')
assert 'invalid direction specified in expression' in str(ae)
def test_step_task_request_text_with_name(behave_fixture: BehaveFixture) -> None:
behave = behave_fixture.context
behave.text = '{}'
with pytest.raises(ValueError):
step_task_request_text_with_name(behave, RequestMethod.POST, 'test-name')
step_task_request_text_with_name_to_endpoint(behave, RequestMethod.POST, 'test-name', RequestDirection.TO, '/api/test')
behave.text = None
with pytest.raises(ValueError):
step_task_request_text_with_name(behave, RequestMethod.GET, 'test-name')
with pytest.raises(AssertionError):
step_task_request_text_with_name(behave, RequestMethod.POST, 'test-name')
behave.text = '{}'
step_task_request_text_with_name(behave, RequestMethod.POST, 'test-name')
def test_step_task_wait_seconds(behave_fixture: BehaveFixture) -> None:
behave = behave_fixture.context
grizzly = cast(GrizzlyContext, behave.grizzly)
with pytest.raises(AssertionError):
step_task_wait_seconds(behave, -1.0)
step_task_wait_seconds(behave, 1.337)
assert isinstance(grizzly.scenario.tasks[-1], WaitTask)
assert grizzly.scenario.tasks[-1].time == 1.337
def test_step_task_print_message(behave_fixture: BehaveFixture) -> None:
behave = behave_fixture.context
grizzly = cast(GrizzlyContext, behave.grizzly)
step_task_print_message(behave, 'hello {{ world }}')
assert isinstance(grizzly.scenario.tasks[-1], LogMessage)
assert grizzly.scenario.tasks[-1].message == 'hello {{ world }}'
def test_step_task_transform(behave_fixture: BehaveFixture) -> None:
behave = behave_fixture.context
grizzly = cast(GrizzlyContext, behave.grizzly)
with pytest.raises(ValueError) as ve:
step_task_transform(
behave,
jsondumps({
'document': {
'id': 'DOCUMENT_8483-1',
'title': 'TPM Report 2020',
},
}),
TransformerContentType.JSON,
'$.document.id',
'document_id',
)
assert 'TransformerTask: document_id has not been initialized' in str(ve)
grizzly.state.variables['document_id'] = 'None'
step_task_transform(
behave,
jsondumps({
'document': {
'id': 'DOCUMENT_8483-1',
'title': 'TPM Report 2020',
},
}),
TransformerContentType.JSON,
'$.document.id',
'document_id',
)
task = grizzly.scenario.tasks[-1]
assert isinstance(task, TransformerTask)
assert task.content_type == TransformerContentType.JSON
assert task.expression == '$.document.id'
assert task.variable == 'document_id'
assert len(grizzly.scenario.orphan_templates) == 0
step_task_transform(
behave,
jsondumps({
'document': {
'id': 'DOCUMENT_8483-1',
'title': 'TPM Report {{ year }}',
},
}),
TransformerContentType.JSON,
'$.document.id',
'document_id',
)
templates = grizzly.scenario.tasks[-1].get_templates()
assert len(templates) == 1
assert templates[-1] == jsondumps({
'document': {
'id': 'DOCUMENT_8483-1',
'title': 'TPM Report {{ year }}',
},
})
def test_step_task_client_get_endpoint(behave_fixture: BehaveFixture) -> None:
behave = behave_fixture.context
grizzly = cast(GrizzlyContext, behave.grizzly)
with pytest.raises(AssertionError) as ae:
step_task_client_get_endpoint(behave, 'mq.example.com', 'test')
assert 'could not find scheme in "mq.example.com"' in str(ae)
with pytest.raises(AssertionError) as ae:
step_task_client_get_endpoint(behave, 'mq://mq.example.com', 'test')
assert 'no client task registered for mq' in str(ae)
with pytest.raises(ValueError) as ve:
step_task_client_get_endpoint(behave, 'http://www.example.org', 'test')
assert 'HttpClientTask: variable test has not been initialized' in str(ve)
grizzly.state.variables['test'] = 'none'
assert len(grizzly.scenario.tasks) == 0
step_task_client_get_endpoint(behave, 'http://www.example.org', 'test')
assert len(grizzly.scenario.tasks) == 1
assert isinstance(grizzly.scenario.tasks[-1], HttpClientTask)
grizzly.state.variables['endpoint_url'] = 'https://example.org'
step_task_client_get_endpoint(behave, 'https://{{ endpoint_url }}', 'test')
task = grizzly.scenario.tasks[-1]
assert task.endpoint == '{{ endpoint_url }}'
def test_step_task_date(behave_fixture: BehaveFixture) -> None:
behave = behave_fixture.context
grizzly = cast(GrizzlyContext, behave.grizzly)
with pytest.raises(AssertionError) as ae:
step_task_date(behave, '{{ datetime.now() }} | offset=1D', 'date_variable')
assert 'variable date_variable has not been initialized' in str(ae)
grizzly.state.variables['date_variable'] = 'none'
step_task_date(behave, '{{ datetime.now() }} | offset=1D', 'date_variable')
assert len(grizzly.scenario.tasks) == 1
assert isinstance(grizzly.scenario.tasks[-1], DateTask)
task = grizzly.scenario.tasks[-1]
assert task.value == '{{ datetime.now() }}'
assert task.variable == 'date_variable'
assert task.arguments.get('offset') == '1D'
templates = task.get_templates()
assert len(templates) == 1
assert templates[0] == '{{ datetime.now() }}'
def test_step_task_client_put_endpoint_file_destination(behave_fixture: BehaveFixture) -> None:
behave = behave_fixture.context
grizzly = cast(GrizzlyContext, behave.grizzly)
behave.text = 'hello'
assert len(grizzly.scenario.orphan_templates) == 0
assert len(grizzly.scenario.tasks) == 0
with pytest.raises(AssertionError) as ae:
step_task_client_put_endpoint_file_destination(behave, 'file.json', 'http://example.org/put', 'uploaded-file.json')
assert 'step text is not allowed for this step expression' in str(ae.value)
behave.text = None
with pytest.raises(AssertionError) as ae:
step_task_client_put_endpoint_file_destination(behave, 'file-{{ suffix }}.json', 'http://{{ url }}', 'uploaded-file-{{ suffix }}.json')
assert 'source file cannot be a template' == str(ae.value)
step_task_client_put_endpoint_file_destination(behave, 'file-test.json', 'http://{{ url }}', 'uploaded-file-{{ suffix }}.json')
assert len(grizzly.scenario.tasks) == 1
task = grizzly.scenario.tasks[-1]
assert isinstance(task, HttpClientTask)
assert task.source == 'file-test.json'
assert task.destination == 'uploaded-file-{{ suffix }}.json'
assert task.endpoint == '{{ url }}'
templates = task.get_templates()
assert len(templates) == 2
assert sorted(templates) == sorted([
'{{ url }}',
'uploaded-file-{{ suffix }}.json',
])
def test_step_task_async_group_start(behave_fixture: BehaveFixture) -> None:
behave = behave_fixture.context
grizzly = behave_fixture.grizzly
assert getattr(grizzly.scenario, 'async_group', '') is None
step_task_async_group_start(behave, 'async-test-1')
assert grizzly.scenario.async_group is not None
assert grizzly.scenario.async_group.name == 'async-test-1'
with pytest.raises(AssertionError) as ae:
step_task_async_group_start(behave, 'async-test-2')
assert str(ae.value) == 'async request group "async-test-1" has not been closed'
def test_step_task_async_group_end(behave_fixture: BehaveFixture) -> None:
behave = behave_fixture.context
grizzly = behave_fixture.grizzly
assert len(grizzly.scenario.tasks) == 0
assert getattr(grizzly.scenario, 'async_group', '') is None
with pytest.raises(AssertionError) as ae:
step_task_async_group_close(behave)
assert str(ae.value) == 'no async request group is open'
step_task_async_group_start(behave, 'async-test-1')
with pytest.raises(AssertionError) as ae:
step_task_async_group_close(behave)
assert str(ae.value) == 'there are no requests in async group "async-test-1"'
assert grizzly.scenario.async_group is not None
step_task_request_text_with_name_to_endpoint(behave, RequestMethod.GET, 'test', direction=RequestDirection.FROM, endpoint='/api/test')
assert len(grizzly.scenario.tasks) == 0
step_task_async_group_close(behave)
assert len(grizzly.scenario.tasks) == 1
assert grizzly.scenario.async_group is None
|
nilq/baby-python
|
python
|
from enum import Enum
from parse import parse
from datetime import datetime
import json
class CDFLogType(Enum):
NEW_COREHDF_INSTANCE = 1
PERSON_DETECTED = 2
NOTHING_DETECTED = 3
CANNOT_BE_INFERRED = 4
class CDFLog:
def __init__(self, logfile: str = 'log.txt'):
self.file_handler = open(logfile, 'r')
self.preprocess_cache = []
def get_last_logs(self, n: int, force_reload=False, reverse=False):
temp = []
if not len(self.preprocess_cache) or force_reload:
while line := self.file_handler.readline():
self.preprocess_cache.append(line)
for line in self.preprocess_cache[-n:]:
temp.append(CDFContext(line))
return temp if not reverse else list(reversed(temp))
def get_detected(self, limit: int = -1, force_reload=False):
temp = []
logs = self.get_last_logs(0, force_reload=force_reload, reverse=True)
        for log in logs:
            if log.infer_type() == CDFLogType.PERSON_DETECTED:
                # intentional bypass for limit = -1 ("no limit")
                if limit == 0:
                    break
                limit -= 1
                temp.append(log)
return temp
    def clear_logs(self, limit: int = -1):
        # stub - not implemented yet
        pass
class CDFContext:
def __init__(self, report: str):
parse_result = parse('[{level}/{time}] {message}', report)
self.level = parse_result['level']
self.datetime = datetime.strptime(parse_result['time'].split(',')[0], '%Y-%m-%d %H:%M:%S')
self.datetime_raw = parse_result['time']
self.message = parse_result['message']
def infer_type(self):
if self.message.startswith('Created'):
return CDFLogType.NEW_COREHDF_INSTANCE
if self.message.startswith('Detected'):
return CDFLogType.PERSON_DETECTED
if self.message.startswith('No person'):
return CDFLogType.NOTHING_DETECTED
return CDFLogType.CANNOT_BE_INFERRED
def json(self):
return json.dumps({'level': self.level,
'datetime': self.datetime_raw,
'message': self.message})
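# Example of the log format CDFContext expects (line contents hypothetical):
#
#   [INFO/2021-05-01 12:34:56,789] Detected person in frame
#
# Parsing that line yields level='INFO', message='Detected person in frame',
# datetime=datetime(2021, 5, 1, 12, 34, 56), and
# infer_type() == CDFLogType.PERSON_DETECTED.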
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
NAME: github-reqs.py
AUTHOR: Ulyouth
VERSION: 1.0.0
DATE: 15.10.2020
DESC: A PyBullet-based script to check which GitHub logins are valid
using requests library.
"""
from chkutils import ChkUtils
def chkMain(ss, test, rst, captcha, data):
# Good practice, since 'data' can be both a list or string variable,
# depending on the number of elements in each line
if isinstance(data, list):
user = data[0]
pswd = data[1]
else:
# -200 = Exception = Terminate program!
return [-200, 'Invalid list format']
# Class containing a list of useful functions.
chk = ChkUtils()
# Login GET link.
lnk = 'https://github.com/login'
# Retrieve the login page.
r = chk.getnlog(ss, lnk, 'login.htm', 'github', user)
# Obtain the necessary login tokens.
auth_tok = chk.grab(r.text, 'authenticity_token" value="', '"')
tstamp = chk.grab(r.text, 'timestamp" value="', '"')
tsecret = chk.grab(r.text, 'timestamp_secret" value="', '"')
# Check if any tokens are missing.
if len(auth_tok) == 0 or len(tstamp) == 0 or len(tsecret) == 0:
# -1 = Error = Retry!
return [-1, 'Missing token']
elif test == 1:
# Print the tokens if running in test mode.
print('> authenticity_token: ' + auth_tok)
print('> timestamp: ' + tstamp)
print('> timestamp_secret: ' + tsecret)
# Login POST link
lnk = 'https://github.com/session'
# Login POST data dict
data = {'commit': 'Sign in',
'authenticity_token': auth_tok,
            # Not sure what 'ga_id' is for, but it works when always
            # using the same value.
'ga_id': '1348735984.1584973938',
'login': user,
'password': pswd,
'webauthn-support': 'supported',
'webauthn-iuvpaa-support': 'unsupported',
'return_to': '',
'allow_signup': '',
'client_id': '',
'integration': '',
'required_field_d202': '',
'timestamp': tstamp,
'timestamp_secret': tsecret }
# Attempt to login.
r = chk.postnlog(ss, lnk, 'login.htm', 'github', user, data = data)
# Evaluate the login attempt.
if r.text.find('Signed in as') != -1:
return [100, user] # 100 = Valid password (display in green)
elif r.text.find('Incorrect username or password.') != -1:
return [200, user] # 200 = Invalid password (display in red)
elif r.text.find('There have been several failed attempts') != -1:
return [-2, user] # -2 = Error = Retry!
else:
return [0, user] # 0 = Unknown = Skip (display in yellow)
|
nilq/baby-python
|
python
|
def build_nn(params):
    (seq_length, vocabulary_size, layers, embedding_dim,
     upside_dim, downside_dim, lr, dropout) = (
        params['seq_length'], params['vocabulary_size'], params['layers'],
        params['embedding_dim'], params['upside_dim'], params['downside_dim'],
        params['lr'], params['dropout'])
from tensorflow.keras import Input
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from tensorflow.keras.layers import GRU, Dense, Embedding, concatenate
embedding = Embedding(input_dim=vocabulary_size, input_length=seq_length, output_dim=embedding_dim, mask_zero=True)
upsideInput = Input(shape=(seq_length, ), name='upside_inp')
upside_i = embedding(upsideInput)
for i in range(layers):
upside_i = GRU(upside_dim, return_sequences=i < layers - 1, name='upside_%d' % (i + 1), dropout=dropout)(upside_i)
downsideInput = Input(shape=(seq_length, ), name='downside_inp')
downside_i = embedding(downsideInput)
for i in range(layers):
downside_i = GRU(downside_dim, return_sequences=i < layers - 1, name='downside_%d' % (i + 1), dropout=dropout)(downside_i)
output = Dense(1, activation='sigmoid')(concatenate([upside_i, downside_i]))
model = Model(
inputs=[upsideInput, downsideInput],
outputs=[output]
)
model.compile(loss='binary_crossentropy', optimizer=Adam(lr=lr), metrics=['accuracy'])
return model
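# Example invocation (hyperparameter values are illustrative only):
#
#   model = build_nn({
#       'seq_length': 50,
#       'vocabulary_size': 10000,
#       'layers': 2,
#       'embedding_dim': 128,
#       'upside_dim': 64,
#       'downside_dim': 64,
#       'lr': 1e-3,
#       'dropout': 0.2,
#   })
#   model.summary()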
|
nilq/baby-python
|
python
|
import numpy as np
from keras.datasets import cifar10
from keras.models import Sequential, Model
from keras.layers import Input, Dense, LeakyReLU, BatchNormalization, ReLU
from keras.layers import Conv2D, Conv2DTranspose, Reshape, Flatten
from keras.optimizers import Adam
from keras import initializers
from keras.utils import plot_model, np_utils
from keras import backend as K
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt  # needed for the sample plots at the end
image_generator = ImageDataGenerator(rescale=1)
image_data = image_generator.flow_from_directory('/content/image',
#color_mode = "grayscale",
target_size = (64,64),
batch_size = 9993,
class_mode = None)
X_train = image_data.next()
print('data shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
X_train = np.float32(X_train)
X_train = (X_train / 255 - 0.5) * 2
X_train = np.clip(X_train, -1, 1)
latent_dim = 100
init = initializers.RandomNormal(stddev=0.02)
generator = Sequential()
generator.add(Dense(4*4*512, input_shape=(latent_dim,), kernel_initializer=init))
generator.add(Reshape((4, 4, 512)))
generator.add(BatchNormalization())
generator.add(LeakyReLU(0.2))
generator.add(Conv2DTranspose(256, kernel_size=5, strides=2, padding='same'))
generator.add(BatchNormalization())
generator.add(LeakyReLU(0.2))
generator.add(Conv2DTranspose(128, kernel_size=5, strides=2, padding='same'))
generator.add(BatchNormalization())
generator.add(LeakyReLU(0.2))
generator.add(Conv2DTranspose(64, kernel_size=5, strides=2, padding='same'))
generator.add(BatchNormalization())
generator.add(LeakyReLU(0.2))
generator.add(Conv2DTranspose(3, kernel_size=5, strides=2, padding='same',
activation='tanh'))
img_shape = X_train[0].shape
discriminator = Sequential()
discriminator.add(Conv2D(64, kernel_size=5, strides=2, padding='same',
input_shape=(img_shape), kernel_initializer=init))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Conv2D(128, kernel_size=5, strides=2, padding='same'))
discriminator.add(BatchNormalization())
discriminator.add(LeakyReLU(0.2))
discriminator.add(Conv2D(256, kernel_size=5, strides=2, padding='same'))
discriminator.add(BatchNormalization())
discriminator.add(LeakyReLU(0.2))
discriminator.add(Conv2D(512, kernel_size=5, strides=2, padding='same'))
discriminator.add(BatchNormalization())
discriminator.add(LeakyReLU(0.2))
discriminator.add(Flatten())
discriminator.add(Dense(1, activation='sigmoid'))
discriminator.compile(Adam(lr=0.0003, beta_1=0.5), loss='binary_crossentropy',
metrics=['binary_accuracy'])
discriminator.trainable = False
z = Input(shape=(latent_dim,))
img = generator(z)
decision = discriminator(img)
d_g = Model(inputs=z, outputs=decision)
d_g.compile(Adam(lr=0.0004, beta_1=0.5), loss='binary_crossentropy',
metrics=['binary_accuracy'])
epochs = 4000
batch_size = 32
smooth = 0.1
real = np.ones(shape=(batch_size, 1))
fake = np.zeros(shape=(batch_size, 1))
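# One-sided label smoothing: real targets are scaled to 0.9 below so the
# discriminator does not become overconfident; fake targets stay at 0.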
d_loss = []
g_loss = []
for e in range(epochs + 1):
for i in range(len(X_train) // batch_size):
# Train Discriminator weights
discriminator.trainable = True
# Real samples
X_batch = X_train[i*batch_size:(i+1)*batch_size]
d_loss_real = discriminator.train_on_batch(x=X_batch,
y=real * (1 - smooth))
# Fake Samples
z = np.random.normal(loc=0, scale=1, size=(batch_size, latent_dim))
X_fake = generator.predict_on_batch(z)
d_loss_fake = discriminator.train_on_batch(x=X_fake, y=fake)
# Discriminator loss
d_loss_batch = 0.5 * (d_loss_real[0] + d_loss_fake[0])
# Train Generator weights
discriminator.trainable = False
g_loss_batch = d_g.train_on_batch(x=z, y=real)
print(
'epoch = %d/%d, batch = %d/%d, d_loss=%.3f, g_loss=%.3f' % (e + 1, epochs, i, len(X_train) // batch_size, d_loss_batch, g_loss_batch[0]),
100*' ',
end='\r'
)
d_loss.append(d_loss_batch)
g_loss.append(g_loss_batch[0])
print('epoch = %d/%d, d_loss=%.3f, g_loss=%.3f' % (e + 1, epochs, d_loss[-1], g_loss[-1]), 100*' ')
if e % 10 == 0:
samples = 10
x_fake = generator.predict(np.random.normal(loc=0, scale=1, size=(samples, latent_dim)))
for k in range(samples):
plt.subplot(2, 5, k + 1, xticks=[], yticks=[])
plt.imshow(((x_fake[k] + 1)* 127).astype(np.uint8))
plt.tight_layout()
        plt.savefig('fig%d.png' % e)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import os
import sys
import glob
import hashlib
import itertools
sys.path.insert(0, os.pardir)
from testing_harness import PyAPITestHarness
import openmc
class TallySliceMergeTestHarness(PyAPITestHarness):
def _build_inputs(self):
# The summary.h5 file needs to be created to read in the tallies
self._input_set.settings.output = {'summary': True}
# Initialize the tallies file
tallies_file = openmc.Tallies()
# Define nuclides and scores to add to both tallies
self.nuclides = ['U-235', 'U-238']
self.scores = ['fission', 'nu-fission']
# Define filters for energy and spatial domain
low_energy = openmc.Filter(type='energy', bins=[0., 0.625e-6])
high_energy = openmc.Filter(type='energy', bins=[0.625e-6, 20.])
merged_energies = low_energy.merge(high_energy)
cell_21 = openmc.Filter(type='cell', bins=[21])
cell_27 = openmc.Filter(type='cell', bins=[27])
distribcell_filter = openmc.Filter(type='distribcell', bins=[21])
self.cell_filters = [cell_21, cell_27]
self.energy_filters = [low_energy, high_energy]
# Initialize cell tallies with filters, nuclides and scores
tallies = []
        for cell_filter in self.cell_filters:
            for energy_filter in self.energy_filters:
for nuclide in self.nuclides:
for score in self.scores:
tally = openmc.Tally()
tally.estimator = 'tracklength'
tally.add_score(score)
tally.add_nuclide(nuclide)
tally.add_filter(cell_filter)
tally.add_filter(energy_filter)
tallies.append(tally)
# Merge all cell tallies together
while len(tallies) != 1:
halfway = int(len(tallies) / 2)
zip_split = zip(tallies[:halfway], tallies[halfway:])
tallies = list(map(lambda xy: xy[0].merge(xy[1]), zip_split))
# Specify a name for the tally
tallies[0].name = 'cell tally'
# Initialize a distribcell tally
distribcell_tally = openmc.Tally(name='distribcell tally')
distribcell_tally.estimator = 'tracklength'
distribcell_tally.add_filter(distribcell_filter)
distribcell_tally.add_filter(merged_energies)
for score in self.scores:
distribcell_tally.add_score(score)
for nuclide in self.nuclides:
distribcell_tally.add_nuclide(nuclide)
# Add tallies to a Tallies object
tallies_file = openmc.Tallies((tallies[0], distribcell_tally))
# Export tallies to file
self._input_set.tallies = tallies_file
super(TallySliceMergeTestHarness, self)._build_inputs()
def _get_results(self, hash_output=False):
"""Digest info in the statepoint and return as a string."""
# Read the statepoint file.
statepoint = glob.glob(os.path.join(os.getcwd(), self._sp_name))[0]
sp = openmc.StatePoint(statepoint)
# Extract the cell tally
tallies = [sp.get_tally(name='cell tally')]
# Slice the tallies by cell filter bins
cell_filter_prod = itertools.product(tallies, self.cell_filters)
tallies = map(lambda tf: tf[0].get_slice(filters=[tf[1].type],
filter_bins=[tf[1].get_bin(0)]), cell_filter_prod)
# Slice the tallies by energy filter bins
energy_filter_prod = itertools.product(tallies, self.energy_filters)
tallies = map(lambda tf: tf[0].get_slice(filters=[tf[1].type],
filter_bins=[(tf[1].get_bin(0),)]), energy_filter_prod)
# Slice the tallies by nuclide
nuclide_prod = itertools.product(tallies, self.nuclides)
tallies = map(lambda tn: tn[0].get_slice(nuclides=[tn[1]]), nuclide_prod)
# Slice the tallies by score
score_prod = itertools.product(tallies, self.scores)
tallies = map(lambda ts: ts[0].get_slice(scores=[ts[1]]), score_prod)
tallies = list(tallies)
# Initialize an output string
outstr = ''
# Append sliced Tally Pandas DataFrames to output string
for tally in tallies:
df = tally.get_pandas_dataframe()
outstr += df.to_string()
# Merge all tallies together
while len(tallies) != 1:
halfway = int(len(tallies) / 2)
zip_split = zip(tallies[:halfway], tallies[halfway:])
tallies = list(map(lambda xy: xy[0].merge(xy[1]), zip_split))
# Append merged Tally Pandas DataFrame to output string
df = tallies[0].get_pandas_dataframe()
outstr += df.to_string()
# Extract the distribcell tally
distribcell_tally = sp.get_tally(name='distribcell tally')
# Sum up a few subdomains from the distribcell tally
sum1 = distribcell_tally.summation(filter_type='distribcell',
filter_bins=[0,100,2000,30000])
# Sum up a few subdomains from the distribcell tally
sum2 = distribcell_tally.summation(filter_type='distribcell',
filter_bins=[500,5000,50000])
# Merge the distribcell tally slices
merge_tally = sum1.merge(sum2)
# Append merged Tally Pandas DataFrame to output string
df = merge_tally.get_pandas_dataframe()
outstr += df.to_string()
# Hash the results if necessary
if hash_output:
sha512 = hashlib.sha512()
sha512.update(outstr.encode('utf-8'))
outstr = sha512.hexdigest()
return outstr
def _cleanup(self):
super(TallySliceMergeTestHarness, self)._cleanup()
f = os.path.join(os.getcwd(), 'tallies.xml')
if os.path.exists(f): os.remove(f)
if __name__ == '__main__':
harness = TallySliceMergeTestHarness('statepoint.10.h5', True)
harness.main()
|
nilq/baby-python
|
python
|
from onnxquantizer import Quantizer
import config as cfg
import os
import cv2
import numpy as np
def prehandle(img_path, dst_size):
img = cv2.imread(img_path, cv2.IMREAD_COLOR)
resized = cv2.resize(img, dsize=(dst_size[1], dst_size[0]), interpolation=cv2.INTER_LINEAR)
return resized
def main():
    # Load the RPN model
rpn_model = Quantizer(model_path=cfg.FACE_DET_MODEL_PATH,
ini_file=cfg.FACE_DET_SAVE_QUANTI_PATH,
input_quanti_bits=cfg.FACE_DET_INPUT_QUANTI_BITS,
quanti_bits=cfg.FACE_DET_QUANTI_BITS,
output_quanti_method=cfg.QUANTI_OUTPUT_METHOD,
weight_quanti_type=cfg.WEIGHT_QUANTI_TYPE,
save_new_model_path=cfg.NEW_FACE_DET_MODEL_PATH,
quanti_layer_type=cfg.QANTI_LAYER_TYPE,
middle_layer_output_shape=cfg.FACE_DET_MIDDLE_LAYER_OUTPUT_SHAPE,
merge_layer_type=cfg.MERGE_LAYER_TYPE,
merge_layer_indexs=cfg.FACE_DET_MERGE_LAYER_SHAPE_INDEX,
merge_layer_shapes=cfg.FACE_DET_MERGE_LAYER_SHAPE,
dequan_layer_name=cfg.FACE_DET_DEQUANTI_LAYER_NAME,
do_detection=False,
input_do_quanti=False)
file_list = os.listdir(cfg.QUANTI_DATA_PATH)
for file_name in file_list:
file_name = cfg.QUANTI_DATA_PATH + file_name
input_data = prehandle(file_name, cfg.IMG_SHAPE)
input_data = input_data.transpose(2, 0, 1)
        input_data_ = input_data.flatten().reshape(cfg.FACE_DET_INPUT_SHAPE)
rpn_model.forword(np.array(input_data_.astype(np.float32)))
print('*********************************************')
# break
print('save param...')
rpn_model.save_param()
return
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
import logging
import logging.config
def configure_logging(config, disable_existing=False):
"""
Set up (process-global!) loggers according to given app configuration.
Look for 'logging' key in [app] config section, which should be the path to
a logging config file in the format expected by logging.config.fileConfig.
"""
logging_config = config.getpath('app.logging', None)
if logging_config is not None:
logging.config.fileConfig(
logging_config, disable_existing_loggers=disable_existing)
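# A minimal usage sketch, assuming a config object that exposes getpath() as
# used above; DummyConfig is an illustrative stand-in, not the real config API.
if __name__ == '__main__':
    class DummyConfig:
        def __init__(self, settings):
            self._settings = settings
        def getpath(self, key, default=None):
            return self._settings.get(key, default)
    # With 'app.logging' unset, configure_logging() is a no-op.
    configure_logging(DummyConfig({}))
    # Pointing 'app.logging' at a fileConfig-format ini file would set up loggers:
    # configure_logging(DummyConfig({'app.logging': 'logging.ini'}))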
|
nilq/baby-python
|
python
|
# A simple demo of the mesh manager.
# Generates and renders a single tile with some ferns and trees
#
# INSTRUCTIONS:
#
# Launch from outside terrain, meaning launch with:
# python terrain/meshManager/main.py
import sys
sys.path.append(".")
from panda3d.core import *
from panda3d.core import Light,AmbientLight,DirectionalLight
from panda3d.core import NodePath
from panda3d.core import Vec3,Vec4,Mat4,VBase4,Point3
from direct.task.Task import Task
from direct.showbase.ShowBase import ShowBase
from terrain.meshManager import meshManager
from terrain.meshManager import treeFactory
from terrain.meshManager import fernFactory
base = ShowBase()
base.disableMouse()
class Flat():
def height(self,x,y): return 0
tf=treeFactory.TreeFactory()
ff=fernFactory.FernFactory()
factories=[tf,ff]
meshManager=meshManager.MeshManager(factories)
size=600.0
tileFactory=meshManager.tileFactory(size)
x=0.0
y=0.0
tile=Flat()
tileNode=tileFactory(x,y,tile)
tileNode.reparentTo(base.render)
dlight = DirectionalLight('dlight')
dlnp = render.attachNewNode(dlight)
dlnp.setHpr(0, 0, 0)
render.setLight(dlnp)
alight = AmbientLight('alight')
alnp = render.attachNewNode(alight)
render.setLight(alnp)
#rotating light to show that normals are calculated correctly
def updateLight(task):
base.camera.setHpr(task.time/50.0*360,0,0)
#base.camera.setP(0)
base.camera.setPos(size/2,size/2,5)
#base.camera.setPos(tileNode,2,task.time*4,5)
base.camera.setP(8)
    #meshManager.update(base.camera)
h=task.time/20.0*360+180
dlnp.setHpr(0,h,0)
h=h+90
h=h%360
h=min(h,360-h)
#h is now angle from straight up
hv=h/180.0
hv=1-hv
sunset=max(0,1.0-abs(hv-.5)*8)
sunset=min(1,sunset)
if hv>.5: sunset=1
#sunset=sunset**.2
sunset=VBase4(0.8, 0.5, 0.0, 1)*sunset
sun=max(0,hv-.5)*2*4
sun=min(sun,1)
dColor=(VBase4(0.8, 0.7, 0.7, 1)*sun*2+sunset)
dlight.setColor(dColor)
aColor=VBase4(0.1, 0.3, 0.8, 1)*sun*2.6+VBase4(0.2, 0.2, 0.3, 1)*2.0
alight.setColor(aColor*(5-dColor.length())*(1.0/5))
return Task.cont
taskMgr.add(updateLight, "rotating Light")
base.run()
|
nilq/baby-python
|
python
|
import os
import re
import discord
from discord import MessageType
from discord.commands import slash_command, Option, message_command
from discord.ext import commands
from sqlalchemy import desc
from . import XpSys
import PictureCreator
from PictureCreator.utils import ConvrterToCI
from models.Emojies import Emojie
from models.IgnorLists import IgnoreList
from models.Members import Member
from models.database import Session
from models.BoostLists import BoostList
session = Session()
class Profile(commands.Cog):
def __init__(self, bot):
self.bot = bot
XpSys.init()
@message_command(name="Получить профиль")
async def getProfile(self, ctx, message: discord.Message):
author = message.author
path = "Temp/{}.png".format(author.id)
info = session.query(Member) \
.filter(Member.MemberId == author.id) \
.filter(Member.ServerId == author.guild.id).first()
PictureCreator.CreateProfile(author, info).save(path)
file = discord.File(path, filename="profile.png")
await ctx.send(file=file)
os.remove(path)
@slash_command(
name='profile',
description="Выводит профиль пользователя."
)
async def profile(self, ctx,
member: Option(discord.Member, description="Выберите пользователя, чей профиль вывести",
required=False, default=None)):
if member:
author = member
else:
author = ctx.author
path = "Temp/{}.png".format(author.id)
info = session.query(Member) \
.filter(Member.MemberId == author.id) \
.filter(Member.ServerId == author.guild.id).first()
PictureCreator.CreateProfile(author, info).save(path)
file = discord.File(path, filename="profile.png")
await ctx.send(file=file)
os.remove(path)
@slash_command(
name='setbg',
description="Устанавливает задний фон для профиля. Приложить изображение или ссылку на изображение.",
)
async def setbg(self, ctx,
img: Option(discord.Attachment, "Изображение для заднего фона", required=False, default=None),
url: Option(str, "Ссылка на задний фон", required=False, default=None)):
if url:
try:
PictureCreator.SetBG(ctx.guild.id, ctx.author.id, url)
except:
await ctx.send('Некорректная ссылка на изображение.')
return
elif img:
if 'image' not in img.content_type:
await ctx.send('Некорректное изображение.')
return
PictureCreator.SetBG(ctx.guild.id, ctx.author.id, img.url)
else:
try:
os.remove("src/Images/Usr/{}/{}/profile.png".format(ctx.guild.id, ctx.author.id))
except:
pass
path = "Temp/{}.png".format(ctx.author.id)
info = session.query(Member) \
.filter(Member.MemberId == ctx.author.id) \
.filter(Member.ServerId == ctx.author.guild.id).first()
PictureCreator.CreateProfile(ctx.author, info).save(path)
file = discord.File(path, filename="profile.png")
await ctx.send(file=file)
os.remove(path)
@slash_command(
name='settext',
description="Задаёт подпись профиля."
)
async def settext(self, ctx, text: Option(str, description="Подпись профиля.", required=False, default="")):
member = session.query(Member) \
.filter(Member.MemberId == ctx.author.id) \
.filter(Member.ServerId == ctx.guild.id).first()
member.Info = text
path = "Temp/{}.png".format(ctx.author.id)
info = session.query(Member) \
.filter(Member.MemberId == ctx.author.id) \
.filter(Member.ServerId == ctx.author.guild.id).first()
PictureCreator.CreateProfile(ctx.author, info).save(path)
file = discord.File(path, filename="profile.png")
await ctx.send(file=file)
os.remove(path)
@message_command(name="Получить аватар")
async def getAvatar(self, ctx, message: discord.Message):
path = PictureCreator.utils.GetAvatarFromUrl(PictureCreator.GetAvatar(message.author, size=4096))
file = discord.File(path, filename="avatar.gif")
await ctx.send("Avatar " + message.author.name, file=file)
@slash_command(
name='avatar',
description="Выводит аватар пользователя."
)
async def avatar(self, ctx, member: Option(discord.Member, "Пользователь", required=False, default=None)):
if member:
author = member
else:
author = ctx.author
path = PictureCreator.utils.GetAvatarFromUrl(PictureCreator.GetAvatar(author, size=4096))
file = discord.File(path, filename="avatar.gif")
await ctx.send("Avatar " + author.name, file=file)
@message_command(name="Получить ранг")
async def getRank(self, ctx, message: discord.Message):
author = message.author
path = "Temp/{}.png".format(author.id)
info = session.query(Member) \
.filter(Member.MemberId == author.id) \
.filter(Member.ServerId == author.guild.id).first()
PictureCreator.CreateRank(author, info).save(path)
file = discord.File(path, filename="profile.png")
await ctx.send(file=file)
os.remove(path)
@slash_command(
name='rank',
description="Выводит ранг пользователя.",
)
async def rank(self, ctx, member: Option(discord.Member, "Пользователь", required=False, default=None)):
if member:
author = member
else:
author = ctx.author
path = "Temp/{}.png".format(author.id)
info = session.query(Member) \
.filter(Member.MemberId == author.id) \
.filter(Member.ServerId == author.guild.id).first()
PictureCreator.CreateRank(author, info).save(path)
file = discord.File(path, filename="profile.png")
await ctx.send(file=file)
os.remove(path)
@slash_command(
name='top',
description="Выводит рейтинг сервера."
)
async def top(self, ctx,
cat: Option(str, "Категория рейтинга", default='Опыт', choices=["Опыт", "Упоминания", "Эмоджи"],
required=False),
page: Option(int, 'Страница рейтинга', min_value=1, default=1, required=True)):
members = []
page = int(page)
if cat.isnumeric():
page = int(cat)
cat = "Опыт"
page -= 1
if cat == 'Опыт':
for member in session.query(Member) \
.filter(Member.IsAlive) \
.filter(Member.ServerId == ctx.guild.id) \
.order_by(desc(Member.TotalXp)).limit(5).offset(5 * page):
mem = ctx.guild.get_member(member.MemberId)
members.append({
"mem": mem,
"data": ConvrterToCI(round(member.TotalXp, 2)) + "xp",
"url": PictureCreator.GetAvatar(mem, size=64)
})
elif cat == "Упоминания":
for member in session.query(Member) \
.filter(Member.IsAlive) \
.filter(Member.ServerId == ctx.guild.id) \
.order_by(desc(Member.Mentions)).limit(5).offset(5 * page):
mem = ctx.guild.get_member(member.MemberId)
members.append({
"mem": mem,
"url": PictureCreator.GetAvatar(mem, size=64),
"data": str(member.Mentions) + " mentions"
})
elif cat == "Эмоджи":
for emojie in session.query(Emojie) \
.filter(Emojie.ServerId == ctx.guild.id) \
.order_by(desc(Emojie.CountUsage)).limit(5).offset(5 * page):
emoji = await ctx.guild.fetch_emoji(emojie.Id)
members.append({
"mem": emoji,
"url": emoji.url,
"data": str(emojie.CountUsage) + " detected"
})
else:
await ctx.send("Параметр не найден!")
return
path = "Temp/top{}.png".format(page)
PictureCreator.GetTop(members, page).save(path)
file = discord.File(path, filename="top.png")
await ctx.send(file=file)
os.remove(path)
@commands.Cog.listener()
async def on_message(self, message):
if message.author.bot or message.type == MessageType.new_member:
return
ignoreList = session.query(IgnoreList) \
.filter(IgnoreList.ServerId == message.guild.id) \
.filter(IgnoreList.ChannelId == message.channel.id).first()
if not ignoreList:
if len(message.mentions):
for i in list(set(message.mentions)):
if not i.bot and not message.author.bot and not i.id == message.author.id:
XpSys.AddMention(memberId=i.id, serverId=message.guild.id)
xp = len(message.content) / 10
try:
if session.query(BoostList) \
.filter(BoostList.ChannelId == message.channel.parent.id) \
.filter(BoostList.ServerId == message.guild.id).first():
xp *= 2
except AttributeError:
if session.query(BoostList) \
.filter(BoostList.ChannelId == message.channel.id) \
.filter(BoostList.ServerId == message.guild.id).first():
xp *= 2
await XpSys.AddExp(memberId=message.author.id, ServerID=message.guild.id, count=xp, channel=message.channel)
ctx = await self.bot.get_context(message)
        for emoji in list(set(re.findall(r"<\D+\d+>", message.content))):
try:
emj = await commands.EmojiConverter().convert(ctx, emoji)
emojie = session.query(Emojie) \
.filter(Emojie.ServerId == emj.guild.id) \
.filter(Emojie.Id == emj.id).first()
if emojie:
emojie.IncrementUsage()
else:
emojie = Emojie(serverId=emj.guild.id, id=emj.id)
session.add(emojie)
session.commit()
except commands.errors.BadArgument:
pass
@commands.Cog.listener()
async def on_guild_remove(self, guild):
for mem in guild.members:
if not mem.bot:
XpSys.DelMem(mem.id, guild.id)
def setup(client):
client.add_cog(Profile(client))
|
nilq/baby-python
|
python
|
config = {
"--beam-delta":[0.5,float],
"--delta":[0.000976562,float],
"--determinize-lattice":['true',str],
"--hash-ratio":[2,float],
"--minimize":['false',str],
"--phone-determinize":['true',str],
"--prune-interval":[25,int],
"--word-determinize":['true',str],
"--minimize":['false',str],
}
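# A minimal sketch of how a [default, type] table like the one above can be
# consumed: cast a raw flag value with its declared type, falling back to the
# default. The parse_flag helper is illustrative, not part of the original code.
def parse_flag(flag, raw_value=None):
    default, cast = config[flag]
    return cast(raw_value) if raw_value is not None else default
# Example: parse_flag("--prune-interval", "50") -> 50 (an int)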
|
nilq/baby-python
|
python
|
import socket
from unittest import TestCase
from ..subprocess_server_manager import SubprocessServerManager, SubprocessServer
from ..exceptions import ImproperlyConfigured
class BaseSocketTestCase(TestCase):
    @staticmethod
def get(host: str, port: int) -> bytes:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((host, port))
            received = b''
            while data := s.recv(1024):
                received += data
            return received
class TestSubprocessServer(BaseSocketTestCase):
def setUp(self):
self.subprocess_server = SubprocessServer(
'127.0.0.1',
6000,
'test message',
)
self.subprocess_server.start()
def tearDown(self):
self.subprocess_server.stop()
def test_server_responds_immediately_after_start_returns(self):
msg = str(self.get('127.0.0.1', 6000), 'utf-8')
self.assertEqual(msg, 'test message')
def test_server_behaves_same_after_restart(self):
self.subprocess_server.restart()
msg = str(self.get('127.0.0.1', 6000), 'utf-8')
self.assertEqual(msg, 'test message')
class TestSubprocessServerManager(TestCase):
"""
Integration test of the SubprocessServerManager. Tests that the manager
class spins up many servers in response to a schema.
"""
def setUp(self):
self.manager = SubprocessServerManager({
'test_server_1': {
'host': '127.0.0.1',
'port': 6001,
'message': 'test server 1 message'
},
'test_server_2': {
'host': '127.0.0.1',
'port': 6002,
'message': 'test server 2 message'
},
'long_message': {
'host': '127.0.0.1',
'port': 6003,
'message': (
'test server 2 messageWe are experiencing strong winds and '
'freezing temperatures." Freezing is describing the '
'temperature, so it is an adjective.'
),
},
})
def tearDown(self):
self.manager.stop()
def test_schema_validation(self):
bad_schemas = [
{
# missing port
'server 1': {
'host': '127.0.0.1',
'message': 'hi',
},
'server 2': {
'host': '127.0.0.1',
'message': 'hi',
},
},
{
# missing host
'server 1': {
'port': 5000,
'message': 'hi',
},
'server 2': {
'port': 5000,
'message': 'hi',
},
},
{
# port is str, not int
'server 1': {
'host': '127.0.0.1',
'port': '1000',
'message': 'hi',
},
'server 2': {
'host': '127.0.0.1',
'port': '1000',
'message': 'hi',
},
},
]
for schema in bad_schemas:
with self.assertRaises(ImproperlyConfigured):
SubprocessServerManager(schema)
def test_two_servers_cannot_request_same_port(self):
schema = {
's1': {
'host': '127.0.0.1',
'port': 1000,
'message': 'hi',
},
's2': {
'host': '127.0.0.1',
'port': 1000,
'message': 'hi',
},
}
with self.assertRaises(ImproperlyConfigured):
SubprocessServerManager(schema)
def test_starts_and_stops(self):
self.manager.start()
|
nilq/baby-python
|
python
|
from builtins import zip
from builtins import range
from builtins import object
import os
import numpy as np
import warnings
import matplotlib.pyplot as plt
import rubin_sim.maf.utils as utils
__all__ = ['applyZPNorm', 'PlotHandler', 'BasePlotter']
def applyZPNorm(metricValue, plotDict):
if 'zp' in plotDict:
if plotDict['zp'] is not None:
metricValue = metricValue - plotDict['zp']
if 'normVal' in plotDict:
if plotDict['normVal'] is not None:
metricValue = metricValue / plotDict['normVal']
return metricValue
class BasePlotter(object):
"""
    Serves as the base class for MAF plotters and an example of the API.
"""
def __init__(self):
self.plotType = None
# This should be included in every subsequent defaultPlotDict (assumed to be present).
self.defaultPlotDict = {'title': None, 'xlabel': None, 'label': None,
'labelsize': None, 'fontsize': None, 'figsize': None}
def __call__(self, metricValue, slicer, userPlotDict, fignum=None):
pass
class PlotHandler(object):
def __init__(self, outDir='.', resultsDb=None, savefig=True,
figformat='pdf', dpi=600, thumbnail=True, trimWhitespace=True):
self.outDir = outDir
self.resultsDb = resultsDb
self.savefig = savefig
self.figformat = figformat
self.dpi = dpi
self.trimWhitespace = trimWhitespace
self.thumbnail = thumbnail
self.filtercolors = {'u': 'cyan', 'g': 'g', 'r': 'y',
'i': 'r', 'z': 'm', 'y': 'k', ' ': None}
self.filterorder = {' ': -1, 'u': 0, 'g': 1, 'r': 2, 'i': 3, 'z': 4, 'y': 5}
def setMetricBundles(self, mBundles):
"""
Set the metric bundle or bundles (list or dictionary).
Reuse the PlotHandler by resetting this reference.
The metric bundles have to have the same slicer.
"""
self.mBundles = []
# Try to add the metricBundles in filter order.
if isinstance(mBundles, dict):
for mB in mBundles.values():
vals = mB.fileRoot.split('_')
forder = [self.filterorder.get(f, None) for f in vals if len(f) == 1]
forder = [o for o in forder if o is not None]
if len(forder) == 0:
forder = len(self.mBundles)
else:
forder = forder[-1]
self.mBundles.insert(forder, mB)
self.slicer = self.mBundles[0].slicer
else:
for mB in mBundles:
vals = mB.fileRoot.split('_')
forder = [self.filterorder.get(f, None) for f in vals if len(f) == 1]
forder = [o for o in forder if o is not None]
if len(forder) == 0:
forder = len(self.mBundles)
else:
forder = forder[-1]
self.mBundles.insert(forder, mB)
self.slicer = self.mBundles[0].slicer
for mB in self.mBundles:
if mB.slicer.slicerName != self.slicer.slicerName:
raise ValueError('MetricBundle items must have the same type of slicer')
self._combineMetricNames()
self._combineRunNames()
self._combineMetadata()
self._combineConstraints()
self.setPlotDicts(reset=True)
def setPlotDicts(self, plotDicts=None, plotFunc=None, reset=False):
"""
Set or update (or 'reset') the plotDict for the (possibly joint) plots.
Resolution is:
auto-generated items (colors/labels/titles)
< anything previously set in the plotHandler
< defaults set by the plotter
< explicitly set items in the metricBundle plotDict
< explicitly set items in the plotDicts list passed to this method.
"""
if reset:
# Have to explicitly set each dictionary to a (separate) blank dictionary.
self.plotDicts = [{} for b in self.mBundles]
if isinstance(plotDicts, dict):
# We were passed a single dictionary, not a list.
plotDicts = [plotDicts] * len(self.mBundles)
autoLabelList = self._buildLegendLabels()
autoColorList = self._buildColors()
autoCbar = self._buildCbarFormat()
autoTitle = self._buildTitle()
if plotFunc is not None:
autoXlabel, autoYlabel = self._buildXYlabels(plotFunc)
# Loop through each bundle and generate a plotDict for it.
for i, bundle in enumerate(self.mBundles):
# First use the auto-generated values.
tmpPlotDict = {}
tmpPlotDict['title'] = autoTitle
tmpPlotDict['label'] = autoLabelList[i]
tmpPlotDict['color'] = autoColorList[i]
tmpPlotDict['cbarFormat'] = autoCbar
# Then update that with anything previously set in the plotHandler.
tmpPlotDict.update(self.plotDicts[i])
# Then override with plotDict items set explicitly based on the plot type.
if plotFunc is not None:
tmpPlotDict['xlabel'] = autoXlabel
tmpPlotDict['ylabel'] = autoYlabel
# Replace auto-generated plot dict items with things
# set by the plotterDefaults, if they are not None.
plotterDefaults = plotFunc.defaultPlotDict
for k, v in plotterDefaults.items():
if v is not None:
tmpPlotDict[k] = v
# Then add/override based on the bundle plotDict parameters if they are set.
tmpPlotDict.update(bundle.plotDict)
# Finally, override with anything set explicitly by the user right now.
if plotDicts is not None:
tmpPlotDict.update(plotDicts[i])
# And save this new dictionary back in the class.
self.plotDicts[i] = tmpPlotDict
# Check that the plotDicts do not conflict.
self._checkPlotDicts()
def _combineMetricNames(self):
"""
Combine metric names.
"""
# Find the unique metric names.
self.metricNames = set()
for mB in self.mBundles:
self.metricNames.add(mB.metric.name)
# Find a pleasing combination of the metric names.
order = ['u', 'g', 'r', 'i', 'z', 'y']
if len(self.metricNames) == 1:
jointName = ' '.join(self.metricNames)
else:
# Split each unique name into a list to see if we can merge the names.
nameLengths = [len(x.split()) for x in self.metricNames]
nameLists = [x.split() for x in self.metricNames]
# If the metric names are all the same length, see if we can combine any parts.
if len(set(nameLengths)) == 1:
jointName = []
for i in range(nameLengths[0]):
tmp = set([x[i] for x in nameLists])
# Try to catch special case of filters and put them in order.
if tmp.intersection(order) == tmp:
filterlist = ''
for f in order:
if f in tmp:
filterlist += f
jointName.append(filterlist)
else:
# Otherwise, just join and put into jointName.
jointName.append(''.join(tmp))
jointName = ' '.join(jointName)
# If the metric names are not the same length, just join everything.
else:
jointName = ' '.join(self.metricNames)
self.jointMetricNames = jointName
def _combineRunNames(self):
"""
Combine runNames.
"""
self.runNames = set()
for mB in self.mBundles:
self.runNames.add(mB.runName)
self.jointRunNames = ' '.join(self.runNames)
def _combineMetadata(self):
"""
Combine metadata.
"""
metadata = set()
for mB in self.mBundles:
metadata.add(mB.metadata)
self.metadata = metadata
# Find a pleasing combination of the metadata.
if len(metadata) == 1:
self.jointMetadata = ' '.join(metadata)
else:
order = ['u', 'g', 'r', 'i', 'z', 'y']
# See if there are any subcomponents we can combine,
# splitting on some values we expect to separate metadata clauses.
splitmetas = []
for m in self.metadata:
# Try to split metadata into separate phrases (filter / proposal / constraint..).
if ' and ' in m:
m = m.split(' and ')
elif ', ' in m:
m = m.split(', ')
else:
m = [m, ]
# Strip white spaces from individual elements.
m = set([im.strip() for im in m])
splitmetas.append(m)
# Look for common elements and separate from the general metadata.
common = set.intersection(*splitmetas)
diff = [x.difference(common) for x in splitmetas]
# Now look within the 'diff' elements and see if there are any common words to split off.
diffsplit = []
for d in diff:
if len(d) > 0:
m = set([x.split() for x in d][0])
else:
m = set()
diffsplit.append(m)
diffcommon = set.intersection(*diffsplit)
diffdiff = [x.difference(diffcommon) for x in diffsplit]
# If the length of any of the 'differences' is 0, then we should stop and not try to subdivide.
lengths = [len(x) for x in diffdiff]
if min(lengths) == 0:
# Sort them in order of length (so it goes 'g', 'g dithered', etc.)
tmp = []
for d in diff:
tmp.append(list(d)[0])
diff = tmp
xlengths = [len(x) for x in diff]
idx = np.argsort(xlengths)
diffdiff = [diff[i] for i in idx]
diffcommon = []
else:
# diffdiff is the part where we might expect our filter values to appear;
# try to put this in order.
diffdiffOrdered = []
diffdiffEnd = []
for f in order:
for d in diffdiff:
if len(d) == 1:
if list(d)[0] == f:
diffdiffOrdered.append(d)
for d in diffdiff:
if d not in diffdiffOrdered:
diffdiffEnd.append(d)
diffdiff = diffdiffOrdered + diffdiffEnd
diffdiff = [' '.join(c) for c in diffdiff]
# And put it all back together.
combo = (', '.join([''.join(c) for c in diffdiff]) + ' ' +
' '.join([''.join(d) for d in diffcommon]) + ' ' +
' '.join([''.join(e) for e in common]))
self.jointMetadata = combo
def _combineConstraints(self):
"""
Combine the constraints.
"""
constraints = set()
for mB in self.mBundles:
if mB.constraint is not None:
constraints.add(mB.constraint)
self.constraints = '; '.join(constraints)
def _buildTitle(self):
"""
Build a plot title from the metric names, runNames and metadata.
"""
# Create a plot title from the unique parts of the metric/runName/metadata.
plotTitle = ''
if len(self.runNames) == 1:
plotTitle += list(self.runNames)[0]
if len(self.metadata) == 1:
plotTitle += ' ' + list(self.metadata)[0]
if len(self.metricNames) == 1:
plotTitle += ': ' + list(self.metricNames)[0]
if plotTitle == '':
# If there were more than one of everything above, use joint metadata and metricNames.
plotTitle = self.jointMetadata + ' ' + self.jointMetricNames
return plotTitle
def _buildXYlabels(self, plotFunc):
"""
Build a plot x and y label.
"""
if plotFunc.plotType == 'BinnedData':
if len(self.mBundles) == 1:
mB = self.mBundles[0]
xlabel = mB.slicer.sliceColName + ' (' + mB.slicer.sliceColUnits + ')'
ylabel = mB.metric.name + ' (' + mB.metric.units + ')'
else:
xlabel = set()
for mB in self.mBundles:
xlabel.add(mB.slicer.sliceColName)
xlabel = ', '.join(xlabel)
ylabel = self.jointMetricNames
elif plotFunc.plotType == 'MetricVsH':
if len(self.mBundles) == 1:
mB = self.mBundles[0]
ylabel = mB.metric.name + ' (' + mB.metric.units + ')'
else:
ylabel = self.jointMetricNames
xlabel = 'H (mag)'
else:
if len(self.mBundles) == 1:
mB = self.mBundles[0]
xlabel = mB.metric.name
if mB.metric.units is not None:
if len(mB.metric.units) > 0:
xlabel += ' (' + mB.metric.units + ')'
ylabel = None
else:
xlabel = self.jointMetricNames
ylabel = set()
for mB in self.mBundles:
if 'ylabel' in mB.plotDict:
ylabel.add(mB.plotDict['ylabel'])
if len(ylabel) == 1:
ylabel = list(ylabel)[0]
else:
ylabel = None
return xlabel, ylabel
def _buildLegendLabels(self):
"""
Build a set of legend labels, using parts of the runName/metadata/metricNames that change.
"""
if len(self.mBundles) == 1:
return [None]
labels = []
for mB in self.mBundles:
if 'label' in mB.plotDict:
label = mB.plotDict['label']
else:
label = ''
if len(self.runNames) > 1:
label += mB.runName
if len(self.metadata) > 1:
label += ' ' + mB.metadata
if len(self.metricNames) > 1:
label += ' ' + mB.metric.name
labels.append(label)
return labels
def _buildColors(self):
"""
Try to set an appropriate range of colors for the metric Bundles.
"""
if len(self.mBundles) == 1:
if 'color' in self.mBundles[0].plotDict:
return [self.mBundles[0].plotDict['color']]
else:
return ['b']
colors = []
for mB in self.mBundles:
color = 'b'
if 'color' in mB.plotDict:
color = mB.plotDict['color']
else:
if mB.constraint is not None:
# If the filter is part of the sql constraint, we'll
# try to use that first.
if 'filter' in mB.constraint:
vals = mB.constraint.split('"')
for v in vals:
if len(v) == 1:
# Guess that this is the filter value
if v in self.filtercolors:
color = self.filtercolors[v]
colors.append(color)
# If we happened to end up with the same color throughout
# (say, the metrics were all in the same filter)
# then go ahead and generate random colors.
if (len(self.mBundles) > 1) and (len(np.unique(colors)) == 1):
colors = [np.random.rand(3,) for mB in self.mBundles]
return colors
def _buildCbarFormat(self):
"""
Set the color bar format.
"""
cbarFormat = None
if len(self.mBundles) == 1:
if self.mBundles[0].metric.metricDtype == 'int':
cbarFormat = '%d'
else:
metricDtypes = set()
for mB in self.mBundles:
metricDtypes.add(mB.metric.metricDtype)
if len(metricDtypes) == 1:
if list(metricDtypes)[0] == 'int':
cbarFormat = '%d'
return cbarFormat
def _buildFileRoot(self, outfileSuffix=None):
"""
Build a root filename for plot outputs.
If there is only one metricBundle, this is equal to the metricBundle fileRoot + outfileSuffix.
For multiple metricBundles, this is created from the runNames, metadata and metric names.
If you do not wish to use the automatic filenames, then you could set 'savefig' to False and
save the file manually to disk, using the plot figure numbers returned by 'plot'.
"""
if len(self.mBundles) == 1:
outfile = self.mBundles[0].fileRoot
else:
outfile = '_'.join([self.jointRunNames, self.jointMetricNames, self.jointMetadata])
outfile += '_' + self.mBundles[0].slicer.slicerName[:4].upper()
if outfileSuffix is not None:
outfile += '_' + outfileSuffix
outfile = utils.nameSanitize(outfile)
return outfile
def _buildDisplayDict(self):
"""
Generate a display dictionary.
This is most useful for when there are many metricBundles being combined into a single plot.
"""
if len(self.mBundles) == 1:
return self.mBundles[0].displayDict
else:
displayDict = {}
group = set()
subgroup = set()
order = 0
for mB in self.mBundles:
group.add(mB.displayDict['group'])
subgroup.add(mB.displayDict['subgroup'])
if order < mB.displayDict['order']:
order = mB.displayDict['order'] + 1
displayDict['order'] = order
if len(group) > 1:
displayDict['group'] = 'Comparisons'
else:
displayDict['group'] = list(group)[0]
if len(subgroup) > 1:
displayDict['subgroup'] = 'Comparisons'
else:
displayDict['subgroup'] = list(subgroup)[0]
displayDict['caption'] = ('%s metric(s) calculated on a %s grid, '
'for opsim runs %s, for metadata values of %s.'
% (self.jointMetricNames,
self.mBundles[0].slicer.slicerName,
self.jointRunNames, self.jointMetadata))
return displayDict
def _checkPlotDicts(self):
"""
Check to make sure there are no conflicts in the plotDicts that are being used in the same subplot.
"""
# Check that the length is OK
if len(self.plotDicts) != len(self.mBundles):
raise ValueError('plotDicts (%i) must be same length as mBundles (%i)'
% (len(self.plotDicts), len(self.mBundles)))
# These are the keys that need to match (or be None)
keys2Check = ['xlim', 'ylim', 'colorMin', 'colorMax', 'title']
# Identify how many subplots there are. If there are more than one, just don't change anything.
# This assumes that if there are more than one, the plotDicts are actually all compatible.
subplots = set()
for pd in self.plotDicts:
if 'subplot' in pd:
subplots.add(pd['subplot'])
# Now check subplots are consistent.
if len(subplots) <= 1:
reset_keys = []
for key in keys2Check:
values = [pd[key] for pd in self.plotDicts if key in pd]
if len(np.unique(values)) > 1:
# We will reset some of the keys to the default, but for some we should do better.
if key.endswith('Max'):
for pd in self.plotDicts:
pd[key] = np.max(values)
elif key.endswith('Min'):
for pd in self.plotDicts:
pd[key] = np.min(values)
elif key == 'title':
title = self._buildTitle()
for pd in self.plotDicts:
pd['title'] = title
else:
warnings.warn('Found more than one value to be set for "%s" in the plotDicts.' % (key) +
' Will reset to default value. (found values %s)' % values)
reset_keys.append(key)
# Reset the most of the keys to defaults; this can generally be done safely.
for key in reset_keys:
for pd in self.plotDicts:
pd[key] = None
def plot(self, plotFunc, plotDicts=None, displayDict=None, outfileRoot=None, outfileSuffix=None):
"""
Create plot for mBundles, using plotFunc.
plotDicts: List of plotDicts if one wants to use a _new_ plotDict per MetricBundle.
"""
if not plotFunc.objectPlotter:
# Check that metricValues type and plotter are compatible (most are float/float, but
# some plotters expect object data .. and some only do sometimes).
for mB in self.mBundles:
if mB.metric.metricDtype == 'object':
metricIsColor = mB.plotDict.get('metricIsColor', False)
if not metricIsColor:
warnings.warn('Cannot plot object metric values with this plotter.')
return
# Update x/y labels using plotType.
self.setPlotDicts(plotDicts=plotDicts, plotFunc=plotFunc, reset=False)
# Set outfile name.
if outfileRoot is None:
outfile = self._buildFileRoot(outfileSuffix)
else:
outfile = outfileRoot
plotType = plotFunc.plotType
if len(self.mBundles) > 1:
plotType = 'Combo' + plotType
# Make plot.
fignum = None
for mB, plotDict in zip(self.mBundles, self.plotDicts):
if mB.metricValues is None:
# Skip this metricBundle.
msg = 'MetricBundle (%s) has no attribute "metricValues".' % (mB.fileRoot)
msg += ' Either the values have not been calculated or they have been deleted.'
warnings.warn(msg)
else:
fignum = plotFunc(mB.metricValues, mB.slicer, plotDict, fignum=fignum)
# Add a legend if more than one metricValue is being plotted or if legendloc is specified.
legendloc = None
if 'legendloc' in self.plotDicts[0]:
legendloc = self.plotDicts[0]['legendloc']
if len(self.mBundles) > 1:
try:
legendloc = self.plotDicts[0]['legendloc']
except KeyError:
legendloc = 'upper right'
if legendloc is not None:
plt.figure(fignum)
plt.legend(loc=legendloc, fancybox=True, fontsize='smaller')
# Add the super title if provided.
if 'suptitle' in self.plotDicts[0]:
plt.suptitle(self.plotDicts[0]['suptitle'])
# Save to disk and file info to resultsDb if desired.
if self.savefig:
if displayDict is None:
displayDict = self._buildDisplayDict()
self.saveFig(fignum, outfile, plotType, self.jointMetricNames, self.slicer.slicerName,
self.jointRunNames, self.constraints, self.jointMetadata, displayDict)
return fignum
def saveFig(self, fignum, outfileRoot, plotType, metricName, slicerName,
runName, constraint, metadata, displayDict=None):
fig = plt.figure(fignum)
plotFile = outfileRoot + '_' + plotType + '.' + self.figformat
if self.trimWhitespace:
fig.savefig(os.path.join(self.outDir, plotFile), dpi=self.dpi,
bbox_inches='tight', format=self.figformat)
else:
fig.savefig(os.path.join(self.outDir, plotFile), dpi=self.dpi, format=self.figformat)
# Generate a png thumbnail.
if self.thumbnail:
thumbFile = 'thumb.' + outfileRoot + '_' + plotType + '.png'
plt.savefig(os.path.join(self.outDir, thumbFile), dpi=72, bbox_inches='tight')
# Save information about the file to resultsDb.
if self.resultsDb:
if displayDict is None:
displayDict = {}
metricId = self.resultsDb.updateMetric(metricName, slicerName, runName, constraint,
metadata, None)
self.resultsDb.updateDisplay(metricId=metricId, displayDict=displayDict, overwrite=False)
self.resultsDb.updatePlot(metricId=metricId, plotType=plotType, plotFile=plotFile)
|
nilq/baby-python
|
python
|
'''
Created on Jan, 2017
@author: hugo
'''
from __future__ import absolute_import
import multiprocessing
from gensim.models import Doc2Vec
class MyDoc2Vec(object):
def __init__(self, dim, hs=0, window=5, negative=5, epoches=5, dm=1, dm_concat=1):
super(MyDoc2Vec, self).__init__()
self.dim = dim
self.hs = hs
self.window = window
self.negative = negative
self.epoches = epoches
self.dm = dm
self.dm_concat = dm_concat
def train(self, corpus):
self.model = Doc2Vec(min_count=1, window=self.window, size=self.dim, \
workers=multiprocessing.cpu_count(), hs=self.hs,\
negative=self.negative, iter=1, dm=self.dm, dm_concat=self.dm_concat)
self.model.build_vocab(corpus())
for each in range(self.epoches):
self.model.train(corpus())
return self
def predict(model, corpus):
doc_codes = {}
for doc_words, doc_name in corpus():
doc_codes[doc_name[0]] = model.infer_vector(doc_words).tolist()
return doc_codes
def save_doc2vec(model, outfile):
model.save(outfile)
def load_doc2vec(mod_file):
return Doc2Vec.load(mod_file)
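# A minimal usage sketch, assuming the older gensim API this snippet was
# written against (train() without total_examples/epochs); the two-document
# toy corpus below is illustrative only.
if __name__ == '__main__':
    from gensim.models.doc2vec import TaggedDocument
    def corpus():
        return iter([TaggedDocument(['hello', 'world'], ['doc_0']),
                     TaggedDocument(['foo', 'bar'], ['doc_1'])])
    d2v = MyDoc2Vec(dim=16, epoches=2).train(corpus)
    codes = predict(d2v.model, corpus)  # {'doc_0': [...], 'doc_1': [...]}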
|
nilq/baby-python
|
python
|
# -*- coding: utf-8-*-
import random
import re
import sys
sys.path.append('/home/pi/Desktop/autoh/Lights')
from serial_led import serialControl
WORDS = ["TURN", "THE", "LIGHT", "ON"]
def lightno(mic):
text=mic.activeListen()
if text=="ONE" or text=="1":
mic.say("Turning light one on")
serialControl("2000")
elif text=="TWO" or text=="2":
mic.say("Turning light two on")
serialControl("3000")
elif text=="THREE" or text=="3":
mic.say("Turning light three on")
serialControl("4000")
elif text=="FOUR" or text=="4":
mic.say("Turning light four on")
serialControl("5000")
else:
mic.say("Sorry I don't think I can do that")
lightno(mic)
def handle(text, mic, profile):
"""
    Responds to user-input, typically speech text, by asking which
    light should be turned on and switching it on.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)"""
messages = ["WHICH ONE, SIR?",
"WHICH LIGHT DO YOU WANT ME TO TURN ON? "]
message = random.choice(messages)
mic.say(message)
lightno(mic)
def isValid(text):
return bool(re.search(r'\bturn the light on\b', text, re.IGNORECASE))
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
# Copyright 2022 Sam Steele
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import requests, sys
from datetime import datetime, date, timedelta, time
from config import *
if not RA_API_KEY:
logging.error("RA_API_KEY not set in config.py")
sys.exit(1)
points = []
connect(RA_DATABASE)
end = datetime.utcnow().timestamp()
start = end - 604800
try:
response = requests.get('https://retroachievements.org/API/API_GetAchievementsEarnedBetween.php',
params={'z': RA_USERNAME, 'y': RA_API_KEY, 'u': RA_USERNAME, 'f': start, 't': end})
response.raise_for_status()
except requests.exceptions.HTTPError as err:
logging.error("HTTP request failed: %s", err)
sys.exit(1)
data = response.json()
logging.info("Got %s achievements from RetroAchievements", len(data))
for achievement in data:
    earned_date = datetime.strptime(achievement['Date'], "%Y-%m-%d %H:%M:%S")
    points.append({
        "measurement": "achievement",
        "time": earned_date.isoformat(),
"tags": {
"player_id": RA_USERNAME,
"platform": achievement['ConsoleName'],
"player_name": RA_USERNAME,
"title": achievement['GameTitle'],
"application_id": str(achievement['GameID']),
"apiname": str(achievement['AchievementID']),
},
"fields": {
"name": achievement['Title'],
"description": achievement['Description'],
"icon": f'https://retroachievements.org{achievement["BadgeURL"]}'
}
})
write_points(points)
|
nilq/baby-python
|
python
|
import copy
import enum
import logging
from pathlib import Path
import re
__version__ = "0.0.9"
__author__ = "rigodron, algoflash, GGLinnk"
__license__ = "MIT"
__status__ = "developpement"
# raised when the action replay ini file contains a badly formatted entry
class InvalidIniFileEntryError(Exception): pass
# raised when trying to resolve an invalid dol file offset
class InvalidImgOffsetError(Exception): pass
# raised when trying to resolve an out-of-section virtual address
class InvalidVirtualAddressError(Exception): pass
# raised when virtual address + length overflows out of the sections
class SectionsOverflowError(Exception): pass
# raised when virtual address + length is out of the main program memory space
class OutOfMemoryError(Exception): pass
# raised when the virtual address of a used section is not aligned to 32 bytes
class InvalidSectionAlignError(Exception): pass
# raised when a section offset does not match the current file datas
class InvalidSectionOffsetError(Exception): pass
def align_bottom(address:int, align:int):
if address % align == 0: return address
return address - address % align
def align_top(address:int, align:int):
if address % align == 0: return address
return address + align - (address % align)
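# Worked examples: align_bottom rounds an address down to the previous multiple
# of align, align_top rounds up to the next one; both leave already-aligned
# addresses unchanged.
# align_bottom(0x8000311f, 32) -> 0x80003100
# align_top(0x8000311f, 32)    -> 0x80003120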
class SectionType(enum.IntFlag):
DATA = 0
TEXT = 1
BSS = 2
SYS = 3
UNMAPPED = 4
class IntervalDiv(enum.IntFlag):
LEFT = 0
IN = 1
RIGHT = 2
class MemoryObject:
__locked_address_space = None
__type = None
__name = None
__address = None
__end_address = None
__length = None
__datas = None
def __init__(self, address:int, section_type:SectionType = SectionType.UNMAPPED, name:str = None, length:int = None, end_address:int = None, locked_address_space:bool = True):
if length is None:
if end_address is None:
raise Exception("Error - length or end_address has to be specified.")
self.__end_address = end_address
self.__length = end_address - address
else:
self.__length = length
self.__end_address = address + length
        if section_type == SectionType.SYS or not locked_address_space:
self.__locked_address_space = False
else:
self.__locked_address_space = True
if not 0x80003100 <= address < 0x81200000 or not 0x80003100 < self.__end_address <= 0x81200000:
raise OutOfMemoryError(f"Error - Out of memory address: {address:08x}:{self.__end_address:08x}: should be in 0x80003100:0x81200000.")
self.__type = section_type
self.__name = name
self.__address = address
def __str__(self):
return f"| {str(self.name()).ljust(11)} | {self.address():08x} | {self.end_address():08x} | {self.length():08x} |"
def __sub__(interval:'MemoryObject', intervals_to_remove:list):
"""
Get non-overlapping intervals from interval by removing intervals_to_remove
input: interval = MemoryObject
input: intervals_to_remove = [ MemoryObject, ... ]
return [MemoryObject, ...] or None
* sorted by address
"""
interval = copy.deepcopy(interval)
intervals_to_remove.sort(key=lambda x: x.address())
result_memory_objects = []
for interval_to_remove in intervals_to_remove:
if interval_to_remove < interval: continue # end before
if interval_to_remove > interval: break # begin after
if interval in interval_to_remove: return result_memory_objects if result_memory_objects != [] else None # total overlap
# begin truncate
if interval_to_remove.address() <= interval.address():
interval.set_address(interval_to_remove.end_address())
continue
result_memory_objects.append(MemoryObject(interval.address(), interval.type(), interval.name(), end_address=interval_to_remove.address()))
# end truncate
if interval_to_remove.end_address() >= interval.end_address():
return result_memory_objects
# interval.address() < interval_to_remove < interval.end_address()
interval.set_address( interval_to_remove.end_address() )
continue
if interval.length() > 0:
result_memory_objects.append(interval)
return result_memory_objects if result_memory_objects != [] else None
def __lt__(a, b): return a.end_address() <= b.address()
def __le__(a, b): return b.address() < a.end_address() <= b.end_address() and a.address() < b.address()
def __ge__(a, b): return b.address() <= a.address() < b.end_address() and a.end_address() > b.end_address()
def __gt__(a, b): return a.address() >= b.end_address()
def __contains__(a, b): return b.address() >= a.address() and b.end_address() <= a.end_address()
def __and__(a, b): return a.address() < b.end_address() and a.end_address() > b.address() # Intersect
def __truediv__(a, b):
"""
Description: Split a using b by creating before_b, in_b, after_b intervals
input: a = MemoryObject or inherited class
input: b = MemoryObject or inherited class
return: {IntervalDiv: splited_copy, ... } or None
"""
if not a & b: return None
result = {}
if a.address() < b.address():
new_left = copy.deepcopy(a)
new_left.set_end_address(b.address())
new_left.set_datas( new_left.datas()[:new_left.length()] )
a.set_address(b.address())
a.set_datas( a.datas()[-a.length():] )
result[IntervalDiv.LEFT] = new_left
if a.end_address() > b.end_address():
new_right = copy.deepcopy(a)
new_right.set_address(b.end_address())
new_right.set_datas( new_right.datas()[-new_right.length():] )
a.set_end_address(b.end_address())
a.set_datas( a.datas()[:a.length()] )
result[IntervalDiv.RIGHT] = new_right
result[IntervalDiv.IN] = a
return result if len(result) > 0 else None
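    # Example: splitting a 0x80003200:0x80003260 object (datas assigned) by a
    # 0x80003220:0x80003240 object yields {LEFT: 0x80003200:0x80003220,
    # IN: 0x80003220:0x80003240, RIGHT: 0x80003240:0x80003260}, with each
    # part's datas sliced to match its new bounds.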
#__eq__(a, b)
def type(self): return self.__type
def name(self): return self.__name
def address(self): return self.__address
def end_address(self): return self.__end_address
def length(self): return self.__length
def datas(self): return self.__datas
def set_name(self, name:str): self.__name = name
def set_address(self, address:int):
if self.__locked_address_space and not 0x80003100 <= address < 0x81200000:
raise OutOfMemoryError(f"Error - Out of memory address: {address:08x} should be 0x80003100 <= address < 0x81200000.")
self.__address = address
self.__length = self.__end_address - address
def set_end_address(self, address:int):
if self.__locked_address_space and not 0x80003100 < address <= 0x81200000:
raise OutOfMemoryError(f"Error - Out of memory end_address: {address:08x} should be 0x80003100 < end_address <= 0x81200000.")
self.__end_address = address
self.__length = address - self.__address
def set_datas(self, datas:bytes):
self.__datas = datas
def set_type(self, section_type:SectionType):
self.__type = section_type
def update_datas(self, memory_object:'MemoryObject'):
if not memory_object in self:
raise Exception("Error - Invalid update adresses.")
if len(memory_object.datas()) != memory_object.length():
raise Exception("Error - length does not match the datas length.")
self.__datas = bytearray(self.__datas)
offset = memory_object.address() - self.address()
self.__datas[offset: offset + memory_object.length()] = memory_object.datas()
def to_memory_object(self): return MemoryObject(self.address(), self.type(), self.name(), length=self.length())
def align(self):
self.set_address( align_bottom(self.address(), 32) )
self.set_end_address( align_top(self.end_address(), 32) )
class Section(MemoryObject):
__index = None
__offset = None
__is_used = None
def __init__(self, index:int, offset:int, address:int, length:int, section_type:SectionType = None):
if section_type is None:
section_type = SectionType.TEXT if index < 7 else SectionType.DATA
super().__init__(address, section_type, length=length, locked_address_space=False)
self.__index = index
self.__offset = offset
if self.is_used():
# Section virtual address has to be aligned to 32 bytes.
if self.address() % 32 != 0:
raise InvalidSectionAlignError(f"Error - Section {index} is not aligned to 32 bytes.")
def index(self): return self.__index
def offset(self): return self.__offset
def set_index(self, index:int): self.__index = index
def set_offset(self, offset:int): self.__offset = offset
def is_used(self):
return (self.__offset != 0) and (self.address() != 0) and (self.length() != 0)
def format_raw(self):
section_raw_name = f"text{self.index()}".ljust(7) if self.type() == SectionType.TEXT else f"data{self.index()}".ljust(7)
return f"| {section_raw_name} | {self.offset():08x} | {self.address():08x} | {self.length():08x} | {str(self.is_used()).ljust(5)} |\n"
def resolve_img2virtual(self, offset:int):
if offset >= self.offset() and offset < self.offset() + self.length():
return self.address() + offset - self.offset()
return None
def resolve_virtual2img(self, address:int):
if address >= self.address() and address < self.end_address():
return self.offset() + address - self.address()
return None
class Bss(MemoryObject):
# list of memory objects out of sections
__splited = None
def __init__(self, address:int, length:int):
super().__init__(address, SectionType.BSS, "bss", length=length)
def format(self):
return f"bss: address:{self.address():08x} length:{self.length():08x}"
def split(self, memory_objects:list):
self.__splited = self - memory_objects
if self.__splited is not None: # If .bss is mapped
for i, splited in enumerate(self.__splited):
splited.set_name(f".bss{i}")
return self.__splited
def splited(self): return self.__splited
def get_unmapped_intervals(merged_intervals:list, memory_objects:list):
"""
    Description: This function is useful for finding new sections to create when processing an .ini file
input: merged_intervals = [MemoryObject, ...]
* non overlapping, with length > 0 (There is always sections in dols)
input: memory_objects = [ActionReplayCode, ...]
* could overlap
return [MemoryObject, ...] else None
* unmapped sections intervals where we found ARCodes sorted by address
* it means that this intervals are used but are not in already existing intervals (merged_intervals)
"""
memory_objects.sort(key=lambda x:x.address())
unoverlapped_list = []
for memory_object in memory_objects:
unoverlapped = memory_object - merged_intervals
if unoverlapped is not None:
unoverlapped_list += unoverlapped
if len(unoverlapped_list) == 0:
return None
merged_intervals = copy.deepcopy(merged_intervals)
unoverlapped_list.sort(key=lambda x:x.address())
def _get_unmapped_intervals(merged_intervals:list, unoverlapped_list:list):
"""
input: merged_intervals: [MemoryObject, ...]
* contains intervals separated by empty interval
input: unoverlapped_list: [MemoryObject, ...]
* contains intervals < merged_intervals or intervals > merged_intervals
return [MemoryObject, ...]
* each of the returned memory objects describe an unmapped interval used by unoverlapped_list
"""
if len(merged_intervals) == 0:
return [MemoryObject(unoverlapped_list[0].address(), end_address=unoverlapped_list[-1].end_address())]
merged_interval = merged_intervals.pop(0)
new_unmapped = []
for i, memory_object in enumerate(unoverlapped_list):
if memory_object < merged_interval:
if new_unmapped == []:
new_unmapped = [memory_object]
continue
else:
new_unmapped[0].set_end_address(memory_object.end_address())
continue
else:
if len(unoverlapped_list[i:]) == 0: return new_unmapped
return new_unmapped + _get_unmapped_intervals(merged_intervals, unoverlapped_list[i:])
return new_unmapped
return _get_unmapped_intervals(merged_intervals, unoverlapped_list)
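# A minimal sketch of get_unmapped_intervals on hand-built intervals; the
# helper and its addresses are illustrative, not part of the original tool.
def _demo_get_unmapped_intervals():
    mapped = [MemoryObject(0x80004000, length=0x100)]
    codes = [MemoryObject(0x80003f00, length=0x80),   # entirely unmapped
             MemoryObject(0x800040f0, length=0x20)]   # straddles the mapped end
    # Returns only the unmapped parts:
    # 0x80003f00:0x80003f80 and 0x80004100:0x80004110
    return get_unmapped_intervals(mapped, codes)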
def get_overlapping_arcodes(action_replay_list:list):
"""
input: action_replay_list = [ActionReplayCode, ...]
return [(ActionReplayCode, ActionReplayCode), ...] else None
    Get overlapping action replay codes in memory. Return pairs of ARCodes that patch the same memory addresses.
"""
if len(action_replay_list) < 2: return None
action_replay_list.sort(key=lambda x:x.address())
# Find overlaps between ARCodes
overlaps_list = []
last_arcode = action_replay_list[0]
for action_replay_code in action_replay_list[1:]:
# Intersect
if last_arcode & action_replay_code:
overlaps_list.append( (last_arcode, action_replay_code) )
last_arcode = action_replay_code
return overlaps_list if overlaps_list != [] else None
def parse_action_replay_ini(path:Path):
"""
input: path of ini
return [ActionReplayCode, ...]
Parse an ini file. All ARCodes present in the ini will be enabled regardless of the [ActionReplay_Enabled] section.
* empty lines are removed
* lines beginning with $ are considered comments and are removed
* lines beginning with [ are considered comments and are removed
* other lines have to be in the format "0AXXXXXX XXXXXXXX" with A in [0,1,2,3,4,5] and X in [0-9a-fA-F]
"""
return [ActionReplayCode(action_replay_line, i + 1) for i, action_replay_line in enumerate(path.read_text().splitlines()) if len(action_replay_line) != 0 and action_replay_line[0] not in ["$", "["]]
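# Hedged example of an ini accepted by this parser:
#
#   $Infinite Health        <- comment line, skipped
#   [ActionReplay]          <- section line, skipped
#   04123456 0000FFFF       <- one 32-bit write directive
#
# parse_action_replay_ini(Path("codes.ini")) would then return a single
# ActionReplayCode whose address is (0x04123456 & 0x01FFFFFF) | 0x80000000,
# i.e. 0x80123456.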
class ActionReplayCode(MemoryObject):
__PATTERN = re.compile("^(0[012345][0-9a-fA-F]{6}) ([0-9a-fA-F]{8})$") # class variable gives better performance for regex processing; hex-only classes so invalid lines fail the match instead of crashing int(..., 16)
__line_number = None
__opcode = None
def __init__(self, action_replay_code:str, line_number:int):
self.__line_number = line_number
res = ActionReplayCode.__PATTERN.fullmatch(action_replay_code)
if res is None:
raise InvalidIniFileEntryError(f"Error - Arcode has to be in format: '0AXXXXXX XXXXXXXX' with A in [0,1,2,3,4,5] and X in [0-9a-fA-F] line {line_number} \"{action_replay_code}\".")
# address = (first 4 bytes & 0x01FFFFFF) | 0x80000000
address = (int(res[1], base=16) & 0x01FFFFFF) | 0x80000000
# opcode = first byte & 0xFE
self.__opcode = int(res[1][:2], base=16) & 0xFE
if self.__opcode not in [0, 2, 4]:
raise InvalidIniFileEntryError(f"Error - ARCode has to be in format: '0AXXXXXX XXXXXXXX' with A in [0,1,2,3,4,5] and X in [0-9a-fA-F] line {line_number} \"{action_replay_code}\".")
if self.__opcode == 0x04:
datas = int(res[2], 16).to_bytes(4, "big")
elif self.__opcode == 0x02:
datas = (int(res[2][:4], 16) + 1) * int(res[2][4:], 16).to_bytes(2, "big")
elif self.__opcode == 0x00:
datas = (int(res[2][:6], 16) + 1) * int(res[2][6:], 16).to_bytes(1, "big")
length = len(datas)
try:
super().__init__(address, SectionType.UNMAPPED, action_replay_code, length=length)
except OutOfMemoryError:
raise OutOfMemoryError(f"Error - Out of memory address line {line_number}: {address:08x}:{address + length} should be in 0x80003100:0x81200000.")
self.set_datas(datas)
def __str__(self):
return f"| {str(self.__line_number).rjust(8)} | {self.name()} | {self.address():08x} | {self.end_address():08x} | {self.length():08x} |"
def __eq__(a, b): return a.name() == b.name() and a.address() == b.address() and a.end_address() == b.end_address() and a.__line_number == b.__line_number and a.__opcode == b.__opcode and a.datas() == b.datas()
def __ne__(a, b): return a.name() != b.name() or a.address() != b.address() or a.end_address() != b.end_address() or a.__line_number != b.__line_number or a.__opcode != b.__opcode or a.datas() != b.datas()
def line_number(self): return self.__line_number
class Dol:
#HEADER_LEN = 0x100
__path = None
# [Section, ...] with length = 18
__sections = None
# Bss object
__bss = None
__entry_point = None
def __init__(self, path:Path):
self.__path = path
datas = path.read_bytes()
self.__bss = Bss( int.from_bytes(datas[0xd8:0xdc], "big"), int.from_bytes(datas[0xdc:0xe0], "big") )
self.__entry_point = int.from_bytes(datas[0xe0:0xe4], "big")
current_section = 0
sections = []
for i in range(18):
section = Section(
i, # index
int.from_bytes(datas[i*4:i*4+4], "big"), # offset
int.from_bytes(datas[0x48+i*4:0x48+i*4+4], "big"), # address
int.from_bytes(datas[0x90+i*4:0x90+i*4+4], "big")) # length
if section.is_used():
if i == 7: current_section = 0
section.set_datas(datas[section.offset():section.offset()+section.length()])
section.set_name( f".text{current_section}" if i < 7 else f".data{current_section}" )
current_section += 1
sections.append(section)
# Make a tuple to lock from sorting
self.__sections = tuple(sections)
def __str__(self):
'Return a table formatting each section from 0 to 17.'
str_buffer = f"Entry point: {self.__entry_point:08x}\n\n|"
str_buffer += "-"*50 + "|\n| Section | Offset | Address | Length | Used |\n|" + "-"*9 + ("|"+"-"*10)*3 + "|" + "-"*7 + "|\n"
for section in self.__sections:
str_buffer += section.format_raw()
return str_buffer + "|"+"-"*50+f"|\n\n{self.__bss.format()}"
def __get_used_sections(self): return [section for section in self.__sections if section.is_used()]
def __get_merged_mapped_memory(self):
"""
Get sorted intervals where there is data or text.
return [MemoryObject, ...]
* merged and sorted
private: [Section, ...]
* sections don't overlap, and there is always at least one
"""
memory_objects = [section.to_memory_object() for section in self.__get_used_sections()]
memory_objects.sort(key=lambda x:x.address())
merged_intervals = [memory_objects[0]]
for memory_object in memory_objects[1:]:
if merged_intervals[-1].end_address() == memory_object.address():
merged_intervals[-1].set_end_address( memory_object.end_address() )
else:
merged_intervals.append(memory_object)
return merged_intervals
def resolve_img2virtual(self, offset:int):
"""
input: dol_absolute_offset
return virtual_memory_address
"""
memory_address = None
for section in self.__sections:
if section.is_used():
virtual_address = section.resolve_img2virtual(offset)
if virtual_address is not None:
return virtual_address
raise InvalidImgOffsetError(f"Error - Invalid dol image offset: {offset:08x}")
def resolve_virtual2img(self, address:int):
"""
input: virtual_memory_address
return dol_absolute_offset
"""
for section in self.__sections:
if section.is_used():
offset = section.resolve_virtual2img(address)
if offset is not None:
return offset
raise InvalidVirtualAddressError(f"Error - Not found in dol initial sections: {address:08x}")
def stats(self):
# https://www.gc-forever.com/yagcd/chap4.html#sec4
# system: 0x80000000 -> 0x80003100
# available: 0x80003100 -> 0x81200000
# apploader: 0x81200000 -> 0x81300000
# Bootrom/IPL: 0x81300000 -> 0x81800000
# Now we have to generate a memory map with the split bss and empty spaces
# [ [section_name, beg_addr, end_addr, length], ... ]
memory_objects = [
MemoryObject(0x80000000, SectionType.SYS, "System", length=0x3100),
MemoryObject(0x81200000, SectionType.SYS, "Apploader", length=0x100000),
MemoryObject(0x81300000, SectionType.SYS, "Bootrom/IPL", length=0x500000)] + self.__get_used_sections()
splited = self.__bss.split(memory_objects)
if splited is not None:
memory_objects += splited
# Now search for unmapped program space
memory_objects += MemoryObject(0x80003100, SectionType.UNMAPPED, "Empty", end_address=0x81200000) - memory_objects
memory_objects.sort(key=lambda x: x.address())
str_buffer = "\n|"+"-"*46+"|\n| Section | beg_addr | end_addr | length |\n|" + "-"*13 + ("|"+"-"*10)*3 + "|\n"
for memory_object in memory_objects:
str_buffer += str(memory_object)+"\n"
print(f"{self}{str_buffer}|"+"-"*46+"|")
def extract(self, filename:str, section_index:int, output_path:Path):
if section_index > 17:
raise Exception("Error - Section index has to be in 0 - 17")
output_path.write_bytes(self.__sections[section_index].datas())
def analyse_action_replay(self, action_replay_list:list):
merged_intervals = self.__get_merged_mapped_memory()
overlaps_list = get_overlapping_arcodes(action_replay_list)
# Get unmapped groups splited by sections intervals:
# each group contains intervals to patch grouped by data sections to add
unmapped_memory_objects = get_unmapped_intervals(merged_intervals, action_replay_list)
if overlaps_list is not None:
str_buffer = "Found overlapping ARCodes:\n"
str_buffer += "|"+"-"*127+"|\n| Line | ActionReplayCode1 | beg_addr | end_addr | length | Line | ActionReplayCode2 | beg_addr | end_addr | length |\n|" + ("-"*10 + "|" + "-"*19 + ("|"+"-"*10)*3 + "|")*2 + "\n"
for [arcode0, arcode1] in overlaps_list:
str_buffer += str(arcode0)[:-1] + str(arcode1) + "\n" # join both rows on one line, dropping the duplicated pipe
print(str_buffer+"|"+"-"*127+"|")
else:
print(f"No overlapping ARCodes found.")
if unmapped_memory_objects is not None:
str_buffer = "\nUnmapped virtual addresses intervals used by ARCodes:\n"+"|"+"-"*32+"|\n| beg_addr | end_addr | length |\n"+("|"+"-"*10)*3 +"|\n"
for unmapped_memory_object in unmapped_memory_objects:
unmapped_memory_object.align()
str_buffer += f"| {unmapped_memory_object.address():08x} | {unmapped_memory_object.end_address():08x} | {unmapped_memory_object.length():08x} |\n"
print(str_buffer+"|"+"-"*32+"|")
print("Use -par file.dol -ini arcodes.ini -o output.dol -sr to remap sections and allow complete processing of the ARCodes in this ini file. Else the patching process will be interupted for out of dol ARCodes.")
else:
print(f"No out of sections ARCodes found.\n")
def patch_memory_objects(self, output_path:Path, memory_objects:list):
"""
input: [MemoryObject, ... ]
return True
raise SectionsOverflowError if part of the bytecode is out of the existing sections
raise InvalidVirtualAddressError if the base virtual address is out of the existing sections
"""
sections = self.__get_used_sections()
sections.sort(key=lambda x: x.address())
def split_and_patch(sections:list, memory_object:MemoryObject):
"""
When patching a section we could overflow into the next or previous section.
input: ActionReplayCode
return True
raise SectionsOverflowError if part of the bytecode is out of the existing sections
raise InvalidVirtualAddressError if the base virtual address is out of the existing sections
"""
for section in sections:
try:
# Intersection
if not memory_object & section: continue
# Split left_interval, in, right_interval
splited = memory_object / section
if IntervalDiv.LEFT in splited:
split_and_patch(sections, splited[IntervalDiv.LEFT])
logging.debug(f"----> offset:{section.offset() + splited[IntervalDiv.IN].address() - section.address():08x} val:{splited[IntervalDiv.IN].datas().hex()}")
section.update_datas( splited[IntervalDiv.IN] )
if IntervalDiv.RIGHT in splited:
split_and_patch(sections, splited[IntervalDiv.RIGHT])
return True
except InvalidVirtualAddressError:
raise SectionsOverflowError(f"Error - Value overflow in a nonexistent dol initial section: {memory_object.address():08x}:{memory_object.datas().hex()}")
raise InvalidVirtualAddressError(f"Error - Not found in dol initial sections: {memory_object.address():08x}:{memory_object.end_address():08x}")
for memory_object in memory_objects:
logging.debug(f"Processing {memory_object.name()} address:{memory_object.address():08x}")
split_and_patch(sections, memory_object)
self.__save(output_path)
def remap_sections(self, action_replay_list:list):
merged_intervals = self.__get_merged_mapped_memory()
unmapped_memory_objects = get_unmapped_intervals(merged_intervals, action_replay_list)
if unmapped_memory_objects is None:
return True
text_sections = []
data_sections = []
for section in self.__sections:
if section.is_used():
section.set_offset(0)
section.set_index(None)
if section.type() == SectionType.TEXT:
text_sections.append(section)
else:
data_sections.append(section)
self.__sections = None
if len(unmapped_memory_objects) + len(data_sections) > 11:
raise Exception("Error - Not enought empty data sections available for remapping.")
for unmapped_memory_object in unmapped_memory_objects:
unmapped_memory_object.align()
new_section = Section(None, 0, unmapped_memory_object.address(), unmapped_memory_object.length(), section_type=SectionType.UNMAPPED)
new_section.set_datas( bytearray(b"\x00" * new_section.length()) )
data_sections.append( new_section )
text_sections.sort(key=lambda x: x.address())
data_sections.sort(key=lambda x: x.address())
sections = []
current_offset = 0x100
i = 0
for text_section in text_sections:
sections.append( text_section )
text_section.set_index(i)
text_section.set_offset(current_offset)
text_section.set_type(SectionType.TEXT)
current_offset += text_section.length()
i += 1
while i < 7:
sections.append( Section(i, 0, 0, 0) )
i += 1
for data_section in data_sections:
sections.append( data_section )
data_section.set_index(i)
data_section.set_offset(current_offset)
data_section.set_type(SectionType.DATA)
current_offset += data_section.length()
i += 1
while i < 18:
sections.append( Section(i, 0, 0, 0) )
i += 1
self.__sections = tuple(sections)
def __save(self, output_path:Path):
offsets = b""
addresses = b""
lengths = b""
for section in self.__sections:
offsets += section.offset().to_bytes(4, "big")
addresses += section.address().to_bytes(4, "big")
lengths += section.length().to_bytes(4, "big")
datas = offsets + addresses + lengths +\
self.__bss.address().to_bytes(4, "big") + self.__bss.length().to_bytes(4, "big") +\
self.__entry_point.to_bytes(4, "big")
datas = datas.ljust(0x100, b"\x00")
for section in sorted(self.__sections, key=lambda x: x.offset()):
if section.is_used():
if len(datas) != section.offset():
raise InvalidSectionOffsetError(f"Error - Section {section.index()} has an offset that doesn't match the previous data length.")
if len(section.datas()) != section.length():
raise Exception(f"Error - Invalid datas length.")
datas += section.datas()
output_path.write_bytes(datas)
def get_argparser():
import argparse
parser = argparse.ArgumentParser(description='dol file format utilities - [GameCube] v' + __version__)
parser.add_argument('--version', action='version', version='%(prog)s ' + __version__)
parser.add_argument('-v', '--verbose', action='store_true', help='verbose mode')
parser.add_argument('input_path', metavar='INPUT', help='input dol file path.')
parser.add_argument('-o', '--output-path', type=str, help='-o path: output path.', default=None)
parser.add_argument('-ini', '--ini-path', type=str, help='-ini path: ini path.', default=None)
parser.add_argument('-sr', '--sections-remap', action='store_true', help="-sr: remap the data sections of the dol to allow full ARCodes ini"
" file processing.", default=None)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-v2i', '--virtual2image', type=str, help="-v2i source.dol virtual_address: Translate a virtual address into "
"a dol offset if this was originally mapped from data or text. virtual_address has to be in hexadecimal: 80003100.")
group.add_argument('-i2v', '--image2virtual', type=str, help="-i2v source.dol dol_offset: Translate a dol offset to a virtual ad"
"dress mapped from data or text. dol_offset has to be in hexadecimal: 2000.")
group.add_argument('-s', '--stats', action='store_true', help="-s source.dol: Get stats about entry point, sections, bss and unu"
"sed virtual address space.")
group.add_argument('-e', '--extract', type=int, help="-e source.dol section_index [-o output_path]: Extract a section. index mus"
"t be between 0 and 17")
group.add_argument('-aar', '--analyse-action-replay', action='store_true', help="-aar source.dol action_replay.ini: Analyse an i"
"ni file containing a list of [write] directives to show unmapped sections to add for processing all ARCodes, including thos"
"e that are in nonexistent sections. Handle only ARCodes beginning with [00, 01, 02, 03, 04, 05].")
group.add_argument('-par', '--patch-action-replay', action='store_true', help="-par source.dol -ini action_replay.ini [-o output"
"_path] [-sr]: Patch initialised data inside the dol with an ini file containing a list of [write] directives. Handle only A"
"RCodes beginning with [00, 01, 02, 03, 04, 05]. If -sr is specified then add or update .data sections to allow full ini proc"
"essing.")
return parser
if __name__ == '__main__':
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
args = get_argparser().parse_args()
p_input = Path(args.input_path)
if args.verbose:
logging.getLogger().setLevel(logging.DEBUG)
if not p_input.is_file():
raise Exception("Error - Invalid dol file path.")
dol = Dol(p_input)
if args.virtual2image is not None:
virtual_address = int(args.virtual2image, 16)
try:
offset = dol.resolve_virtual2img(virtual_address)
print(f"Virtual address {virtual_address:08x} is at dol offset {offset:08x}")
except InvalidVirtualAddressError:
print("This virtual address is not in the dol.")
elif args.image2virtual is not None:
offset = int(args.image2virtual, 16)
try:
virtual_address = dol.resolve_img2virtual(offset)
print(f"Dol offset {offset:08x} is at virtual address {virtual_address:08x}")
except InvalidImgOffsetError:
print("This dol offset is invalid.")
elif args.stats:
dol.stats()
elif args.extract is not None: # explicit None check: section index 0 is falsy but valid
logging.info("### Extract section")
index = args.extract
section_type = "text" if index < 7 else "data"
output_path = Path(args.output_path) if args.output_path is not None else Path(f"{p_input.name}_{section_type}{index}")
logging.info(f"Extracting section {index} in file {output_path}...")
dol.extract(p_input.name, index, output_path)
elif args.analyse_action_replay:
logging.info("### Analyse Action Replay ini file")
if args.ini_path is None:
raise Exception("Error - Action Replay ini file has to be specified.")
action_replay_ini_path = Path(args.ini_path)
if not action_replay_ini_path.is_file():
raise Exception("Error - Invalid action replay ini file path.")
dol.analyse_action_replay(parse_action_replay_ini(action_replay_ini_path))
elif args.patch_action_replay:
logging.info("### Patch dol using Action Replay ini file")
if args.ini_path is None:
raise Exception("Error - Action Replay ini file has to be specified.")
action_replay_ini_path = Path(args.ini_path)
if not action_replay_ini_path.is_file():
raise Exception("Error - Invalid action replay ini file path.")
if not args.output_path:
raise Exception("Error - Output path has to be specified.")
output_path = Path(args.output_path)
if output_path.is_file():
raise Exception(f"Error - Please remove {output_path}.")
logging.info(f"Patching dol {p_input} in {output_path} using {action_replay_ini_path} ini file...")
action_replay_list = parse_action_replay_ini(action_replay_ini_path)
if args.sections_remap is not None:
logging.info(f"Sections remapping using action replay ini file...")
dol.remap_sections(action_replay_list)
dol.patch_memory_objects(output_path, action_replay_list)
|
nilq/baby-python
|
python
|
# unittest for cal.py
import unittest
import cal
class TestCal(unittest.TestCase):
def test_add(self):
result = cal.add(10,5)
self.assertEqual(result, 15)
if __name__ == '__main__':
unittest.main()
# to avoid needing the if statement above, run: python -m unittest test_cal.py
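# A minimal cal.py satisfying this test could look like the sketch below
# (the real module may define more operations):
#
# def add(x, y):
#     return x + y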
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayEcapiprodDrawndnContractGetResponse(AlipayResponse):
def __init__(self):
super(AlipayEcapiprodDrawndnContractGetResponse, self).__init__()
self._contract_content = None
self._contract_no = None
self._request_id = None
@property
def contract_content(self):
return self._contract_content
@contract_content.setter
def contract_content(self, value):
self._contract_content = value
@property
def contract_no(self):
return self._contract_no
@contract_no.setter
def contract_no(self, value):
self._contract_no = value
@property
def request_id(self):
return self._request_id
@request_id.setter
def request_id(self, value):
self._request_id = value
def parse_response_content(self, response_content):
response = super(AlipayEcapiprodDrawndnContractGetResponse, self).parse_response_content(response_content)
if 'contract_content' in response:
self.contract_content = response['contract_content']
if 'contract_no' in response:
self.contract_no = response['contract_no']
if 'request_id' in response:
self.request_id = response['request_id']
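# Hedged usage sketch (the payload is made up, and we assume the base class
# parses the JSON body into the dict handled above):
#
# response = AlipayEcapiprodDrawndnContractGetResponse()
# response.parse_response_content('{"contract_no": "C-001", "request_id": "r1"}')
# print(response.contract_no)  # -> "C-001"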
|
nilq/baby-python
|
python
|
"""
# Data Structures and Algorithms - Part B
# Created by Reece Benson (16021424)
"""
from tennis import Round
from tennis.Colours import Colours
class Tournament():
# Variables
name = None
game = None
parent = None
json_data = None
rounds = None
gender = None
difficulty = None
prize_money = None
complete = None
def __init__(self, _game, _name, _parent, _json_data):
self.name = _name
self.game = _game
self.parent = _parent
self.json_data = _json_data
self.rounds = { }
self.difficulty = _json_data['_difficulty']
self.prize_money = _json_data['prize_money']
self.complete = False
# Read in Round Data
for round_number in _json_data["rounds"]:
round_data = _json_data["rounds"][round_number]
# Load our Round in (if it is new)
if(round_number not in self.rounds):
# Create our Round object
self.rounds.update({ round_number: Round.Round(self.game, round_number, self, round_data) })
if(_game.debug):
print("[TOURNAMENT]: Tournament '{}' made!".format(_name))
def get_name(self):
return self.name
def get_gender(self):
return self.gender
def get_rounds(self):
return [ self.rounds[r] for r in self.rounds ]
def get_round(self, round_id):
return self.rounds["round_{0}".format(round_id)]
def get_difficulty(self):
return self.difficulty
def get_prize_money(self):
return self.prize_money
def is_complete(self):
return self.complete
def set_complete(self, state):
# Set this tournament as complete
self.complete = state
# Check if other tournaments are complete
all_complete = True
for t in self.parent.get_tournaments():
if(not t.is_complete()):
all_complete = False
if(all_complete):
# Open up the next season
print("\n\nAll tournaments are now " + Colours.OKGREEN + "complete" + Colours.ENDC + "! Start opening season {}".format(self.parent.get_id() + 1))
input(">>> Press <Return> to continue...")
# Create New Season
self.game.add_season(self.parent.get_id() + 1)
|
nilq/baby-python
|
python
|
#-*- encoding: utf-8 -*-
"""
Ordered fractions
Consider the fraction, n/d, where n and d are positive integers. If n<d and HCF(n,d)=1, it is called a reduced proper fraction.
If we list the set of reduced proper fractions for d ≤ 8 in ascending order of size, we get:
1/8, 1/7, 1/6, 1/5, 1/4, 2/7, 1/3, 3/8, 2/5, 3/7, 1/2, 4/7, 3/5, 5/8, 2/3, 5/7, 3/4, 4/5, 5/6, 6/7, 7/8
It can be seen that 2/5 is the fraction immediately to the left of 3/7.
By listing the set of reduced proper fractions for d ≤ 1,000,000 in ascending order of size, find the numerator of the fraction immediately to the left of 3/7.
"""
print(min(((d * 3 // 7, d) for d in range(1, 10 ** 6) if d % 7 != 0),
key=lambda nd: 3.0 / 7 - nd[0] / nd[1])[0])
# 428570
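# Closed-form cross-check (a sketch): left Farey neighbours n/d of 3/7 satisfy
# 3*d - 7*n = 1, so n = 2 + 3*k and d = 5 + 7*k. The largest d <= 1,000,000
# gives k = (10**6 - 5) // 7 = 142856, hence d = 999997 and n = 428570.
k = (10 ** 6 - 5) // 7
assert 2 + 3 * k == 428570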
|
nilq/baby-python
|
python
|
from enum import Enum
ISO8601_FMT = "%Y-%m-%dT%H:%M:%S.%fZ"
DATE_FORMAT = "%Y-%m-%d"
ALL_CASES_QUEUE_ID = "00000000-0000-0000-0000-000000000001"
UPDATED_CASES_QUEUE_ID = "00000000-0000-0000-0000-000000000004"
ENFORCEMENT_XML_MAX_FILE_SIZE = 1000000 # 1 MB
class GoodSystemFlags:
CLC_FLAG = "00000000-0000-0000-0000-000000000002"
PV_GRADING_FLAG = "00000000-0000-0000-0000-000000000003"
# URLS
ORGANISATIONS_URL = "/organisations/"
ORGANISATION_STATUS_URL = "/status/"
CASE_URL = "/cases/"
GOOD_URL = "/goods/"
GOODS_TYPE_URL = "/goods-types/"
APPLICATIONS_URL = "/applications/"
CASE_NOTES_URL = "/case-notes/"
DOCUMENTS_URL = "/documents/"
USER_ADVICE_URL = "/user-advice/"
TEAM_ADVICE_URL = "/team-advice/"
VIEW_TEAM_ADVICE_URL = "/view-team-advice/"
FINAL_ADVICE_URL = "/final-advice/"
VIEW_FINAL_ADVICE_URL = "/view-final-advice/"
ACTIVITY_URL = "/activity/"
ORGANISATION_SITES_ACTIVITY_URL = "/sites-activity/"
ACTIVITY_FILTERS_URL = "/activity/filters/"
ECJU_QUERIES_URL = "/ecju-queries/"
END_USER_ADVISORY_URL = "/queries/end-user-advisories/"
CASE_DENIAL_REASONS_URL = "/denial-reasons/"
SITES_URL = "/sites/"
USERS_URL = "/users/"
TEAMS_URL = "/teams/"
LICENCES_URL = "/licences/"
QUEUES_URL = "/queues/"
AUTHENTICATION_URL = "/gov-users/authenticate/"
GOV_USERS_URL = "/gov-users/"
GOV_USERS_ROLES_URL = "/gov-users/roles/"
GOV_USERS_PERMISSIONS_URL = "/gov-users/permissions/"
NOTIFICATIONS_URL = "/gov-users/notifications/"
FLAGS_URL = "/flags/"
OPEN_GENERAL_LICENCES_URL = "/open-general-licences/"
ASSIGN_FLAGS_URL = FLAGS_URL + "assign/"
FLAGGING_RULES = FLAGS_URL + "rules/"
FLAGS_CASE_LEVEL_FOR_TEAM = "/flags/?level=Case&team=True"
FLAGS_GOOD_LEVEL_FOR_TEAM = "/flags/?level=Good&team=True"
FLAGS_ORGANISATION_LEVEL_FOR_TEAM = "/flags/?level=Organisation&team=True"
GOODS_QUERIES_URL = "/queries/goods-queries/"
CLC_RESPONSE_URL = "/clc-response/"
PV_GRADING_RESPONSE_URL = "/pv-grading-response/"
PICKLIST_URL = "/picklist/"
LETTER_TEMPLATES_URL = "/letter-templates/"
GOOD_CLC_REVIEW_URL = "/goods/control-list-entries/"
MANAGE_STATUS_URL = "/status/"
FINAL_DECISION_URL = "/final-decision/"
DURATION_URL = "/duration/"
GENERATED_DOCUMENTS_URL = "/generated-documents/"
GENERATED_DOCUMENTS_PREVIEW_URL = GENERATED_DOCUMENTS_URL + "preview/"
PREVIEW_URL = "/preview/"
GENERATE_PREVIEW_URL = "generate-preview/"
DESTINATION_URL = CASE_URL + "destinations/"
CASE_OFFICER_URL = "/case-officer/"
NEXT_REVIEW_DATE_URL = "/review-date/"
FINALISE_CASE_URL = "/finalise/"
ROUTING_RULES_URL = "/routing-rules/"
ROUTING_RULES_STATUS_URL = "/status/"
ENFORCEMENT_URL = CASE_URL + "enforcement-check/"
APPLICANT_URL = "/applicant/"
COMPLIANCE_URL = "/compliance/"
COMPLIANCE_SITE_URL = "site/"
COMPLIANCE_VISIT_URL = "visit/"
COMPLIANCE_LICENCES_URL = "/licences/"
COMPLIANCE_PEOPLE_PRESENT_URL = "people-present/"
OPEN_LICENCE_RETURNS_URL = "/compliance/open-licence-returns/"
# Static URLs
STATIC_URL = "/static/"
CASE_TYPES_URL = STATIC_URL + "case-types/"
DENIAL_REASONS_URL = STATIC_URL + "denial-reasons/"
COUNTRIES_URL = STATIC_URL + "countries/"
STATUSES_URL = STATIC_URL + "statuses/"
STATUS_PROPERTIES_URL = STATUSES_URL + "properties/"
CONTROL_LIST_ENTRIES_URL = STATIC_URL + "control-list-entries/"
GOV_PV_GRADINGS_URL = STATIC_URL + "private-venture-gradings/gov/"
PV_GRADINGS_URL = STATIC_URL + "private-venture-gradings/"
LETTER_LAYOUTS_URL = STATIC_URL + "letter-layouts/"
DECISIONS_URL = STATIC_URL + "decisions/"
# Permissions
MAKE_FINAL_DECISIONS = "MAKE_FINAL_DECISIONS"
DECISIONS_LIST = ["approve", "refuse", "no_licence_required"]
# Role IDs
SUPER_USER_ROLE_ID = "00000000-0000-0000-0000-000000000002"
# Document types
GENERATED_DOCUMENT = "GENERATED"
# Case types
APPLICATION_CASE_TYPES = ["open", "standard", "hmrc"]
CLEARANCE_CASE_TYPES = ["exhibition_clearance", "gifting_clearance", "f680_clearance"]
class AdviceType:
CONFLICTING = "conflicting"
class Permission(Enum):
MANAGE_TEAM_ADVICE = "MANAGE_TEAM_ADVICE"
MANAGE_TEAM_CONFIRM_OWN_ADVICE = "MANAGE_TEAM_CONFIRM_OWN_ADVICE"
MANAGE_LICENCE_FINAL_ADVICE = "MANAGE_LICENCE_FINAL_ADVICE"
MANAGE_CLEARANCE_FINAL_ADVICE = "MANAGE_CLEARANCE_FINAL_ADVICE"
ADMINISTER_ROLES = "ADMINISTER_ROLES"
REVIEW_GOODS = "REVIEW_GOODS"
CONFIGURE_TEMPLATES = "CONFIGURE_TEMPLATES"
MANAGE_LICENCE_DURATION = "MANAGE_LICENCE_DURATION"
RESPOND_PV_GRADING = "RESPOND_PV_GRADING"
MANAGE_ORGANISATIONS = "MANAGE_ORGANISATIONS"
REOPEN_CLOSED_CASES = "REOPEN_CLOSED_CASES"
MANAGE_FLAGGING_RULES = "MANAGE_FLAGGING_RULES"
MANAGE_TEAM_ROUTING_RULES = "MANAGE_TEAM_ROUTING_RULES"
MANAGE_ALL_ROUTING_RULES = "MANAGE_ALL_ROUTING_RULES"
ACTIVATE_FLAGS = "ACTIVATE_FLAGS"
MANAGE_PICKLISTS = "MANAGE_PICKLISTS"
ENFORCEMENT_CHECK = "ENFORCEMENT_CHECK"
MAINTAIN_FOOTNOTES = "MAINTAIN_FOOTNOTES"
MAINTAIN_OGL = "MAINTAIN_OGL"
class FlagLevels:
CASES = "cases"
GOODS = "goods"
ORGANISATIONS = "organisations"
DESTINATIONS = "destinations"
class UserStatuses:
ACTIVE = "Active"
DEACTIVATED = "Deactivated"
class SystemTeamsID(Enum):
ADMIN = "00000000-0000-0000-0000-000000000001"
class CaseType:
EXHIBITION = "exhibition_clearance"
F680 = "f680_clearance"
HMRC = "hmrc"
class GoodsTypeCategory:
MILITARY = "military"
CRYPTOGRAPHIC = "cryptographic"
MEDIA = "media"
UK_CONTINENTAL_SHELF = "uk_continental_shelf"
DEALER = "dealer"
|
nilq/baby-python
|
python
|
import pytest
import case_conversion.utils as utils
from case_conversion import Case, InvalidAcronymError
@pytest.mark.parametrize(
"string,expected",
(
("fooBarString", (["foo", "Bar", "String"], "", False)),
("FooBarString", (["Foo", "Bar", "String"], "", False)),
("foo_bar_string", (["foo", None, "bar", None, "string"], "_", False)),
("foo-bar-string", (["foo", None, "bar", None, "string"], "-", False)),
("FOO_BAR_STRING", (["foo", None, "bar", None, "string"], "_", True)),
("foo.bar.string", (["foo", None, "bar", None, "string"], ".", False)),
("foo bar string", (["foo", None, "bar", None, "string"], " ", False)),
("foo/bar/string", (["foo", None, "bar", None, "string"], "/", False)),
("foo\\bar\\string", (["foo", None, "bar", None, "string"], "\\", False)),
("foobarstring", (["foobarstring"], "", False)),
("FOOBARSTRING", (["foobarstring"], "", True)),
),
)
def test_segment_string(string, expected):
assert utils.segment_string(string) == expected
@pytest.mark.parametrize(
"acronyms,expected",
(
(("http",), ["HTTP"]),
(("HTTP",), ["HTTP"],),
(("Http",), ["HTTP"],),
(("httP",), ["HTTP"],),
(("http", "Nasa"), ["HTTP", "NASA"]),
),
)
def test_sanitize_acronyms(acronyms, expected):
assert utils.sanitize_acronyms(acronyms) == expected
@pytest.mark.parametrize(
"s,i,words,expected",
(
# TODO: Add more cases
(0, 1, ["FOO", "bar"], 0),
(1, 2, ["foo", "BAR", "baz"], 1),
),
)
def test_simple_acronym_detection(s, i, words, expected):
assert utils.simple_acronym_detection(s, i, words) == expected
@pytest.mark.parametrize(
"s,i,words,acronyms,expected",
(
# TODO: Add more cases
(0, 1, ["FOO", "bar"], ("FOO",), 0),
(0, 1, ["FOO", "bar"], ("BAR",), 2),
),
)
def test_advanced_acronym_detection(s, i, words, acronyms, expected):
assert utils.advanced_acronym_detection(s, i, words, acronyms) == expected
@pytest.mark.parametrize("acronyms", ("HT-TP", "NA SA", "SU.GAR"))
def test_sanitize_acronyms_raises_on_invalid_acronyms(acronyms):
with pytest.raises(InvalidAcronymError):
utils.sanitize_acronyms(acronyms)
@pytest.mark.parametrize(
"words,acronyms,expected",
(
(["foobar"], (), ["Foobar"]),
(["fooBar"], (), ["Foobar"]),
(["FooBar"], (), ["Foobar"]),
(["Foo", "Bar"], ("BAR"), ["Foo", "BAR"]),
),
)
def test_normalize_words(words, acronyms, expected):
assert utils.normalize_words(words, acronyms) == expected
@pytest.mark.parametrize(
"was_upper,words,string,expected",
(
(False, [], "", Case.UNKOWN),
(True, [], "", Case.UPPER),
(False, [], "foobar", Case.LOWER),
(False, ["foo", "Bar"], "", Case.CAMEL),
(False, ["Foo", "Bar"], "", Case.PASCAL),
(False, ["foo", "bar"], "", Case.MIXED),
),
)
def test_determine_case(was_upper, words, string, expected):
assert utils.determine_case(was_upper, words, string) == expected
|
nilq/baby-python
|
python
|
from weibo import APIClient
import json
APP_KEY = "3722673574"
APP_SECRET = "3686fea0a65da883b6c2a7586f350425"
CALLBACK_URL = 'https://api.weibo.com/oauth2/default.html'
client = APIClient(app_key=APP_KEY, app_secret=APP_SECRET, redirect_uri=CALLBACK_URL)
with open('token.json', 'r') as f:
r = json.load(f)
access_token = r["access_token"]
expires_in = r["expires_at"]
client.set_access_token(access_token, expires_in)
raw_data = client.get('statuses/public_timeline', count=200)
for x in range(200):
print(str(raw_data['statuses'][x]['text']))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import json
import hashlib
from os import path, makedirs, SEEK_CUR
from harvester import libDataBs
def getOrCreatePath(archive_base_path):
if not path.exists(archive_base_path):
makedirs(archive_base_path)
def setUpDir(site, archive_base_path):
"""Prepare directory and json path for download."""
archive_json = path.join(archive_base_path, "archive.json")
final_dir = path.join(archive_base_path, site)
getOrCreatePath(final_dir)
return final_dir, archive_json
def appendToJson(data, file):
"""Append data to the end of json list without parsing it."""
with open(file, "ab+") as fj:
data_string = "{}]".format(json.dumps(data))
if fj.tell() > 0:
fj.seek(-1, SEEK_CUR) # remove closing bracket of the json list
fj.truncate()
data_string = ", {}".format(data_string)
else:
data_string = "[{}".format(data_string)
b = bytearray()
b.extend(map(ord, data_string))
fj.write(b)
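# Worked example of the seek/truncate trick above (hypothetical file states):
# empty file -> writes '[{"a": 1}]'
# file '[{"a": 1}]' -> seeks back over the closing ']', truncates it, then
# appends ', {"b": 2}]' giving '[{"a": 1}, {"b": 2}]'
# so the file stays a valid JSON list without ever parsing it.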
def save(data, timestamp, path_):
"""Save given data into specified environment."""
# prepare directory
final_dir, archive_json = setUpDir(data['site'], path_)
# prepare filename and location
data['md5'] = hashlib.md5(data['content']).hexdigest()
data['timestamp'] = timestamp
filename = str(timestamp) + "_" + data['orig_filename']
filename += ".%s" % data['ext'] if data['ext'] else ""
file_location = path.join(final_dir, filename)
data['location'] = file_location
# check if we already downloaded the file
with libDataBs.DataBs(path_) as db:
print(db.gibData(data['md5']))
if not db.checkHashExistence(data['md5']):
# save the file
with open(file_location, 'wb') as f:
f.write(data['content'])
db.insertData(
{'hash': data['md5'], 'filename': filename, 'count': 1})
else:
# just update the count
db.upCount(data['md5'])
del data['content']
print(data)
# save information about data in json file
appendToJson(data, archive_json)
def urlReg(msg):
"""Try to match an url."""
m = re.match(r'^.*(https?://(-\.)?([^\s/?\.#-]+\.?)+(/[^\s]*)?)', msg)
if m:
return m.group(1)
return
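# Example: urlReg("see https://example.com/page for details") would return
# "https://example.com/page"; a message without a URL returns None.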
|
nilq/baby-python
|
python
|
from collections import OrderedDict
import itertools
import json
from scipy.sparse import coo_matrix, block_diag
import autograd.numpy as np
from .base_patterns import Pattern
####################
# JSON helpers.
# A dictionary of registered types for loading to and from JSON.
# This allows PatternDict and PatternArray read JSON containing arbitrary
# pattern types without executing user code.
__json_patterns = dict()
def register_pattern_json(pattern, allow_overwrite=False):
"""
Register a pattern for automatic conversion from JSON.
Parameters
------------
pattern: A Pattern class
The pattern to register.
allow_overwrite: Boolean
If true, allow overwriting already-registered patterns.
Examples
-------------
>>> class MyCustomPattern(paragami.Pattern):
>>> ... definitions ...
>>>
>>> paragami.register_pattern_json(paragami.MyCustomPattern)
>>>
>>> my_pattern = MyCustomPattern(...)
>>> my_pattern_json = my_pattern.to_json()
>>>
>>> # ``my_pattern_from_json`` should be identical to ``my_pattern``.
>>> my_pattern_from_json = paragami.get_pattern_from_json(my_pattern_json)
"""
pattern_name = pattern.json_typename()
if (not allow_overwrite) and pattern_name in __json_patterns.keys():
raise ValueError(
'A pattern named {} is already registered for JSON.'.format(
pattern_name))
__json_patterns[pattern_name] = pattern
def get_pattern_from_json(pattern_json):
"""
Return the appropriate pattern from ``pattern_json``.
The pattern must have been registered using ``register_pattern_json``.
Parameters
--------------
pattern_json: String
A JSON string as created with a pattern's ``to_json`` method.
Returns
-----------
The pattern instance encoded in the ``pattern_json`` string.
"""
pattern_json_dict = json.loads(pattern_json)
try:
json_pattern_name = pattern_json_dict['pattern']
except KeyError:
err_string = \
'A pattern JSON string must have an entry called pattern ' + \
'which is registered using ``register_pattern_json``.'
raise KeyError(err_string)
if json_pattern_name not in __json_patterns.keys():
err_string = (
'Before converting from JSON, the pattern {} must be ' +
'registered with ``register_pattern_json``.').format(
json_pattern_name)
raise KeyError(err_string)
return __json_patterns[json_pattern_name].from_json(pattern_json)
def save_folded(file, folded_val, pattern, **argk):
"""
Save a folded value to a file with its pattern.
Flatten a folded value and save it with its pattern to a file using
``numpy.savez``. Additional keyword arguments will also be saved to the
file.
Parameters
---------------
file: String or file
Follows the conventions of ``numpy.savez``. Note that the ``npz``
extension will be added if it is not present.
folded_val:
The folded value of a parameter.
pattern:
A ``paragami`` pattern for the folded value.
"""
flat_val = pattern.flatten(folded_val, free=False)
pattern_json = pattern.to_json()
np.savez(file, flat_val=flat_val, pattern_json=pattern_json, **argk)
def load_folded(file):
"""
Load a folded value and its pattern from a file together with any
additional data.
Note that ``pattern`` must be registered with ``register_pattern_json``
to use ``load_folded``.
Parameters
---------------
file: String or file
A file or filename of data saved with ``save_folded``.
Returns
-----------
folded_val:
The folded value of the saved parameter.
pattern:
The ``paragami`` pattern of the saved parameter.
data:
The data as returned from ``np.load``. Additional saved values will
exist as keys of ``data``.
"""
data = np.load(file)
pattern = get_pattern_from_json(str(data['pattern_json']))
folded_val = pattern.fold(data['flat_val'], free=False)
return folded_val, pattern, data
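# Round-trip sketch (assuming a NumericArrayPattern registered for JSON):
#
# pattern = paragami.NumericArrayPattern(shape=(3,))
# folded = pattern.random()
# save_folded('param.npz', folded, pattern, method='demo')
# folded2, pattern2, data = load_folded('param.npz')
# # folded2 matches folded, pattern2 equals pattern, data['method'] == 'demo'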
##########################
# Dictionary of patterns.
class PatternDict(Pattern):
"""
A dictionary of patterns (which is itself a pattern).
Methods
------------
lock:
Prevent additional patterns from being added or removed.
Examples
------------
.. code-block:: python
import paragami
# Add some patterns.
dict_pattern = paragami.PatternDict()
dict_pattern['vec'] = paragami.NumericArrayPattern(shape=(2, ))
dict_pattern['mat'] = paragami.PSDSymmetricMatrixPattern(size=3)
# Dictionaries can also contain dictionaries (but they have to
# be populated /before/ being added to the parent).
sub_dict_pattern = paragami.PatternDict()
sub_dict_pattern['vec1'] = paragami.NumericArrayPattern(shape=(2, ))
sub_dict_pattern['vec2'] = paragami.NumericArrayPattern(shape=(2, ))
dict_pattern['sub_dict'] = sub_dict_pattern
# We're done adding patterns, so lock the dictionary.
dict_pattern.lock()
# Get a random intial value for the whole dictionary.
dict_val = dict_pattern.random()
print(dict_val['mat']) # Prints a 3x3 positive definite numpy matrix.
# Get a flattened value of the whole dictionary.
dict_val_flat = dict_pattern.flatten(dict_val, free=True)
# Get a new random folded value of the dictionary.
new_dict_val_flat = np.random.random(len(dict_val_flat))
new_dict_val = dict_pattern.fold(new_dict_val_flat, free=True)
"""
def __init__(self, free_default=None):
self.__pattern_dict = OrderedDict()
# __lock determines whether new elements can be added.
self.__lock = False
super().__init__(0, 0, free_default=free_default)
def lock(self):
self.__lock = True
def __str__(self):
pattern_strings = [
'\t[' + key + '] = ' + str(self.__pattern_dict[key])
for key in self.__pattern_dict]
return \
'OrderedDict:\n' + \
'\n'.join(pattern_strings)
def __getitem__(self, key):
return self.__pattern_dict[key]
def as_dict(self):
# json.loads returns a dictionary, not an OrderedDict, so
# save the keys in the current order.
contents = {}
for pattern_name, pattern in self.__pattern_dict.items():
contents[pattern_name] = pattern.to_json()
keys = [ key for key in self.__pattern_dict.keys() ]
return {
'pattern': self.json_typename(),
'keys': keys,
'contents': contents}
def _check_lock(self):
if self.__lock:
raise ValueError(
'The dictionary is locked, and its values cannot be changed.')
def __setitem__(self, pattern_name, pattern):
self._check_lock()
# if pattern_name in self.__pattern_dict.keys():
# self.__delitem__(pattern_name)
self.__pattern_dict[pattern_name] = pattern
# We cannot allow pattern dictionaries to change their size
# once they've been included as members in another dictionary,
# since we have no way of updating the parent dictionary's size.
# To avoid unexpected errors, lock any dictionary that is set as
# a member.
if type(self.__pattern_dict[pattern_name]) is PatternDict:
self.__pattern_dict[pattern_name].lock()
self._free_flat_length = self._update_flat_length(free=True)
self._flat_length = self._update_flat_length(free=False)
def __delitem__(self, pattern_name):
self._check_lock()
pattern = self.__pattern_dict[pattern_name]
self.__pattern_dict.pop(pattern_name)
self._free_flat_length = self._update_flat_length(free=True)
self._flat_length = self._update_flat_length(free=False)
def keys(self):
return self.__pattern_dict.keys()
def empty(self, valid):
empty_val = OrderedDict()
for pattern_name, pattern in self.__pattern_dict.items():
empty_val[pattern_name] = pattern.empty(valid)
return empty_val
def validate_folded(self, folded_val, validate_value=None):
for pattern_name, pattern in self.__pattern_dict.items():
if not pattern_name in folded_val:
return \
False, \
'{} not in folded_val dictionary.'.format(pattern_name)
valid, err_msg = pattern.validate_folded(
folded_val[pattern_name], validate_value=validate_value)
if not valid:
err_msg = '{} is not valid.'.format(err_msg)
return False, err_msg
return True, ''
def fold(self, flat_val, free=None, validate_value=None):
free = self._free_with_default(free)
flat_val = np.atleast_1d(flat_val)
if len(flat_val.shape) != 1:
raise ValueError('The argument to fold must be a 1d vector.')
flat_length = self.flat_length(free)
if flat_val.size != flat_length:
error_string = \
('Wrong size for pattern dictionary {}.\n' +
'Expected {}, got {}.').format(
str(self), str(flat_length), str(flat_val.size))
raise ValueError(error_string)
# TODO: add an option to do this -- and other operations -- in place.
folded_val = OrderedDict()
offset = 0
for pattern_name, pattern in self.__pattern_dict.items():
pattern_flat_length = pattern.flat_length(free)
pattern_flat_val = flat_val[offset:(offset + pattern_flat_length)]
offset += pattern_flat_length
# Containers must not mix free and non-free values, so do not
# use default values for free.
folded_val[pattern_name] = \
pattern.fold(pattern_flat_val,
free=free,
validate_value=validate_value)
if not free:
valid, msg = self.validate_folded(
folded_val, validate_value=validate_value)
if not valid:
raise ValueError(msg)
return folded_val
def flatten(self, folded_val, free=None, validate_value=None):
free = self._free_with_default(free)
valid, msg = self.validate_folded(
folded_val, validate_value=validate_value)
if not valid:
raise ValueError(msg)
# flat_length = self.flat_length(free)
# offset = 0
# flat_val = np.full(flat_length, float('nan'))
flat_vals = []
for pattern_name, pattern in self.__pattern_dict.items():
pattern_flat_length = pattern.flat_length(free)
# Containers must not mix free and non-free values, so do not
# use default values for free.
# flat_val[offset:(offset + pattern_flat_length)] = \
flat_vals.append(
pattern.flatten(
folded_val[pattern_name],
free=free,
validate_value=validate_value))
#offset += pattern_flat_length
return np.hstack(flat_vals)
def _update_flat_length(self, free):
# This is a little wasteful with the benefit of being less error-prone
# than adding and subtracting lengths as keys are changed.
return np.sum([pattern.flat_length(free) for pattern_name, pattern in
self.__pattern_dict.items()])
def unfreeing_jacobian(self, folded_val, sparse=True):
jacobians = []
for pattern_name, pattern in self.__pattern_dict.items():
jac = pattern.unfreeing_jacobian(
folded_val[pattern_name], sparse=True)
jacobians.append(jac)
sp_jac = block_diag(jacobians, format='coo')
if sparse:
return sp_jac
else:
return np.array(sp_jac.todense())
def freeing_jacobian(self, folded_val, sparse=True):
jacobians = []
for pattern_name, pattern in self.__pattern_dict.items():
jac = pattern.freeing_jacobian(
folded_val[pattern_name], sparse=True)
jacobians.append(jac)
sp_jac = block_diag(jacobians, format='coo')
if sparse:
return sp_jac
else:
return np.array(sp_jac.todense())
def log_abs_det_unfreeing_jacobian(self, folded_val):
log_abs_det = 0.0
for pattern_name, pattern in self.__pattern_dict.items():
log_abs_det += pattern.log_abs_det_unfreeing_jacobian(
folded_val[pattern_name])
return log_abs_det
def log_abs_det_freeing_jacobian(self, folded_val):
log_abs_det = 0.0
for pattern_name, pattern in self.__pattern_dict.items():
log_abs_det += pattern.log_abs_det_freeing_jacobian(
folded_val[pattern_name])
return log_abs_det
@classmethod
def from_json(cls, json_string):
json_dict = json.loads(json_string)
if json_dict['pattern'] != cls.json_typename():
error_string = \
('{}.from_json must be called on a json_string made ' +
'from the same pattern type. The json_string ' +
'pattern type was {}.').format(
cls.json_typename(), json_dict['pattern'])
raise ValueError(error_string)
pattern_dict = cls()
for pattern_name in json_dict['keys']:
pattern_dict[pattern_name] = get_pattern_from_json(
json_dict['contents'][pattern_name])
return pattern_dict
def flat_indices(self, folded_bool, free=None):
free = self._free_with_default(free)
valid, msg = self.validate_folded(folded_bool, validate_value=False)
if not valid:
raise ValueError(msg)
flat_length = self.flat_length(free)
offset = 0
indices = []
for pattern_name, pattern in self.__pattern_dict.items():
pattern_flat_length = pattern.flat_length(free)
# Containers must not mix free and non-free values, so do not
# use default values for free.
pattern_indices = pattern.flat_indices(
folded_bool[pattern_name], free=free)
if len(pattern_indices) > 0:
indices.append(pattern_indices + offset)
offset += pattern_flat_length
if len(indices) > 0:
return np.hstack(indices)
else:
return np.array([], dtype=int)
def flat_names(self, free, delim='_'):
flat_names_list = []
for pattern_name, pattern in self.__pattern_dict.items():
pattern_flat_names = pattern.flat_names(free)
# TODO: only append the delimiter for containers
pattern_flat_names = \
[ pattern_name + delim + t for t in pattern_flat_names]
flat_names_list.append(pattern_flat_names)
return np.hstack(flat_names_list)
##########################
# An array of a pattern.
class PatternArray(Pattern):
"""
An array of a pattern (which is also itself a pattern).
The first indices of the folded pattern are the array and the final
indices are of the base pattern. For example, if `shape=(3, 4)`
and `base_pattern = PSDSymmetricMatrixPattern(size=5)`, then the folded
value of the array will have shape `(3, 4, 5, 5)`, where the entry
`folded_val[i, j, :, :]` is a 5x5 positive definite matrix.
Currently this can only contain patterns whose folded values are
numeric arrays (i.e., `NumericArrayPattern`, `SimplexArrayPattern`, and
`PSDSymmetricMatrixPattern`).
"""
def __init__(self, array_shape, base_pattern, free_default=None):
"""
Parameters
------------
array_shape: tuple of int
The shape of the array (not including the base parameter)
base_pattern:
The base pattern.
"""
# TODO: change the name shape -> array_shape
# and have shape be the whole array, including the pattern.
self.__array_shape = tuple(array_shape)
self.__array_ranges = [range(0, t) for t in self.__array_shape]
num_elements = np.prod(self.__array_shape)
self.__base_pattern = base_pattern
# Check whether the base_pattern takes values that are numpy arrays.
# If they are, then the unfolded value will be a single numpy array
# of shape __array_shape + base_pattern.empty().shape.
empty_pattern = self.__base_pattern.empty(valid=False)
if type(empty_pattern) is np.ndarray:
self.__folded_pattern_shape = empty_pattern.shape
else:
# autograd's numpy does not seem to support object arrays.
# The following snippet works with numpy 1.14.2 but not
# autograd's numpy (as of commit 5d49ee anyway).
#
# >>> import autograd.numpy as np
# >>> foo = OrderedDict(a=5)
# >>> bar = np.array([foo for i in range(3)])
# >>> print(bar[0]['a']) # Gives an index error.
#
raise NotImplementedError(
'PatternArray does not support patterns whose folded ' +
'values are not numpy.ndarray types.')
self.__shape = tuple(self.__array_shape) + empty_pattern.shape
super().__init__(
num_elements * base_pattern.flat_length(free=False),
num_elements * base_pattern.flat_length(free=True),
free_default=free_default)
def __str__(self):
return('PatternArray {} of {}'.format(
self.__array_shape, self.__base_pattern))
def as_dict(self):
return {
'pattern': self.json_typename(),
'shape': self.__shape,
'array_shape': self.__array_shape,
'base_pattern': self.__base_pattern.to_json() }
def array_shape(self):
"""The shape of the array of parameters.
This does not include the dimension of the folded parameters.
"""
return self.__array_shape
def shape(self):
"""The shape of a folded value.
"""
return self.__shape
def base_pattern(self):
return self.__base_pattern
def validate_folded(self, folded_val, validate_value=None):
if folded_val.ndim != len(self.__shape):
return \
False, \
'Wrong number of dimensions. Expected {}, got {}.'.format(
folded_val.ndim, len(self.__shape))
if folded_val.shape != self.__shape:
return \
False, \
'Wrong shape. Expected {}, got {}.'.format(
folded_val.shape, self.__shape)
for item in itertools.product(*self.__array_ranges):
valid, msg = self.__base_pattern.validate_folded(
folded_val[item], validate_value=validate_value)
if not valid:
err_msg = 'Bad value in location {}: {}'.format(item, msg)
return False, err_msg
return True, ''
def empty(self, valid):
empty_pattern = self.__base_pattern.empty(valid=valid)
repeated_array = np.array(
[empty_pattern
for item in itertools.product(*self.__array_ranges)])
return np.reshape(repeated_array, self.__shape)
def _stacked_obs_slice(self, item, flat_length):
"""
Get the slice in a flat array corresponding to ``item``.
Parameters
-------------
item: tuple
A tuple of indices into the array of patterns (i.e.,
into the shape ``__array_shape``).
flat_length: integer
The length of a single flat pattern.
Returns
---------------
A slice for the elements in a vector of length ``flat_length``
corresponding to element item of the array, where ``item`` is a tuple
indexing into the array of shape ``__array_shape``.
"""
assert len(item) == len(self.__array_shape)
linear_item = np.ravel_multi_index(item, self.__array_shape) * flat_length
return slice(linear_item, linear_item + flat_length)
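# Worked example: with __array_shape == (3, 4), item == (1, 2) and
# flat_length == 5, ravel_multi_index gives 1 * 4 + 2 == 6, so the returned
# slice is slice(30, 35) -- the seventh block of five flat entries.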
def fold(self, flat_val, free=None, validate_value=None):
free = self._free_with_default(free)
flat_val = np.atleast_1d(flat_val)
if len(flat_val.shape) != 1:
raise ValueError('The argument to fold must be a 1d vector.')
if flat_val.size != self.flat_length(free):
error_string = \
'Wrong size for parameter. Expected {}, got {}'.format(
str(self.flat_length(free)), str(flat_val.size))
raise ValueError(error_string)
flat_length = self.__base_pattern.flat_length(free)
folded_array = np.array([
self.__base_pattern.fold(
flat_val[self._stacked_obs_slice(item, flat_length)],
free=free, validate_value=validate_value)
for item in itertools.product(*self.__array_ranges)])
folded_val = np.reshape(folded_array, self.__shape)
if not free:
valid, msg = self.validate_folded(
folded_val, validate_value=validate_value)
if not valid:
raise ValueError(msg)
return folded_val
def flatten(self, folded_val, free=None, validate_value=None):
free = self._free_with_default(free)
valid, msg = self.validate_folded(
folded_val, validate_value=validate_value)
if not valid:
raise ValueError(msg)
return np.hstack(np.array([
self.__base_pattern.flatten(
folded_val[item], free=free, validate_value=validate_value)
for item in itertools.product(*self.__array_ranges)]))
def flat_length(self, free=None):
free = self._free_with_default(free)
return self._free_flat_length if free else self._flat_length
def unfreeing_jacobian(self, folded_val, sparse=True):
jacobians = []
for item in itertools.product(*self.__array_ranges):
jac = self.__base_pattern.unfreeing_jacobian(
folded_val[item], sparse=True)
jacobians.append(jac)
sp_jac = block_diag(jacobians, format='coo')
if sparse:
return sp_jac
else:
return np.array(sp_jac.todense())
def freeing_jacobian(self, folded_val, sparse=True):
jacobians = []
for item in itertools.product(*self.__array_ranges):
jac = self.__base_pattern.freeing_jacobian(
folded_val[item], sparse=True)
jacobians.append(jac)
sp_jac = block_diag(jacobians, format='coo')
if sparse:
return sp_jac
else:
return np.array(sp_jac.todense())
@classmethod
def from_json(cls, json_string):
json_dict = json.loads(json_string)
if json_dict['pattern'] != cls.json_typename():
error_string = \
('{}.from_json must be called on a json_string made ' +
'from the same pattern type. The json_string ' +
'pattern type was {}.').format(
cls.json_typename(), json_dict['pattern'])
raise ValueError(error_string)
base_pattern = get_pattern_from_json(json_dict['base_pattern'])
return cls(
array_shape=json_dict['array_shape'], base_pattern=base_pattern)
def flat_indices(self, folded_bool, free=None):
free = self._free_with_default(free)
valid, msg = self.validate_folded(folded_bool, validate_value=False)
if not valid:
raise ValueError(msg)
indices = []
pattern_flat_length = self.__base_pattern.flat_length(free=free)
offset = 0
for item in itertools.product(*self.__array_ranges):
if np.any(folded_bool[item]):
pattern_indices = self.__base_pattern.flat_indices(
folded_bool[item], free=free)
if len(pattern_indices) > 0:
indices.append(pattern_indices + offset)
offset += pattern_flat_length
if len(indices) > 0:
return np.hstack(indices)
else:
return np.array([], dtype=int)
register_pattern_json(PatternDict)
register_pattern_json(PatternArray)
|
nilq/baby-python
|
python
|
class Recall:
def __init__(self, max_count=10):
self.max_count = max_count
self.position = 0
self.buffer = []
def move_up(self):
if self.position < len(self.buffer) - 1:
self.position += 1
return self.buffer[self.position]
def move_down(self):
if self.position > 0:
self.position -= 1
return self.buffer[self.position]
else:
self.position = -1
def append(self, text):
self.position = -1
if text not in self.buffer:
if len(self.buffer) >= self.max_count:
self.buffer = [text] + self.buffer[:self.max_count - 1]
else:
self.buffer.insert(0, text)
elif self.buffer[0] != text:
self.buffer.remove(text)
self.buffer.insert(0, text)
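# Usage sketch of the recall buffer (hypothetical shell history):
#
# history = Recall(max_count=3)
# history.append("ls")   # buffer: ["ls"]
# history.append("pwd")  # buffer: ["pwd", "ls"], position reset
# history.move_up()      # -> "pwd" (most recent entry)
# history.move_up()      # -> "ls"
# history.move_down()    # -> "pwd"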
|
nilq/baby-python
|
python
|
# https://leetcode.com/problems/3sum/
from typing import List
class Solution:
def threeSum(self, nums: List[int]) -> List[List[int]]:
res = set()
nums = sorted(nums)
for k in range(0, len(nums)):
target = -nums[k]
l, r = k + 1, len(nums) - 1
while l < r:
sum_two = nums[l] + nums[r]
if sum_two < target:
l += 1
elif sum_two > target:
r -= 1
else:
res.add((nums[k], nums[l], nums[r]))
l += 1
r -= 1
return [list(triplet) for triplet in res]
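# Usage sketch: sorting costs O(n log n) and the two-pointer scan per anchor
# makes the whole routine O(n^2); the set deduplicates repeated triplets.
#
# print(Solution().threeSum([-1, 0, 1, 2, -1, -4]))
# # -> [[-1, -1, 2], [-1, 0, 1]] (order may vary)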
|
nilq/baby-python
|
python
|
from django.apps import AppConfig
class GradedConfig(AppConfig):
name = 'graded'
|
nilq/baby-python
|
python
|
from .base import *
DEBUG = False
WEBPACK_LOADER = {
'DEFAULT': {
'BUNDLE_DIR_NAME': 'dist/',
'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats-prod.json'),
}
}
|
nilq/baby-python
|
python
|
from src.pybitbucket.bitbucket import Bitbucket
config = {
"secret-properties": "secretproperties.properties",
"properties": "properties.properties"}
bb = Bitbucket(settings=config)
# workspace = bb.workspace
prs_df = bb.df_prs
commits_df = bb.df_commits
prs_list = sorted(prs_df["pr_id"].unique().tolist())  # list.sort() returns None, so use sorted()
print(f"PRs: {prs_list}")
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
'''
Tiger
'''
import json
import os
import subprocess
from collections import OrderedDict
from tasks.util import (LoadPostgresFromURL, classpath, TempTableTask, grouper,
shell, TableTask, ColumnsTask, TagsTask,
Carto2TempTableTask)
from tasks.meta import (OBSColumn, GEOM_REF, GEOM_NAME, OBSTag, current_session)
from tasks.tags import SectionTags, SubsectionTags, LicenseTags, BoundaryTags
from luigi import (Task, WrapperTask, Parameter, LocalTarget, IntParameter)
from decimal import Decimal
class TigerSourceTags(TagsTask):
def version(self):
return 1
def tags(self):
return [
OBSTag(id='tiger-source',
name='US Census TIGER/Line Shapefiles',
type='source',
description='`TIGER/Line Shapefiles <https://www.census.gov/geo/maps-data/data/tiger-line.html>`_')
]
class ClippedGeomColumns(ColumnsTask):
def version(self):
return 13
def requires(self):
return {
'geom_columns': GeomColumns(),
'sections': SectionTags(),
'subsections': SubsectionTags(),
'source': TigerSourceTags(),
'license': LicenseTags(),
'boundary':BoundaryTags(),
}
def columns(self):
cols = OrderedDict()
session = current_session()
input_ = self.input()
sections = input_['sections']
subsections = input_['subsections']
source = input_['source']['tiger-source']
license = input_['license']['no-restrictions']
boundary_type = input_['boundary']
for colname, coltarget in self.input()['geom_columns'].items():
col = coltarget.get(session)
cols[colname + '_clipped'] = OBSColumn(
type='Geometry',
name='Shoreline clipped ' + col.name,
weight=Decimal(col.weight) + Decimal(0.01),
description='A cartography-ready version of {name}'.format(
name=col.name),
targets={col: 'cartography'},
tags=[sections['united_states'],
subsections['boundary'],
source, license]
)
interpolated_boundaries = ['block_clipped', 'block_group_clipped',
'puma_clipped','census_tract_clipped',
'county_clipped','state_clipped']
cartographic_boundaries = ['cbsa_clipped',
'school_district_elementary_clipped',
'place_clipped',
'school_district_secondary_clipped',
'zcta5_clipped',
'congressional_district_clipped',
'school_district_unified_clipped',
'block_clipped', 'block_group_clipped',
'puma_clipped','census_tract_clipped',
'county_clipped','state_clipped']
for colname, col in cols.items():
if colname in interpolated_boundaries:
col.tags.append(boundary_type['interpolation_boundary'])
if colname in cartographic_boundaries:
col.tags.append(boundary_type['cartographic_boundary'])
return cols
class GeomColumns(ColumnsTask):
def version(self):
return 15
def requires(self):
return {
'sections': SectionTags(),
'subsections': SubsectionTags(),
'source': TigerSourceTags(),
'license': LicenseTags(),
'boundary': BoundaryTags(),
}
def _generate_desc(self, sumlevel):
'''
Add figure to the description
'''
return SUMLEVELS_BY_SLUG[sumlevel]['census_description']
def columns(self):
input_ = self.input()
sections = input_['sections']
subsections = input_['subsections']
source = input_['source']['tiger-source']
license = input_['license']['no-restrictions']
columns = {
'block_group': OBSColumn(
type='Geometry',
name='US Census Block Groups',
description=self._generate_desc("block_group"),
weight=10,
tags=[sections['united_states'], subsections['boundary']]
),
'block': OBSColumn(
type='Geometry',
name='US Census Blocks',
description=self._generate_desc("block"),
weight=0,
tags=[sections['united_states'], subsections['boundary']]
),
'census_tract': OBSColumn(
type='Geometry',
name='US Census Tracts',
description=self._generate_desc("census_tract"),
weight=9,
tags=[sections['united_states'], subsections['boundary']]
),
'congressional_district': OBSColumn(
type='Geometry',
name='US Congressional Districts',
description=self._generate_desc("congressional_district"),
weight=5.4,
tags=[sections['united_states'], subsections['boundary']]
),
'county': OBSColumn(
type='Geometry',
name='US County',
description=self._generate_desc("county"),
weight=7,
tags=[sections['united_states'], subsections['boundary']]
),
'puma': OBSColumn(
type='Geometry',
name='US Census Public Use Microdata Areas',
description=self._generate_desc("puma"),
weight=5.5,
tags=[sections['united_states'], subsections['boundary']]
),
'state': OBSColumn(
type='Geometry',
name='US States',
description=self._generate_desc("state"),
weight=8,
tags=[sections['united_states'], subsections['boundary']]
),
'zcta5': OBSColumn(
type='Geometry',
name='US Census Zip Code Tabulation Areas',
description=self._generate_desc('zcta5'),
weight=6,
tags=[sections['united_states'], subsections['boundary']]
),
'school_district_elementary': OBSColumn(
type='Geometry',
name='Elementary School District',
description=self._generate_desc('school_district_elementary'),
weight=2.8,
tags=[sections['united_states'], subsections['boundary']]
),
'school_district_secondary': OBSColumn(
type='Geometry',
name='Secondary School District',
description=self._generate_desc('school_district_secondary'),
weight=2.9,
tags=[sections['united_states'], subsections['boundary']]
),
'school_district_unified': OBSColumn(
type='Geometry',
name='Unified School District',
description=self._generate_desc('school_district_unified'),
weight=5,
tags=[sections['united_states'], subsections['boundary']]
),
'cbsa': OBSColumn(
type='Geometry',
name='Core Based Statistical Area (CBSA)',
description=self._generate_desc("cbsa"),
weight=1,
tags=[sections['united_states'], subsections['boundary']]
),
'place': OBSColumn(
type='Geometry',
name='Incorporated Places',
description=self._generate_desc("place"),
weight=1.1,
tags=[sections['united_states'], subsections['boundary']]
),
}
for _,col in columns.iteritems():
col.tags.append(source)
col.tags.append(license)
return columns
class Attributes(ColumnsTask):
def version(self):
return 2
def requires(self):
return SectionTags()
def columns(self):
return OrderedDict([
('aland', OBSColumn(
type='Numeric',
name='Land area',
aggregate='sum',
weight=0,
)),
('awater', OBSColumn(
type='Numeric',
name='Water area',
aggregate='sum',
weight=0,
)),
])
class GeoidColumns(ColumnsTask):
def version(self):
return 6
def requires(self):
return {
'raw': GeomColumns(),
'clipped': ClippedGeomColumns()
}
def columns(self):
cols = OrderedDict()
clipped = self.input()['clipped']
for colname, coltarget in self.input()['raw'].iteritems():
col = coltarget._column
cols[colname + '_geoid'] = OBSColumn(
type='Text',
name=col.name + ' Geoids',
weight=0,
targets={
col: GEOM_REF,
clipped[colname + '_clipped']._column: GEOM_REF
}
)
return cols
class GeonameColumns(ColumnsTask):
def version(self):
return 2
def requires(self):
return {
'raw': GeomColumns(),
'clipped': ClippedGeomColumns(),
'subsections': SubsectionTags(),
'sections':SectionTags(),
}
def columns(self):
cols = OrderedDict()
clipped = self.input()['clipped']
subsection = self.input()['subsections']
sections = self.input()['sections']
for colname, coltarget in self.input()['raw'].iteritems():
col = coltarget._column
cols[colname + '_geoname'] = OBSColumn(
type='Text',
name=col.name + ' Proper Name',
weight=1,
tags=[subsection['names'],sections['united_states']],
targets={
col: GEOM_NAME,
clipped[colname + '_clipped']._column: GEOM_NAME
}
)
return cols
class DownloadTigerGeography(Task):
year = IntParameter()
geography = Parameter()
url_format = 'ftp://ftp2.census.gov/geo/tiger/TIGER{year}/{geography}/'
@property
def url(self):
return self.url_format.format(year=self.year, geography=self.geography)
@property
def directory(self):
return os.path.join('tmp', classpath(self), str(self.year))
def run(self):
shell('wget --recursive --continue --accept=*.zip '
'--no-parent --cut-dirs=3 --no-host-directories '
'--directory-prefix={directory} '
'{url}'.format(directory=self.directory, url=self.url))
def output(self):
        filenames = shell('ls {}'.format(os.path.join(
            self.directory, self.geography, '*.zip'))).strip().split('\n')
for path in filenames:
yield LocalTarget(path)
def complete(self):
try:
exists = shell('ls {}'.format(os.path.join(self.directory, self.geography, '*.zip')))
return exists != ''
except subprocess.CalledProcessError:
return False
class UnzipTigerGeography(Task):
'''
Unzip tiger geography
'''
year = Parameter()
geography = Parameter()
def requires(self):
return DownloadTigerGeography(year=self.year, geography=self.geography)
@property
def directory(self):
return os.path.join('tmp', classpath(self), str(self.year), self.geography)
def run(self):
#for infile in self.input():
cmd = "cd {path} && find -iname '*.zip' -print0 | xargs -0 -n1 unzip -n -q ".format(
path=self.directory)
shell(cmd)
def output(self):
        # split into one path per line; iterating the raw string would yield
        # single characters
        shps = shell('ls {}'.format(os.path.join(
            self.directory, '*.shp'))).strip().split('\n')
for path in shps:
yield LocalTarget(path)
def complete(self):
try:
exists = shell('ls {}'.format(os.path.join(self.directory, '*.shp')))
return exists != ''
except subprocess.CalledProcessError:
return False
class TigerGeographyShapefileToSQL(TempTableTask):
'''
Take downloaded shapefiles and load them into Postgres
'''
year = Parameter()
geography = Parameter()
def requires(self):
return UnzipTigerGeography(year=self.year, geography=self.geography)
def run(self):
shapefiles = shell('ls {dir}/*.shp'.format(
dir=os.path.join('tmp', classpath(self), str(self.year), self.geography)
)).strip().split('\n')
cmd = 'ogrinfo {shpfile_path}'.format(shpfile_path=shapefiles[0])
resp = shell(cmd)
if 'Polygon' in resp:
nlt = '-nlt MultiPolygon'
else:
nlt = ''
cmd = 'PG_USE_COPY=yes PGCLIENTENCODING=latin1 ' \
'ogr2ogr -f PostgreSQL "PG:dbname=$PGDATABASE active_schema={schema}" ' \
'-t_srs "EPSG:4326" {nlt} -nln {tablename} ' \
'-lco OVERWRITE=yes ' \
'-lco SCHEMA={schema} {shpfile_path} '.format(
tablename=self.output().tablename,
schema=self.output().schema, nlt=nlt,
shpfile_path=shapefiles.pop())
shell(cmd)
# chunk into 500 shapefiles at a time.
for i, shape_group in enumerate(grouper(shapefiles, 500)):
shell(
'export PG_USE_COPY=yes PGCLIENTENCODING=latin1; '
'echo \'{shapefiles}\' | xargs -P 16 -I shpfile_path '
'ogr2ogr -f PostgreSQL "PG:dbname=$PGDATABASE '
'active_schema={schema}" -append '
'-t_srs "EPSG:4326" {nlt} -nln {tablename} '
'shpfile_path '.format(
shapefiles='\n'.join([shp for shp in shape_group if shp]),
tablename=self.output().tablename, nlt=nlt,
schema=self.output().schema))
print 'imported {} shapefiles'.format((i + 1) * 500)
session = current_session()
# Spatial index
session.execute('ALTER TABLE {qualified_table} RENAME COLUMN '
'wkb_geometry TO geom'.format(
qualified_table=self.output().table))
session.execute('CREATE INDEX ON {qualified_table} USING GIST (geom)'.format(
qualified_table=self.output().table))
class DownloadTiger(LoadPostgresFromURL):
url_template = 'https://s3.amazonaws.com/census-backup/tiger/{year}/tiger{year}_backup.sql.gz'
year = Parameter()
def run(self):
schema = 'tiger{year}'.format(year=self.year)
shell("psql -c 'DROP SCHEMA IF EXISTS \"{schema}\" CASCADE'".format(schema=schema))
shell("psql -c 'CREATE SCHEMA \"{schema}\"'".format(schema=schema))
url = self.url_template.format(year=self.year)
self.load_from_url(url)
class SimpleShoreline(TempTableTask):
year = Parameter()
def requires(self):
return {
'data': TigerGeographyShapefileToSQL(geography='AREAWATER', year=self.year),
'us_landmask': Carto2TempTableTask(table='us_landmask_union'),
}
def run(self):
session = current_session()
session.execute('CREATE TABLE {output} AS '
'SELECT ST_Subdivide(geom) geom, false in_landmask, '
' aland, awater, mtfcc '
'FROM {input} '
"WHERE mtfcc != 'H2030' OR awater > 300000".format(
input=self.input()['data'].table,
output=self.output().table
))
session.execute('CREATE INDEX ON {output} USING GIST (geom)'.format(
output=self.output().table
))
session.execute('UPDATE {output} data SET in_landmask = True '
'FROM {landmask} landmask '
'WHERE ST_WITHIN(data.geom, landmask.the_geom)'.format(
landmask=self.input()['us_landmask'].table,
output=self.output().table
))
class SplitSumLevel(TempTableTask):
'''
Split the positive table into geoms with a reasonable number of
vertices. Assumes there is a geoid and the_geom column.
'''
year = Parameter()
geography = Parameter()
def requires(self):
return SumLevel(year=self.year, geography=self.geography)
def run(self):
session = current_session()
session.execute('CREATE TABLE {output} '
'(id serial primary key, geoid text, the_geom geometry, '
'aland NUMERIC, awater NUMERIC)'.format(
output=self.output().table))
session.execute('INSERT INTO {output} (geoid, the_geom, aland, awater) '
'SELECT geoid, ST_Subdivide(the_geom) the_geom, '
' aland, awater '
'FROM {input} '
'WHERE aland > 0 '.format(output=self.output().table,
input=self.input().table))
session.execute('CREATE INDEX ON {output} USING GIST (the_geom)'.format(
output=self.output().table))
class JoinTigerWaterGeoms(TempTableTask):
'''
Join the split up pos to the split up neg, then union the geoms based
off the split pos id (technically the union on pos geom is extraneous)
'''
year = Parameter()
geography = Parameter()
def requires(self):
return {
'pos': SplitSumLevel(year=self.year, geography=self.geography),
'neg': SimpleShoreline(year=self.year),
}
def use_mask(self):
'''
Returns true if we should not clip interior geometries, False otherwise.
'''
return self.geography.lower() in ('state', 'county', )
def run(self):
session = current_session()
stmt = ('CREATE TABLE {output} AS '
'SELECT id, geoid, ST_Union(ST_MakeValid(neg.geom)) neg_geom, '
' MAX(pos.the_geom) pos_geom '
'FROM {pos} pos, {neg} neg '
'WHERE ST_Intersects(pos.the_geom, neg.geom) '
' AND pos.awater > 0 '
' {mask_clause} '
'GROUP BY id '.format(
neg=self.input()['neg'].table,
mask_clause=' AND in_landmask = false' if self.use_mask() else '',
pos=self.input()['pos'].table,
output=self.output().table), )[0]
session.execute(stmt)
class DiffTigerWaterGeoms(TempTableTask):
'''
Calculate the difference between the pos and neg geoms
'''
year = Parameter()
geography = Parameter()
def requires(self):
return JoinTigerWaterGeoms(year=self.year, geography=self.geography)
def run(self):
session = current_session()
stmt = ('CREATE TABLE {output} '
'AS SELECT geoid, id, ST_Difference( '
'ST_MakeValid(pos_geom), ST_MakeValid(neg_geom)) the_geom '
#'pos_geom, neg_geom) the_geom '
'FROM {input}'.format(
output=self.output().table,
input=self.input().table), )[0]
session.execute(stmt)
class PreunionTigerWaterGeoms(TempTableTask):
'''
Create new table with both diffed and non-diffed (didn't intersect with
water) geoms
'''
year = Parameter()
geography = Parameter()
def requires(self):
return {
'diffed': DiffTigerWaterGeoms(year=self.year, geography=self.geography),
'split': SplitSumLevel(year=self.year, geography=self.geography)
}
def run(self):
session = current_session()
session.execute('CREATE TABLE {output} '
'AS SELECT geoid::text, id::int, the_geom::geometry, '
'aland::numeric, awater::Numeric '
'FROM {split} LIMIT 0 '.format(
output=self.output().table,
split=self.input()['split'].table))
session.execute('INSERT INTO {output} (geoid, id, the_geom) '
'SELECT geoid, id, the_geom FROM {diffed} '
'WHERE ST_Area(ST_Transform(the_geom, 3857)) > 5000'
' AND ST_NPoints(the_geom) > 10 '.format(
output=self.output().table,
diffed=self.input()['diffed'].table))
session.execute('INSERT INTO {output} '
'SELECT geoid, id, the_geom, aland, awater FROM {split} '
'WHERE id NOT IN (SELECT id from {diffed})'.format(
split=self.input()['split'].table,
diffed=self.input()['diffed'].table,
output=self.output().table))
session.execute('CREATE INDEX ON {output} (geoid) '.format(
output=self.output().table))
class UnionTigerWaterGeoms(TempTableTask):
'''
Re-union the pos table based off its geoid, this includes holes in
the output geoms
'''
year = Parameter()
geography = Parameter()
def requires(self):
return PreunionTigerWaterGeoms(year=self.year, geography=self.geography)
def run(self):
session = current_session()
session.execute('CREATE TABLE {output} AS '
'SELECT geoid, ST_Union(ST_MakeValid(the_geom)) AS the_geom, '
' MAX(aland) aland, MAX(awater) awater '
'FROM {input} '
'GROUP BY geoid'.format(
output=self.output().table,
input=self.input().table))
class ShorelineClip(TableTask):
'''
Clip the provided geography to shoreline.
'''
# MTFCC meanings:
# http://www2.census.gov/geo/pdfs/maps-data/data/tiger/tgrshp2009/TGRSHP09AF.pdf
year = Parameter()
geography = Parameter()
def version(self):
return 7
def requires(self):
return {
'data': UnionTigerWaterGeoms(year=self.year, geography=self.geography),
'geoms': ClippedGeomColumns(),
'geoids': GeoidColumns(),
'attributes': Attributes(),
'geonames': GeonameColumns()
}
def columns(self):
return OrderedDict([
('geoid', self.input()['geoids'][self.geography + '_geoid']),
('the_geom', self.input()['geoms'][self.geography + '_clipped']),
('aland', self.input()['attributes']['aland']),
('name', self.input()['geonames'][self.geography + '_geoname']),
])
def timespan(self):
return self.year
def populate(self):
session = current_session()
stmt = ('INSERT INTO {output} '
'SELECT geoid, ST_Union(ST_MakePolygon(ST_ExteriorRing(the_geom))) AS the_geom, '
' MAX(aland) AS aland, cdb_observatory.FIRST(name) AS name '
'FROM ( '
' SELECT geoid, (ST_Dump(the_geom)).geom AS the_geom, '
' aland, name '
' FROM {input} '
") holes WHERE GeometryType(the_geom) = 'POLYGON' "
'GROUP BY geoid'.format(
output=self.output().table,
input=self.input()['data'].table), )[0]
session.execute(stmt)
class SumLevel(TableTask):
geography = Parameter()
year = Parameter()
def has_10_suffix(self):
return self.geography.lower() in ('puma', 'zcta5', 'block', )
@property
def geoid(self):
return 'geoid10' if self.has_10_suffix() else 'geoid'
@property
def aland(self):
return 'aland10' if self.has_10_suffix() else 'aland'
@property
def awater(self):
return 'awater10' if self.has_10_suffix() else 'awater'
@property
def name(self):
if self.geography in ('state', 'county', 'census_tract', 'place',
'school_district_elementary', 'cbsa', 'metdiv',
'school_district_secondary',
'school_district_unified'):
return 'name'
        elif self.geography in ('congressional_district', 'block_group'):
            return 'namelsad'
        # note: one-element tuples below; a bare ('block') is just a string,
        # so `in` would do substring matching
        elif self.geography in ('block',):
            return 'name10'
        elif self.geography in ('puma',):
            return 'namelsad10'
@property
def input_tablename(self):
return SUMLEVELS_BY_SLUG[self.geography]['table']
def version(self):
return 11
def requires(self):
tiger = DownloadTiger(year=self.year)
return {
'data': tiger,
'attributes': Attributes(),
'geoids': GeoidColumns(),
'geoms': GeomColumns(),
'sections': SectionTags(),
'subsections': SubsectionTags(),
'geonames': GeonameColumns(),
}
def columns(self):
input_ = self.input()
cols = OrderedDict([
('geoid', input_['geoids'][self.geography + '_geoid']),
('the_geom', input_['geoms'][self.geography]),
('aland', input_['attributes']['aland']),
('awater', input_['attributes']['awater']),
])
if self.name:
cols['geoname'] = input_['geonames'][self.geography + '_geoname']
return cols
def timespan(self):
return self.year
def populate(self):
session = current_session()
from_clause = '{inputschema}.{input_tablename}'.format(
inputschema='tiger' + str(self.year),
input_tablename=self.input_tablename,
)
in_colnames = [self.geoid, 'geom', self.aland, self.awater]
if self.name:
in_colnames.append(self.name)
out_colnames = self.columns().keys()
session.execute('INSERT INTO {output} ({out_colnames}) '
'SELECT {in_colnames} '
'FROM {from_clause} '.format(
output=self.output().table,
in_colnames=', '.join(in_colnames),
out_colnames=', '.join(out_colnames),
from_clause=from_clause
))
class AllSumLevels(WrapperTask):
'''
Compute all sumlevels
'''
year = Parameter()
def requires(self):
for geo in ('state', 'county', 'census_tract', 'block_group', 'place',
'puma', 'zcta5', 'school_district_elementary', 'cbsa',
'school_district_secondary', 'school_district_unified',
'block', 'congressional_district'):
yield SumLevel(year=self.year, geography=geo)
yield ShorelineClip(year=self.year, geography=geo)
class SharedTigerColumns(ColumnsTask):
def version(self):
return 2
def requires(self):
return {
'sections': SectionTags(),
'subsections': SubsectionTags(),
'source': TigerSourceTags(),
'license': LicenseTags(),
}
def columns(self):
input_ = self.input()
return OrderedDict([
('fullname', OBSColumn(
type='Text',
name='Name of the feature',
weight=3,
tags=[input_['sections']['united_states'],
input_['source']['tiger-source'],
input_['license']['no-restrictions']]
)),
('mtfcc', OBSColumn(
type='Text',
name='MAF/TIGER Feature Class Code Definitions',
description='''The MAF/TIGER Feature Class Code (MTFCC) is
a 5-digit code assigned by the Census Bureau intended to
classify and describe geographic objects or features. These
codes can be found in the TIGER/Line products. A full list of
code meanings can be found `here
<https://www.census.gov/geo/reference/mtfcc.html>`_.''',
weight=3,
tags=[input_['sections']['united_states'],
input_['source']['tiger-source'],
input_['license']['no-restrictions']]
))
])
class PointLandmarkColumns(ColumnsTask):
'''
Point landmark column definitions
'''
def version(self):
return 8
def requires(self):
return {
'sections': SectionTags(),
'subsections': SubsectionTags(),
'source': TigerSourceTags(),
'license': LicenseTags(),
}
def columns(self):
input_ = self.input()
geom = OBSColumn(
id='pointlm_geom',
type='Geometry(Point)',
weight=5,
tags=[input_['sections']['united_states'],
input_['subsections']['poi'],
input_['source']['tiger-source'],
input_['license']['no-restrictions']]
)
cols = OrderedDict([
('pointlm_id', OBSColumn(
type='Text',
weight=0,
targets={geom: GEOM_REF}
)),
('pointlm_geom', geom)
])
return cols
class PointLandmark(TableTask):
'''
Point landmark data from the census
'''
year = Parameter()
def version(self):
return 2
def requires(self):
return {
'data': TigerGeographyShapefileToSQL(year=self.year,
geography='POINTLM'),
'meta': PointLandmarkColumns(),
'shared': SharedTigerColumns()
}
def timespan(self):
return self.year
def columns(self):
shared = self.input()['shared']
cols = self.input()['meta']
return OrderedDict([
('pointid', cols['pointlm_id']),
('fullname', shared['fullname']),
('mtfcc', shared['mtfcc']),
('geom', cols['pointlm_geom']),
])
def populate(self):
session = current_session()
session.execute('''
INSERT INTO {output}
SELECT pointid, fullname, mtfcc, geom
FROM {input}'''.format(output=self.output().table,
input=self.input()['data'].table))
class PriSecRoadsColumns(ColumnsTask):
'''
Primary & secondary roads column definitions
'''
def version(self):
return 5
def requires(self):
return {
'sections': SectionTags(),
'subsections': SubsectionTags(),
'source': TigerSourceTags(),
'license': LicenseTags(),
}
def columns(self):
input_ = self.input()
geom = OBSColumn(
id='prisecroads_geom',
type='Geometry(LineString)',
weight=5,
tags=[input_['sections']['united_states'],
input_['subsections']['roads'],
input_['source']['tiger-source'],
input_['license']['no-restrictions']]
)
cols = OrderedDict([
('prisecroads_id', OBSColumn(
type='Text',
weight=0,
targets={geom: GEOM_REF}
)),
('rttyp', OBSColumn(
type='Text'
)),
('prisecroads_geom', geom)
])
return cols
class PriSecRoads(TableTask):
'''
Primary & Secondary roads from the census
'''
year = Parameter()
def requires(self):
return {
'data': TigerGeographyShapefileToSQL(year=self.year,
geography='PRISECROADS'),
'meta': PriSecRoadsColumns(),
'shared': SharedTigerColumns()
}
def version(self):
return 2
def timespan(self):
return self.year
def columns(self):
shared = self.input()['shared']
cols = self.input()['meta']
return OrderedDict([
('linearid', cols['prisecroads_id']),
('fullname', shared['fullname']),
('rttyp', cols['rttyp']),
('mtfcc', shared['mtfcc']),
('geom', cols['prisecroads_geom']),
])
def populate(self):
session = current_session()
session.execute('''
INSERT INTO {output}
SELECT linearid, fullname, rttyp, mtfcc, geom
FROM {input}'''.format(output=self.output().table,
input=self.input()['data'].table))
def load_sumlevels():
'''
Load summary levels from JSON. Returns a dict by sumlevel number.
'''
with open(os.path.join(os.path.dirname(__file__), 'summary_levels.json')) as fhandle:
sumlevels_list = json.load(fhandle)
sumlevels = {}
for slevel in sumlevels_list:
# Replace pkey ancestors with paths to columns
# We subtract 1 from the pkey because it's 1-indexed, unlike python
fields = slevel['fields']
for i, ancestor in enumerate(fields['ancestors']):
colpath = os.path.join('columns', classpath(load_sumlevels),
sumlevels_list[ancestor - 1]['fields']['slug'])
fields['ancestors'][i] = colpath
if fields['parent']:
fields['parent'] = os.path.join(
'columns', classpath(load_sumlevels),
sumlevels_list[fields['parent'] - 1]['fields']['slug'])
sumlevels[fields['summary_level']] = fields
return sumlevels
SUMLEVELS = load_sumlevels()
SUMLEVELS_BY_SLUG = dict([(v['slug'], v) for k, v in SUMLEVELS.iteritems()])
|
nilq/baby-python
|
python
|
from collections import OrderedDict
from algorithms.RNN import RNNModel
from algorithms.AR import AutoRegressive
from algorithms.LSTM import LSTMModel
from algorithms import LSTNet, Optim
import torch
p = 5
def get_models_optimizers(node_list, algs, cuda, lr, hidden_dim, layer_dim, nonlinearity, Data):
models, quantile_models, optimizers = OrderedDict(), OrderedDict(), OrderedDict()
quantile_optimizers, combined_optimizers = OrderedDict(), OrderedDict()
for name in node_list:
model_dict = {'rnn': [RNNModel(input_dim=1, hidden_dim=hidden_dim, layer_dim=layer_dim, quantiles=[0.5],
nonlinearity=nonlinearity),
RNNModel(input_dim=1, hidden_dim=hidden_dim, layer_dim=layer_dim, quantiles=[0.05, 0.95],
nonlinearity=nonlinearity)],
'lstm': [LSTMModel(input_dim=1, hidden_dim=hidden_dim, layer_dim=layer_dim, quantiles=[0.5]),
LSTMModel(input_dim=1, hidden_dim=hidden_dim, layer_dim=layer_dim, quantiles=[0.05, 0.95])],
'ar': [AutoRegressive(quantiles=[0.5], p=p), AutoRegressive(quantiles=[0.05, 0.95], p=p)],
'LSTNet': [LSTNet.Model(Data, method='sharq', quantiles=[0.5]),
LSTNet.Model(Data, method='sharq', quantiles=[0.05, 0.95])]}
model, quantile_model = model_dict[algs][0], model_dict[algs][1]
if cuda:
models[name], quantile_models[name] = model.cuda(), quantile_model.cuda()
else:
models[name], quantile_models[name] = model, quantile_model
optimizer_dict = {'rnn': [torch.optim.SGD(models[name].parameters(), lr=lr),
torch.optim.SGD(quantile_models[name].parameters(), lr=lr),
torch.optim.Adam(quantile_models[name].parameters(), lr=lr)],
'lstm': [torch.optim.SGD(models[name].parameters(), lr=lr),
torch.optim.SGD(quantile_models[name].parameters(), lr=lr),
torch.optim.Adam(quantile_models[name].parameters(), lr=lr)],
'ar': [torch.optim.SGD(models[name].parameters(), lr=lr),
torch.optim.SGD(quantile_models[name].parameters(), lr=lr),
torch.optim.Adam(quantile_models[name].parameters(), lr=lr)],
'LSTNet': [torch.optim.Adam(models[name].parameters(), lr=lr),
torch.optim.Adam(quantile_models[name].parameters(), lr=lr),
torch.optim.Adam(quantile_models[name].parameters(), lr=lr)]}
optimizers[name] = optimizer_dict[algs][0]
quantile_optimizers[name] = optimizer_dict[algs][1]
combined_optimizers[name] = optimizer_dict[algs][2]
return models, quantile_models, optimizers, quantile_optimizers, combined_optimizers, p
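# Note (added): model_dict above is constructed eagerly, so every backend
# (RNN, LSTM, AR, LSTNet) is instantiated for each node even though only the
# algs entry is kept; Data must therefore be valid for LSTNet regardless of
# the chosen algorithm. A hypothetical call (all argument values here are
# assumptions, not from this file):
#   models, q_models, opts, q_opts, comb_opts, p = get_models_optimizers(
#       node_list=['n1', 'n2'], algs='ar', cuda=False, lr=1e-3,
#       hidden_dim=32, layer_dim=2, nonlinearity='tanh', Data=data)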
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import logging
import sys
from .ToolChainExplorer import ToolChainExplorer
class ToolChainExplorerDFS(ToolChainExplorer):
def __init__(
self,
simgr,
max_length,
exp_dir,
nameFileShort,
worker,
):
super(ToolChainExplorerDFS, self).__init__(
simgr,
max_length,
exp_dir,
nameFileShort,
worker
)
self.log = logging.getLogger("ToolChainExplorerDFS")
self.log.setLevel("INFO")
    def __take_longest(self, simgr, source_stash):
        """
        Take the state in source_stash with the largest number of steps
        and append it to the active stash.
        @pre : source_stash exists
        """
id_to_move = 0
max_step = 0
if len(simgr.stashes[source_stash]) > 0:
id_to_move = simgr.stashes[source_stash][0].globals["id"]
max_step = simgr.stashes[source_stash][0].globals["n_steps"]
else:
return
for s in simgr.stashes[source_stash]:
if s.globals["n_steps"] > max_step:
id_to_move = s.globals["id"]
max_step = s.globals["n_steps"]
simgr.move(source_stash, "active", lambda s: s.globals["id"] == id_to_move)
def step(self, simgr, stash="active", **kwargs):
try:
simgr = simgr.step(stash=stash, **kwargs)
except Exception as inst:
self.log.warning("ERROR IN STEP() - YOU ARE NOT SUPPOSED TO BE THERE !")
# self.log.warning(type(inst)) # the exception instance
self.log.warning(inst) # __str__ allows args to be printed directly,
exc_type, exc_obj, exc_tb = sys.exc_info()
# fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            # pass exception details as lazy format args, not as a bare tuple
            self.log.warning("%s %s", exc_type, exc_obj)
exit(-1)
super().build_snapshot(simgr)
if self.print_sm_step and (
len(self.fork_stack) > 0 or len(simgr.deadended) > self.deadended
):
            self.log.info(
                "A new block of execution has been executed with changes in the sim_manager."
            )
self.log.info("Currently, simulation manager is :\n" + str(simgr))
self.log.info("pause stash len :" + str(len(self.pause_stash)))
if self.print_sm_step and len(self.fork_stack) > 0:
self.log.info("fork_stack : " + str(len(self.fork_stack)))
# if self.print_sm_step:
# self.log.info("len(self.loopBreak_stack) : " + str(len(self.loopBreak_stack)))
# self.log.info("state.globals['n_steps'] : " + str(state.globals['n_steps']))
# self.log.warning("STEP")
# We detect fork for a state
super().manage_fork(simgr)
# Remove state which performed more jump than the limit allowed
super().remove_exceeded_jump(simgr)
# Manage ended state
super().manage_deadended(simgr)
super().mv_bad_active(simgr)
# import pdb; pdb.set_trace()
# If limit of simultaneous state is not reached and we have some states available in pause stash
if len(simgr.stashes["pause"]) > 0 and len(simgr.active) < self.max_simul_state:
moves = min(
self.max_simul_state - len(simgr.active),
len(simgr.stashes["pause"]),
)
            for _ in range(moves):
                self.__take_longest(simgr, "pause")
super().manage_pause(simgr)
super().drop_excessed_loop(simgr)
# If states end with errors, it is often worth investigating. Set DEBUG_ERROR to live debug
# TODO : add a log file if debug error is not activated
super().manage_error(simgr)
super().manage_unconstrained(simgr)
for vis in simgr.active:
self.dict_addr_vis[
str(super().check_constraint(vis, vis.history.jump_target))
] = 1
super().excessed_step_to_active(simgr)
super().excessed_loop_to_active(simgr)
super().time_evaluation(simgr)
return simgr
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: signer.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0csigner.proto\x12\x07signrpc\"3\n\nKeyLocator\x12\x12\n\nkey_family\x18\x01 \x01(\x05\x12\x11\n\tkey_index\x18\x02 \x01(\x05\"L\n\rKeyDescriptor\x12\x15\n\rraw_key_bytes\x18\x01 \x01(\x0c\x12$\n\x07key_loc\x18\x02 \x01(\x0b\x32\x13.signrpc.KeyLocator\")\n\x05TxOut\x12\r\n\x05value\x18\x01 \x01(\x03\x12\x11\n\tpk_script\x18\x02 \x01(\x0c\"\xc4\x01\n\x0eSignDescriptor\x12(\n\x08key_desc\x18\x01 \x01(\x0b\x32\x16.signrpc.KeyDescriptor\x12\x14\n\x0csingle_tweak\x18\x02 \x01(\x0c\x12\x14\n\x0c\x64ouble_tweak\x18\x03 \x01(\x0c\x12\x16\n\x0ewitness_script\x18\x04 \x01(\x0c\x12\x1e\n\x06output\x18\x05 \x01(\x0b\x32\x0e.signrpc.TxOut\x12\x0f\n\x07sighash\x18\x07 \x01(\r\x12\x13\n\x0binput_index\x18\x08 \x01(\x05\"L\n\x07SignReq\x12\x14\n\x0craw_tx_bytes\x18\x01 \x01(\x0c\x12+\n\nsign_descs\x18\x02 \x03(\x0b\x32\x17.signrpc.SignDescriptor\"\x1c\n\x08SignResp\x12\x10\n\x08raw_sigs\x18\x01 \x03(\x0c\"2\n\x0bInputScript\x12\x0f\n\x07witness\x18\x01 \x03(\x0c\x12\x12\n\nsig_script\x18\x02 \x01(\x0c\">\n\x0fInputScriptResp\x12+\n\rinput_scripts\x18\x01 \x03(\x0b\x32\x14.signrpc.InputScript\"m\n\x0eSignMessageReq\x12\x0b\n\x03msg\x18\x01 \x01(\x0c\x12$\n\x07key_loc\x18\x02 \x01(\x0b\x32\x13.signrpc.KeyLocator\x12\x13\n\x0b\x64ouble_hash\x18\x03 \x01(\x08\x12\x13\n\x0b\x63ompact_sig\x18\x04 \x01(\x08\"$\n\x0fSignMessageResp\x12\x11\n\tsignature\x18\x01 \x01(\x0c\"B\n\x10VerifyMessageReq\x12\x0b\n\x03msg\x18\x01 \x01(\x0c\x12\x11\n\tsignature\x18\x02 \x01(\x0c\x12\x0e\n\x06pubkey\x18\x03 \x01(\x0c\"\"\n\x11VerifyMessageResp\x12\r\n\x05valid\x18\x01 \x01(\x08\"\x80\x01\n\x10SharedKeyRequest\x12\x18\n\x10\x65phemeral_pubkey\x18\x01 \x01(\x0c\x12(\n\x07key_loc\x18\x02 \x01(\x0b\x32\x13.signrpc.KeyLocatorB\x02\x18\x01\x12(\n\x08key_desc\x18\x03 \x01(\x0b\x32\x16.signrpc.KeyDescriptor\"\'\n\x11SharedKeyResponse\x12\x12\n\nshared_key\x18\x01 \x01(\x0c\x32\xd4\x02\n\x06Signer\x12\x34\n\rSignOutputRaw\x12\x10.signrpc.SignReq\x1a\x11.signrpc.SignResp\x12@\n\x12\x43omputeInputScript\x12\x10.signrpc.SignReq\x1a\x18.signrpc.InputScriptResp\x12@\n\x0bSignMessage\x12\x17.signrpc.SignMessageReq\x1a\x18.signrpc.SignMessageResp\x12\x46\n\rVerifyMessage\x12\x19.signrpc.VerifyMessageReq\x1a\x1a.signrpc.VerifyMessageResp\x12H\n\x0f\x44\x65riveSharedKey\x12\x19.signrpc.SharedKeyRequest\x1a\x1a.signrpc.SharedKeyResponseB/Z-github.com/lightningnetwork/lnd/lnrpc/signrpcb\x06proto3')
_KEYLOCATOR = DESCRIPTOR.message_types_by_name['KeyLocator']
_KEYDESCRIPTOR = DESCRIPTOR.message_types_by_name['KeyDescriptor']
_TXOUT = DESCRIPTOR.message_types_by_name['TxOut']
_SIGNDESCRIPTOR = DESCRIPTOR.message_types_by_name['SignDescriptor']
_SIGNREQ = DESCRIPTOR.message_types_by_name['SignReq']
_SIGNRESP = DESCRIPTOR.message_types_by_name['SignResp']
_INPUTSCRIPT = DESCRIPTOR.message_types_by_name['InputScript']
_INPUTSCRIPTRESP = DESCRIPTOR.message_types_by_name['InputScriptResp']
_SIGNMESSAGEREQ = DESCRIPTOR.message_types_by_name['SignMessageReq']
_SIGNMESSAGERESP = DESCRIPTOR.message_types_by_name['SignMessageResp']
_VERIFYMESSAGEREQ = DESCRIPTOR.message_types_by_name['VerifyMessageReq']
_VERIFYMESSAGERESP = DESCRIPTOR.message_types_by_name['VerifyMessageResp']
_SHAREDKEYREQUEST = DESCRIPTOR.message_types_by_name['SharedKeyRequest']
_SHAREDKEYRESPONSE = DESCRIPTOR.message_types_by_name['SharedKeyResponse']
KeyLocator = _reflection.GeneratedProtocolMessageType('KeyLocator', (_message.Message,), {
'DESCRIPTOR' : _KEYLOCATOR,
'__module__' : 'signer_pb2'
# @@protoc_insertion_point(class_scope:signrpc.KeyLocator)
})
_sym_db.RegisterMessage(KeyLocator)
KeyDescriptor = _reflection.GeneratedProtocolMessageType('KeyDescriptor', (_message.Message,), {
'DESCRIPTOR' : _KEYDESCRIPTOR,
'__module__' : 'signer_pb2'
# @@protoc_insertion_point(class_scope:signrpc.KeyDescriptor)
})
_sym_db.RegisterMessage(KeyDescriptor)
TxOut = _reflection.GeneratedProtocolMessageType('TxOut', (_message.Message,), {
'DESCRIPTOR' : _TXOUT,
'__module__' : 'signer_pb2'
# @@protoc_insertion_point(class_scope:signrpc.TxOut)
})
_sym_db.RegisterMessage(TxOut)
SignDescriptor = _reflection.GeneratedProtocolMessageType('SignDescriptor', (_message.Message,), {
'DESCRIPTOR' : _SIGNDESCRIPTOR,
'__module__' : 'signer_pb2'
# @@protoc_insertion_point(class_scope:signrpc.SignDescriptor)
})
_sym_db.RegisterMessage(SignDescriptor)
SignReq = _reflection.GeneratedProtocolMessageType('SignReq', (_message.Message,), {
'DESCRIPTOR' : _SIGNREQ,
'__module__' : 'signer_pb2'
# @@protoc_insertion_point(class_scope:signrpc.SignReq)
})
_sym_db.RegisterMessage(SignReq)
SignResp = _reflection.GeneratedProtocolMessageType('SignResp', (_message.Message,), {
'DESCRIPTOR' : _SIGNRESP,
'__module__' : 'signer_pb2'
# @@protoc_insertion_point(class_scope:signrpc.SignResp)
})
_sym_db.RegisterMessage(SignResp)
InputScript = _reflection.GeneratedProtocolMessageType('InputScript', (_message.Message,), {
'DESCRIPTOR' : _INPUTSCRIPT,
'__module__' : 'signer_pb2'
# @@protoc_insertion_point(class_scope:signrpc.InputScript)
})
_sym_db.RegisterMessage(InputScript)
InputScriptResp = _reflection.GeneratedProtocolMessageType('InputScriptResp', (_message.Message,), {
'DESCRIPTOR' : _INPUTSCRIPTRESP,
'__module__' : 'signer_pb2'
# @@protoc_insertion_point(class_scope:signrpc.InputScriptResp)
})
_sym_db.RegisterMessage(InputScriptResp)
SignMessageReq = _reflection.GeneratedProtocolMessageType('SignMessageReq', (_message.Message,), {
'DESCRIPTOR' : _SIGNMESSAGEREQ,
'__module__' : 'signer_pb2'
# @@protoc_insertion_point(class_scope:signrpc.SignMessageReq)
})
_sym_db.RegisterMessage(SignMessageReq)
SignMessageResp = _reflection.GeneratedProtocolMessageType('SignMessageResp', (_message.Message,), {
'DESCRIPTOR' : _SIGNMESSAGERESP,
'__module__' : 'signer_pb2'
# @@protoc_insertion_point(class_scope:signrpc.SignMessageResp)
})
_sym_db.RegisterMessage(SignMessageResp)
VerifyMessageReq = _reflection.GeneratedProtocolMessageType('VerifyMessageReq', (_message.Message,), {
'DESCRIPTOR' : _VERIFYMESSAGEREQ,
'__module__' : 'signer_pb2'
# @@protoc_insertion_point(class_scope:signrpc.VerifyMessageReq)
})
_sym_db.RegisterMessage(VerifyMessageReq)
VerifyMessageResp = _reflection.GeneratedProtocolMessageType('VerifyMessageResp', (_message.Message,), {
'DESCRIPTOR' : _VERIFYMESSAGERESP,
'__module__' : 'signer_pb2'
# @@protoc_insertion_point(class_scope:signrpc.VerifyMessageResp)
})
_sym_db.RegisterMessage(VerifyMessageResp)
SharedKeyRequest = _reflection.GeneratedProtocolMessageType('SharedKeyRequest', (_message.Message,), {
'DESCRIPTOR' : _SHAREDKEYREQUEST,
'__module__' : 'signer_pb2'
# @@protoc_insertion_point(class_scope:signrpc.SharedKeyRequest)
})
_sym_db.RegisterMessage(SharedKeyRequest)
SharedKeyResponse = _reflection.GeneratedProtocolMessageType('SharedKeyResponse', (_message.Message,), {
'DESCRIPTOR' : _SHAREDKEYRESPONSE,
'__module__' : 'signer_pb2'
# @@protoc_insertion_point(class_scope:signrpc.SharedKeyResponse)
})
_sym_db.RegisterMessage(SharedKeyResponse)
_SIGNER = DESCRIPTOR.services_by_name['Signer']
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'Z-github.com/lightningnetwork/lnd/lnrpc/signrpc'
_SHAREDKEYREQUEST.fields_by_name['key_loc']._options = None
_SHAREDKEYREQUEST.fields_by_name['key_loc']._serialized_options = b'\030\001'
_KEYLOCATOR._serialized_start=25
_KEYLOCATOR._serialized_end=76
_KEYDESCRIPTOR._serialized_start=78
_KEYDESCRIPTOR._serialized_end=154
_TXOUT._serialized_start=156
_TXOUT._serialized_end=197
_SIGNDESCRIPTOR._serialized_start=200
_SIGNDESCRIPTOR._serialized_end=396
_SIGNREQ._serialized_start=398
_SIGNREQ._serialized_end=474
_SIGNRESP._serialized_start=476
_SIGNRESP._serialized_end=504
_INPUTSCRIPT._serialized_start=506
_INPUTSCRIPT._serialized_end=556
_INPUTSCRIPTRESP._serialized_start=558
_INPUTSCRIPTRESP._serialized_end=620
_SIGNMESSAGEREQ._serialized_start=622
_SIGNMESSAGEREQ._serialized_end=731
_SIGNMESSAGERESP._serialized_start=733
_SIGNMESSAGERESP._serialized_end=769
_VERIFYMESSAGEREQ._serialized_start=771
_VERIFYMESSAGEREQ._serialized_end=837
_VERIFYMESSAGERESP._serialized_start=839
_VERIFYMESSAGERESP._serialized_end=873
_SHAREDKEYREQUEST._serialized_start=876
_SHAREDKEYREQUEST._serialized_end=1004
_SHAREDKEYRESPONSE._serialized_start=1006
_SHAREDKEYRESPONSE._serialized_end=1045
_SIGNER._serialized_start=1048
_SIGNER._serialized_end=1388
# @@protoc_insertion_point(module_scope)
|
nilq/baby-python
|
python
|
import sym.models
import sym.trainer
import sym.datasets
import sym.config
|
nilq/baby-python
|
python
|
#!/usr/bin/python
#
# Copyright (C) 2016 Google, Inc
# Written by Simon Glass <sjg@chromium.org>
#
# SPDX-License-Identifier: GPL-2.0+
#
import os
import struct
import sys
import tempfile
import command
import tools
def fdt32_to_cpu(val):
"""Convert a device tree cell to an integer
Args:
Value to convert (4-character string representing the cell value)
Return:
A native-endian integer value
"""
if sys.version_info > (3, 0):
if isinstance(val, bytes):
val = val.decode('utf-8')
val = val.encode('raw_unicode_escape')
return struct.unpack('>I', val)[0]
def EnsureCompiled(fname):
"""Compile an fdt .dts source file into a .dtb binary blob if needed.
Args:
        fname: Filename (if .dts it will be compiled); if not, it will be
            left alone
Returns:
Filename of resulting .dtb file
"""
_, ext = os.path.splitext(fname)
if ext != '.dts':
return fname
dts_input = tools.GetOutputFilename('source.dts')
dtb_output = tools.GetOutputFilename('source.dtb')
search_paths = [os.path.join(os.getcwd(), 'include')]
root, _ = os.path.splitext(fname)
args = ['-E', '-P', '-x', 'assembler-with-cpp', '-D__ASSEMBLY__']
args += ['-Ulinux']
for path in search_paths:
args.extend(['-I', path])
args += ['-o', dts_input, fname]
command.Run('cc', *args)
# If we don't have a directory, put it in the tools tempdir
search_list = []
for path in search_paths:
search_list.extend(['-i', path])
args = ['-I', 'dts', '-o', dtb_output, '-O', 'dtb']
args.extend(search_list)
args.append(dts_input)
command.Run('dtc', *args)
return dtb_output
def GetInt(node, propname, default=None):
    prop = node.props.get(propname)
    if not prop:
        return default
    # check the raw value before conversion; type(value) == type(list)
    # compared against the type object itself and could never be true
    if isinstance(prop.value, list):
        raise ValueError("Node '%s' property '%s' has list value: expecting "
                         "a single integer" % (node.name, propname))
    return fdt32_to_cpu(prop.value)
def GetString(node, propname, default=None):
    prop = node.props.get(propname)
    if not prop:
        return default
    value = prop.value
    if isinstance(value, list):
        raise ValueError("Node '%s' property '%s' has list value: expecting "
                         "a single string" % (node.name, propname))
    return value
def GetBool(node, propname, default=False):
if propname in node.props:
return True
return default
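# Minimal usage sketch (added for illustration, guarded so it does not run on
# import): fdt32_to_cpu decodes a big-endian 4-byte device-tree cell.
if __name__ == '__main__':
    assert fdt32_to_cpu(b'\x00\x00\x00\x2a') == 42
    print('fdt32_to_cpu OK')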
|
nilq/baby-python
|
python
|
import inspect
import typing
try:
from contextlib import (
AsyncExitStack,
asynccontextmanager,
AbstractAsyncContextManager,
)
except ImportError: # pragma: no cover
AbstractAsyncContextManager = None # type: ignore
from async_generator import asynccontextmanager # type: ignore
from async_exit_stack import AsyncExitStack # type: ignore
def is_async_context_manager(obj: typing.Any) -> bool:
if AbstractAsyncContextManager is None: # pragma: no cover
return (
not inspect.isclass(obj)
and hasattr(obj, "__aenter__")
and hasattr(obj, "__aexit__")
)
return isinstance(obj, AbstractAsyncContextManager)
class asyncnullcontext:
async def __aenter__(self) -> None:
pass
async def __aexit__(self, *args: typing.Any) -> None:
pass
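# Illustrative usage (not part of the original module): asyncnullcontext can
# stand in wherever an optional async context manager is expected.
async def _demo(cm: typing.Any = None) -> None:
    async with (cm if cm is not None else asyncnullcontext()):
        pass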
|
nilq/baby-python
|
python
|
from oeis import phi
def test_phi():
    assert [phi(x) for x in range(1, 10)] == [1, 1, 2, 2, 4, 2, 6, 4, 6]
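# For reference, a minimal Euler-totient sketch consistent with the values
# asserted above (the real implementation lives in the oeis package):
def _phi_reference(n):
    result = n
    p = 2
    while p * p <= n:
        if n % p == 0:
            # p is a prime factor: divide it out and apply (1 - 1/p)
            while n % p == 0:
                n //= p
            result -= result // p
        p += 1
    if n > 1:
        result -= result // n
    return result

def test_phi_reference():
    assert [_phi_reference(x) for x in range(1, 10)] == [1, 1, 2, 2, 4, 2, 6, 4, 6]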
|
nilq/baby-python
|
python
|
from datetime import datetime
import json
import glob
import os
from pathlib import Path
from multiprocessing.pool import ThreadPool
from typing import Dict
import numpy as np
import pandas as pd
from scipy.stats.mstats import gmean
import torch
from torch import nn
from torch.utils.data import DataLoader
ON_KAGGLE: bool = 'KAGGLE_WORKING_DIR' in os.environ
def gmean_df(df: pd.DataFrame) -> pd.DataFrame:
return df.groupby(level=0).agg(lambda x: gmean(list(x)))
def mean_df(df: pd.DataFrame) -> pd.DataFrame:
return df.groupby(level=0).mean()
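# Tiny illustration (added, not in the original script) of how duplicated
# per-id prediction rows are averaged for TTA:
#   df = pd.DataFrame([[0.2], [0.8]], index=['a', 'a'], columns=['0'])
#   mean_df(df)   # -> 0.5 for id 'a'
#   gmean_df(df)  # -> 0.4 for id 'a'  (sqrt(0.2 * 0.8))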
def load_model(model: nn.Module, path: Path) -> Dict:
state = torch.load(str(path))
model.load_state_dict(state['model'])
print('Loaded model from epoch {epoch}, step {step:,}'.format(**state))
return state
class ThreadingDataLoader(DataLoader):
def __iter__(self):
sample_iter = iter(self.batch_sampler)
if self.num_workers == 0:
for indices in sample_iter:
yield self.collate_fn([self._get_item(i) for i in indices])
else:
prefetch = 1
with ThreadPool(processes=self.num_workers) as pool:
futures = []
for indices in sample_iter:
futures.append([pool.apply_async(self._get_item, args=(i,))
for i in indices])
if len(futures) > prefetch:
yield self.collate_fn([f.get() for f in futures.pop(0)])
# items = pool.map(lambda i: self.dataset[i], indices)
# yield self.collate_fn(items)
for batch_futures in futures:
yield self.collate_fn([f.get() for f in batch_futures])
def _get_item(self, i):
return self.dataset[i]
def write_event(log, step: int, **data):
data['step'] = step
data['dt'] = datetime.now().isoformat()
log.write(json.dumps(data, sort_keys=True))
log.write('\n')
log.flush()
def _smooth(ys, indices):
return [np.mean(ys[idx: indices[i + 1]])
for i, idx in enumerate(indices[:-1])]
import random
import math
from PIL import Image
import torchvision.transforms as transforms
from torchvision.transforms import (
ToTensor, Normalize, Compose, Resize, CenterCrop, RandomCrop,
RandomHorizontalFlip)
class RandomSizedCrop:
    def __init__(self, size, interpolation=Image.BILINEAR, min_aspect=4/5,
                 max_aspect=5/4, min_area=0.25, max_area=1):
self.size = size
self.interpolation = interpolation
self.min_aspect = min_aspect
self.max_aspect = max_aspect
self.min_area = min_area
self.max_area = max_area
def __call__(self, img):
for attempt in range(10):
area = img.size[0] * img.size[1]
target_area = random.uniform(self.min_area, self.max_area) * area
aspect_ratio = random.uniform(self.min_aspect, self.max_aspect)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if random.random() < 0.5:
w, h = h, w
if w <= img.size[0] and h <= img.size[1]:
x1 = random.randint(0, img.size[0] - w)
y1 = random.randint(0, img.size[1] - h)
img = img.crop((x1, y1, x1 + w, y1 + h))
assert(img.size == (w, h))
return img.resize((self.size, self.size), self.interpolation)
scale = Resize(self.size, interpolation=self.interpolation)
crop = CenterCrop(self.size)
return crop(scale(img))
train_transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.Resize(320),
transforms.ColorJitter(),
RandomSizedCrop(224),
])
test_transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.Resize(320),
RandomCrop(224),
])
tensor_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
from pathlib import Path
from typing import Callable, List
import cv2
import pandas as pd
from PIL import Image
import torch
from torch.utils.data import Dataset
N_CLASSES = 1103
DATA_ROOT = Path('../input/imet-2019-fgvc6' if ON_KAGGLE else '/nfsshare/home/white-hearted-orange/data')
class TrainDataset(Dataset):
    def __init__(self, root: Path, df: pd.DataFrame, count: pd.DataFrame, thres,
                 image_transform: Callable, debug: bool = True):
super().__init__()
self._root = root
self._df = df
self._image_transform = image_transform
self._debug = debug
self.index = np.where(count['count'] < thres)
def __len__(self):
return len(self._df)
def __getitem__(self, idx: int):
item = self._df.iloc[idx]
image = load_transform_image(
item, self._root, self._image_transform, debug=self._debug)
target = torch.zeros(N_CLASSES)
for cls in item.attribute_ids.split():
target[int(cls)] = 1
target[self.index] = 0
return image, target
class TTADataset:
def __init__(self, root: Path, df: pd.DataFrame,
image_transform: Callable, tta: int):
self._root = root
self._df = df
self._image_transform = image_transform
self._tta = tta
def __len__(self):
return len(self._df) * self._tta
def __getitem__(self, idx):
item = self._df.iloc[idx % len(self._df)]
image = load_transform_image(item, self._root, self._image_transform)
return image, item.id
def load_transform_image(
item, root: Path, image_transform: Callable, debug: bool = False):
image = load_image(item, root)
image = image_transform(image)
if debug:
image.save('_debug.png')
return tensor_transform(image)
def load_image(item, root: Path) -> Image.Image:
image = cv2.imread(str(root / f'{item.id}.png'))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
return Image.fromarray(image)
def get_ids(root: Path) -> List[str]:
return sorted({p.name.split('_')[0] for p in root.glob('*.png')})
import argparse
from collections import defaultdict, Counter
import random
import pandas as pd
import tqdm
def make_folds(n_folds: int) -> pd.DataFrame:
df = pd.read_csv(DATA_ROOT / 'train.csv')
cls_counts = Counter(cls for classes in df['attribute_ids'].str.split()
for cls in classes)
fold_cls_counts = defaultdict(int)
folds = [-1] * len(df)
for item in tqdm.tqdm(df.sample(frac=1, random_state=42).itertuples(),
total=len(df)):
cls = min(item.attribute_ids.split(), key=lambda cls: cls_counts[cls])
fold_counts = [(f, fold_cls_counts[f, cls]) for f in range(n_folds)]
min_count = min([count for _, count in fold_counts])
random.seed(item.Index)
fold = random.choice([f for f, count in fold_counts
if count == min_count])
folds[item.Index] = fold
for cls in item.attribute_ids.split():
fold_cls_counts[fold, cls] += 1
df['fold'] = folds
return df
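# Note (added): make_folds assigns each sample greedily by its rarest
# attribute, placing it in whichever fold currently holds the fewest samples
# of that attribute (ties broken by a per-row seeded random choice), which
# keeps class distributions roughly balanced across folds.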
####################################model#################################
"""
ResNet code gently borrowed from
https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
"""
from collections import OrderedDict
import math
import torch.nn as nn
from torch.utils import model_zoo
class SEModule(nn.Module):
def __init__(self, channels, reduction):
super(SEModule, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1,
padding=0)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1,
padding=0)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
module_input = x
x = self.avg_pool(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return module_input * x
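# Illustrative shape check (added, not in the original file): SEModule
# rescales channels and preserves the input shape, e.g.
#   m = SEModule(channels=64, reduction=16)
#   m(torch.randn(2, 64, 7, 7)).shape  # -> torch.Size([2, 64, 7, 7])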
class Bottleneck(nn.Module):
"""
Base class for bottlenecks that implements `forward()` method.
"""
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out = self.se_module(out) + residual
out = self.relu(out)
return out
class SEBottleneck(Bottleneck):
"""
Bottleneck for SENet154.
"""
expansion = 4
def __init__(self, inplanes, planes, groups, reduction, stride=1,
downsample=None):
super(SEBottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes * 2)
self.conv2 = nn.Conv2d(planes * 2, planes * 4, kernel_size=3,
stride=stride, padding=1, groups=groups,
bias=False)
self.bn2 = nn.BatchNorm2d(planes * 4)
self.conv3 = nn.Conv2d(planes * 4, planes * 4, kernel_size=1,
bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.se_module = SEModule(planes * 4, reduction=reduction)
self.downsample = downsample
self.stride = stride
class SEResNetBottleneck(Bottleneck):
"""
ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe
implementation and uses `stride=stride` in `conv1` and not in `conv2`
(the latter is used in the torchvision implementation of ResNet).
"""
expansion = 4
def __init__(self, inplanes, planes, groups, reduction, stride=1,
downsample=None):
super(SEResNetBottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False,
stride=stride)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1,
groups=groups, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.se_module = SEModule(planes * 4, reduction=reduction)
self.downsample = downsample
self.stride = stride
class SEResNeXtBottleneck(Bottleneck):
"""
ResNeXt bottleneck type C with a Squeeze-and-Excitation module.
"""
expansion = 4
def __init__(self, inplanes, planes, groups, reduction, stride=1,
downsample=None, base_width=4):
super(SEResNeXtBottleneck, self).__init__()
width = math.floor(planes * (base_width / 64)) * groups
self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False,
stride=1)
self.bn1 = nn.BatchNorm2d(width)
self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride,
padding=1, groups=groups, bias=False)
self.bn2 = nn.BatchNorm2d(width)
self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.se_module = SEModule(planes * 4, reduction=reduction)
self.downsample = downsample
self.stride = stride
class SENet(nn.Module):
def __init__(self, block, layers, groups, reduction, dropout_p=0.2,
inplanes=128, input_3x3=True, downsample_kernel_size=3,
downsample_padding=1, num_classes=1000):
"""
Parameters
----------
block (nn.Module): Bottleneck class.
- For SENet154: SEBottleneck
- For SE-ResNet models: SEResNetBottleneck
- For SE-ResNeXt models: SEResNeXtBottleneck
layers (list of ints): Number of residual blocks for 4 layers of the
network (layer1...layer4).
groups (int): Number of groups for the 3x3 convolution in each
bottleneck block.
- For SENet154: 64
- For SE-ResNet models: 1
- For SE-ResNeXt models: 32
reduction (int): Reduction ratio for Squeeze-and-Excitation modules.
- For all models: 16
dropout_p (float or None): Drop probability for the Dropout layer.
If `None` the Dropout layer is not used.
- For SENet154: 0.2
- For SE-ResNet models: None
- For SE-ResNeXt models: None
inplanes (int): Number of input channels for layer1.
- For SENet154: 128
- For SE-ResNet models: 64
- For SE-ResNeXt models: 64
input_3x3 (bool): If `True`, use three 3x3 convolutions instead of
a single 7x7 convolution in layer0.
- For SENet154: True
- For SE-ResNet models: False
- For SE-ResNeXt models: False
downsample_kernel_size (int): Kernel size for downsampling convolutions
in layer2, layer3 and layer4.
- For SENet154: 3
- For SE-ResNet models: 1
- For SE-ResNeXt models: 1
downsample_padding (int): Padding for downsampling convolutions in
layer2, layer3 and layer4.
- For SENet154: 1
- For SE-ResNet models: 0
- For SE-ResNeXt models: 0
num_classes (int): Number of outputs in `last_linear` layer.
- For all models: 1000
"""
super(SENet, self).__init__()
self.inplanes = inplanes
if input_3x3:
layer0_modules = [
('conv1', nn.Conv2d(3, 64, 3, stride=2, padding=1,
bias=False)),
('bn1', nn.BatchNorm2d(64)),
('relu1', nn.ReLU(inplace=True)),
('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1,
bias=False)),
('bn2', nn.BatchNorm2d(64)),
('relu2', nn.ReLU(inplace=True)),
('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1,
bias=False)),
('bn3', nn.BatchNorm2d(inplanes)),
('relu3', nn.ReLU(inplace=True)),
]
else:
layer0_modules = [
('conv1', nn.Conv2d(3, inplanes, kernel_size=7, stride=2,
padding=3, bias=False)),
('bn1', nn.BatchNorm2d(inplanes)),
('relu1', nn.ReLU(inplace=True)),
]
# To preserve compatibility with Caffe weights `ceil_mode=True`
# is used instead of `padding=1`.
layer0_modules.append(('pool', nn.MaxPool2d(3, stride=2,
ceil_mode=True)))
self.layer0 = nn.Sequential(OrderedDict(layer0_modules))
self.layer1 = self._make_layer(
block,
planes=64,
blocks=layers[0],
groups=groups,
reduction=reduction,
downsample_kernel_size=1,
downsample_padding=0
)
self.layer2 = self._make_layer(
block,
planes=128,
blocks=layers[1],
stride=2,
groups=groups,
reduction=reduction,
downsample_kernel_size=downsample_kernel_size,
downsample_padding=downsample_padding
)
self.layer3 = self._make_layer(
block,
planes=256,
blocks=layers[2],
stride=2,
groups=groups,
reduction=reduction,
downsample_kernel_size=downsample_kernel_size,
downsample_padding=downsample_padding
)
self.layer4 = self._make_layer(
block,
planes=512,
blocks=layers[3],
stride=2,
groups=groups,
reduction=reduction,
downsample_kernel_size=downsample_kernel_size,
downsample_padding=downsample_padding
)
self.avg_pool = nn.AvgPool2d(7, stride=1)
self.dropout = nn.Dropout(dropout_p) if dropout_p is not None else None
self.last_linear = nn.Linear(512 * block.expansion, num_classes)
def _make_layer(self, block, planes, blocks, groups, reduction, stride=1,
downsample_kernel_size=1, downsample_padding=0):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=downsample_kernel_size, stride=stride,
padding=downsample_padding, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, groups, reduction, stride,
downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, groups, reduction))
return nn.Sequential(*layers)
def features(self, x):
x = self.layer0(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def logits(self, x):
x = self.avg_pool(x)
if self.dropout is not None:
x = self.dropout(x)
x = x.view(x.size(0), -1)
x = self.last_linear(x)
return x
def forward(self, x):
x = self.features(x)
x = self.logits(x)
return x
def initialize_pretrained_model(model, num_classes, settings):
assert num_classes == settings['num_classes'], \
'num_classes should be {}, but is {}'.format(
settings['num_classes'], num_classes)
model.load_state_dict(model_zoo.load_url(settings['url']))
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
def se_resnext50_32x4d(num_classes=1000, pretrained='imagenet'):
model = SENet(SEResNeXtBottleneck, [3, 4, 6, 3], groups=32, reduction=16,
dropout_p=None, inplanes=64, input_3x3=False,
downsample_kernel_size=1, downsample_padding=0,
num_classes=num_classes)
if pretrained is not None:
settings = pretrained_settings['se_resnext50_32x4d'][pretrained]
initialize_pretrained_model(model, num_classes, settings)
return model
def se_resnext101_32x4d(num_classes=1000, pretrained=None):
model = SENet(SEResNeXtBottleneck, [3, 4, 23, 3], groups=32, reduction=16,
dropout_p=None, inplanes=64, input_3x3=False,
downsample_kernel_size=1, downsample_padding=0,
num_classes=num_classes)
if pretrained is not None:
settings = pretrained_settings['se_resnext101_32x4d'][pretrained]
initialize_pretrained_model(model, num_classes, settings)
return model
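# Note (added): `pretrained_settings` referenced above is not defined in this
# snippet; it is the settings dict from the `pretrainedmodels` package this
# code was adapted from and must be supplied when pretrained is not None.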
########################main.py########################################################
import argparse
from itertools import islice
import json
from pathlib import Path
import shutil
import warnings
from typing import Dict
import numpy as np
import pandas as pd
from sklearn.metrics import fbeta_score
from sklearn.exceptions import UndefinedMetricWarning
import torch
from torch import nn, cuda
from torch.optim import Adam, SGD,lr_scheduler
import tqdm
def predict(model, root: Path, df: pd.DataFrame, out_path: Path,
batch_size: int, tta: int, workers: int, use_cuda: bool):
loader = DataLoader(
dataset=TTADataset(root, df, test_transform, tta=tta),
shuffle=False,
batch_size=batch_size,
num_workers=workers,
)
model.eval()
all_outputs, all_ids = [], []
with torch.no_grad():
for inputs, ids in tqdm.tqdm(loader, desc='Predict'):
if use_cuda:
inputs = inputs.cuda()
outputs = torch.sigmoid(model(inputs))
all_outputs.append(outputs.data.cpu().numpy())
all_ids.extend(ids)
df = pd.DataFrame(
data=np.concatenate(all_outputs),
index=all_ids,
columns=map(str, range(N_CLASSES)))
df = mean_df(df)
df.to_hdf(out_path, 'prob', index_label='id')
print(f'Saved predictions to {out_path}')
def train(args, model: nn.Module, criterion, *, params,folds, count,
init_optimizer, use_cuda,
n_epochs=None, patience=2, max_lr_changes=2) -> bool:
lr = args.lr
n_epochs = n_epochs or args.n_epochs
params = list(params)
optimizer = init_optimizer(params, lr)
run_root = Path(args.run_root)
model_path = run_root / 'model.pt'
best_model_path = run_root / 'best-model.pt'
if best_model_path.exists():
state = load_model(model, best_model_path)
epoch = state['epoch']
step = state['step']
best_valid_loss = state['best_valid_loss']
else:
epoch = 1
step = 0
best_valid_loss = float('inf')
lr_changes = 0
save = lambda ep: torch.save({
'model': model.state_dict(),
'epoch': ep,
'step': step,
'best_valid_loss': best_valid_loss
}, str(model_path))
report_each = 10
log = run_root.joinpath('train.log').open('at', encoding='utf8')
valid_losses = []
lr_reset_epoch = epoch
    # cross-validation split: hold out fold 0 for validation
train_fold = folds[folds['fold'] != 0]
valid_fold = folds[folds['fold'] == 0]
def make_loader(df: pd.DataFrame, image_transform,count,thres) -> DataLoader:
return DataLoader(
TrainDataset(train_root, df, count,thres,image_transform, debug=args.debug),
shuffle=True,
batch_size=args.batch_size,
num_workers=args.workers,
)
if args.limit:
train_loader = make_loader(train_fold[:args.limit], train_transform,count,args.count)
valid_loader = make_loader(valid_fold[:args.limit], test_transform,count,0)
else:
train_loader = make_loader(train_fold, train_transform,count,args.count)
valid_loader = make_loader(valid_fold, test_transform,count,0)
##############
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max = 20)
for epoch in range(epoch, n_epochs + 1):
scheduler.step()
model.train()
losses = []
tq = tqdm.tqdm(total=(len(train_loader) * args.batch_size))
tq.set_description(f'Epoch {epoch}, lr {lr}')
tl = train_loader
try:
mean_loss = 0
for i, (inputs, targets) in enumerate(tl):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
# C is the number of classes.
batch_size = inputs.size(0)
#smoothed_labels =0.9*targets + 0.1*(torch.ones((batch_size,N_CLASSES)).cuda()-targets)
#smoothed_labels = smoothed_labels.cuda()
outputs = model(inputs)
loss = _reduce_loss(criterion(outputs, targets))
(batch_size * loss).backward()
if (i + 1) % args.step == 0:
optimizer.step()
optimizer.zero_grad()
step += 1
tq.update(batch_size)
losses.append(loss.item())
mean_loss = np.mean(losses[-report_each:])
tq.set_postfix(loss=f'{mean_loss:.3f}')
if i and i % report_each == 0:
write_event(log, step, loss=mean_loss)
write_event(log, step, loss=mean_loss)
tq.close()
save(epoch + 1)
valid_metrics = validation(model, criterion, valid_loader, use_cuda)
write_event(log, step, **valid_metrics)
valid_loss = valid_metrics['valid_loss']
valid_losses.append(valid_loss)
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
shutil.copy(str(model_path), str(best_model_path))
elif (patience and epoch - lr_reset_epoch > patience and
min(valid_losses[-patience:]) > best_valid_loss):
# "patience" epochs without improvement
lr_changes +=1
if lr_changes > max_lr_changes:
break
lr *= 0.8
print(f'lr updated to {lr}')
lr_reset_epoch = epoch
optimizer = init_optimizer(params, lr)
except KeyboardInterrupt:
tq.close()
print('Ctrl+C, saving snapshot')
save(epoch)
print('done.')
return False
return True
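# Note on the training loop above (added explanation): _reduce_loss averages
# over the batch, so multiplying by batch_size before backward() turns the
# mean back into a per-batch sum; optimizer.step() then fires once every
# `args.step` mini-batches, i.e. plain gradient accumulation (args.step == 1
# reduces to one update per batch).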
def validation(
model: nn.Module, criterion, valid_loader, use_cuda,
) -> Dict[str, float]:
model.eval()
all_losses, all_predictions, all_targets = [], [], []
with torch.no_grad():
for inputs, targets in valid_loader:
all_targets.append(targets.numpy().copy())
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
outputs = model(inputs)
loss = criterion(outputs, targets)
all_losses.append(_reduce_loss(loss).item())
predictions = torch.sigmoid(outputs)
all_predictions.append(predictions.cpu().numpy())
all_predictions = np.concatenate(all_predictions)
all_targets = np.concatenate(all_targets)
def get_score(y_pred):
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=UndefinedMetricWarning)
return fbeta_score(
all_targets, y_pred, beta=2, average='samples')
metrics = {}
argsorted = all_predictions.argsort(axis=1)
for threshold in [0.05,0.10, 0.15, 0.20]:
metrics[f'valid_f2_th_{threshold:.2f}'] = get_score(
binarize_prediction(all_predictions, threshold, argsorted))
metrics['valid_loss'] = np.mean(all_losses)
print(' | '.join(f'{k} {v:.3f}' for k, v in sorted(
metrics.items(), key=lambda kv: -kv[1])))
return metrics
def binarize_prediction(probabilities, threshold: float, argsorted=None,
min_labels=1, max_labels=10):
""" Return matrix of 0/1 predictions, same shape as probabilities.
"""
assert probabilities.shape[1] == N_CLASSES
if argsorted is None:
argsorted = probabilities.argsort(axis=1)
max_mask = _make_mask(argsorted, max_labels)
min_mask = _make_mask(argsorted, min_labels)
prob_mask = probabilities > threshold
return (max_mask & prob_mask) | min_mask
def _make_mask(argsorted, top_n: int):
mask = np.zeros_like(argsorted, dtype=np.uint8)
col_indices = argsorted[:, -top_n:].reshape(-1)
row_indices = [i // top_n for i in range(len(col_indices))]
mask[row_indices, col_indices] = 1
return mask
def _reduce_loss(loss):
return loss.sum() / loss.shape[0]
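# Worked example of the thresholding above (illustrative values, not from the
# original script): for a probability row [0.9, 0.2, 0.05, 0.6] and threshold
# 0.5, prob_mask keeps columns 0 and 3; max_mask caps a row at its top
# max_labels (10) scores, and min_mask forces at least the top min_labels (1)
# column to 1, so no sample is ever left without a predicted label.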
class arg():
def __init__(self):
self.run_root = 'model2'
self.batch_size = 64
self.step = 1
self.workers = 2
self.lr = 0.0001
self.patience = 2
self.clean = 0
self.n_epochs = 25
self.tta = 4
        self.debug = False
self.pretrained = 0
self.threshold = 0.1
self.folds = 5
self.limit = 0
self.count = 0
args = arg()
run_root = Path(args.run_root)
folds = make_folds(n_folds = args.folds)
train_root = DATA_ROOT / 'train'
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class FocalLoss(nn.Module):
def __init__(self, alpha=1, gamma=2, logits = True, reduction=False):
super(FocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.logits = logits
self.reduce = reduction
def forward(self, inputs, targets):
#print(inputs.size(),targets.size())
if self.logits:
BCE_loss = F.binary_cross_entropy_with_logits(inputs, targets,reduction='none')
else:
BCE_loss = F.binary_cross_entropy(inputs, targets, reduction = 'none')
pt = torch.exp(-BCE_loss)
F_loss = self.alpha * (1-pt)**self.gamma * BCE_loss
if self.reduce:
return torch.mean(F_loss)
else:
return F_loss
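# Minimal usage sketch of the focal loss above (illustrative shapes assumed):
#   criterion = FocalLoss(gamma=2, reduction=True)
#   logits = torch.randn(4, 8)                     # raw model outputs
#   targets = torch.randint(0, 2, (4, 8)).float()  # multi-label 0/1 targets
#   loss = criterion(logits, targets)  # scalar; easy examples are down-weighted by (1 - pt) ** gamma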
Sim = torch.load(DATA_ROOT/'Sim.pt')
Sim = Sim * (Sim > 0.5).float()  # zero out weak similarities
Sim = Sim.cuda()
class SimilarityLoss1(nn.Module):
def __init__(self, sim):
'''
sim : N_class*N_class
'''
super(SimilarityLoss1, self).__init__()
self.sim = sim
def forward(self,input,target):
Smatrix = torch.matmul(target, self.sim) + 1
#print(Smatrix)
P = torch.exp(input)
loss = -(Smatrix*target*(input-torch.log(P+1))+(1-target)*(-torch.log(1+P)))
return loss
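# Note on the algebra above (added explanation): with P = exp(input),
# input - log(P + 1) == log(sigmoid(input)) and -log(1 + P) == log(1 - sigmoid(input)),
# so SimilarityLoss1 is binary cross-entropy with logits whose positive term is
# scaled by Smatrix = target @ sim + 1: labels that share similar classes with
# the target get extra weight.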
class FocalSimilarityLoss1(nn.Module):
def __init__(self, sim, gamma=2):
'''
sim : N_class*N_class
'''
super(FocalSimilarityLoss1, self).__init__()
self.sim = sim
self.gamma = gamma
def forward(self,input,target):
Smatrix = torch.matmul(target, self.sim) + 1
P = torch.exp(input)
loss = -(Smatrix*target*(input-torch.log(P+1))+(1-target)*(-torch.log(1+P)))
pt = torch.exp(-loss)
F_loss = (1-pt)**self.gamma * loss
return F_loss
#criterion = FocalSimilarityLoss1(sim = Sim)
criterion = SimilarityLoss1(sim = Sim)
#criterion = FocalLoss()
class AvgPool(nn.Module):
def forward(self, x):
return F.avg_pool2d(x, x.shape[2:])
class Net(nn.Module):
def __init__(self, num_classes, dropout=True):
super().__init__()
self.net = se_resnext101_32x4d()
self.net.load_state_dict(torch.load(DATA_ROOT/'se_resnext101_32x4d-3b2fe3d8.pth'))
self.net.avg_pool = nn.AdaptiveAvgPool2d(1)
#self.net = nn.Sequential(*list(model0.children())[0])
# print(self.net.output)
if dropout:
# model.add_module('fc', torch.nn.Linear(4096, out_num))
self.net.last_linear = nn.Sequential(
nn.Dropout(),
nn.Linear(self.net.last_linear.in_features, num_classes)
)
else:
self.net.last_linear = nn.Linear(self.net.last_linear.in_features, num_classes)
#self.finetune()
def forward(self, x):
return self.net(x)
def finetune(self):
for para in list(self.net.parameters())[:-2]:
para.requires_grad=False
model = Net(N_CLASSES)
use_cuda = cuda.is_available()
print(use_cuda)
#fresh_params = list(model.fresh_params())
all_params = list(model.parameters())
if use_cuda:
model = model.cuda()
if run_root.exists() and args.clean:
shutil.rmtree(run_root)
run_root.mkdir(exist_ok=True, parents=True)
(run_root / 'params.json').write_text(
json.dumps(vars(args), indent=4, sort_keys=True))
from collections import Counter
def get_count():
df = pd.read_csv('../input/imet-2019-fgvc6/train.csv' if ON_KAGGLE else '/nfsshare/home/white-hearted-orange/data/train.csv')
cls_counts = Counter(cls for classes in df['attribute_ids'].str.split() for cls in classes)
stat = cls_counts.most_common()
stat1 = pd.DataFrame(stat)
stat1.columns=('attribute_id','count')
stat1['attribute_id'].astype('int')
return stat1
count = get_count()
train_kwargs = dict(
args= args,
model = model,
folds = folds,
count = count,
criterion=criterion,
patience=args.patience,
init_optimizer=lambda params, lr: Adam(params, lr),
use_cuda=use_cuda,
)
train(params=all_params, **train_kwargs)
load_model(model, run_root / 'best-model.pt')
predict_kwargs = dict(
batch_size=args.batch_size,
tta=args.tta,
use_cuda=use_cuda,
workers=args.workers,
)
test_root = DATA_ROOT / 'test'
ss = pd.read_csv(DATA_ROOT / 'sample_submission.csv')
predict(model, df=ss, root=test_root,
out_path=run_root / 'test.h5',
**predict_kwargs)
def get_classes(item):
return ' '.join(cls for cls, is_present in item.items() if is_present)
sample_submission = pd.read_csv(
DATA_ROOT / 'sample_submission.csv', index_col='id')
df = pd.read_hdf(run_root / 'test.h5', index_col='id')
df = df.reindex(sample_submission.index)
df = mean_df(df)
df[:] = binarize_prediction(df.values, threshold=args.threshold)
df = df.apply(get_classes, axis=1)
df.name = 'attribute_ids'
df.to_csv('submission.csv', header=True)
|
nilq/baby-python
|
python
|
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from platformio import fs
from platformio.package.exception import PackageException
from platformio.package.meta import PackageItem, PackageSpec
class PackageManagerSymlinkMixin(object):
@staticmethod
def is_symlink(path):
return path and path.endswith(".pio-link") and os.path.isfile(path)
@classmethod
def resolve_symlink(cls, path):
assert cls.is_symlink(path)
data = fs.load_json(path)
spec = PackageSpec(**data["spec"])
assert spec.symlink
        pkg_dir = spec.uri[10:]  # strip the "symlink://" prefix
if not os.path.isabs(pkg_dir):
pkg_dir = os.path.normpath(os.path.join(data["cwd"], pkg_dir))
return (pkg_dir if os.path.isdir(pkg_dir) else None, spec)
def get_symlinked_package(self, path):
pkg_dir, spec = self.resolve_symlink(path)
if not pkg_dir:
return None
pkg = PackageItem(os.path.realpath(pkg_dir))
if not pkg.metadata:
pkg.metadata = self.build_metadata(pkg.path, spec)
return pkg
def install_symlink(self, spec):
assert spec.symlink
        pkg_dir = spec.uri[10:]  # strip the "symlink://" prefix
if not os.path.isdir(pkg_dir):
raise PackageException(
f"Can not create a symbolic link for `{pkg_dir}`, not a directory"
)
link_path = os.path.join(
self.package_dir,
"%s.pio-link" % (spec.name or os.path.basename(os.path.abspath(pkg_dir))),
)
with open(link_path, mode="w", encoding="utf-8") as fp:
json.dump(dict(cwd=os.getcwd(), spec=spec.as_dict()), fp)
return self.get_symlinked_package(link_path)
def uninstall_symlink(self, spec):
assert spec.symlink
for name in os.listdir(self.package_dir):
path = os.path.join(self.package_dir, name)
if not self.is_symlink(path):
continue
pkg = self.get_symlinked_package(path)
if pkg.metadata.spec.uri == spec.uri:
os.remove(path)
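# Illustrative note (added; field names follow PackageSpec.as_dict() and may
# differ between PlatformIO versions): a `<name>.pio-link` file is plain JSON
# recording where the link was created and what it points at, e.g.
#   {"cwd": "/home/user/project",
#    "spec": {"name": "foo", "uri": "symlink:///home/user/libs/foo", ...}}
# resolve_symlink() strips the "symlink://" prefix and re-anchors a relative
# path against the stored "cwd".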
|
nilq/baby-python
|
python
|
from ebonite.core.objects.requirements import InstallableRequirement, Requirements, resolve_requirements
def test_resolve_requirements_arg():
requirements = Requirements([InstallableRequirement('dumb', '0.4.1'), InstallableRequirement('art', '4.0')])
actual_reqs = resolve_requirements(requirements)
assert actual_reqs == requirements
def test_resolve_requirement_arg():
req = InstallableRequirement('dumb', '0.4.1')
actual_reqs = resolve_requirements(req)
assert actual_reqs.installable[0] == req
def test_resolve_requirement_list_arg():
req = [InstallableRequirement('dumb', '0.4.1'), InstallableRequirement('art', '4.0')]
actual_reqs = resolve_requirements(req)
assert len(actual_reqs.installable) == 2
assert actual_reqs.installable == req
def test_resolve_str_arg():
req = "dumb==0.4.1"
actual_reqs = resolve_requirements(req)
assert actual_reqs.installable[0].to_str() == req
def test_resolve_str_list_arg():
req = ["dumb==0.4.1", "art==4.0"]
actual_reqs = resolve_requirements(req)
assert len(actual_reqs.installable) == 2
assert req == [r.to_str() for r in actual_reqs.installable]
|
nilq/baby-python
|
python
|
"""
==================
Find ECG artifacts
==================
Locate QRS component of ECG.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
# Setup for reading the raw data
raw = io.Raw(raw_fname)
event_id = 999
ecg_events, _, _ = mne.preprocessing.find_ecg_events(raw, event_id,
ch_name='MEG 1531')
# Read epochs
picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=False, eog=False,
include=['MEG 1531'], exclude='bads')
tmin, tmax = -0.1, 0.1
epochs = mne.Epochs(raw, ecg_events, event_id, tmin, tmax, picks=picks,
proj=False)
data = epochs.get_data()
print("Number of detected ECG artifacts : %d" % len(data))
###############################################################################
# Plot ECG artifacts
plt.plot(1e3 * epochs.times, np.squeeze(data).T)
plt.xlabel('Times (ms)')
plt.ylabel('ECG')
plt.show()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import pytest
from unittest import mock
from pytube import YouTube
from pytube.exceptions import LiveStreamError
from pytube.exceptions import RecordingUnavailable
from pytube.exceptions import RegexMatchError
from pytube.exceptions import VideoUnavailable
from pytube.exceptions import VideoPrivate
def test_video_unavailable():
try:
raise VideoUnavailable(video_id="YLnZklYFe7E")
except VideoUnavailable as e:
assert e.video_id == "YLnZklYFe7E" # noqa: PT017
assert str(e) == "YLnZklYFe7E is unavailable"
def test_regex_match_error():
try:
raise RegexMatchError(caller="hello", pattern="*")
except RegexMatchError as e:
assert str(e) == "hello: could not find match for *"
def test_live_stream_error():
try:
raise LiveStreamError(video_id="YLnZklYFe7E")
except LiveStreamError as e:
assert e.video_id == "YLnZklYFe7E" # noqa: PT017
assert str(e) == "YLnZklYFe7E is streaming live and cannot be loaded"
def test_recording_unavailable():
try:
raise RecordingUnavailable(video_id="5YceQ8YqYMc")
except RecordingUnavailable as e:
assert e.video_id == "5YceQ8YqYMc" # noqa: PT017
assert str(e) == "5YceQ8YqYMc does not have a live stream recording available"
def test_private_error():
try:
raise VideoPrivate("mRe-514tGMg")
except VideoPrivate as e:
assert e.video_id == "mRe-514tGMg" # noqa: PT017
assert str(e) == "mRe-514tGMg is a private video"
def test_raises_video_private(private):
with mock.patch("pytube.request.urlopen") as mock_url_open:
# Mock the responses to YouTube
mock_url_open_object = mock.Mock()
mock_url_open_object.read.side_effect = [
private["watch_html"].encode("utf-8"),
]
mock_url_open.return_value = mock_url_open_object
with pytest.raises(VideoPrivate):
YouTube("https://youtube.com/watch?v=mRe-514tGMg")
def test_raises_recording_unavailable(missing_recording):
with mock.patch("pytube.request.urlopen") as mock_url_open:
# Mock the responses to YouTube
mock_url_open_object = mock.Mock()
mock_url_open_object.read.side_effect = [
missing_recording["watch_html"].encode("utf-8"),
]
mock_url_open.return_value = mock_url_open_object
with pytest.raises(RecordingUnavailable):
YouTube("https://youtube.com/watch?v=5YceQ8YqYMc")
|
nilq/baby-python
|
python
|
import sys
import numpy as np
raw = sys.stdin.read()
locs = np.fromstring(raw, dtype=np.int64, sep=',')
average = np.average(locs)
def forLocation(locs, dest):
absolute = np.abs(locs - dest)
return ((absolute + 1) * absolute // 2).sum()
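# Why the mean works (added note): the per-element cost d * (d + 1) // 2 is
# (d**2 + d) / 2, and the quadratic part alone is minimized exactly at the
# mean of the locations; the extra d/2 term can shift the optimum by less
# than one unit, so evaluating floor(mean) and ceil(mean) covers the minimum.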
print('Result:', min(
forLocation(locs, int(np.ceil(average))),
forLocation(locs, int(np.floor(average)))
))
|
nilq/baby-python
|
python
|
from pycantonese import stop_words
_DEFAULT_STOP_WORDS = stop_words()
def test_stop_words():
_stop_words = stop_words()
assert "唔" in _stop_words
def test_stop_words_add_one_word():
_stop_words = stop_words(add="foobar")
assert "foobar" in _stop_words
assert len(_stop_words) - len(_DEFAULT_STOP_WORDS) == 1
def test_stop_words_remove_one_word():
_stop_words = stop_words(remove="唔")
assert "唔" not in _stop_words
assert len(_DEFAULT_STOP_WORDS) - len(_stop_words) == 1
def test_stop_words_add_multiple_words():
_stop_words = stop_words(add=["foo", "bar", "baz"])
assert {"foo", "bar", "baz"}.issubset(_stop_words)
assert len(_stop_words) - len(_DEFAULT_STOP_WORDS) == 3
def test_stop_words_remove_multiple_words():
_stop_words = stop_words(remove=["唔", "乜嘢", "其他"])
assert not {"唔", "乜嘢", "其他"}.issubset(_stop_words)
assert len(_DEFAULT_STOP_WORDS) - len(_stop_words) == 3
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from __future__ import print_function
import roslib
import rospy
import numpy as np
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import sys
# sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import cv2 as cv
print(cv.__version__)
class image_converter:
def __init__(self):
    self.image_pub = rospy.Publisher("image_bgr8", Image, queue_size=10)
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber("/xtion/color/image_raw",Image,self.callback)
def callback(self,data):
try:
img = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
(rows,cols,channels) = img.shape
if cols > 60 and rows > 60 :
      cv.circle(img, (50, 50), 10, (0, 0, 255))
blue = np.mat(img[:, :, 0])
green = np.mat(img[:, :, 1])
red = np.mat(img[:, :, 2])
blue_only = np.int16(blue) - np.int16(red) - np.int16(green)
blue_only[blue_only < 0] = 0
blue_only[blue_only >= 255] = 255
blue_only = np.uint8(blue_only)
kernel = np.ones((5, 5), np.uint8)
imgCanny = cv.Canny(blue_only, 100, 150) # edge detection
imgDilation = cv.dilate(imgCanny, kernel, iterations=1)
imgEroded = cv.erode(imgDilation, kernel, iterations=1)
im2,contours, hierarchy = cv.findContours(imgEroded, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
cnt = max(contours, key=len)
x, y, w, h = cv.boundingRect(cnt)
rect = cv.minAreaRect(cnt)
box = cv.boxPoints(rect)
box = np.int0(box)
ang = rect[2]
ang = np.abs(ang)
print("ang_before", ang)
leftmost = tuple(cnt[cnt[:, :, 0].argmin()][0])
rightmost = tuple(cnt[cnt[:, :, 0].argmax()][0])
topmost = tuple(cnt[cnt[:, :, 1].argmin()][0])
bottommost = tuple(cnt[cnt[:, :, 1].argmax()][0])
if ang < 8 or ang > 82:
if w > h:
x1 = x
y1 = y + np.uint8(h / 2)
x2 = x + w
y2 = y1
print("horizontal")
else:
y1 = y
x1 = x + np.uint8(w / 2)
x2 = x1
y2 = y + h
print("vertical")
else:
      if 10 < ang < 80:
if rightmost[1] - leftmost[1] >= 20:
x1 = np.int0((leftmost[0] + topmost[0]) / 2)
y1 = np.int0((leftmost[1] + topmost[1]) / 2)
x2 = np.int0((rightmost[0] + bottommost[0]) / 2)
y2 = np.int0((rightmost[1] + bottommost[1]) / 2)
print("left up")
else:
if rightmost[0] > bottommost[0]:
x2 = np.int0((rightmost[0] + bottommost[0]) / 2)
y2 = np.int0((rightmost[1] + bottommost[1]) / 2)
x1 = np.int0((leftmost[0] + topmost[0]) / 2)
y1 = np.int0((leftmost[1] + topmost[1]) / 2)
print("right up 1")
else:
x1 = np.int0((rightmost[0] + topmost[0]) / 2)
y1 = np.int0((rightmost[1] + topmost[1]) / 2)
x2 = np.int0((leftmost[0] + bottommost[0]) / 2)
y2 = np.int0((leftmost[1] + bottommost[1]) / 2)
print("right up 2")
else:
if w > h:
x1 = x
y1 = y + np.uint8(h / 2)
x2 = x + w
y2 = y1
print("horizontal 2")
else:
y1 = y
x1 = x + np.uint8(w / 2)
x2 = x1
y2 = y + h
print("vertical 2")
print("ang", ang)
print("leftmost:", leftmost, "rightmost:", rightmost, "topmost:", topmost, "bottommost:", bottommost, "\n")
print("x1, y1",x1,y1 ,"x2,y2", x2,y2)
print("box", box)
# cv.drawContours(imgEroded, [box], 0, (255, 0, 255), 2)
# print("x:", x, "y:", y, "w:", w, "h:", h, "\n")
#
#
cv.circle(imgEroded, (x1, y1), 10, (255, 0, 0), 2)
cv.circle(imgEroded, (x2, y2), 10, (255, 0, 0), 2)
# cv.drawContours(img, contours, 0, (255, 255, 0), 5)
cv.drawContours(img, cnt, -1, (0, 255, 255), 5)
# cv.imshow("Orig", img)
cv.imshow("Eroded Image", imgEroded)
cv.waitKey(1)
try:
self.image_pub.publish(self.bridge.cv2_to_imgmsg(img, "bgr8"))
except CvBridgeError as e:
print(e)
def main(args):
ic = image_converter()
rospy.init_node('image_converter', anonymous=True)
try:
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
cv.destroyAllWindows()
if __name__ == '__main__':
main(sys.argv)
|
nilq/baby-python
|
python
|
import numpy as np
from whole_body_mpc_msgs.msg import StateFeedbackGain
import copy
class StateFeedbackGainInterface():
def __init__(self, nx, nu, frame_id="world"):
self._msg = StateFeedbackGain()
self._msg.header.frame_id = frame_id
self._msg.nx = nx
self._msg.nu = nu
self._msg.data = [None] * (nx * nu)
self._K = np.zeros([nu, nx])
def writeToMessage(self, K):
        if K.shape[0] != self._msg.nu:
print("Couldn't convert the state feedback gain into a message since nu is not consistent")
return
        if K.shape[1] != self._msg.nx:
print("Couldn't convert the state feedback gain into a message since nx is not consistent")
return
for i in range(self._msg.nu):
for j in range(self._msg.nx):
self._msg.data[i * self._msg.nx + j] = K[i, j]
return copy.deepcopy(self._msg)
def writeFromMessage(self, msg):
        if msg.nu != self._K.shape[0]:
            print("Couldn't convert the message into a state feedback gain since nu is not consistent")
return
        if msg.nx != self._K.shape[1]:
print("Couldn't convert the message into a state feedback gain since nx is not consistent")
return
for i in range(msg.nu):
for j in range(msg.nx):
self._K[i, j] = msg.data[i * msg.nx + j]
return copy.deepcopy(self._K)
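# Illustrative round trip (added example; values are made up):
#   iface = StateFeedbackGainInterface(nx=4, nu=2)
#   msg = iface.writeToMessage(np.ones([2, 4]))  # K must be nu x nx
#   K = iface.writeFromMessage(msg)              # recovers the 2x4 gain (row-major layout)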
|
nilq/baby-python
|
python
|
""" This script runs every 10 seconds and assigns users to a new batch of tasks filtered by the specified column.
Notes:
1. Don't forget to enable Manual mode in Annotation settings
2. Be careful when adding email users: users who are not members of the project or workspace will break Data Manager
Install:
git clone https://github.com/heartexlabs/label-studio-sdk.git
cd label-studio-sdk
pip install -e .
python examples/label_studio_enterprise/assigner.py
Demo video:
https://www.youtube.com/watch?v=IeqrsCYYQ8k
"""
import time
import math
import label_studio_sdk
from label_studio_sdk.data_manager import Filters, Column, Operator, Type
class BatchAssigner:
def __init__(self, host, api_key, project_id):
self.ls = label_studio_sdk.Client(url=host, api_key=api_key)
self.project = self.ls.get_project(id=project_id)
def get_tasks(self, filter_column, filter_value, page, page_size):
""" Get tasks with filter by column and page number
"""
filters = Filters.create(Filters.OR, [
Filters.item(
Column.data(filter_column),
Operator.EQUAL,
Type.String,
Filters.value(filter_value)
)
])
return self.project.get_paginated_tasks(filters=filters, page=page, page_size=page_size, only_ids=True)
def get_page_total(self, filter_column, filter_value, page_size):
""" Total page number for tasks with filter by column and specified page size
"""
result = self.get_tasks(filter_column, filter_value, 1, page_size)
return math.ceil(result['total'] / float(page_size))
def get_user_ids(self, emails):
""" Get user IDs by email and preserve the order
:param emails: list of strings with email addresses
:return: user IDs in the same order as email addresses
"""
# get all users
user_ids = []
users = self.ls.get_users()
for email in emails:
for user in users:
if email == user.email:
print(user.email, '=>', user.id)
user_ids.append(user.id)
break
return user_ids
def assign_users_to_tasks(self,
user_ids,
filter_column='organization',
filter_value='name',
page=1,
page_size=100):
""" Assign annotators to filter by specified column and paginated tasks
:param user_ids: list of user email addresses
:param filter_column: str with data column name from Data Manager
:param filter_value: str with data value to filter as equal
:param page: current page
:param page_size: task number
:return: True if success else False or exception
"""
result = self.get_tasks(filter_column, filter_value, page, page_size)
task_ids = result['tasks']
if not task_ids:
            print('No tasks found')
return False
# call assign API
body = {
"type": "AN",
"users": user_ids,
"selectedItems": {
"all": False,
"included": task_ids
}
}
self.ls.make_request('post', f'/api/projects/{self.project.id}/tasks/assignees', json=body)
print(f'Users {user_ids} were assigned to {len(task_ids)} tasks '
f'from id={task_ids[0]} to id={task_ids[-1]}')
return True
def start():
host = 'http://localhost:8000'
api_key = 'e0b7751e84a059b0accaf14392e5e9fd4abe3de7'
project_id = 182
filter_column = 'shortname'
filter_value = 'opossum'
page_size = 10
emails = ['makseq@gmail.com', 'test@test.ru']
assigner = BatchAssigner(host, api_key, project_id)
# Be careful when using email users:
# users who are not members of the project or workspace will break Data Manager
user_ids = assigner.get_user_ids(emails=emails)
page_total = assigner.get_page_total(filter_column, filter_value, page_size)
print(f'Total pages for {filter_column}={filter_value} => {page_total}')
for current_page in range(1, page_total+1):
assigner.assign_users_to_tasks(
filter_column=filter_column,
filter_value=filter_value,
user_ids=user_ids,
page=current_page,
page_size=page_size
)
time.sleep(10)
if __name__ == '__main__':
start()
|
nilq/baby-python
|
python
|
import time
import requests
from requests.exceptions import HTTPError, Timeout
from bs4 import BeautifulSoup
from core.log_manager import logger
class Updates:
    MAX_LENGTH = 25  # Maximum number of digits a normalized version can hold
TIME_INTERVAL = 48 # In hours
def __init__(self, link, local_version):
self.raw_local_version = str(local_version)
self.url = link if link[-1] == "/" else link + "/" # IMPORTANT: the url must contain a slash at the end
def get_remote_version(self):
"""Gets the last version of the remote AutomatiK repository."""
try:
req = requests.get(self.url) # Gets the HTML code from the web page
except (HTTPError, Timeout, requests.exceptions.ConnectionError):
logger.error("Version request to GitHub failed")
return False
soup = BeautifulSoup(req.content, "html.parser")
try:
remote_version = soup.find("span", # Type of container
{"class": "css-truncate-target"}, # Additional attrs
recursive=True).text # Parameters of the search
except AttributeError:
logger.error("Version parsing from GitHub failed")
return False
return remote_version
def convert(self, raw_remote_version):
"""Converts the complex syntax of a version to an integer."""
if not raw_remote_version:
return False
local_version = "".join([x for x in self.raw_local_version if x.isdigit()])
local_version += "0" * (Updates.MAX_LENGTH - len(local_version))
remote_version = "".join([x for x in raw_remote_version if x.isdigit()])
remote_version += "0" * (Updates.MAX_LENGTH - len(remote_version))
        # If the zero-padded 25-digit remote number is larger, the remote version is newer
if int(remote_version) > int(local_version):
logger.info(f"New update ({raw_remote_version}) available at {self.url + raw_remote_version}")
return {"remote": int(remote_version), "local": int(local_version)}
def start_checking(self):
"""Starts looking for new version every X hours."""
while True:
self.convert(self.get_remote_version())
time.sleep(Updates.TIME_INTERVAL * 3600)
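# Illustrative usage (added example; the URL is hypothetical and must point at
# a GitHub releases page, since get_remote_version scrapes the version tag):
#   upd = Updates("https://github.com/example/AutomatiK/releases/latest", "v1.2.3")
#   upd.start_checking()  # blocks, re-checking every TIME_INTERVAL hours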
|
nilq/baby-python
|
python
|
import numpy
from typing import List
from skipi.function import Function
class AverageFunction(Function):
@classmethod
def from_functions(cls, functions: List[Function], domain=None):
r"""
Returns the average function based on the functions given as a list F = [f_1, ..., f_n]
        .. math::
f_avg(x) = 1/n * (f_1(x) + \ldots + f_n(x))
where f_i is an element of F
:param functions: List of functions to average
:return:
"""
n = len(functions)
if n == 0:
raise RuntimeError("Cannot average functions if no function was given")
if n == 1:
return functions[0]
if domain is None:
domain = functions[0].get_domain()
# sum of axis=0, since x might be a vector containing multiple evaluation points
return cls(domain, lambda x: numpy.sum([f(x) for f in functions], axis=0) / n)
class ComputeAverage(Function):
@classmethod
def from_functions(cls, functions: [Function], domain=None, avg_fun=None):
if domain is None:
domain = functions[0].get_domain()
if avg_fun is None:
avg_fun = cls.avg
return Function.to_function(domain, lambda x: avg_fun([f(x) for f in functions]))
@staticmethod
def avg(numbers):
numbers = numpy.array(numbers)
return numpy.average(numbers.real) + 1j * numpy.average(numbers.imag)
class DrawFromFunction(Function):
@classmethod
def from_function(cls, function: Function):
dy = function.dy
if dy is None:
return function
value = numpy.random.normal(function.eval().real, dy.eval().real)
if function.is_complex():
value = value + 1j * numpy.random.normal(function.eval().imag, dy.eval().imag)
return Function.to_function(function.get_dom(), value)
class ComputeStandardDeviation(Function):
@classmethod
def from_functions(cls, functions: [Function], domain=None, std_fun=None):
"""
Computes the standard deviation (pointwise) using all functions
If domain is None, the domain from the first function will be used
If std_fun is None, the "complex" standard deviation will be used, see the method cstd.
:param functions: A list of functions from which the std should be calculated
:param domain: A domain
:param std_fun: A function calculating the std
:return: new Function
"""
if domain is None:
domain = functions[0].get_domain()
if std_fun is None:
std_fun = cls.cstd
return Function.to_function(domain, lambda x: std_fun([f(x) for f in functions]))
@staticmethod
def cstd(complexs):
"""
Calculates the standard deviation of a complex number by splitting it into the real and imaginary
part, resulting in a complex standard deviation:
cstd(complex) = std(complex.real) + 1j*std(complex.imag).
:param complexs:
:return:
"""
complexs = numpy.array(complexs)
return numpy.std(complexs.real) + 1j * numpy.std(complexs.imag)
class MaxOfFunctions(Function):
@classmethod
def from_functions(cls, functions: [Function]):
        return Function.to_function(functions[0].get_dom(), lambda x: numpy.max([f(x) for f in functions], axis=0))
class MinOfFunctions(Function):
@classmethod
def from_functions(cls, functions: [Function]):
        return Function.to_function(functions[0].get_dom(), lambda x: numpy.min([f(x) for f in functions], axis=0))
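# Hedged usage sketch (added example; assumes skipi Functions are built from a
# numpy domain, as done elsewhere in this module):
#   import numpy as np
#   dom = np.linspace(0.0, 1.0, 101)
#   fs = [Function.to_function(dom, lambda x, k=k: x ** k) for k in (1, 2, 3)]
#   favg = AverageFunction.from_functions(fs)           # pointwise (f1 + f2 + f3) / 3
#   fstd = ComputeStandardDeviation.from_functions(fs)  # pointwise (complex) std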
|
nilq/baby-python
|
python
|
import sqlite3
import urllib
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
from phyllo.phyllo_logger import logger
def getBooks(soup):
siteURL = 'http://www.thelatinlibrary.com'
textsURL = []
# get links to books in the collection
for a in soup.find_all('a', href=True):
link = a['href']
textsURL.append("{}/{}".format(siteURL, a['href']))
# remove unnecessary URLs
while ("http://www.thelatinlibrary.com//index.html" in textsURL):
textsURL.remove("http://www.thelatinlibrary.com//index.html")
textsURL.remove("http://www.thelatinlibrary.com//classics.html")
textsURL.remove("http://www.thelatinlibrary.com//christian")
logger.info("\n".join(textsURL))
return textsURL
def getSermons(soup):
siteURL = 'http://www.thelatinlibrary.com/augustine'
textsURL = []
# get links to books in the collection
for a in soup.find_all('a', href=True):
link = a['href']
textsURL.append("{}/{}".format(siteURL, a['href']))
# remove unnecessary URLs
while ("http://www.thelatinlibrary.com/augustine//index.html" in textsURL):
textsURL.remove("http://www.thelatinlibrary.com/augustine//index.html")
textsURL.remove("http://www.thelatinlibrary.com/augustine//classics.html")
textsURL.remove("http://www.thelatinlibrary.com/augustine//christian")
textsURL.remove("http://www.thelatinlibrary.com/augustine//august.html")
logger.info("\n".join(textsURL))
return textsURL
def main():
# The collection URL below.
collURL = 'http://www.thelatinlibrary.com/august.html'
collOpen = urllib.request.urlopen(collURL)
collSOUP = BeautifulSoup(collOpen, 'html5lib')
author = collSOUP.title.string.strip()
colltitle = "AUGUSTINE OF HIPPO"
date = collSOUP.span.string.strip().replace('(', '').replace(')', '').replace(u"\u2013", '-')
textsURL = getBooks(collSOUP)
with sqlite3.connect('texts.db') as db:
c = db.cursor()
c.execute(
'CREATE TABLE IF NOT EXISTS texts (id INTEGER PRIMARY KEY, title TEXT, book TEXT,'
' language TEXT, author TEXT, date TEXT, chapter TEXT, verse TEXT, passage TEXT,'
' link TEXT, documentType TEXT)')
c.execute("DELETE FROM texts WHERE author = 'Augustine'")
for url in textsURL:
openurl = urllib.request.urlopen(url)
textsoup = BeautifulSoup(openurl, 'html5lib')
if url.startswith("http://www.thelatinlibrary.com/augustine/iulianus1.shtml"):
title = "CONTRA SECUNDAM IULIANI RESPONSIONEM LIBER PRIMUS"
elif url.startswith("http://www.thelatinlibrary.com/augustine/iulianus2.shtml"):
title = "CONTRA SECUNDAM IULIANI RESPONSIONEM LIBER SECUNDUS"
else:
try:
title = textsoup.title.string.split(':')[1].strip()
except:
try:
title = textsoup.title.string.split(',')[1].strip()
except:
title = textsoup.find('p', class_='pagehead').string.strip()
print(title)
if title.startswith("Confessions"):
getp = textsoup.find_all('p')
chapter = 0
verse = 0
for p in getp:
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallborder', 'margin',
                                                     'internal_navigation', 'citation']:  # these are not part of the main text
continue
except:
pass
verses = []
pstring = p.get_text()
pstring = pstring.strip()
if re.match("[0-9]+", pstring):
if " " in pstring:
heading = pstring.split(" ")[0]
pstring = pstring.split(" ")[1]
chapter = heading.split(".")[1].strip()
verse = heading.split(".")[2].strip()
else:
chapter = pstring.split(".")[1].strip()
verse = pstring.split(".")[2].strip()
continue
verses.append(pstring)
for v in verses:
if v.startswith('Augustine'):
continue
if v.startswith('commentary'):
# ignore an english note in there
continue
if v is None or v == '' or v.isspace():
continue
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, v.strip(), url, 'prose'))
elif title.startswith("SANCTI AUGUSTINI EPISTULA"):
getp = textsoup.find_all('p')
chapter = 0
verse = 0
for p in getp:
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallborder', 'margin',
                                                     'internal_navigation', 'citation']:  # these are not part of the main text
continue
except:
pass
verses = []
pstring = p.get_text()
pstring = pstring.strip()
verses.append(pstring)
for v in verses:
if v.startswith('Augustine'):
continue
if v is None or v == '' or v.isspace():
continue
# verse number assignment.
verse += 1
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, v.strip(), url, 'prose'))
elif title.startswith("De Civitate Dei"):
getp = textsoup.find_all('p')
chapter = 0
verse = 0
for p in getp:
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallborder', 'margin',
                                                     'internal_navigation', 'citation']:  # these are not part of the main text
continue
except:
pass
verses = []
pstring = p.get_text()
pstring = pstring.strip()
if re.match("\[", pstring):
# this is a chapter heading
chapter = pstring.split("]")[0].replace("[", "").strip()
verse = 0
pstring = pstring.split("]")[1].strip()
verses.append(pstring)
for v in verses:
if v.startswith('Augustine'):
continue
if v is None or v == '' or v.isspace():
continue
verse += 1
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, v.strip(), url, 'prose'))
elif title.startswith("de Trinitate"):
getp = textsoup.find_all('p')
chapter = "PROLOGUS"
verse = 0
for p in getp:
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallborder', 'margin',
                                                     'internal_navigation', 'citation']:  # these are not part of the main text
continue
except:
pass
verses = []
pstring = p.get_text()
pstring = pstring.strip()
if p.find('b') is not None:
continue
# these headings are handled elsewhere
if re.match("\[", pstring):
# this is a heading
heading = pstring.split("]")[0].replace("[", "").strip()
if re.match("[IVXL]+", heading):
# this is a chapter and verse heading
try:
chapter = re.split(" ", heading)[0].strip()
verse = re.split(" ", heading)[1].strip()
except:
verse = heading
else:
verse = heading
pstring = pstring.split("]")[1].strip()
verses.append(pstring)
for v in verses:
if v.startswith('Augustine'):
continue
if v is None or v == '' or v.isspace():
continue
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, v.strip(), url, 'prose'))
elif title.startswith("CONTRA SECUNDAM IULIANI RESPONSIONEM"):
getp = textsoup.find_all('p')
chapter = "PRAEFATIO"
verse = 0
for p in getp:
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallborder', 'margin',
                                                     'internal_navigation', 'citation']:  # these are not part of the main text
continue
except:
pass
verses = []
pstring = p.get_text()
# does this leave numbers in the text from footnote links?
pstring = pstring.strip()
if p.find('br') is not None:
# skip footnotes - not sure about this?
continue
# used bolded headings as chapters
# left numbers in text
# can be changed if neccesary
if p.find('b') is not None:
if pstring.startswith("PRAEFATIO") or pstring.startswith("LIBER"):
continue
# these headings are handled elsewhere
else:
chapter = pstring
verse = 0
continue
verses.append(pstring)
for v in verses:
if v.startswith('Augustine'):
continue
if v is None or v == '' or v.isspace():
continue
verse += 1
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, v.strip(), url, 'prose'))
elif title.startswith("de Dialectica"):
getp = textsoup.find_all('p')
chapter = 0
verse = 0
for p in getp:
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallborder', 'margin',
                                                     'internal_navigation', 'citation']:  # these are not part of the main text
continue
except:
pass
verses = []
pstring = p.get_text()
pstring = pstring.strip()
if re.match("[IVXL]+", pstring):
# this is a chapter heading
chapter = pstring.split(".")[0].strip()
verse = 0
pstring = pstring.split(".")[1].strip()
verses.append(pstring)
for v in verses:
if v.startswith('Augustine'):
continue
if v is None or v == '' or v.isspace():
continue
verse += 1
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, v.strip(), url, 'prose'))
elif title.startswith("de Fide"):
# verses are split across chapter headings, so they get double entered
# e.g. there are two verse 21s, one in Caput IX and one in Caput X
getp = textsoup.find_all('p')
chapter = "-1"
verse = 0
for p in getp:
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallborder', 'margin',
                                                     'internal_navigation', 'citation']:  # these are not part of the main text
continue
except:
pass
pstring = p.get_text()
pstring = pstring.strip()
if p.find('b') is not None:
chapter = pstring
continue
lines = re.split("([0-9]+\.)", pstring)
for l in lines:
if re.match("[0-9]", l):
verse += 1
continue
if l.startswith('Augustine'):
continue
if l is None or l == '' or l.isspace():
continue
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, l.strip(), url, 'prose'))
elif title.startswith("de Catechizandis"):
getp = textsoup.find_all('p')
chapter = "-1"
verse = 0
for p in getp:
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallborder', 'margin',
                                                     'internal_navigation', 'citation']:  # these are not part of the main text
continue
except:
pass
pstring = p.get_text()
pstring = pstring.strip()
if p.find('b') is not None:
chapter = p.find('b').string.strip()
pstring = pstring.replace(chapter, "").strip()
lines = re.split("([0-9]+\.)", pstring)
for l in lines:
if re.match("[0-9]", l):
verse += 1
continue
if l.startswith('Augustine'):
continue
if l is None or l == '' or l.isspace():
continue
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, l.strip(), url, 'prose'))
elif title.startswith("REGULA SANCTI AUGUSTINI"):
getp = textsoup.find_all('p')
chapter = "-1"
verse = 0
for p in getp:
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallborder', 'margin',
                                                     'internal_navigation', 'citation']:  # these are not part of the main text
continue
except:
pass
pstring = p.get_text()
pstring = pstring.strip()
if p.find('b') is not None:
chapter = pstring
continue
lines = re.split("([0-9]+\.)", pstring)
for l in lines:
if re.match("[0-9]", l):
verse += 1
continue
if l.startswith('Augustine'):
continue
if l is None or l == '' or l.isspace():
continue
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, l.strip(), url, 'prose'))
else:
sermons = getSermons(textsoup)
# these are the Sermons, which have their own page of links
for s in sermons:
sermonurl = urllib.request.urlopen(s)
sermonsoup = BeautifulSoup(sermonurl, 'html5lib')
title = sermonsoup.title.string.split(':')[1].strip()
print(title)
getp = sermonsoup.find_all('p')
chapter = "-1"
verse = 0
for p in getp:
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallborder', 'margin',
                                                     'internal_navigation', 'citation']:  # these are not part of the main text
continue
except:
pass
verses = []
pstring = p.get_text()
pstring = pstring.strip()
verses.append(pstring)
for v in verses:
if v.startswith('Augustine'):
continue
if v is None or v == '' or v.isspace():
continue
# verse number assignment.
verse += 1
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, v.strip(), url, 'prose'))
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
import scipy.integrate as scin
import numpy as np
import matplotlib.pyplot as pl
g=9.80665
Cd=0.2028
m=80
ics = ([0,0])
t = np.linspace(0, 100, 500)  # 500 evenly spaced time points from 0 to 100 inclusive
def deriv(x,t):
F = np.zeros(2) #creates an array F, with length 2 thats filled with 0's
F[0]=x[1] #dy/dt = y'
F[1]=g-((Cd/m)*((x[1])**2)) #d2y/dt2 = g-((cd/m) * (y'^2))
return F
sol_1 = scin.odeint(deriv,ics,t) #odeint outputs array --> [y solution, dy/dt solution]
x_1 = sol_1[:,0] #(every value in) the y solution column
y_1 = sol_1[:,1] #(every value in) the dy/dt solution column
print(sol_1)
#print(x_1)
#print(y_1)
pl.figure(1)
pl.plot(t,x_1,'r-')
pl.xlabel('Time(s)')
pl.ylabel('y')
pl.show()
pl.figure(2)
pl.plot(t,y_1,'r-')
pl.xlabel('Time(s)')
pl.ylabel('dy/dt')
pl.show()
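# Sanity check (added note): the model dy'/dt = g - (Cd/m) * y'**2 has terminal
# velocity v_t = sqrt(g * m / Cd) = sqrt(9.80665 * 80 / 0.2028) ~= 62.2 m/s,
# which the dy/dt curve in figure 2 should approach asymptotically.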
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
wakatime.projects.projectmap
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Use the ~/.wakatime.cfg file to set custom project names by
recursively matching folder paths.
Project maps go under the [projectmap] config section.
For example:
[projectmap]
/home/user/projects/foo = new project name
/home/user/projects/bar = project2
Will result in file `/home/user/projects/foo/src/main.c` to have
project name `new project name`.
:copyright: (c) 2013 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
import logging
import os
from .base import BaseProject
log = logging.getLogger(__name__)
# str is unicode in Python3
try:
unicode
except NameError:
unicode = str
class ProjectMap(BaseProject):
def process(self):
if not self._configs:
return False
self.project = self._find_project(self.path)
return self.project is not None
def _find_project(self, path):
path = os.path.realpath(path)
if os.path.isfile(path):
path = os.path.split(path)[0]
if self._configs.get(path.lower()):
return self._configs.get(path.lower())
if self._configs.get('%s/' % path.lower()):
return self._configs.get('%s/' % path.lower())
if self._configs.get('%s\\' % path.lower()):
return self._configs.get('%s\\' % path.lower())
split_path = os.path.split(path)
if split_path[1] == '':
return None
return self._find_project(split_path[0])
def branch(self):
return None
def name(self):
if self.project:
return unicode(self.project)
return None
|
nilq/baby-python
|
python
|
__all__ = ['features','graph_layers']
|
nilq/baby-python
|
python
|