text string | size int64 | token_count int64 |
|---|---|---|
# -------------------------------------------------------------------------------
# Licence:
# Copyright (c) 2012-2017 Valerio for Gecosistema S.r.l.
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
#
# Name: gdal_shape.py
# Purpose:
#
# Author: Luzzi Valerio
#
# Created: 26/07/2017
# -------------------------------------------------------------------------------
"""apt-get -y install gdal-bin libgdal-dev python-gdal"""
import ogr
from execution import *
def GetFeatures(fileshp):
    """
    GetFeatures - return every feature of the first layer of <fileshp>.
    """
    features = []
    dataset = ogr.OpenShared(fileshp)
    if dataset:
        for item in dataset.GetLayer(0):
            features.append(item)
        dataset = None
    return features
def GetFeatureByFid(fileshp, fid):
    """
    GetFeatureByFid - fetch a single feature by its FID from the first layer.
    """
    found = None
    source = ogr.OpenShared(fileshp)
    if source:
        found = source.GetLayer(0).GetFeature(fid)
        source = None
    return found
def removeShape(filename):
    """
    removeShape - delete a shapefile (and its sidecar files) via the OGR driver.
    """
    try:
        # os.path.isfile replaces the old `file(filename)` probe, which opened
        # the file just to test existence, leaked the handle, and only worked
        # on Python 2.
        if os.path.isfile(filename):
            driver = ogr.GetDriverByName('ESRI Shapefile')
            driver.DeleteDataSource(filename)
    except Exception as ex:
        # Best-effort removal: report and carry on.
        print(ex)
    return None
def SaveFeature(feature, fileshp=""):
    """
    SaveFeature - write a single OGR feature to its own shapefile.

    feature - the ogr.Feature to persist
    fileshp - optional output path; defaults to "<OBJECTID>.shp"
    Returns the shapefile path.
    """
    # Default file name derives from the feature's OBJECTID field.
    fileshp = fileshp if fileshp else "%d.shp" % (feature.GetField("OBJECTID"))
    driver = ogr.GetDriverByName("ESRI Shapefile")
    if os.path.exists(fileshp):
        driver.DeleteDataSource(fileshp)
    ds = driver.CreateDataSource(fileshp)
    geom = feature.GetGeometryRef()
    # Layer inherits SRS and geometry type from the source feature's geometry.
    layer = ds.CreateLayer(fileshp, srs=geom.GetSpatialReference(), geom_type=geom.GetGeometryType())
    # (removed the unused `featureDefn` local and dead field-creation scaffolding)
    layer.CreateFeature(feature)
    feature = None
    ds = None
    return fileshp
def Extent2shp(filename, fileout=""):
    """
    Extent2shp - write the bounding box of <filename> as a one-feature
    polygon shapefile and return the output path.
    """
    if not fileout:
        fileout = forceext(filename, "ext.shp")
    layername, (minx, miny, maxx, maxy), proj4, geomtype, dontcare = GDAL_META(filename)
    # Closed ring around the extent (first corner repeated at the end).
    ring = ogr.Geometry(ogr.wkbLinearRing)
    for x, y in ((minx, miny), (maxx, miny), (maxx, maxy), (minx, maxy), (minx, miny)):
        ring.AddPoint(x, y)
    polygon = ogr.Geometry(ogr.wkbPolygon)
    polygon.AddGeometry(ring)
    driver = ogr.GetDriverByName("ESRI Shapefile")
    # Remove any stale output first.
    if os.path.exists(fileout):
        driver.DeleteDataSource(fileout)
    ds = driver.CreateDataSource(fileout)
    # Projection goes into the .prj sidecar file.
    srs = ogr.osr.SpatialReference()
    srs.ImportFromProj4(proj4)
    strtofile(srs.ExportToWkt(), forceext(fileout, "prj"))
    layer = ds.CreateLayer(layername, geom_type=ogr.wkbPolygon)
    feature = ogr.Feature(layer.GetLayerDefn())
    feature.SetGeometry(polygon)
    layer.CreateFeature(feature)
    feature, ds = None, None
    return fileout
def XYZ2Shp(filecsv, t_srs="EPSG:4326", fileout=None):
    """
    XYZ2Shp - convert a CSV of X,Y,VALUE rows into a point shapefile.

    filecsv - input csv with at least three numeric columns per line
    t_srs   - target spatial reference, e.g. "EPSG:4326"
    fileout - optional output path; defaults to <filecsv>.shp
    Returns the output shapefile path.
    """
    driver = ogr.GetDriverByName('ESRI Shapefile')
    fileout = forceext(filecsv, "shp") if not fileout else fileout
    remove(fileout)
    layername = juststem(fileout)
    print(layername)
    dataset = driver.CreateDataSource(fileout)
    # important fix: the shapefile driver wants a byte-string layer name
    layername = layername.encode('utf-8')
    layer = dataset.CreateLayer(layername, None, ogr.wkbPoint)
    layer.CreateField(ogr.FieldDefn("VALUE", ogr.OFTReal))
    srs = None
    try:
        # Set the SpatialReference and write the .prj sidecar file.
        srs = ogr.osr.SpatialReference()
        epsg = val(t_srs.upper().replace("EPSG:", ""))
        srs.ImportFromEPSG(epsg)
        strtofile(srs.ExportToWkt(), forceext(fileout, "prj"))
    except Exception as ex:
        # A bad/unknown EPSG code leaves the layer without an SRS.
        print(ex)
    with open(filecsv, 'rb') as stream:
        for line in stream:
            arr = [item for item in line.strip(" \r\n").split(",") if len(item) > 0]
            # Keep only rows where the first three columns parse as numbers.
            if len(arr) >= 3 and val(arr[0]) is not None and val(arr[1]) is not None and val(arr[2]) is not None:
                X, Y, value = val(arr[0]), val(arr[1]), val(arr[2])
                feature = ogr.Feature(layer.GetLayerDefn())
                geom = ogr.CreateGeometryFromWkt("POINT (%s %s)" % (X, Y))
                if srs:
                    geom.AssignSpatialReference(srs)
                feature.SetGeometry(geom)
                feature.SetField2("VALUE", value)
                # Save the feature on the layer
                layer.CreateFeature(feature)
                feature.Destroy()
    return fileout
# -------------------------------------------------------------------------------
# XYZ2VRT
# -------------------------------------------------------------------------------
def XYZ2VRT(filename, t_srs="EPSG:4326", fileout=None):
    """
    XYZ2VRT - normalize a whitespace-separated XYZ text file into a CSV and
    write an OGR VRT wrapper exposing it as a point layer.

    filename - input text file
    t_srs    - spatial reference written into the VRT, e.g. "EPSG:4326"
    fileout  - optional CSV output path; defaults to <filename>.csv
    """
    fileout = fileout if fileout else forceext(filename, "csv")
    filevrt = forceext(fileout, "vrt")
    # `with` guarantees both handles are closed even if the loop raises
    # (the old code leaked `ws` on any mid-loop exception).
    with open(fileout, "wb") as ws:
        with open(filename, 'rb') as stream:
            for line in stream:
                line = re.sub(r'\s+', ',', line.strip(" \r\n"))
                if len(line.split(",")) == 5:
                    arr = val(line.split(","))
                    # First two columns are km -> meters; keep only X,Y,Z.
                    arr[0] *= 1000
                    arr[1] *= 1000
                    line = ",".join(["%g" % item for item in arr[:3]])
                ws.write(line + "\n")
    env = {"layername": juststem(fileout), "fileout": fileout, "t_srs": t_srs}
    text = """<OGRVRTDataSource>
<OGRVRTLayer name="{layername}">
<SrcDataSource>{fileout}</SrcDataSource>
<GeometryType>wkbPoint</GeometryType>
<GeometryField encoding="PointFromColumns" x="field_1" y="field_2" z="field_3"/>
<LayerSRS>{t_srs}</LayerSRS>
</OGRVRTLayer>
</OGRVRTDataSource>"""
    text = sformat(text, env)
    strtofile(text, filevrt)
# -------------------------------------------------------------------------------
# Main loop
# -------------------------------------------------------------------------------
if __name__ == '__main__':
    # Ad-hoc manual test driver with hard-coded Windows paths; not executed
    # when the module is imported as a library.
    workdir = r"D:\EUDEM_GECO\Basins\Tevere\PERC"
    chdir(workdir)  # chdir comes from the `execution` star import
    env = {"Tevere": r"Tevere.perc0.3.tif", "px": 1}
    # gdalwarp()
| 7,404 | 2,511 |
import pyglet
import logging
# Verbose logging during development.
logging.basicConfig(format='%(asctime)s %(module)s %(levelname)s %(message)s', level=logging.DEBUG)
# setup resource paths before importing any game code
pyglet.resource.path = ['data', 'data/tiles']
pyglet.resource.reindex()
from .scenes import SceneManager, GameScene
# Main window plus the scene stack that drives the game.
window = pyglet.window.Window(width=1024, height=768, caption='The Nightmare')
scene_manager = SceneManager(window)
scene_manager.push(GameScene(scene_manager))
clock_display = pyglet.clock.ClockDisplay()  # on-screen FPS counter
# Standard alpha blending so sprites with transparency composite correctly.
pyglet.gl.glEnable(pyglet.gl.GL_BLEND)
pyglet.gl.glBlendFunc(pyglet.gl.GL_SRC_ALPHA, pyglet.gl.GL_ONE_MINUS_SRC_ALPHA)
@window.event
def on_draw():
    """Render the active scene; quit the app once the scene stack is empty."""
    window.clear()
    try:
        scene_manager.on_draw()
    except IndexError:
        # Scene stack exhausted -> shut the event loop down.
        pyglet.app.exit()
    # Reset the modelview matrix before drawing the FPS overlay.
    pyglet.gl.glLoadIdentity()
    clock_display.draw()
def on_update(dt):
    """Advance the active scene by `dt` seconds; quit when no scene remains."""
    try:
        scene_manager.on_update(dt)
    except IndexError:
        # Nothing left to update -> leave the event loop.
        pyglet.app.exit()
def main():
    """Entry point: hook the update callback into pyglet's clock and run."""
    pyglet.clock.schedule(on_update)
    pyglet.app.run()
| 1,015 | 380 |
import os
import sys
# Make the sibling `models` directory importable (Windows path separators).
sys.path.insert(1, f'{os.path.dirname(os.getcwd())}\\models\\')
from datetime import datetime
from time import sleep
import pandas as pd
from Mapper import df_ISO3_mapper
def get_fixture_data(url, driver):
    """
    Scrape Premier League fixtures for gameweeks 1-38.

    url    - base fixtures URL; the gameweek number is appended to it.
    driver - an already-configured selenium webdriver.
    Returns a DataFrame with columns home_team, away_team, gameweek, datetime.
    """
    home_teams = []
    away_teams = []
    date_times = []
    gameweeks = []
    # The original code reused `i` at three nesting levels and needed a
    # separate gw_counter to survive the clobbering; distinct names fix that.
    for gameweek in range(1, 39):
        driver.get(url + str(gameweek))
        sleep(1)  # give the page time to render
        game_days = driver.find_elements_by_css_selector('div.sc-bdVaJa.eIzRjw')
        for day in game_days:
            date = day.find_element_by_tag_name('h4').text
            game_day = day.find_element_by_tag_name('ul').text
            games = game_day.split('\n')
            if ':' in game_day:
                # Kick-off times ("HH:MM") split entries into extra tokens;
                # flatten on ':' so each fixture spans 4 consecutive slots.
                tokens = []
                for item in games:
                    tokens.extend(item.split(':'))
                for j in range(0, len(tokens), 4):
                    home_teams.append(tokens[j])
                    away_teams.append(tokens[j + 3])
                    date_times.append(datetime.strptime(date, '%A %d %B %Y'))
                    gameweeks.append(gameweek)
    df = pd.DataFrame({'home_team': home_teams, 'away_team': away_teams,
                       'datetime': date_times, 'gameweek': gameweeks})
    return df[['home_team', 'away_team', 'gameweek', 'datetime']]
def save_csv(data):
    """Persist the fixtures DataFrame as CSV (Windows-style path layout)."""
    parent = os.path.dirname(os.getcwd())
    data.to_csv(f'{parent}\\data\\Fixtures\\fixtures.csv', index=0, sep=',')
def collect(driver, mapper):
    """Top-level entry: scrape all fixtures, map team names, save the CSV."""
    print('Collecting fixtures...')
    fixtures_url = 'https://fantasy.premierleague.com/fixtures/'
    fixtures = get_fixture_data(fixtures_url, driver)
    # df_ISO3_mapper presumably translates club names via `mapper` -- verify in Mapper.py
    fixtures = df_ISO3_mapper(fixtures, mapper)
    save_csv(fixtures)
| 1,993 | 685 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class Output:
    """Printing facade that records everything it echoes and can wrap text in
    ANSI colour escapes (Python 2 module: relies on the `unicode` builtin)."""

    def __init__(self, print_function):
        """
        @param print_function: Dependency. Inject a function implementing the same interface as
        print().
        """
        self.__print_function = print_function
        self.colored = True
        # Full transcript of everything echoed by this instance so far:
        self.echoed = ""

    def echo(self, text):
        """Print `text` and append it, newline-terminated, to the transcript."""
        self.__print_function(text)
        self.echoed += text + "\n"

    def red(self, text):
        return self.__colored(text, 31)

    def green(self, text):
        return self.__colored(text, 32)

    def blue(self, text):
        return self.__colored(text, 34)

    def __colored(self, text, color):
        """Wrap `text` in the ANSI escape for `color`; pass it through
        unchanged when colouring is disabled."""
        text = unicode(text)
        if not self.colored:
            return text
        return "\033[" + unicode(color) + "m" + text + "\033[0m"
| 1,097 | 331 |
# =================================
# STACKEXCHANGE APP SETTINGS
# =================================
# Placeholder credentials -- fill in from the registered StackApps entry.
CLIENT_ID = "***"
CLIENT_SECRET = "*****"
KEY = "****"
ACCESS_TOKEN = "*****"
# =================================
# STACKEXCHANGE API SETTINGS
# =================================
STACKEXCHANGE_URL = "api.stackexchange.com"
API_VERSION = "2.2"
# Relative endpoint paths; {answerID} is substituted at request time.
ANSWERS_URL = "answers"
QUESTIONS_URL = "questions"
COMMENTS_URL = "answers/{answerID}/comments"
SECONDS_TO_SLEEP = 10  # pause between requests to respect API throttling
# Earlier iterations of this counting exercise, kept as history:
# print(1)
# print(2)
# print(3)
# print(4)
# print(5)
# contador = 1
# print(contador)
# while contador < 1000:
# contador += 1
# print(contador)
# a = list(range(1000))
# print(a)
# for contador in range(1, 1001):
# print(contador)
# Print the first ten multiples of 11 (0, 11, ..., 99).
for i in range(10):
    print(11 * i)
| 288 | 138 |
import typing
import functools
class Solution:
    def firstDayBeenInAllRooms(
        self,
        nx: typing.List[int],
    ) -> int:
        """
        LeetCode 1997: return the first day (0-indexed) on which every room
        has been visited at least once, modulo 1e9+7.

        dp[i] = first day room i is reached. Moving from room i-1 to i costs
        replaying the cycle back from nx[i-1]:
            dp[i] = 2*dp[i-1] - dp[nx[i-1]] + 2   (mod 1e9+7)
        The original recursive dfs with lru_cache hits Python's recursion
        limit for large inputs (n can be up to 1e5); this iterative DP is
        equivalent and depth-safe. Python's % always yields a non-negative
        result, so the subtraction needs no extra correction.
        """
        mod = 10 ** 9 + 7
        n = len(nx)
        dp = [0] * n
        for i in range(1, n):
            dp[i] = (2 * dp[i - 1] - dp[nx[i - 1]] + 2) % mod
        return dp[n - 1]
| 410 | 181 |
#!/usr/bin/env python
from setuptools import setup, find_packages
# Read the long description up front so the file handle is closed promptly;
# the old inline open() was never closed and used the platform default
# encoding, which can break on non-UTF-8 locales.
with open('README.md', 'r', encoding='utf-8') as readme:
    _LONG_DESCRIPTION = readme.read()

setup(
    name="ssh_certificate_parser",
    version="1.3.3",
    author="James Brown",
    author_email="jbrown@easypost.com",
    url="https://github.com/easypost/ssh_certificate_parser",
    license="ISC",
    packages=find_packages(exclude=['tests']),
    description="Python library for interacting with OpenSSH Certificates",
    long_description=_LONG_DESCRIPTION,
    long_description_content_type='text/markdown',
    install_requires=[
        'attrs>=16',
    ],
    project_urls={
        'Docs': 'https://ssh-certificate-parser.readthedocs.io/',
        'Tracker': 'https://github.com/EasyPost/ssh_certificate_parser/issues',
        'Source': 'https://github.com/EasyPost/ssh_certificate_parser',
    },
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Environment :: Console",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Operating System :: POSIX",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: ISC License (ISCL)",
    ]
)
| 1,278 | 403 |
'''
This function gives you the possibility to
replace the video audio.
'''
import os
def replace_audio(video_file_path, audio_file_path):
    """
    Mux the audio of `audio_file_path` onto the video stream of
    `video_file_path`, writing the result to assets/videos/<name>.mp4.

    ffmpeg copies the video stream untouched, re-encodes audio to AAC 256k,
    and truncates to the shorter of the two inputs (-shortest).
    """
    import subprocess
    # Derive the output name from the input basename; the caller passes
    # Windows-style paths, hence the backslash split.
    old_filename = video_file_path.rsplit('\\', 1)[-1]
    # splitext only swaps the real extension; the old str.replace could
    # corrupt names where the extension text appeared earlier in the name.
    new_filename = os.path.splitext(old_filename)[0] + '.mp4'
    # Argument-list form avoids shell interpretation of spaces and
    # metacharacters in the paths (os.system built a raw shell string).
    subprocess.run([
        'ffmpeg', '-i', video_file_path, '-i', audio_file_path,
        '-map', '0:0', '-map', '1:0',
        '-c:v', 'copy', '-c:a', 'aac', '-b:a', '256k', '-shortest',
        f'assets/videos/{new_filename}',
    ])
| 472 | 183 |
from pylab import *
# 50x50 evaluation grid over [-1, 1] x [-1, 1].
X = linspace(-1, 1, 50)
XX, YY = meshgrid(X, X)
def calcf(XX, YY, mu1):
    """
    Evaluate the bilinear expansion f(x, y) on the grid (XX, YY) for the
    parameter mu1. Works elementwise on arrays or on scalars.
    """
    f1 = 0.5
    f2 = 1/(2*sqrt(3)*mu1)
    f3 = 1/(2*sqrt(3)*mu1)
    # NOTE(review): the original also computed f4 = 1/(6*mu1**2) but never
    # used it -- the cross term below carries the hard-coded coefficient 3/2.
    # The dead assignment is removed; confirm that 3*XX*YY/2 (rather than an
    # f4-weighted term) is the intended expansion.
    return f1*0.5 + f2*sqrt(3)*XX/2 + f3*sqrt(3)*YY/2 + 3*XX*YY/2
mu1 = 3.0/5.0
f1 = calcf(XX, YY, mu1)
# Transpose so the first grid axis maps to the horizontal plot axis.
pcolormesh(XX, YY, transpose(f1))
axis('image')  # equal aspect ratio
colorbar()
print("Max: %g. Min = %g" % (f1.max(), f1.min()))
show()
| 405 | 232 |
"""Module for the main CfnCustomResourcesBackedByStepFunctions Stack."""
# Standard library imports
import time
# Related third party imports
# -
# Local application/library specific imports
from aws_cdk import (
core as cdk,
aws_lambda as lambda_,
aws_stepfunctions as sfn,
aws_stepfunctions_tasks as sfn_tasks,
)
class CfnCustomResourcesBackedByStepFunctionsStack(cdk.Stack):
    """The CfnCustomResourcesBackedByStepFunctions Stack.

    Demonstrates a CloudFormation custom resource whose work is performed by
    a Step Functions state machine: a handler Lambda starts the execution,
    and the state machine's final step posts the success/failure callback
    back to CloudFormation.
    """
    def __init__(
        self,
        scope: cdk.Construct,
        construct_id: str,
        **kwargs,
    ) -> None:
        """Construct a new CfnCustomResourcesBackedByStepFunctionsStack."""
        super().__init__(scope, construct_id, **kwargs)
        # Define the Lambda functions for the state machine.
        # This one deliberately fails half the time to exercise both paths.
        fail_50_percent_lambda = lambda_.Function(
            scope=self,
            id="Fail50PercentOfUpdates",
            code=lambda_.Code.from_asset("lambda/functions/fail_50_percent_of_updates"),
            handler="index.lambda_handler",
            runtime=lambda_.Runtime.PYTHON_3_9,
        )
        # Layer bundling the `requests` package for the callback Lambda.
        requests_layer = lambda_.LayerVersion(
            scope=self,
            id="RequestsLayer",
            code=lambda_.Code.from_asset("lambda/layers/requests_layer/python.zip"),
        )
        # Posts the result back to CloudFormation's pre-signed ResponseURL.
        update_cfn_lambda = lambda_.Function(
            scope=self,
            id="UpdateCfnLambda",
            code=lambda_.Code.from_asset("lambda/functions/update_cfn_custom_resource"),
            handler="index.lambda_handler",
            runtime=lambda_.Runtime.PYTHON_3_9,
            layers=[requests_layer],
        )
        # The State Machine looks like this:
        # Start
        #   |
        #   V
        #
        # Lambda (fails 50% of the time)
        #
        #   |        |
        # success \ / catch
        #   V
        #
        # Lambda (update CFN)
        fail_50_percent_step = sfn_tasks.LambdaInvoke(
            scope=self,
            id="Lambda (Fail 50%)",
            lambda_function=fail_50_percent_lambda,
            # Let real failures flow to the catch handler instead of retrying.
            retry_on_service_exceptions=False,
        )
        update_cfn_step = sfn_tasks.LambdaInvoke(
            scope=self,
            id="Update CloudFormation",
            lambda_function=update_cfn_lambda,
            # We pass both the original execution input AND the lambda execution
            # results to the Update CloudFormation Lambda. The function will use
            # the Lambda execution results to determine success or failure, and will
            # use the original Step Functions Execution Input to fetch the CloudFormation
            # callback parameters (ResponseURL, StackId, RequestId and LogicalResourceId).
            payload=sfn.TaskInput.from_object(
                {
                    "ExecutionInput": sfn.JsonPath.string_at("$$.Execution.Input"),
                    "LambdaResults.$": "$",
                }
            ),
        )
        # Make sure failures are also handled by the update_cfn_step
        fail_50_percent_step.add_catch(handler=update_cfn_step, errors=["States.ALL"])
        # Create the state machine.
        state_machine = sfn.StateMachine(
            self,
            "StateMachine",
            definition=fail_50_percent_step.next(update_cfn_step),
            timeout=cdk.Duration.minutes(1),
        )
        # The Lambda Function backing the custom resource: it only kicks off
        # the state machine execution and returns immediately.
        custom_resource_handler_function = lambda_.Function(
            scope=self,
            id="CustomResourceHandler",
            code=lambda_.Code.from_asset("lambda/functions/custom_resource_handler"),
            handler="index.lambda_handler",
            runtime=lambda_.Runtime.PYTHON_3_9,
            environment={"STATE_MACHINE_ARN": state_machine.state_machine_arn},
        )
        state_machine.grant_start_execution(custom_resource_handler_function)
        # The CFN Custom Resource
        cdk.CustomResource(
            scope=self,
            id="CustomResource",
            service_token=custom_resource_handler_function.function_arn,
            # Passing the time as a parameter will trigger the custom
            # resource with every deployment.
            properties={"ExecutionTime": str(time.time())},
        )
| 4,312 | 1,145 |
import os
import sys
import django
from channels.routing import get_default_application
# from django.core.asgi import get_asgi_application
# os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sonsuz.config.settings.local')
# application = get_asgi_application()
# Add the application directory to the module search path so `config` resolves.
app_path = os.path.abspath(os.path.join(
    os.path.dirname(os.path.abspath(__file__)), os.pardir))
sys.path.append(os.path.join(app_path, 'sonsuz'))  # ../mydjango/mydjango
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
django.setup()
# Channels (ASGI) application entry point used by the ASGI server.
application = get_default_application()
| 590 | 219 |
import os, tempfile
from flask.ext.dotenv import DotEnv
basedir = os.path.abspath(os.path.dirname(__file__))

# NOTE(review): this file is an EJS generator template -- the <% %> tags are
# substituted before the file becomes valid Python. `flask.ext.dotenv` is the
# long-deprecated extension import style; verify the target Flask version.
class Config(object):
    """Base configuration shared by all environments."""
    DEBUG = False
    TESTING = False
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    @classmethod
    def init_app(self, app):
        # Load .env variables into the app config.
        env = DotEnv()
        env.init_app(app, verbose_mode=True)
<% if (databaseMapper === 'sqlalchemy') { -%>
        # Map <APP>_<ENV>_DATABASE_URI to SQLALCHEMY_DATABASE_URI, except for
        # tests which use a throwaway sqlite file instead.
        if self.__name__ != 'TestingConfig':
            prefix = self.__name__.replace('Config', '').upper()
            env.alias(maps={
                '<%= appEnvVar %>_' + prefix + '_DATABASE_URI': 'SQLALCHEMY_DATABASE_URI'
            })
<% } -%>
class ProductionConfig(Config):
    """Production: inherits the safe defaults unchanged."""
    pass
class DevelopmentConfig(Config):
    """Development: enables the interactive debugger."""
    DEBUG = True
class TestingConfig(Config):
    """Testing: throwaway sqlite database, SQL echo on."""
    TESTING = True
<% if (databaseMapper === 'sqlalchemy') { -%>
    # Temp file lives for the process lifetime; deleted automatically on close.
    db_file = tempfile.NamedTemporaryFile()
    SQLALCHEMY_DATABASE_URI = 'sqlite:///' + db_file.name
    SQLALCHEMY_ECHO = True
<% } -%>
# Name -> config class lookup used by the app factory.
config = {
    'production': ProductionConfig,
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'default': ProductionConfig,
}
| 1,113 | 379 |
import random
class Item():
    """One inventory item: a display name plus its byte id in the save data.

    Concrete items are attached to the class as attributes after the class
    body (Item.POTION = Item("Potion", 0x12), ...); buildReverse() must then
    be called once to enable fromBytes() and rnd().
    """

    def __init__(self, name, hex):
        self.name = name
        self.hex = hex

    def __str__(self):
        return self.name

    @staticmethod
    def fromBytes(hex):
        """Look up an item by its byte id (requires a prior buildReverse())."""
        return Item.reverse[hex]

    @staticmethod
    def buildReverse():
        """Scan the class for item attributes and build the byte -> Item map."""
        Item.members = [
            attr for attr in dir(Item)
            if not callable(getattr(Item, attr)) and not attr.startswith("__")
        ]
        Item.reverse = {getattr(Item, m).hex: getattr(Item, m) for m in Item.members}

    @staticmethod
    def rnd():
        """Return a uniformly random registered item."""
        return getattr(Item, random.choice(Item.members))
# Registry of all Gen-2 items, keyed by their save-data byte id. Gaps in the
# hex sequence correspond to unused/glitch ids. Names marked * are event-only.
Item.NOTHING = Item('Nothing', 0x00)
Item.MASTER_BALL = Item("Master Ball", 0x01)
Item.ULTRA_BALL = Item("Ultra Ball", 0x02)
Item.BRIGHT_POWDER = Item("BrightPowder", 0x03)
Item.GREAT_BALL = Item("Great Ball", 0x04)
Item.POKE_BALL = Item("Poke Ball", 0x05)
Item.BICYCLE = Item("Bicycle", 0x06)
Item.MOON_STONE = Item("Moon Stone", 0x08)
Item.ANTIDOTE = Item("Antidote", 0x09)
Item.BURN_HEAL = Item("Burn Heal", 0x0A)
Item.ICE_HEAL = Item("Ice Heal", 0x0B)
Item.AWAKENING = Item("Awakening", 0x0C)
Item.PARLYZ_HEAL = Item("Parlyz Heal", 0x0D)
Item.FULL_RESTORE = Item("Full Restore", 0x0E)
Item.MAX_POTION = Item("Max Potion", 0x0F)
Item.HYPER_POTION = Item("Hyper Potion", 0x10)
Item.SUPER_POTION = Item("Super Potion", 0x11)
Item.POTION = Item("Potion", 0x12)
Item.ESCAPE_ROPE = Item("Escape Rope", 0x13)
Item.REPEL = Item("Repel", 0x14)
Item.MAX_ELIXER = Item("Max Elixer", 0x15)
Item.FIRE_STONE = Item("Fire Stone", 0x16)
Item.THUNDER_STONE = Item("Thunder Stone", 0x17)
Item.WATER_STONE = Item("Water Stone", 0x18)
Item.HP_UP = Item("HP Up", 0x1A)
Item.PROTEIN = Item("Protein", 0x1B)
Item.IRON = Item("Iron", 0x1C)
Item.CARBOS = Item("Carbos", 0x1D)
Item.LUCKY_PUNCH = Item("Lucky Punch", 0x1E)
Item.CALCIUM = Item("Calcium", 0x1F)
Item.RARE_CANDY = Item("Rare Candy", 0x20)
Item.X_ACCURACY = Item("X Accuracy", 0x21)
Item.LEAF_STONE = Item("Leaf Stone", 0x22)
Item.METAL_POWDER = Item("Metal Powder", 0x23)
Item.NUGGET = Item("Nugget", 0x24)
Item.POKE_DOLL = Item("Poke Doll", 0x25)
Item.FULL_HEAL = Item("Full Heal", 0x26)
Item.REVIVE = Item("Revive", 0x27)
Item.MAX_REVIVE = Item("Max Revive", 0x28)
Item.GUARD_SPEC = Item("Guard Spec.", 0x29)
Item.SUPER_REPEL = Item("Super Repel", 0x2A)
Item.MAX_REPEL = Item("Max Repel", 0x2B)
Item.DIRE_HIT = Item("Dire Hit", 0x2C)
Item.FRESH_WATER = Item("Fresh Water", 0x2E)
Item.SODA_POP = Item("Soda Pop", 0x2F)
Item.LEMONADE = Item("Lemonade", 0x30)
Item.X_ATTACK = Item("X Attack", 0x31)
Item.X_DEFEND = Item("X Defend", 0x33)
Item.X_SPEED = Item("X Speed", 0x34)
Item.X_SPECIAL = Item("X Special", 0x35)
Item.COIN_CASE = Item("Coin Case", 0x36)
Item.ITEMFINDER = Item("Itemfinder", 0x37)
Item.EXP_SHARE = Item("Exp Share", 0x39)
Item.OLD_ROD = Item("Old Rod", 0x3A)
Item.GOOD_ROD = Item("Good Rod", 0x3B)
Item.SILVER_LEAF = Item("Silver Leaf", 0x3C)
Item.SUPER_ROD = Item("Super Rod", 0x3D)
Item.PP_UP = Item("PP Up", 0x3E)
Item.ETHER = Item("Ether", 0x3F)
Item.MAX_ETHER = Item("Max Ether", 0x40)
Item.ELIXER = Item("Elixer", 0x41)
Item.RED_SCALE = Item("Red Scale", 0x42)
Item.SECRET_POTION = Item("SecretPotion", 0x43)
Item.SS_TICKET = Item("S.S. Ticket", 0x44)
Item.MYSTERY_EGG = Item("Mystery Egg", 0x45)
Item.CLEAR_BELL = Item("Clear Bell*", 0x46)
Item.SILVER_WING = Item("Silver Wing", 0x47)
Item.MOOMOO_MILK = Item("Moomoo Milk", 0x48)
Item.QUICK_CLAW = Item("Quick Claw", 0x49)
Item.PSN_CURE_BERRY = Item("PSNCureBerry", 0x4A)
Item.GOLD_LEAF = Item("Gold Leaf", 0x4B)
Item.SOFT_SAND = Item("Soft Sand", 0x4C)
Item.SHARP_BEAK = Item("Sharp Beak", 0x4D)
Item.PRZ_CURE_BERRY = Item("PRZCureBerry", 0x4E)
Item.BURNT_BERRY = Item("Burnt Berry", 0x4F)
Item.ICE_BERRY = Item("Ice Berry", 0x50)
Item.POISON_BARB = Item("Poison Barb", 0x51)
Item.KINGS_ROCK = Item("King's Rock", 0x52)
Item.BITTER_BERRY = Item("Bitter Berry", 0x53)
Item.MINT_BERRY = Item("Mint Berry", 0x54)
Item.RED_APRICORN = Item("Red Apricorn", 0x55)
Item.TINY_MUSHROOM = Item("TinyMushroom", 0x56)
Item.BIG_MUSHROOM = Item("Big Mushroom", 0x57)
Item.SILVER_POWDER = Item("SilverPowder", 0x58)
Item.BLU_APRICORN = Item("Blu Apricorn", 0x59)
Item.AMULET_COIN = Item("Amulet Coin", 0x5B)
Item.YLW_APRICORN = Item("Ylw Apricorn", 0x5C)
Item.GRN_APRICORN = Item("Grn Apricorn", 0x5D)
Item.CLEANSE_TAG = Item("Cleanse Tag", 0x5E)
Item.MYSTIC_WATER = Item("Mystic Water", 0x5F)
Item.TWISTED_SPOON = Item("TwistedSpoon", 0x60)
Item.WHT_APRICORN = Item("Wht Apricorn", 0x61)
Item.BLACK_BELT = Item("Black Belt", 0x62)
Item.BLK_APRICORN = Item("Blk Apricorn", 0x63)
Item.PNK_APRICORN = Item("Pnk Apricorn", 0x65)
Item.BLACK_GLASSES = Item("BlackGlasses", 0x66)
Item.SLOWPOKE_TAIL = Item("SlowpokeTail", 0x67)
Item.PINK_BOW = Item("Pink Bow", 0x68)
Item.STICK = Item("Stick", 0x69)
Item.SMOKE_BALL = Item("Smoke Ball", 0x6A)
Item.NEVER_MELT_ICE = Item("NeverMeltIce", 0x6B)
Item.MAGNET = Item("Magnet", 0x6C)
Item.MIRACLE_BERRY = Item("MiracleBerry", 0x6D)
Item.PEARL = Item("Pearl", 0x6E)
Item.BIG_PEARL = Item("Big Pearl", 0x6F)
Item.EVERSTONE = Item("Everstone", 0x70)
Item.SPELL_TAG = Item("Spell Tag", 0x71)
Item.RAGE_CANDY_BAR = Item("RageCandyBar", 0x72)
Item.GS_BALL = Item("GS Ball*", 0x73)
Item.BLUE_CARD = Item("Blue Card*", 0x74)
Item.MIRACLE_SEED = Item("Miracle Seed", 0x75)
Item.THICK_CLUB = Item("Thick Club", 0x76)
Item.FOCUS_BAND = Item("Focus Band", 0x77)
Item.ENERGY_POWDER = Item("EnergyPowder", 0x79)
Item.ENERGY_ROOT = Item("Energy Root", 0x7A)
Item.HEAL_POWDER = Item("Heal Powder", 0x7B)
Item.REVIVAL_HERB = Item("Revival Herb", 0x7C)
Item.HARD_STONE = Item("Hard Stone", 0x7D)
Item.LUCKY_EGG = Item("Lucky Egg", 0x7E)
Item.CARD_KEY = Item("Card Key", 0x7F)
Item.MACHINE_PART = Item("Machine Part", 0x80)
Item.EGG_TICKET = Item("Egg Ticket*", 0x81)
Item.LOST_ITEM = Item("Lost Item", 0x82)
Item.STARDUST = Item("Stardust", 0x83)
Item.STAR_PIECE = Item("Star Piece", 0x84)
Item.BASEMENT_KEY = Item("Basement Key", 0x85)
Item.PASS = Item("Pass", 0x86)
Item.CHARCOAL = Item("Charcoal", 0x8A)
Item.BERRY_JUICE = Item("Berry Juice", 0x8B)
Item.SCOPE_LENS = Item("Scope Lens", 0x8C)
Item.METAL_COAT = Item("Metal Coat", 0x8F)
Item.DRAGON_FANG = Item("Dragon Fang", 0x90)
Item.LEFTOVERS = Item("Leftovers", 0x92)
Item.MYSTERY_BERRY = Item("MysteryBerry", 0x96)
Item.DRAGON_SCALE = Item("Dragon Scale", 0x97)
Item.BERSERK_GENE = Item("Berserk Gene", 0x98)
Item.SACRED_ASH = Item("Sacred Ash", 0x9C)
Item.HEAVY_BALL = Item("Heavy Ball", 0x9D)
Item.FLOWER_MAIL = Item("Flower Mail", 0x9E)
Item.LEVEL_BALL = Item("Level Ball", 0x9F)
Item.LURE_BALL = Item("Lure Ball", 0xA0)
Item.FAST_BALL = Item("Fast Ball", 0xA1)
Item.LIGHT_BALL = Item("Light Ball", 0xA3)
Item.FRIEND_BALL = Item("Friend Ball", 0xA4)
Item.MOON_BALL = Item("Moon Ball", 0xA5)
Item.LOVE_BALL = Item("Love Ball", 0xA6)
Item.NORMAL_BOX = Item("Normal Box", 0xA7)
Item.GORGEOUS_BOX = Item("Gorgeous Box", 0xA8)
Item.SUN_STONE = Item("Sun Stone", 0xA9)
Item.POLKADOT_BOW = Item("Polkadot Bow", 0xAA)
Item.UP_GRADE = Item("Up-Grade", 0xAC)
Item.BERRY = Item("Berry", 0xAD)
Item.GOLD_BERRY = Item("Gold Berry", 0xAE)
Item.SQUIRT_BOTTLE = Item("SquirtBottle", 0xAF)
Item.PARK_BALL = Item("Park Ball", 0xB1)
Item.RAINBOW_WING = Item("Rainbow Wing", 0xB2)
Item.BRICK_PIECE = Item("Brick Piece", 0xB4)
Item.SURF_MAIL = Item("Surf Mail", 0xB5)
Item.LITEBLUEMAIL = Item("Litebluemail", 0xB6)
Item.PORTRAITMAIL = Item("Portraitmail", 0xB7)
Item.LOVELY_MAIL = Item("Lovely Mail", 0xB8)
Item.EON_MAIL = Item("Eon Mail", 0xB9)
Item.MORPH_MAIL = Item("Morph Mail", 0xBA)
Item.BLUESKY_MAIL = Item("Bluesky Mail", 0xBB)
Item.MUSIC_MAIL = Item("Music Mail", 0xBC)
Item.MIRAGE_MAIL = Item("Mirage Mail", 0xBD)
# Technical and Hidden Machines occupy the top of the id space.
Item.TM01 = Item("TM01", 0xBF)
Item.TM02 = Item("TM02", 0xC0)
Item.TM03 = Item("TM03", 0xC1)
Item.TM04 = Item("TM04", 0xC2)
Item.TM05 = Item("TM05", 0xC4)
Item.TM06 = Item("TM06", 0xC5)
Item.TM07 = Item("TM07", 0xC6)
Item.TM08 = Item("TM08", 0xC7)
Item.TM09 = Item("TM09", 0xC8)
Item.TM10 = Item("TM10", 0xC9)
Item.TM11 = Item("TM11", 0xCA)
Item.TM12 = Item("TM12", 0xCB)
Item.TM13 = Item("TM13", 0xCC)
Item.TM14 = Item("TM14", 0xCD)
Item.TM15 = Item("TM15", 0xCE)
Item.TM16 = Item("TM16", 0xCF)
Item.TM17 = Item("TM17", 0xD0)
Item.TM18 = Item("TM18", 0xD1)
Item.TM19 = Item("TM19", 0xD2)
Item.TM20 = Item("TM20", 0xD3)
Item.TM21 = Item("TM21", 0xD4)
Item.TM22 = Item("TM22", 0xD5)
Item.TM23 = Item("TM23", 0xD6)
Item.TM24 = Item("TM24", 0xD7)
Item.TM25 = Item("TM25", 0xD8)
Item.TM26 = Item("TM26", 0xD9)
Item.TM27 = Item("TM27", 0xDA)
Item.TM28 = Item("TM28", 0xDB)
Item.TM29 = Item("TM29", 0xDD)
Item.TM30 = Item("TM30", 0xDE)
Item.TM31 = Item("TM31", 0xDF)
Item.TM32 = Item("TM32", 0xE0)
Item.TM33 = Item("TM33", 0xE1)
Item.TM34 = Item("TM34", 0xE2)
Item.TM35 = Item("TM35", 0xE3)
Item.TM36 = Item("TM36", 0xE4)
Item.TM37 = Item("TM37", 0xE5)
Item.TM38 = Item("TM38", 0xE6)
Item.TM39 = Item("TM39", 0xE7)
Item.TM40 = Item("TM40", 0xE8)
Item.TM41 = Item("TM41", 0xE9)
Item.TM42 = Item("TM42", 0xEA)
Item.TM43 = Item("TM43", 0xEB)
Item.TM44 = Item("TM44", 0xEC)
Item.TM45 = Item("TM45", 0xED)
Item.TM46 = Item("TM46", 0xEE)
Item.TM47 = Item("TM47", 0xEF)
Item.TM48 = Item("TM48", 0xF0)
Item.TM49 = Item("TM49", 0xF1)
Item.TM50 = Item("TM50", 0xF2)
Item.HM01 = Item("HM01", 0xF3)
Item.HM02 = Item("HM02", 0xF4)
Item.HM03 = Item("HM03", 0xF5)
Item.HM04 = Item("HM04", 0xF6)
Item.HM05 = Item("HM05", 0xF7)
Item.HM06 = Item("HM06", 0xF8)
Item.HM07 = Item("HM07", 0xF9)
Item.HM08 = Item("HM08", 0xFA)
Item.HM09 = Item("HM09", 0xFB)
Item.HM10 = Item("HM10", 0xFC)
Item.HM11 = Item("HM11", 0xFD)
Item.HM12 = Item("HM12", 0xFE)
Item.UNKNOWN = Item("Unknown", 0xFF)
# Populate Item.members / Item.reverse now that every item is registered.
Item.buildReverse()
| 9,726 | 5,083 |
#!/usr/bin/env python
import argparse
import pickle
import h5py
from keras import optimizers
from keras.callbacks import ModelCheckpoint
from keras.layers import Activation, add, BatchNormalization, Conv2D, Dense, Dropout, Flatten, Input, ZeroPadding2D
from keras.models import load_model, Model
from keras.regularizers import l2
from keras.utils import plot_model
import numpy as np
def positive_int(value):
    """argparse type callback: parse `value` as a strictly positive int.

    Raises argparse.ArgumentTypeError (which argparse turns into a usage
    error) for non-integers and for values <= 0.
    """
    try:
        parsed = int(value)
        if not parsed > 0:
            raise ValueError()
        return parsed
    except ValueError:
        # User-facing message; fixed the original's "an positive" grammar.
        raise argparse.ArgumentTypeError('value must be a positive integer')
def parse_cli():
    """Define and evaluate the command-line interface."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-e', '--epochs',
                        nargs='?',
                        type=positive_int,
                        action='store',
                        default=10,
                        help='number of training epochs')
    parser.add_argument(metavar='TRAIN',
                        type=str,
                        dest='train',
                        help='path to the HDF5 file with the training data')
    parser.add_argument(metavar='MODEL',
                        type=str,
                        dest='model',
                        help='path where to store the model')
    return parser.parse_args()
def load_data(path):
    """Read the training matrices (data, labels) from the HDF5 file at `path`."""
    with h5py.File(path, 'r') as handle:
        data = np.array(handle['diagonalset'])
        labels = np.array(handle['vectorset'])
    return data, labels
def preprocess(data, labels):
    """Insert a trailing channels axis into `data`; move the sample axis of
    `labels` to the end."""
    with_channels = np.expand_dims(data, axis=3)
    labels_last = np.moveaxis(labels, 0, -1)
    return with_channels, labels_last
def build_model(input_shape):
    """Assemble and compile the corner-prediction network.

    input_shape: (height, width, channels) of one sample; the final Dense
    layer emits one sigmoid unit per input column (input_shape[1]).
    """
    input_img = Input(shape=input_shape)
    # first bottleneck unit: BN -> selu -> conv, twice, with a residual add
    bn_1 = BatchNormalization()(input_img)
    activation_1 = Activation('selu')(bn_1)
    conv_1 = Conv2D(32, kernel_size=(5, 5,), padding='same', kernel_regularizer=l2(0.02))(activation_1)
    bn_2 = BatchNormalization()(conv_1)
    activation_2 = Activation('selu')(bn_2)
    conv_2 = Conv2D(128, kernel_size=(3, 3,), padding='same', kernel_regularizer=l2(0.02))(activation_2)
    # residual connection back to the raw input
    merged = add([input_img, conv_2])
    # corner detection: tall valid conv over zero-padded columns
    bn_3 = BatchNormalization()(merged)
    padding = ZeroPadding2D(padding=(0, 3))(bn_3)
    conv_3 = Conv2D( 32, kernel_size=(21, 7,), padding='valid', activation='tanh')(padding)
    conv_4 = Conv2D(128, kernel_size=( 1, 3,), padding='same', activation='tanh')(conv_3)
    # fully-connected predictor
    flat = Flatten()(conv_4)
    classify = Dense(512, activation='sigmoid')(flat)
    dropout = Dropout(0.1)(classify)
    result = Dense(input_shape[1], activation='sigmoid')(dropout)
    model = Model(inputs=input_img, outputs=result)
    # per-unit binary targets -> binary cross-entropy
    model.compile(optimizer=optimizers.Nadam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])
    return model
def train_network(model, data, labels, model_file, epochs):
    """Fit `model`, checkpointing the best weights to `model_file` and
    pickling the training history next to it."""
    # Architecture diagram for the record.
    plot_model(model, to_file='{}.png'.format(model_file), show_shapes=True)
    best_only = ModelCheckpoint(model_file, monitor='val_loss', verbose=True,
                                save_best_only=True, save_weights_only=False,
                                mode='auto')
    history = model.fit(data, labels, epochs=epochs, batch_size=8,
                        validation_split=1.0/5.0,
                        class_weight={0: 0.1, 1: 0.9},
                        callbacks=[best_only])
    with open('{}.history'.format(model_file), 'wb') as handle:
        pickle.dump(history.history, handle)
if __name__ == '__main__':
    # Parse CLI, load + reshape the HDF5 training set, then train.
    arguments = parse_cli()
    data, labels = preprocess(*load_data(arguments.train))
    model = build_model(input_shape=data.shape[1:])
    train_network(model, data, labels, arguments.model, arguments.epochs)
| 3,589 | 1,239 |
# Inherit every setting from the ape config; only the object changes.
_base_ = ["./FlowNet512_1.5AugCosyAAEGray_Flat_Pbr_01_ape.py"]
OUTPUT_DIR = "output/deepim/lmPbrSO/FlowNet512_1.5AugCosyAAEGray_Flat_lmPbr_SO/benchvise"
# Train on PBR-synthetic benchvise, evaluate on real LM benchvise images.
DATASETS = dict(TRAIN=("lm_pbr_benchvise_train",), TEST=("lm_real_benchvise_test",))
# bbnc7
# objects benchvise Avg(1)
# ad_2 9.12 9.12
# ad_5 44.52 44.52
# ad_10 90.69 90.69
# rete_2 45.97 45.97
# rete_5 99.71 99.71
# rete_10 100.00 100.00
# re_2 63.43 63.43
# re_5 99.71 99.71
# re_10 100.00 100.00
# te_2 77.40 77.40
# te_5 100.00 100.00
# te_10 100.00 100.00
# proj_2 75.75 75.75
# proj_5 99.22 99.22
# proj_10 100.00 100.00
# re 1.80 1.80
# te 0.01 0.01
# init by mlBCE
# objects benchvise Avg(1)
# ad_2 9.21 9.21
# ad_5 44.52 44.52
# ad_10 90.79 90.79
# rete_2 46.46 46.46
# rete_5 99.52 99.52
# rete_10 100.00 100.00
# re_2 64.31 64.31
# re_5 99.52 99.52
# re_10 100.00 100.00
# te_2 77.50 77.50
# te_5 100.00 100.00
# te_10 100.00 100.00
# proj_2 75.85 75.85
# proj_5 99.22 99.22
# proj_10 100.00 100.00
# re 1.80 1.80
# te 0.01 0.01
| 1,281 | 777 |
import sys
import os
from pathlib import Path
import json
import pickle
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import selenium.webdriver.support.ui as ui
class Browser:
    """Thin wrapper around a headless Chrome webdriver with explicit waits
    and cookie persistence helpers."""
    def __init__(self):
        chrome_options = Options()
        chrome_options.add_argument("--headless")
        chrome_options.add_argument('--window-size=1920,1080')
        # Bundled chromedriver binaries per platform.
        # NOTE(review): passing Options via desired_capabilities is a
        # deprecated selenium pattern -- confirm against the pinned version.
        if sys.platform == 'linux':
            self.driver = webdriver.Chrome(os.path.abspath('./crawler/driver/linux/chromedriver'), desired_capabilities = chrome_options.to_capabilities())
        elif sys.platform == 'win32':
            self.driver = webdriver.Chrome(os.path.abspath('./crawler/driver/win/chromedriver.exe'), desired_capabilities = chrome_options.to_capabilities())
        elif sys.platform == 'darwin':
            self.driver = webdriver.Chrome(os.path.abspath('./crawler/driver/mac/chromedriver'), desired_capabilities = chrome_options.to_capabilities())
        # All find_* helpers below wait up to 30s for their element.
        self.wait = ui.WebDriverWait(self.driver,30)
    def go(self, url):
        """Navigate to `url`."""
        self.driver.get(url)
    def load_cookie_from(self, cookie_file):
        """Restore cookies previously pickled by dump_cookie(), if any."""
        if(Path(cookie_file).exists()):
            for cookie in pickle.load(open(cookie_file, "rb")):
                #TODO it's a workaround: skip the SPC_CDS cookie, which the
                # driver rejects when re-added
                if 'SPC_CDS' in json.dumps(cookie):
                    continue
                self.driver.add_cookie(cookie)
            #print('cookie loaded')
    def wait_for(self, method):
        """Block until `method(driver)` returns truthy (30s timeout)."""
        self.wait.until(method)
    def find_by_css(self, path):
        self.wait_for(lambda driver: driver.find_element_by_css_selector(path))
        return self.driver.find_element_by_css_selector(path)
    def find_by_xpath(self, path):
        self.wait_for(lambda driver: driver.find_element_by_xpath(path))
        return self.driver.find_element_by_xpath(path)
    def send_by_css(self, path, *keys):
        """Type `keys` into the element matched by the CSS selector."""
        el = self.find_by_css(path)
        el.send_keys(*keys)
    def send_by_xpath(self, path, *keys):
        """Type `keys` into the element matched by the XPath expression."""
        el = self.find_by_xpath(path)
        el.send_keys(*keys)
    def click_by_css(self, path):
        el = self.find_by_css(path)
        el.click()
    def click_by_xpath(self, path):
        el = self.find_by_xpath(path)
        el.click()
    def get_cookies(self):
        return self.driver.get_cookies()
    def dump_cookie(self, cookie_file):
        """Pickle the current session cookies to `cookie_file`."""
        pickle.dump( self.driver.get_cookies() , open(cookie_file,"wb"))
    def quit(self):
        """Shut the browser down and release the driver process."""
        self.driver.quit()
# flake8: noqa
from selfdrive.car import dbc_dict
from cereal import car
Ecu = car.CarParams.Ecu
class CarControllerParams:
    """Static tuning constants for the Subaru car controller: steering torque
    limits/rates, stop-and-go (SnG) triggers, and non-EPB workarounds."""
    STEER_MAX = 2047              # max_steer 4095
    STEER_STEP = 2                # how often we update the steer cmd
    STEER_DELTA_UP = 50           # torque increase per refresh, 0.8s to max
    STEER_DELTA_DOWN = 70         # torque decrease per refresh
    STEER_DRIVER_ALLOWANCE = 60   # allowed driver torque before start limiting
    STEER_DRIVER_MULTIPLIER = 10  # weight driver torque heavily
    STEER_DRIVER_FACTOR = 1       # from dbc
    #SUBARU STOP AND GO - Global
    SNG_DISTANCE_LIMIT = 120      # distance trigger value limit for stop and go (0-255)
    SNG_DISTANCE_DEADBAND = 10    # deadband for SNG lead car refence distance to cater for Close_Distance sensor noises
    THROTTLE_TAP_LIMIT = 5        # send a maximum of 5 throttle tap messages (trial and error)
    THROTTLE_TAP_LEVEL = 5        # send a throttle message with value of 5 (trial and error)
    SNG_DISTANCE_THRESHOLD = 150
    #SUBARU STOP AND GO - Pre-Global
    SNG_DISTANCE_THRESHOLD_PREGLOBAL = 3  #SnG trigger when lead car distance > 3m
    SNG_DISTANCE_LIMIT_PREGLOBAL = 4      #SnG only trigger if close distance is less than 4
    #SUBARU NON-EPB
    NON_EPB_STANDSTILL_THRESHOLD = 1000000000  #1 second (nanoseconds)
    NON_EPB_FAKE_SPEED = 3                     #km/h
class CAR:
    """Canonical platform-name strings used as keys into the tables below."""
    ASCENT = "SUBARU ASCENT LIMITED 2019"
    IMPREZA = "SUBARU IMPREZA LIMITED 2019"
    FORESTER = "SUBARU FORESTER 2019"
    FORESTER_PREGLOBAL = "SUBARU FORESTER 2017 - 2018"
    LEGACY_PREGLOBAL = "SUBARU LEGACY 2015 - 2018"
    OUTBACK_PREGLOBAL = "SUBARU OUTBACK 2015 - 2017"
    OUTBACK_PREGLOBAL_2018 = "SUBARU OUTBACK 2018 - 2019"
# Per-model CAN fingerprints: message ID -> payload length in bytes.
FINGERPRINTS = {
    CAR.OUTBACK_PREGLOBAL_2018: [{
        # OUTBACK 2.0D 2018 ADM
        2: 8, 208: 8, 209: 4, 210: 8, 211: 7, 212: 8, 316: 8, 320: 8, 321: 8, 324: 8, 328: 8, 329: 8, 336: 2, 338: 8, 342: 8, 352: 8, 353: 8, 354: 8, 356: 8, 358: 8, 359: 8, 392: 8, 554: 8, 640: 8, 642: 8, 805: 8, 864: 8, 865: 8, 872: 8, 880: 8, 881: 8, 882: 8, 884: 8, 885: 8, 977: 8, 1614: 8, 1632: 8, 1657: 8, 1658: 8, 1672: 8, 1722: 8, 1745: 8, 1786: 5, 1787: 5, 1968: 8, 1976: 8, 2015: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8
    }],
}
# Per-model driver steering-torque threshold used for override detection.
STEER_THRESHOLD = {
    CAR.ASCENT: 80,
    CAR.IMPREZA: 80,
    CAR.FORESTER: 80,
    CAR.FORESTER_PREGLOBAL: 75,
    CAR.LEGACY_PREGLOBAL: 75,
    CAR.OUTBACK_PREGLOBAL: 75,
    CAR.OUTBACK_PREGLOBAL_2018: 75,
}
# DBC (CAN database) file used to encode/decode messages for each model.
DBC = {
    CAR.ASCENT: dbc_dict('subaru_global_2017_generated', None),
    CAR.IMPREZA: dbc_dict('subaru_global_2017_generated', None),
    CAR.FORESTER: dbc_dict('subaru_global_2017_generated', None),
    CAR.FORESTER_PREGLOBAL: dbc_dict('subaru_forester_2017_generated', None),
    CAR.LEGACY_PREGLOBAL: dbc_dict('subaru_outback_2015_generated', None),
    CAR.OUTBACK_PREGLOBAL: dbc_dict('subaru_outback_2015_generated', None),
    CAR.OUTBACK_PREGLOBAL_2018: dbc_dict('subaru_outback_2019_generated', None),
}
# Models on the pre-global (pre-2019) CAN architecture.
PREGLOBAL_CARS = [CAR.FORESTER_PREGLOBAL, CAR.LEGACY_PREGLOBAL, CAR.OUTBACK_PREGLOBAL, CAR.OUTBACK_PREGLOBAL_2018]
| 3,001 | 1,659 |
# Generated by Django 3.1.2 on 2021-02-14 10:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a nullable ``user_name`` CharField to the ``profiles`` model."""
    dependencies = [
        ('profiles', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='profiles',
            name='user_name',
            # null=True so existing rows migrate without a default value.
            field=models.CharField(max_length=255, null=True),
        ),
    ]
| 391 | 135 |
from .base_options import BaseOptions
class TestOptions(BaseOptions):
    """Command-line options for the test phase; extends BaseOptions."""
    def initialize(self, parser):
        """Register test-only arguments and force phase='test'.

        Returns the parser so calls can be chained.
        """
        BaseOptions.initialize(self, parser)
        parser.set_defaults(phase='test')
        parser.add_argument('--only_for_test', type=str, default='...')
        parser.add_argument('--network_pkl', type=str, default='gdrive:networks/stylegan2-ffhq-config-f.pkl')
        # Fix: without type=int a command-line value arrives as str while the
        # default is the int 30; coerce explicitly so both paths agree.
        parser.add_argument('--max_result_snapshots', type=int, default=30, help='max result snapshots')
        return parser
"""
Tools used elsewhere in this package
"""
import collections
import os

import mdtraj as md
import numpy as np
from simtk import openmm as mm
from simtk.openmm import app
from simtk import unit as u
try:
import openmmtools
except ImportError:
HAS_OPENMMTOOLS = False
else:
HAS_OPENMMTOOLS = True
def _traj_from_file_or_traj(file_or_traj):
    """Return an mdtraj Trajectory, loading it from disk when given a filename.

    Parameters:
        file_or_traj: an ``md.Trajectory`` (returned unchanged) or a path to an
            existing trajectory file (loaded with ``md.load``).
    Raises:
        TypeError: if the argument is neither a Trajectory nor an existing file.
    """
    if isinstance(file_or_traj, md.Trajectory):
        traj = file_or_traj
    elif os.path.isfile(file_or_traj):
        traj = md.load(file_or_traj)
    else:
        # Fix: the offending value was passed comma-style (as a second
        # exception argument) and never interpolated into the message.
        raise TypeError("%s is neither a trajectory nor a filename"
                        % file_or_traj)
    return traj
def steps_for_duration(duration, simulation):
    """Convert *duration* into a number of integrator steps.

    Parameters:
        duration: either a simtk ``Quantity`` with time units (divided by the
            simulation's step size) or a plain int (returned unchanged).
        simulation: the OpenMM Simulation whose integrator sets the step size.
    Raises:
        RuntimeError: for any other duration type.
    """
    if isinstance(duration, u.Quantity):
        return int(duration / simulation.integrator.getStepSize())
    elif isinstance(duration, int):
        return duration
    else:
        # Fix: interpolate the value into the message instead of passing it
        # as a second exception argument.
        raise RuntimeError("Unable to treat duration: %s" % duration)
def simulation_write_pdb(simulation, pdb_outfile):
    """Write out the current state of the simulation as a PDB"""
    current_state = simulation.context.getState(getPositions=True)
    with open(pdb_outfile, 'w') as out_handle:
        app.PDBFile.writeFile(simulation.topology,
                              current_state.getPositions(),
                              out_handle)
def simulation_serialize_parts(simulation, basename):
    """Serialize the system and integrator as XML, plus the simulation state,
    to ``<basename>_sys.xml``, ``<basename>_integ.xml`` and
    ``<basename>_state.xml``."""
    for label, component in (('sys', simulation.system),
                             ('integ', simulation.integrator)):
        with open(basename + '_' + label + '.xml', 'w') as handle:
            handle.write(mm.XmlSerializer.serialize(component))
    simulation.saveState(basename + '_state.xml')
def simulation_from_parts(basename, pdb):
    """Rebuild a Simulation from files written by simulation_serialize_parts.

    Topology comes from *pdb*; system, integrator and state come from
    ``<basename>_sys.xml`` / ``_integ.xml`` / ``_state.xml``.
    """
    topology = mm.app.PDBFile(pdb).topology
    system = basename + '_sys.xml'
    integrator = basename + '_integ.xml'
    state = basename + '_state.xml'
    # XML file names are passed directly -- NOTE(review): relies on OpenMM
    # accepting file paths in place of System/Integrator objects; confirm the
    # minimum OpenMM version supports this.
    sim = mm.app.Simulation(topology, system, integrator, state=state)
    if HAS_OPENMMTOOLS:
        # Restore custom-integrator convenience methods lost in XML round-trip.
        integ = sim.integrator
        openmmtools.utils.RestorableOpenMMObject.restore_interface(integ)
    return sim
def simulation_to_mdtraj(simulation):
    """Convert the simulation's current state to a single-frame mdtraj Trajectory.

    Returns:
        md.Trajectory with one frame holding the current positions.
    """
    # _topology_and_positions is defined elsewhere in this package --
    # NOTE(review): presumably returns (openmm Topology, positions Quantity).
    topology, positions = _topology_and_positions(simulation)
    md_topology = md.Topology.from_openmm(topology)
    xyz = np.array([positions.value_in_unit(u.nanometer)])
    # Fix: the converted md_topology was computed but the raw OpenMM topology
    # was passed to md.Trajectory; also removed an unreachable second return
    # and dead commented-out code.
    trajectory = md.Trajectory(xyz, md_topology)  # TODO unitcells
    return trajectory
def residue_type(res):
    """Classify a residue as 'protein', 'nucleic', 'water' or 'other'.

    Checks the residue's boolean flags in that priority order.
    """
    if res.is_protein:
        category = 'protein'
    elif res.is_nucleic:
        category = 'nucleic'
    elif res.is_water:
        category = 'water'
    else:
        category = 'other'
    return category
def topology_describe(topology):
    """Return a newline-terminated summary line per chain, counting residues
    by type (protein/nucleic/water/other)."""
    description = ""
    for chain in topology.chains:
        type_counts = collections.Counter(residue_type(res)
                                          for res in chain.residues)
        parts = [str(count) + " " + kind + " residues"
                 for kind, count in type_counts.items()]
        description += ", ".join(parts) + "\n"
    return description
| 3,071 | 1,003 |
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.facts.collector import BaseFactCollector
class Hardware:
    """Generic base for platform-specific hardware fact gathering.

    Subclasses override ``platform`` and ``populate``; this base collects
    nothing and exists to define the interface.
    """
    platform = 'Generic'

    # FIXME: remove load_on_init when we can
    def __init__(self, module, load_on_init=False):
        self.module = module

    def populate(self, collected_facts=None):
        """Return a dict of hardware facts; the generic base has none."""
        return {}
class HardwareCollector(BaseFactCollector):
    """Fact collector that delegates to the platform's Hardware class."""
    name = 'hardware'
    _fact_ids = set(['processor',
                     'processor_cores',
                     'processor_count',
                     # TODO: mounts isnt exactly hardware
                     'mounts',
                     'devices'])
    _fact_class = Hardware

    def collect(self, module=None, collected_facts=None):
        """Instantiate the hardware class and return its populated facts.

        Returns an empty dict when no module is supplied.
        """
        collected_facts = collected_facts or {}
        if not module:
            return {}
        # Network munges cached_facts by side effect, so give it a copy
        hardware_facts = self._fact_class(module)
        return hardware_facts.populate(collected_facts=collected_facts)
| 1,746 | 510 |
# * coding: UTF8 *
"""
这里所有的的接口仅需要调用一次即可,具体接口和参数如下所示。
=================================================================================================
"""
def install_codec_driver():
    """
    Install the sound-card (codec) driver; needs to be called only once.

    :param: none
    :returns: 0 on success, any other value on failure
    :raises OSError: EINVAL
    """
    pass
def uninstall_codec_driver():
    """
    Uninstall the sound-card (codec) driver; needs to be called only once.

    :param: none
    :returns: 0 on success, any other value on failure
    :raises OSError: EINVAL
    """
    pass
def init():
    """
    Initialize the uVoice component; needs to be called only once.

    :param: none
    :returns: 0 on success, any other value on failure
    :raises OSError: EINVAL
    """
    pass
def deinit():
    """
    De-initialize the uVoice component; needs to be called only once.

    :param: none
    :returns: 0 on success, any other value on failure
    :raises OSError: EINVAL
    """
    pass
| 691 | 372 |
#!/usr/bin/env python
#
from parse import compile
def main():
    """Count fabric cells claimed by more than one claim (AoC 2018 day 3).

    Reads claims from day3input.txt, rasterizes each onto a 1000x1000 grid,
    and returns the number of cells claimed at least twice.
    """
    fabric = [[0] * 1000 for _ in range(1000)]
    # Fix: close the input file deterministically instead of leaking the handle.
    with open("day3input.txt", "r") as claims_file:
        for claim_str in claims_file:
            claim = Claim(claim_str)
            claim_fabric(fabric, claim)
    contested = 0
    for x in range(1000):
        for y in range(1000):
            if fabric[x][y] > 1:
                contested += 1
    return contested
def claim_fabric(fabric, claim):
    """Increment the claim count for every fabric cell covered by *claim*."""
    left, top = claim.data['x'], claim.data['y']
    for col in range(left, left + claim.data['width']):
        for row in range(top, top + claim.data['height']):
            fabric[col][row] += 1
class Claim:
    """One parsed fabric claim, e.g. '#1 @ 257,829: 10x23'."""
    # sample: #1 @ 257,829: 10x23
    # Shared compiled template; fields: claim_id, x, y, width, height (ints).
    parser = compile("#{claim_id:d} @ {x:d},{y:d}: {width:d}x{height:d}")
    def __init__(self, claim_str):
        # Second positional arg is evaluate_result=True; the stored result
        # supports dict-style access (claim.data['x']) -- NOTE(review): parse()
        # returns None on a non-matching line; unchecked here.
        self.data = self.parser.parse(claim_str, True)
# Script entry point; prints the contested-cell count.
# NOTE(review): `print main()` is Python 2 print-statement syntax -- this file
# only runs under Python 2 as written.
if __name__== "__main__":
    print main()
# Generated by Django 2.2.12 on 2020-05-06 12:27
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ``NewAssignment`` model for alert assignment tracking."""
    dependencies = [
        ('alertmgmt', '0008_logfilterfields_log_src_url'),
    ]
    operations = [
        migrations.CreateModel(
            name='NewAssignment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('assignment_name', models.CharField(max_length=128)),
                ('assignee', models.CharField(max_length=100)),
                ('assignto', models.CharField(max_length=100)),
                # 0 = not acknowledged; semantics of other values defined by the app.
                ('ackstatus', models.IntegerField(default=0)),
            ],
        ),
    ]
| 735 | 229 |
# coding=utf-8
#
# Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""BIG-IQ® license pool regkeys.
REST URI
``http://localhost/mgmt/cm/device/licensing/pool/initial-activation``
REST Kind
``cm:device:licensing:pool:initial-activation:*``
"""
from f5.bigiq.resource import Collection
from f5.bigiq.resource import Resource
class Initial_Activations(Collection):
    """Collection of initial-activation workers under a BIG-IQ license pool."""
    def __init__(self, pool):
        super(Initial_Activations, self).__init__(pool)
        # REST kind the server must report for this collection.
        self._meta_data['required_json_kind'] = \
            'cm:device:licensing:pool:initial-activation:initialactivationworkercollectionstate'  # NOQA
        self._meta_data['allowed_lazy_attributes'] = [Initial_Activation]
        # Maps member-item kinds to the resource class used to wrap them.
        self._meta_data['attribute_registry'] = {
            'cm:device:licensing:pool:initial-activation:initialactivationworkeritemstate': Initial_Activation  # NOQA
        }
class Initial_Activation(Resource):
    """A single initial-activation worker item (one regkey activation)."""
    def __init__(self, initial_activations):
        super(Initial_Activation, self).__init__(initial_activations)
        # Creating an activation requires at least a name and a regKey.
        self._meta_data['required_creation_parameters'] = {'name', 'regKey'}
        self._meta_data['required_json_kind'] = \
            'cm:device:licensing:pool:initial-activation:initialactivationworkeritemstate'
| 1,772 | 545 |
import argparse
from generator import logic

# CLI entry: dispatch on --mode. "logic" runs the probabilistic composite
# generator; "permute" is accepted but currently a no-op.
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, required=True)
args = parser.parse_args()
logic_main = logic.composite_probabilistically
if args.mode == "permute":
    pass
elif args.mode == "logic":
    logic_main()
else:
    # Fix: the old message suggested "--mode probabilistic", a value no branch
    # accepts; list the modes the code actually checks.
    print("Specify a flag: either --mode permute or --mode logic")
| 373 | 117 |
class Book:
    """A book with a title and a numeric rating."""
    # Class-level defaults, shadowed per-instance by __init__.
    Title = ""
    Rate = 0

    def __init__(self, title, rate):
        self.Title = title
        self.Rate = rate
| 175 | 60 |
"""Main exports"""
from fluxio_parser.parser import parse_project_tree # noqa: F401
__version__ = "0.3.1"
| 109 | 44 |
import dask.dataframe as dd
import pandas as pd
def load_parse_file(file_path, date_column="date"):
    """Load a CSV into a dask dataframe and parse its date column.

    Arguments:
        file_path: string path to the input CSV file.
        date_column: name of the column to parse as '%Y-%m-%d' datetimes
            (default "date").
    Returns:
        dask.dataframe.DataFrame backed by the file, with the date column
        converted to datetimes.
    """
    data = dd.read_csv(file_path)
    data[date_column] = dd.to_datetime(data[date_column], format='%Y-%m-%d')
    return data
def get_frames_by_id(dataframe, index_col=None):
    """Split a dask dataframe into one computed pandas frame per index value.

    Arguments:
        dataframe: dask.dataframe to split.
        index_col: name of the column whose unique values define the groups.
    Returns:
        list: one pandas.DataFrame per unique value of *index_col*.
    Raises:
        ValueError: if *index_col* is not given.
    """
    # Fix: validate with an explicit exception instead of `assert`, which is
    # silently stripped when Python runs with -O; also use `is None` per PEP 8.
    if index_col is None:
        raise ValueError("Must specify an index column")
    index_values = dataframe[index_col].unique().compute()
    frames = []
    for index in index_values:
        print("Doing ", index)
        subset = dataframe[(dataframe[index_col] == index)]
        frames.append(subset.compute(scheduler='processes'))
    return frames
def write_results(dataframes=None, file_name=None):
    """Concatenate dataframes row-wise and write them to a single CSV file.

    Arguments:
        dataframes: iterable of pandas.DataFrame to concatenate.
        file_name: output path; defaults to "output.csv".
    Returns:
        string: path to the output file.
    """
    # Fix: the docstring wrongly described a group-by; also compare to None
    # with `is`, per PEP 8.
    file_name = "output.csv" if file_name is None else file_name
    combined = pd.concat(dataframes, axis=0, copy=False, sort=False)
    combined.to_csv(file_name)
    return file_name
#!/usr/bin/env python3
# Copyright 2019 EMBL - European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
import glob
import logging
import sys
from ebi_eva_common_pyutils.command_utils import run_command_with_output
from ebi_eva_common_pyutils.logger import logging_config as log_cfg
from run_release_in_embassy.release_metadata import vcf_validation_output_file_pattern, asm_report_output_file_pattern
logger = log_cfg.get_logger(__name__)
def analyze_vcf_validation_files(vcf_validation_report_files):
    """Scan VCF validation reports for lines outside the known-benign error
    classes. Returns 0 when every report is clean, -1 otherwise."""
    exit_code = 0
    vcf_validation_report_error_classes_to_ignore = ["Error: Duplicated variant",
                                                     "Warning: Reference and alternate alleles "
                                                     "do not share the first nucleotide",
                                                     "the input file is not valid",
                                                     "the input file is valid",
                                                     "not listed in a valid meta-data ALT entry"]
    grep_filters = ['grep -v "{0}"'.format(error_class) for error_class in
                    vcf_validation_report_error_classes_to_ignore]
    vcf_validation_error_grep_command_chain = " | ".join(grep_filters)
    for report_path in vcf_validation_report_files:
        logger.info("Analyzing file {0} ....".format(report_path))
        # Pipe the report through the exclusion filters and count what remains.
        command_to_run = "cat {0} | {1} | wc -l".format(report_path,
                                                        vcf_validation_error_grep_command_chain)
        command_output = run_command_with_output(
            "Checking unusual errors in {0}".format(report_path),
            command_to_run, return_process_output=True)
        if int(command_output) > 0:
            logger.error("Unusual error(s) found in VCF validation log: {0}. \nRun command\n {1} \nfor details."
                         .format(report_path, command_to_run))
            exit_code = -1
    return exit_code
def analyze_asm_report_files(asm_report_files):
    """Scan assembly-report logs for lines outside the known-benign error
    classes. Returns 0 when every report is clean, -1 otherwise."""
    exit_code = 0
    assembly_report_error_classes_to_ignore = ["not present in FASTA file", "does not match the reference sequence"]
    asm_report_error_grep_command_chain = " | ".join(
        ['grep -v "{0}"'.format(error_class)
         for error_class in assembly_report_error_classes_to_ignore])
    for report_path in asm_report_files:
        logger.info("Analyzing file {0} ....".format(report_path))
        # Pipe the report through the exclusion filters and count what remains.
        command_to_run = "cat {0} | {1} | wc -l".format(report_path, asm_report_error_grep_command_chain)
        command_output = run_command_with_output(
            "Checking unusual errors in {0}".format(report_path),
            command_to_run, return_process_output=True)
        if int(command_output) > 0:
            logger.error("Unusual error(s) found in assembly report log: {0}. \nRun command\n {1} \nfor details."
                         .format(report_path, command_to_run))
            exit_code = -1
    return exit_code
def analyze_vcf_validation_results(species_release_folder, assembly_accession):
    """Check VCF validation and assembly reports for one assembly, then exit
    the process with 0 (clean) or -1 (unusual errors found)."""
    report_glob = "{0}/{1}/{2}"
    vcf_reports = glob.glob(report_glob.format(species_release_folder, assembly_accession,
                                               vcf_validation_output_file_pattern))
    exit_code = analyze_vcf_validation_files(vcf_reports)
    asm_reports = glob.glob(report_glob.format(species_release_folder, assembly_accession,
                                               asm_report_output_file_pattern))
    # Preserve a prior failure: only run into the asm result when VCF passed.
    exit_code = exit_code or analyze_asm_report_files(asm_reports)
    sys.exit(exit_code)
# CLI entry point: analyze validation results for one assembly inside a
# species release folder. click converts the dashed option names to the
# snake_case parameters below.
@click.option("--species-release-folder", required=True)
@click.option("--assembly-accession", required=True)
@click.command()
def main(species_release_folder, assembly_accession):
    analyze_vcf_validation_results(species_release_folder, assembly_accession)
if __name__ == '__main__':
    main()
| 4,756 | 1,369 |
import numpy as np
import uuid
class Vortex:
    """A single vortex within a BEC, with position, winding and type."""

    def __init__(self, position, winding, component, v_type=None):
        self.x, self.y = position
        self.winding = winding
        self.v_type = v_type  # String: type of vortex (i.e. SQV or HQV)
        self.uid = '{}_{}'.format(v_type, uuid.uuid1())  # Unique identifier string
        self.isTracked = True  # Tracking argument for vortex
        self.component = component  # Which component of the wavefunction the vortex is in

    def get_coords(self):
        """Return the (x, y) position."""
        return self.x, self.y

    def get_uid(self):
        """Return the unique identifier string."""
        return self.uid

    def get_v_type(self):
        """Return the vortex type string."""
        return self.v_type

    def get_distance(self, vortex):
        """Euclidean distance between this vortex and *vortex*."""
        dx = self.x - vortex.x
        dy = self.y - vortex.y
        return np.sqrt(dx ** 2 + dy ** 2)

    def update_type(self, vortex_type):
        """Set the vortex type (e.g. 'SQV' or 'HQV')."""
        self.v_type = vortex_type

    def update_uid(self):
        """Prefix the current uid with the (possibly updated) vortex type."""
        self.uid = '{}_{}'.format(self.v_type, self.uid)

    def update_coords(self, pos_x, pos_y):
        """Move the vortex to (pos_x, pos_y)."""
        self.x, self.y = pos_x, pos_y
class VortexMap:
    """Map that keeps track of all vortices within a condensate."""
    def __init__(self):
        self.vortices_unid = []  # Unidentified vortices
        self.vortices_sqv = []   # Singly-quantized vortices
        self.vortices_hqv = []   # Half-quantized vortices
    def add_vortex(self, vortex):
        """Add *vortex* to the pool matching its type, or to the unidentified pool."""
        # * Adds a vortex to the unidentified pool of the vortexMap
        if vortex.v_type == 'SQV':
            self.vortices_sqv.append(vortex)
        elif vortex.v_type == 'HQV':
            self.vortices_hqv.append(vortex)
        else:
            self.vortices_unid.append(vortex)
    def sort_vortices(self, vortex):
        """Append an already-identified vortex to its typed pool.

        NOTE(review): duplicates add_vortex except for the unidentified case;
        consider delegating to add_vortex.
        """
        # * Function that sorts all identified vortices into their respective pools
        if vortex.v_type == 'SQV':
            self.vortices_sqv.append(vortex)
        if vortex.v_type == 'HQV':
            self.vortices_hqv.append(vortex)
    def total_vortices(self):
        """Number of identified vortices (SQV + HQV; unidentified excluded)."""
        return len(self.vortices_sqv) + len(self.vortices_hqv)
    def sqv_number(self, component):
        """Count SQVs belonging to the given wavefunction component."""
        sqv_list = [vortex for vortex in self.vortices_sqv if vortex.component == component]
        return len(sqv_list)
    def hqv_number(self, component):
        """Count HQVs belonging to the given wavefunction component."""
        hqv_list = [vortex for vortex in self.vortices_hqv if vortex.component == component]
        return len(hqv_list)
    def identify_vortices(self, threshold):
        """Classify unidentified vortices into SQVs (overlapping cores in both
        components, within *threshold* along each axis) and HQVs (the rest)."""
        # * Finds SQVs by finding overlapping vortices in the components
        # * Threshold determines the maximum distance between to cores to be classed as a SQV
        vortices_1 = [vortex for vortex in self.vortices_unid if vortex.component == '1']
        vortices_2 = [vortex for vortex in self.vortices_unid if vortex.component == '2']
        for vortex_1 in vortices_1:
            for vortex_2 in vortices_2:
                if abs(vortex_1.x - vortex_2.x) < threshold:
                    if abs(vortex_1.y - vortex_2.y) < threshold:
                        # * If this evaluates to true, the two vortices are within the threshold
                        # * Firstly, get the average of the positions of the two overlapping vortices
                        sqv_pos = (vortex_1.x + vortex_2.x) / 2, (vortex_1.y + vortex_2.y) / 2
                        # * Generate new SQV vortex that gets added to the SQV pool
                        self.add_vortex(Vortex(sqv_pos, vortex_1.winding, component='both', v_type='SQV'))
                        # * Remove the corresponding vortex_plus and vortex_minus from the unid pool
                        if vortex_1 in self.vortices_unid:
                            self.vortices_unid.remove(vortex_1)
                        if vortex_2 in self.vortices_unid:
                            self.vortices_unid.remove(vortex_2)
                        # break resumes with the next vortex_1 after the first match.
                        # NOTE(review): a vortex_2 already paired can still match a
                        # later vortex_1 (only vortices_unid is pruned) -- confirm
                        # this is intended.
                        break
        # * Determines HQVs by setting all remaining unidentified vortices to HQVs
        for vortex in self.vortices_unid:
            vortex.update_type('HQV')
            vortex.update_uid()
            self.vortices_hqv.append(vortex)
        self.vortices_unid = []  # Empties unid list
| 4,115 | 1,375 |
# Read two integers from stdin and print their product modulo 109.
a=int(input())
b=int(input())
print((a*b)%109)
| 47 | 27 |
"""Unit tests for iplib3.subnet"""
import pytest
from iplib3.subnet import ( # pylint: disable=import-error,no-name-in-module
SubnetMask,
PureSubnetMask,
)
from iplib3.constants import ( # pylint: disable=import-error,no-name-in-module
IPV4_MIN_SUBNET_VALUE,
IPV4_MAX_SUBNET_VALUE,
IPV6_MAX_SUBNET_VALUE,
)
def test_pure_subnet_mask():
    """Test the PureSubnetMask base class can be constructed"""
    _ = PureSubnetMask()
def test_pure_subnet_mask_prefix_length():
    """Test PureSubnetMask prefix length"""
    subnet = PureSubnetMask()
    another = PureSubnetMask()
    # Poke the private attribute directly to simulate an uninitialized mask.
    another._prefix_length = None
    assert subnet._prefix_length == IPV4_MIN_SUBNET_VALUE
    assert another._prefix_length is None
def test_pure_subnet_mask_string():
    """Test PureSubnetMask string representation"""
    subnet = PureSubnetMask()
    assert str(subnet) == '0'
    assert repr(subnet) == "iplib3.PureSubnetMask('0')"
def test_pure_subnet_mask_equality():
    """Test PureSubnetMask equality"""
    subnet = PureSubnetMask()
    # Equality holds against other masks, ints, and numeric strings.
    assert subnet == PureSubnetMask()
    assert subnet == IPV4_MIN_SUBNET_VALUE
    assert subnet == '0'
def test_pure_subnet_mask_inequality():
    """Test PureSubnetMask inequality"""
    subnet = PureSubnetMask()
    another = PureSubnetMask()
    another._prefix_length = None
    # Unsupported types (float) and differing prefix lengths compare unequal.
    assert subnet != 3.14
    assert subnet != another
def test_subnet_mask_subnet_type():
    """Test SubnetMask subnet type detection"""
    # Defaults to IPv6; a dotted-decimal mask switches detection to IPv4.
    assert SubnetMask()._subnet_type == 'ipv6'
    assert SubnetMask('255.255.255.0')._subnet_type == 'ipv4'
def test_subnet_mask_string():
    """Test SubnetMask string representation"""
    # IPv4 masks render dotted-decimal; IPv6 masks render as the prefix length.
    assert (
        repr(SubnetMask(24, subnet_type='ipv4'))
        == "iplib3.SubnetMask('255.255.255.0')")
    assert repr(SubnetMask(24)) == "iplib3.SubnetMask('24')"
def test_subnet_mask_subnet_to_num():
    """Test SubnetMask subnet to number converter"""
    # Accepts None, ints, numeric strings, and (IPv4 only) dotted-decimal masks.
    assert SubnetMask._subnet_to_num(None) is None
    assert SubnetMask._subnet_to_num(24) == 24
    assert SubnetMask._subnet_to_num('24') == 24
    assert SubnetMask._subnet_to_num(None, subnet_type='ipv4') is None
    assert SubnetMask._subnet_to_num(24, subnet_type='ipv4') == 24
    assert SubnetMask._subnet_to_num('24', subnet_type='ipv4') == 24
    assert SubnetMask._subnet_to_num('255.255.128.0', subnet_type='ipv4') == 17
def test_subnet_mask_subnet_to_num_errors():
    """Test SubnetMask subnet to number converter errors"""
    # Lists are a TypeError; dotted-decimal under IPv6, non-numeric strings,
    # out-of-range prefixes, and non-contiguous masks are ValueErrors.
    with pytest.raises(TypeError):
        SubnetMask._subnet_to_num([255, 255, 255, 0])
    with pytest.raises(ValueError):
        SubnetMask._subnet_to_num('255.255.255.0')
    with pytest.raises(ValueError):
        SubnetMask._subnet_to_num('3e2')
    with pytest.raises(ValueError):
        SubnetMask._subnet_to_num(IPV4_MAX_SUBNET_VALUE+1, subnet_type='ipv4')
    with pytest.raises(ValueError):
        SubnetMask._subnet_to_num(IPV6_MAX_SUBNET_VALUE+1)
    with pytest.raises(ValueError):
        SubnetMask._subnet_to_num('255.6.0.0', subnet_type='ipv4')
def test_subnet_mask_prefix_to_subnet_mask():
    """Test SubnetMask number to mask converter"""
    assert (
        SubnetMask._prefix_to_subnet_mask(24, subnet_type='ipv4')
        == '255.255.255.0'
    )
def test_subnet_mask_prefix_to_subnet_mask_errors():
    """Test SubnetMask number to mask converter errors"""
    # Dotted-decimal masks only make sense for IPv4: IPv6 and out-of-range
    # IPv4 prefixes must raise.
    with pytest.raises(ValueError):
        SubnetMask._prefix_to_subnet_mask(24, subnet_type='ipv6')
    with pytest.raises(ValueError):
        SubnetMask._prefix_to_subnet_mask(IPV4_MAX_SUBNET_VALUE+1, subnet_type='ipv4')
| 3,562 | 1,387 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Illustrate how to run linear model (y ~ x1 + x2) with statistically
valid inference when x1, x2 contains designed noise, when training data
is stored as a table in SQLite database.
"""
from svinfer.processor import DatabaseProcessor
from svinfer.linear_model import LinearRegression
import sqlite3
from linear_regression_with_dataframe import simulate_training_data
if __name__ == "__main__":
    # get training data
    # assume the variance of the added noise are 4 and 1 for each predictor
    # assume the training data is stored as a table called my_data in SQLite database
    x_s2 = [4, 1]
    data = simulate_training_data(x_s2)
    # In-memory SQLite keeps the example self-contained.
    connection = sqlite3.connect(":memory:")
    data.to_sql("my_data", con=connection)
    # fit y ~ x1 + x2, where x1 and x2 have added noise
    db_data = DatabaseProcessor(connection, "my_data")
    model = LinearRegression(
        ["x1", "x2"],  # column names for predictors
        "y",  # column name for the response
        x_s2,  # variances of the added noises to each predictor
        random_state=123,  # optional, to ensure reproducibility
    ).fit(db_data)
    # check result
    print("beta_tilde is: \n{}".format(model.beta))
    # expect results to be close to
    # beta_tilde is:
    # [10.53475783 12.26662045 -3.11457588]
    print("beta_tilde's standard error is: \n{}".format(model.beta_standarderror))
    # expect results to be close to
    # beta_tilde's standard error is:
    # [1.28940235 0.45779356 0.17814397]
    print("beta_tile's variance-covariance matrix: \n{}".format(model.beta_vcov))
    # expect results to be close to
    # beta_tile's variance-covariance matrix:
    # [[1.66255843 0.35312458 -0.17656444]
    # [0.35312458 0.20957495 -0.07915853]
    # [-0.17656444 -0.07915853 0.03173527]]
    print("estimated residual variance is {}".format(model.sigma_sq))
    # expect results to be close to
    # estimated residual variance is 0.5136891806650965
| 2,570 | 908 |
from ..geometry import Cuboid_Collider, Primitive
from ..materials import Material
from ..utils.vector3 import vec3
from ..utils.constants import SKYBOX_DISTANCE
from ..utils.image_functions import load_image, load_image_as_linear_sRGB
from .util.blur_background import blur_skybox
class SkyBox(Primitive):
    """Skybox primitive: a large cuboid collider centered on the scene that
    renders a cubemap texture at SKYBOX_DISTANCE; casts no shadow."""
    def __init__(self, cubemap, center = vec3(0.,0.,0.), light_intensity = 0.0, blur = 0.0):
        # NOTE(review): the default `vec3(...)` is evaluated once at definition
        # time; safe only if vec3 instances are never mutated in place.
        super().__init__(center, SkyBox_Material(cubemap, light_intensity, blur), shadow = False)
        l = SKYBOX_DISTANCE
        self.light_intensity = light_intensity
        #BOTTOM
        self.collider_list += [Cuboid_Collider(assigned_primitive = self, center = center, width = 2*l, height =2*l ,length =2*l )]
    def get_uv(self, hit):
        """Map the collider's (u, v) into the 4x3 cubemap-cross texture layout."""
        u,v = hit.collider.get_uv(hit)
        u,v = u/4,v/3
        return u,v
class SkyBox_Material(Material):
    """Material sampling a cubemap texture, with optional blur and an optional
    additive lightmap for emissive skyboxes."""
    def __init__(self, cubemap, light_intensity, blur):
        self.texture = load_image_as_linear_sRGB("sightpy/backgrounds/" + cubemap)
        # Lightmap and blurred texture are only loaded when actually used.
        if light_intensity != 0.0:
            self.lightmap = load_image("sightpy/backgrounds/lightmaps/" + cubemap)
        if blur != 0.0:
            self.blur_image = blur_skybox(load_image("sightpy/backgrounds/" + cubemap), blur, cubemap)
        self.blur = blur
        self.light_intensity = light_intensity
        self.repeat = 1.0
    def get_texture_color(self, hit, ray):
        """Sample the (possibly blurred) texture at the hit's uv, adding the
        lightmap contribution for secondary rays when emission is enabled."""
        u,v = hit.get_uv()
        # Negative row index wraps the v coordinate; u wraps by modulo.
        if (self.blur != 0.0) :
            im = self.blur_image[-((v * self.blur_image.shape[0]*self.repeat ).astype(int)% self.blur_image.shape[0]) , (u * self.blur_image.shape[1]*self.repeat).astype(int) % self.blur_image.shape[1] ].T
        else:
            im = self.texture[-((v * self.texture.shape[0]*self.repeat ).astype(int)% self.texture.shape[0]) , (u * self.texture.shape[1]*self.repeat).astype(int) % self.texture.shape[1] ].T
        # ray.depth != 0 means a secondary (bounced) ray: add emission there only.
        if (ray.depth != 0) and (self.light_intensity != 0.0):
            ls = self.lightmap[-((v * self.texture.shape[0]*self.repeat ).astype(int)% self.texture.shape[0]) , (u * self.texture.shape[1]*self.repeat).astype(int) % self.texture.shape[1] ].T
            color = vec3(im[0] + self.light_intensity * ls[0], im[1] + self.light_intensity * ls[1], im[2] + self.light_intensity * ls[2])
        else:
            color = vec3(im[0] , im[1] , im[2] )
        return color
    def get_color(self, scene, ray, hit):
        """Compute the hit point and return the skybox color for this ray."""
        hit.point = (ray.origin + ray.dir * hit.distance)
        return hit.material.get_texture_color(hit,ray)
| 2,705 | 1,009 |
from django.apps import AppConfig
class PksConfig(AppConfig):
    """Django application configuration for the ``pks`` app."""
    name = 'pks'
| 81 | 28 |
"""
======================
Exome Cancer pipeline
======================
.. todo::
*Final filtering of SNPs/INDELs is currently done in the
reporting. This should be handled by the pipeline. The SNP output
would also then be passed to the mutational signature task
*Document
*fully make phone home/key option work - GATK public key? Summarise
*Indel calling (size of indels called) Example
The exome cancer pipeline imports unmapped reads from matched sample fastqs or
sra files and aligns them to the genome using BWA. Post alignment
quality control is performed using Picard. The pipeline then performs
local realignment around indels and base quality score recalibration
using GATK. Next variants (SNVs and indels) are called and filtered
1. Align to genome using gapped alignment (BWA)
2. Check alignment quality and target region coverage (Picard)
3. Local realignment and BQSR in GATK
4. Variant calling (SNPs) on control samples using muTect to generate
a "panel of normal" variants
5a. Variant calling (SNPs) with tumour samples using muTect including
filtering
5b. Variant calling (indels) using Strelka
6a. Variant annotation using SNPeff, GATK VariantAnnotator, and SnpSift
6b. Variant annotation with data from eBIO
6c. Load Network of Cancer Genes (NCG) for Variant annotation in reporting
.. note::
An optional downsampling analysis can also be performed to assess how
coverage of a control sample affects the called variants
1. Currently the pipeline is not able to deal with replicates, i.e
replicates will be treated separately.
Usage
=====
See :ref:`PipelineSettingUp` and :ref:`PipelineRunning` on general
information how to use CGAT pipelines.
Configuration
-------------
Input
-----
Reads are imported by placing files or linking to files in the
:term:`working directory`.
The default file format assumes the following convention:
<patientID>-<tissue>-<replicate>.<suffix>
``patientID`` and ``tissue`` make up an :term:`experiment`, while ``replicate``
denotes the :term:`replicate` within an :term:`experiment`.
The ``suffix`` determines the file type.
The following suffixes/file types are possible:
sra
Short-Read Archive format. Reads will be extracted using the
:file:`fastq-dump` tool.
fastq.gz
Single-end reads in fastq format.
fastq.1.gz, fastq.2.gz
Paired-end reads in fastq format. The two fastq files must be sorted
by read-pair.
.. note::
Quality scores need to be of the same scale for all input
files. Thus it might be difficult to mix different formats.
Documentation
-------------
If you would like the genes of interest to be flagged in your vcf,
make add_genes_of_interest=1 (default=0) and provide a list of comma
separated genes (without spaces) in the ini file.
If you would like to annotate genes of interest with a particular
value in the results table, create a file called [label]_annotations.tsv
in your working directory listing all the genes. For example, to
annotate all genes identified in a previous shRNA screen, add a file
called shRNA_annotations.tsv listing the genes and the results table
will contain a column called "shRNA" with values "shRNA" and "null".
Requirements
------------
On top of the default CGAT setup, the pipeline requires the following
software to be in the path:
+--------------------+------------+-------------------------------------------+
|*Program* |*Version* |*Purpose* |
+--------------------+------------+-------------------------------------------+
|Stampy |>=0.9.0 |read mapping |
+--------------------+------------+-------------------------------------------+
|BWA | |read mapping |
+--------------------+------------+-------------------------------------------+
|SAMtools | |filtering, SNV / indel calling |
+--------------------+------------+-------------------------------------------+
|BEDTools | |filtering |
+--------------------+------------+-------------------------------------------+
|sra-tools | |extracting reads from .sra files |
+--------------------+------------+-------------------------------------------+
|picard |>=1.38 |bam/sam files. The .jar files need to be in|
| | |your CLASSPATH environment variable. |
+--------------------+------------+-------------------------------------------+
|vcf-tools | |VCF filtering |
+--------------------+------------+-------------------------------------------+
|GATK | 2.5-2 |local realignment, BQSR, variant calling |
+--------------------+------------+-------------------------------------------+
|SNPeff | 3.3 | |
+--------------------+------------+-------------------------------------------+
Pipeline output
===============
The major output is a csvdb containing quality control information
and variant information by patientID and an html report with
similar information.
Example
=======
Code
====
"""
# load modules
from ruffus import *
# from rpy2.robjects import r as R
import numpy
import CGAT.Experiment as E
import sys
import os
import sqlite3
import CGAT.IOTools as IOTools
import CGATPipelines.PipelineMapping as PipelineMapping
import CGATPipelines.PipelineMappingQC as PipelineMappingQC
import CGATPipelines.Pipeline as P
import re
import CGATPipelines.PipelineExome as PipelineExome
USECLUSTER = True  # default for the per-task ``to_cluster`` flags below
#########################################################################
#########################################################################
def connect():
    '''Return a connection to the pipeline database.

    Extend this function when additional databases need to be
    attached to the same connection.
    '''
    return sqlite3.connect(PARAMS["database_name"])
#########################################################################
# Load the pipeline configuration; candidate ini files are listed in
# increasing order of precedence.  only_import suppresses side effects
# when this module is imported rather than executed.
P.getParameters(
    ["%s/pipeline.ini" % os.path.splitext(__file__)[0],
     "../pipeline.ini",
     "pipeline.ini"],
    defaults={
        'paired_end': False},
    only_import=__name__ != "__main__")
PARAMS = P.PARAMS
# share the configuration dictionary with the helper modules
PipelineMapping.PARAMS = PARAMS
PipelineMappingQC.PARAMS = PARAMS
PipelineExome.PARAMS = PARAMS
#########################################################################
#########################################################################
# Load manual annotations
#########################################################################
@transform("*_annotations.tsv",
           suffix(".tsv"),
           ".load")
def loadManualAnnotations(infile, outfile):
    '''Load a user-supplied gene list into the database.

    The annotation label is derived from the file name
    (<label>_annotations.tsv); each gene is written as a
    "<label>\tgene_id" row and the table is indexed on gene_id.
    '''
    tmp = P.getTempFilename(".")
    annotation = P.snip(infile, "_annotations.tsv")
    with IOTools.openFile(tmp, "w") as outf:
        outf.write("%s\tgene_id\n" % annotation)
        with IOTools.openFile(infile, "r") as inf:
            for line in inf:
                # input lines already end in newline, hence no "\n" here
                outf.write("%s\t%s" % (annotation, line))
    P.load(tmp, outfile, options="--add-index=gene_id")
    os.unlink(tmp)
#########################################################################
# Alignment to a reference genome
#########################################################################
@follows(mkdir("bam"))
@transform(("*.fastq.1.gz", "*.fastq.gz", "*.sra"),
           regex(r"(\S+).(fastq.1.gz|fastq.gz|sra)"),
           r"bam/\1.bam")
def mapReads(infile, outfile):
    '''Map reads to the genome using BWA, sort and index BAM file,
    generate alignment statistics and deduplicate using Picard.

    The mapper is selected via the ``bwa_algorithm`` ini option
    ("aln" or "mem"); any other value raises ValueError.
    '''
    job_threads = PARAMS["bwa_threads"]
    job_memory = PARAMS["bwa_memory"]
    # hoist the option so the error branch can report it: the original
    # referenced an undefined name ``algorithm`` and raised NameError
    # instead of the intended ValueError
    algorithm = PARAMS["bwa_algorithm"]
    if algorithm == "aln":
        m = PipelineMapping.BWA(
            remove_non_unique=PARAMS["bwa_remove_non_unique"],
            strip_sequence=False)
    elif algorithm == "mem":
        m = PipelineMapping.BWAMEM(
            remove_non_unique=PARAMS["bwa_remove_non_unique"],
            strip_sequence=False)
    else:
        raise ValueError("bwa algorithm '%s' not known" % algorithm)
    statement = m.build((infile,), outfile)
    # log (rather than print) the generated command, consistent with the
    # E.info usage elsewhere in this pipeline
    E.info(statement)
    P.run()
@merge(mapReads, "picard_duplicate_stats.load")
def loadPicardDuplicateStats(infiles, outfile):
    '''Merge Picard duplicate stats into single table and load into SQLite.'''
    PipelineMappingQC.loadPicardDuplicateStats(infiles, outfile)
#########################################################################
# Post-alignment QC
#########################################################################
@follows(mapReads)
@merge("bam/*.picard_stats", "picard_stats.load")
def loadPicardAlignStats(infiles, outfile):
    '''Merge Picard alignment stats into single table and load into SQLite.'''
    PipelineMappingQC.loadPicardAlignmentStats(infiles, outfile)
#########################################################################
@transform(mapReads, regex(r"bam/(\S+).bam"), r"bam/\1.cov")
def buildCoverageStats(infile, outfile):
    '''Generate coverage statistics for regions of interest from a
    bed file using Picard'''
    # TS check whether this is always required or specific to current baits
    # file
    # baits file requires modification to make picard accept it
    # this is performed before CalculateHsMetrics
    to_cluster = USECLUSTER
    baits = PARAMS["roi_baits"]
    modified_baits = infile + "_temp_baits_final.bed"
    regions = PARAMS["roi_regions"]  # NOTE(review): unused below — confirm
    # reformat the baits (chrom, start, end, strand, name) and prepend the
    # BAM header so Picard accepts the interval list
    statement = '''samtools view -H %(infile)s > %(infile)s_temp_header.txt;
                   awk 'NR>2' %(baits)s |
                   awk -F '\\t' 'BEGIN { OFS="\\t" } {print $1,$2,$3,"+",$4;}'
                   > %(infile)s_temp_baits.bed;
                   cat %(infile)s_temp_header.txt %(infile)s_temp_baits.bed
                   > %(modified_baits)s; checkpoint ;
                   rm -rf %(infile)s_temp_baits.bed %(infile)s_temp_header.txt
                   '''
    P.run()
    PipelineMappingQC.buildPicardCoverageStats(
        infile, outfile, modified_baits, modified_baits)
    # truncate the temporary interval list once consumed
    IOTools.zapFile(modified_baits)
@follows(buildCoverageStats)
@merge(buildCoverageStats, "coverage_stats.load")
def loadCoverageStats(infiles, outfile):
    '''Merge Picard coverage stats into a single table and load into SQLite.'''
    PipelineMappingQC.loadPicardCoverageStats(infiles, outfile)
#########################################################################
#########################################################################
#########################################################################
# GATK realign bams
#########################################################################
@transform(mapReads,
           regex(r"bam/(\S+).bam"),
           r"bam/\1.bqsr.bam")
def GATKpreprocessing(infile, outfile):
    '''Reorders BAM according to reference fasta and add read groups using
    SAMtools, realigns around indels and recalibrates base quality scores
    using GATK'''
    to_cluster = USECLUSTER
    # NOTE(review): track and tmpdir_gatk are unused below — confirm
    track = P.snip(os.path.basename(infile), ".bam")
    tmpdir_gatk = P.getTempDir()
    job_memory = PARAMS["gatk_memory"]
    genome = "%s/%s.fa" % (PARAMS["bwa_index_dir"],
                           PARAMS["genome"])
    # intermediate targets; each is truncated (zapFile) once consumed
    outfile1 = outfile.replace(".bqsr", ".readgroups.bqsr")
    outfile2 = outfile.replace(".bqsr", ".realign.bqsr")
    PipelineExome.GATKReadGroups(infile, outfile1, genome,
                                 PARAMS["readgroup_library"],
                                 PARAMS["readgroup_platform"],
                                 PARAMS["readgroup_platform_unit"])
    PipelineExome.GATKIndelRealign(outfile1, outfile2, genome,
                                   PARAMS["gatk_threads"])
    IOTools.zapFile(outfile1)
    PipelineExome.GATKBaseRecal(outfile2, outfile, genome,
                                PARAMS["gatk_dbsnp"],
                                PARAMS["gatk_solid_options"])
    IOTools.zapFile(outfile2)
@transform(GATKpreprocessing,
           regex("bam/(\S+)-%s-(\d+).bqsr.bam" % PARAMS["sample_control"]),
           r"bam/\1-%s-\2.merged.bam" % PARAMS["sample_control"])
def mergeSampleBams(infile, outfile):
    '''merge control and tumor bams'''
    # Note: need to change readgroup headers for merge and subsequent
    # splitting of bam files
    to_cluster = USECLUSTER
    job_memory = PARAMS["gatk_memory"]
    tmpdir_gatk = P.getTempDir(shared=True)
    # derive the matched tumour file/track names by swapping the sample tag
    outfile_tumor = outfile.replace(
        PARAMS["sample_control"], PARAMS["sample_tumour"])
    infile_tumor = infile.replace(
        PARAMS["sample_control"], PARAMS["sample_tumour"])
    infile_base = os.path.basename(infile)
    infile_tumor_base = infile_base.replace(
        PARAMS["sample_control"], PARAMS["sample_tumour"])
    track = P.snip(os.path.basename(infile), ".bam")
    track_tumor = track.replace(
        PARAMS["sample_control"], PARAMS["sample_tumour"])
    library = PARAMS["readgroup_library"]
    platform = PARAMS["readgroup_platform"]
    platform_unit = PARAMS["readgroup_platform_unit"]
    # NOTE(review): control_id/tumor_id are not used below — confirm
    control_id = "Control.bam"
    tumor_id = control_id.replace(
        PARAMS["sample_control"], PARAMS["sample_tumour"])
    # re-tag read groups per sample so reads can be re-split after merging
    statement = '''picard AddOrReplaceReadGroups
                   INPUT=%(infile)s
                   OUTPUT=%(tmpdir_gatk)s/%(infile_base)s
                   RGLB=%(library)s RGPL=%(platform)s
                   RGPU=%(platform_unit)s RGSM=%(track)s
                   ID=%(track)s
                   VALIDATION_STRINGENCY=SILENT ;
                   checkpoint ;'''
    statement += '''picard AddOrReplaceReadGroups
                    INPUT=%(infile_tumor)s
                    OUTPUT=%(tmpdir_gatk)s/%(infile_tumor_base)s
                    RGLB=%(library)s RGPL=%(platform)s
                    RGPU=%(platform_unit)s RGSM=%(track_tumor)s
                    ID=%(track_tumor)s
                    VALIDATION_STRINGENCY=SILENT ;
                    checkpoint ;'''
    statement += '''samtools merge -rf
                    %(outfile)s
                    %(tmpdir_gatk)s/%(infile_base)s
                    %(tmpdir_gatk)s/%(infile_tumor_base)s
                    ; checkpoint ;'''
    statement += '''samtools index %(outfile)s ;
                    checkpoint ;'''
    statement += '''rm -rf %(tmpdir_gatk)s ;
                    checkpoint ; '''
    P.run()
    # truncate the inputs once the merged bam exists, to save disk space
    IOTools.zapFile(infile)
    IOTools.zapFile(infile_tumor)
@transform(mergeSampleBams,
           regex("bam/(\S+)-%s-(\d+).merged.bam" % PARAMS["sample_control"]),
           r"bam/\1-%s-\2.realigned.bqsr.bam" % PARAMS["sample_control"])
def realignMatchedSample(infile, outfile):
    ''' repeat realignments with merged bam of control and tumor
    this should help avoid problems with sample-specific realignments'''
    genome = "%s/%s.fa" % (PARAMS["bwa_index_dir"],
                           PARAMS["genome"])
    PipelineExome.GATKIndelRealign(infile, outfile, genome)
    # the merged input is truncated once realignment has succeeded
    IOTools.zapFile(infile)
@transform(realignMatchedSample,
           regex("bam/(\S+)-%s-(\d+).realigned.bqsr.bam" %
                 PARAMS["sample_control"]),
           r"bam/\1-%s-\2.realigned.split.bqsr.bam" % PARAMS["sample_control"])
def splitMergedRealigned(infile, outfile):
    ''' split realignment file and truncate intermediate bams'''
    # the read-group ids assigned in mergeSampleBams are used with
    # ``samtools view -r`` to pull each sample's reads back out
    track = P.snip(os.path.basename(infile), ".realigned.bqsr.bam") + ".bqsr"
    track_tumor = track.replace(
        PARAMS["sample_control"], PARAMS["sample_tumour"])
    outfile_tumor = outfile.replace(
        PARAMS["sample_control"], PARAMS["sample_tumour"])
    statement = '''samtools view -hb %(infile)s
                   -r %(track)s > %(outfile)s;
                   samtools view -hb %(infile)s
                   -r %(track_tumor)s > %(outfile_tumor)s; checkpoint ;
                   samtools index %(outfile)s;
                   samtools index %(outfile_tumor)s; checkpoint;'''
    P.run()
    IOTools.zapFile(infile)
@transform(splitMergedRealigned,
           regex("bam/(\S+)-%s-(\S+).realigned.split.bqsr.bam" %
                 PARAMS["sample_control"]),
           r"bam/\1-%s-\2.realigned.picard_stats" % PARAMS["sample_control"])
def runPicardOnRealigned(infile, outfile):
    '''Collect Picard alignment statistics for both the control and the
    matched tumour bam produced by the merged realignment.'''
    to_cluster = USECLUSTER
    job_memory = PARAMS["gatk_memory"]
    # NOTE(review): tmpdir_gatk, track and track_tumor are unused — confirm
    tmpdir_gatk = P.getTempDir()
    outfile_tumor = outfile.replace(
        PARAMS["sample_control"], PARAMS["sample_tumour"])
    infile_tumor = infile.replace(
        PARAMS["sample_control"], PARAMS["sample_tumour"])
    track = P.snip(os.path.basename(infile), ".bam")
    track_tumor = track.replace(
        PARAMS["sample_control"], PARAMS["sample_tumour"])
    genome = "%s/%s.fa" % (PARAMS["bwa_index_dir"],
                           PARAMS["genome"])
    PipelineMappingQC.buildPicardAlignmentStats(infile, outfile, genome)
    PipelineMappingQC.buildPicardAlignmentStats(infile_tumor,
                                                outfile_tumor, genome)
@follows(runPicardOnRealigned)
@merge("bam/*.realigned.picard_stats", "realigned_picard_stats.load")
def loadPicardRealigenedAlignStats(infiles, outfile):
    '''Merge Picard alignment stats into single table and load into SQLite.'''
    # (the function name keeps its historical "Realigened" spelling;
    # renaming would break pipeline targets that reference it)
    PipelineMappingQC.loadPicardAlignmentStats(infiles, outfile)
#########################################################################
#########################################################################
#########################################################################
# Variant Calling
#########################################################################
@follows(mkdir("normal_panel_variants"))
@transform(splitMergedRealigned,
           regex(r"bam/(\S+)-%s-(\S).realigned.split.bqsr.bam" %
                 PARAMS["sample_control"]),
           r"normal_panel_variants/\1_normal_mutect.vcf")
def callControlVariants(infile, outfile):
    '''run mutect to call snps in control sample'''
    basename = P.snip(outfile, "_normal_mutect.vcf")
    call_stats_out = basename + "_call_stats.out"
    mutect_log = basename + ".log"
    cosmic, dbsnp, = (PARAMS["mutect_cosmic"],
                      PARAMS["gatk_dbsnp"])
    genome = "%s/%s.fa" % (PARAMS["bwa_index_dir"],
                           PARAMS["genome"])
    # artifact=True — presumably artifact-detection mode for building the
    # panel of normals; confirm in PipelineExome.mutectSNPCaller
    PipelineExome.mutectSNPCaller(infile, outfile, mutect_log, genome, cosmic,
                                  dbsnp, call_stats_out, PARAMS[
                                      'mutect_memory'],
                                  PARAMS['mutect_threads'], artifact=True)
@transform(callControlVariants,
           suffix(".vcf"),
           "_slim.vcf.gz")
def indexControlVariants(infile, outfile):
    '''index control vcf for intersection by vcftools'''
    # keep only the 8 site-level VCF columns, then bgzip and tabix-index
    outfile = P.snip(outfile, ".gz")
    statement = '''cut -f1-8 %(infile)s > %(outfile)s;
                   bgzip -f %(outfile)s;
                   tabix -f %(outfile)s.gz'''
    P.run()
# parameterise vcf intersection (number of req. observations - currently 1)
@merge(indexControlVariants,
       "normal_panel_variants/combined.vcf")
def mergeControlVariants(infiles, outfile):
    ''' intersect control vcfs to generate a panel of normals for mutect'''
    infiles = " ".join(infiles)
    # remove module command when Sebastian has made latest version executable
    # -n +1: keep sites present in at least one control vcf
    statement = '''module load bio/vcftools/0.1.08a;
                   vcf-isec -o -n +1 %(infiles)s
                   > %(outfile)s'''
    P.run()
@follows(mkdir("variants"), callControlVariants)
@transform(splitMergedRealigned,
           regex(r"bam/(\S+)-%s-(\S).realigned.split.bqsr.bam" %
                 PARAMS["sample_control"]),
           add_inputs(mergeControlVariants),
           r"variants/\1.mutect.snp.vcf")
def runMutect(infiles, outfile):
    '''calls somatic SNPs using MuTect'''
    infile, normal_panel = infiles
    # the matched tumour bam sits next to the control bam; swap the tag
    infile_tumour = infile.replace(
        PARAMS["sample_control"], PARAMS["sample_tumour"])
    basename = P.snip(outfile, ".mutect.snp.vcf")
    call_stats_out = basename + "_call_stats.out"
    mutect_log = basename + ".log"
    (cosmic, dbsnp, quality, max_alt_qual, max_alt,
     max_fraction, tumor_LOD, strand_LOD) = (
        PARAMS["mutect_cosmic"], PARAMS["gatk_dbsnp"],
        PARAMS["mutect_quality"], PARAMS["mutect_max_alt_qual"],
        PARAMS["mutect_max_alt"], PARAMS["mutect_max_fraction"],
        PARAMS["mutect_lod"], PARAMS["mutect_strand_lod"])
    genome = "%s/%s.fa" % (PARAMS["bwa_index_dir"],
                           PARAMS["genome"])
    PipelineExome.mutectSNPCaller(
        infile_tumour, outfile, mutect_log, genome,
        cosmic, dbsnp, call_stats_out,
        PARAMS['mutect_memory'], PARAMS['mutect_threads'],
        quality, max_alt_qual,
        max_alt, max_fraction, tumor_LOD, strand_LOD,
        normal_panel, infile)
@transform(runMutect,
           regex(r"variants/(\S+).mutect.snp.vcf"),
           r"variants/\1_call_stats.load")
def loadMutectExtendedOutput(infile, outfile):
    '''Load mutect extended output into database'''
    # the call-stats table is written next to the vcf by runMutect
    infile = infile.replace(".mutect.snp.vcf", "_call_stats.out")
    indices = "contig,position"
    P.load(infile, outfile, options="--add-index=%(indices)s" % locals())
@transform(splitMergedRealigned,
           regex(r"bam/(\S+)-%s-(\S).realigned.split.bqsr.bam" %
                 PARAMS["sample_control"]),
           r"variants/\1/results/all.somatic.indels.vcf")
def indelCaller(infile, outfile):
    '''Call somatic indels using Strelka'''
    infile_tumour = infile.replace(
        PARAMS["sample_control"], PARAMS["sample_tumour"])
    # strelka writes into variants/<patient>/ — first two path components
    outdir = "/".join(outfile.split("/")[0:2])
    genome = "%s/%s.fa" % (PARAMS["bwa_index_dir"],
                           PARAMS["genome"])
    PipelineExome.strelkaINDELCaller(infile, infile_tumour, outfile,
                                     genome, PARAMS['strelka_config'], outdir,
                                     PARAMS['strelka_memory'],
                                     PARAMS['strelka_threads'])
##########################################################################
##########################################################################
##########################################################################
# repeat mutect in reverse and on subsampled control bam as quality control
##########################################################################
# this analysis should be part of an optional check of mutect parameters
# mutect paramters should be identical to the runMutect function above
@follows(mergeControlVariants)
@transform(splitMergedRealigned,
           regex(r"bam/(\S+)-%s-(\S).realigned.split.bqsr.bam" %
                 PARAMS["sample_control"]),
           add_inputs(mergeControlVariants),
           r"variants/\1.mutect.reverse.snp.vcf")
def runMutectReverse(infiles, outfile):
    '''Use control as tumor and vice versa to estimate the false
    positive rate.

    Mirrors runMutect with the roles of control and tumour swapped.
    '''
    infile, normal_panel = infiles
    infile_tumour = infile.replace(
        PARAMS["sample_control"], PARAMS["sample_tumour"])
    # Bug fix: the original first called
    # P.snip(outfile, "_normal_mutect.vcf") on a file that always ends in
    # ".mutect.reverse.snp.vcf"; snipping a non-matching suffix fails, so
    # the task died before doing any work.  Those three assignments were
    # immediately overwritten anyway and are removed here.
    basename = P.snip(outfile, ".mutect.reverse.snp.vcf")
    call_stats_out = basename + "_call_stats.reverse.out"
    coverage_wig_out = basename + "_coverage.reverse.wig"
    mutect_log = basename + ".reverse.log"
    (cosmic, dbsnp, quality, max_alt_qual, max_alt,
     max_fraction, tumor_LOD) = (
        PARAMS["mutect_cosmic"], PARAMS["gatk_dbsnp"],
        PARAMS["mutect_quality"], PARAMS["mutect_max_alt_qual"],
        PARAMS["mutect_max_alt"], PARAMS["mutect_max_fraction"],
        # "mutect_lod", not "mutect_LOD": configparser lowercases option
        # names, and runMutect reads the lower-case key
        PARAMS["mutect_lod"])
    genome = "%s/%s.fa" % (PARAMS["bwa_index_dir"],
                           PARAMS["genome"])
    # NOTE(review): unlike runMutect no strand_LOD is passed, so
    # normal_panel/infile_tumour shift one positional slot — confirm
    # against PipelineExome.mutectSNPCaller's signature.
    PipelineExome.mutectSNPCaller(infile, outfile, mutect_log, genome,
                                  cosmic, dbsnp, call_stats_out,
                                  PARAMS['mutect_memory'],
                                  PARAMS['mutect_threads'],
                                  quality, max_alt_qual,
                                  max_alt, max_fraction, tumor_LOD,
                                  normal_panel, infile_tumour)
# generalise the functions below
# 1. identify sample with highest coverage in control
# - should this check coverage in tumour also?
# 2. subset control bam
# 3. run mutect calling function with subset against unsubsetted tumour
# 4. summary table
# hard-coded control sample for the downsampling analysis (see TODO above)
adeno_bam = "bam/NU16C-Control-1.realigned.bqsr.bam"
@subdivide(adeno_bam,
           regex("(\S+).bqsr.bam"),
           [r"\1.0.1.bqsr.bam",
            r"\1.0.2.bqsr.bam",
            r"\1.0.3.bqsr.bam",
            r"\1.0.4.bqsr.bam",
            r"\1.0.5.bqsr.bam",
            r"\1.0.6.bqsr.bam",
            r"\1.0.7.bqsr.bam",
            r"\1.0.8.bqsr.bam",
            r"\1.0.9.bqsr.bam",
            r"\1.1.0.bqsr.bam"])
def subsetControlBam(infile, outfiles):
    '''Subsample the control bam at fractions 0.1 to 1.0 of its reads.

    One ``samtools view -s`` job is run per target; each target in
    *outfiles* is paired positionally with its sampling fraction.
    '''
    # zip pairs each output with its fraction; the original kept an unused
    # ``statements`` accumulator and a manual index counter
    for outfile, fraction in zip(outfiles, numpy.arange(0.1, 1.1, 0.1)):
        statement = '''samtools view -s %(fraction)s -b %(infile)s
        > %(outfile)s'''
        P.run()
@transform(subsetControlBam,
           suffix(".bam"),
           ".bam.bai")
def indexSubsets(infile, outfile):
    '''Index each downsampled bam with samtools.'''
    statement = '''samtools index %(infile)s'''
    P.run()
@follows(indexSubsets)
@transform(subsetControlBam,
           regex(r"bam/(\S+)-%s-1.realigned.(\S+).bqsr.bam" %
                 PARAMS["sample_control"]),
           add_inputs(mergeControlVariants),
           r"variants/\1-downsampled-\2.mutect.snp.vcf")
def runMutectOnDownsampled(infiles, outfile):
    '''call somatic SNPs using MuTect on downsampled bams'''
    infile, normal_panel = infiles
    infile_tumour = infile.replace(
        PARAMS["sample_control"], PARAMS["sample_tumour"])
    # Bug fix: outfile ends in ".mutect.snp.vcf" (see the transform above);
    # snipping the old "_normal_mutect.vcf" suffix fails, so derive the
    # basename from the real suffix, consistent with runMutect.
    basename = P.snip(outfile, ".mutect.snp.vcf")
    call_stats_out = basename + "_call_stats.out"
    mutect_log = basename + ".log"
    (cosmic, dbsnp, quality, max_alt_qual, max_alt,
     max_fraction, tumor_LOD) = (
        PARAMS["mutect_cosmic"], PARAMS["gatk_dbsnp"],
        PARAMS["mutect_quality"], PARAMS["mutect_max_alt_qual"],
        PARAMS["mutect_max_alt"], PARAMS["mutect_max_fraction"],
        # lower-case key, matching runMutect (configparser lowercases
        # option names)
        PARAMS["mutect_lod"])
    genome = "%s/%s.fa" % (PARAMS["bwa_index_dir"],
                           PARAMS["genome"])
    PipelineExome.mutectSNPCaller(infile_tumour, outfile, mutect_log, genome,
                                  cosmic, dbsnp, call_stats_out,
                                  PARAMS['mutect_memory'], PARAMS[
                                      'mutect_threads'],
                                  quality, max_alt_qual,
                                  max_alt, max_fraction, tumor_LOD,
                                  normal_panel, infile)
##############################################################################
##############################################################################
##############################################################################
# Variant Annotation and Recalibration
##############################################################################
@collate(splitMergedRealigned,
         regex(r"bam/(\S+)-(\S+)-(\S+).realigned.split.bqsr.bam"),
         r"bam/\1.list")
def listOfBAMs(infiles, outfile):
    '''generates a file containing a list of BAMs for each patient,
    for use in variant calling'''
    with IOTools.openFile(outfile, "w") as outf:
        for infile in infiles:
            infile_tumour = infile.replace(
                PARAMS["sample_control"], PARAMS["sample_tumour"])
            # one path per line: control followed by its matched tumour
            outf.write(infile + '\n')
            outf.write(infile_tumour + '\n')
@transform(runMutect,
           regex(r"variants/(\S+).mutect.snp.vcf"),
           r"variants/\1.mutect.snp.snpeff.vcf")
def annotateVariantsSNPeff(infile, outfile):
    '''Annotate SNP variants using SNPeff'''
    to_cluster = USECLUSTER
    job_memory = "4G"
    job_threads = 2
    snpeff_genome = PARAMS["annotation_snpeff_genome"]
    config = PARAMS["annotation_snpeff_config"]
    # -o gatk: emit GATK-compatible output for VariantAnnotator downstream
    statement = '''java -Xmx4G -jar /ifs/apps/bio/snpEff-3.3-dev/snpEff.jar
                   -c %(config)s -v %(snpeff_genome)s -o gatk
                   %(infile)s > %(outfile)s'''
    P.run()
@transform(indelCaller,
           regex("variants/(\S+)/results/all.somatic.indels.vcf"),
           r"variants/\1.indels.snpeff.vcf")
def annotateVariantsINDELsSNPeff(infile, outfile):
    '''Annotate INDEL variants using SNPeff'''
    to_cluster = USECLUSTER
    job_memory = "4G"
    job_threads = 2
    snpeff_genome = PARAMS["annotation_snpeff_genome"]
    config = PARAMS["annotation_snpeff_config"]
    # -o gatk: emit GATK-compatible output for VariantAnnotator downstream
    statement = '''java -Xmx4G -jar /ifs/apps/bio/snpEff-3.3-dev/snpEff.jar
                   -c %(config)s -v %(snpeff_genome)s -o gatk
                   %(infile)s > %(outfile)s'''
    P.run()
#########################################################################
# Annotate SNP and INDEL variants
#########################################################################
# Need to check whether variant annotator is using both bams
# from a single patient?
# should just be the tumour bam or else scores will be wrong!
@follows(annotateVariantsSNPeff, listOfBAMs)
@transform(runMutect,
           regex(r"variants/(\S+).mutect.snp.vcf"),
           add_inputs(r"bam/\1.list",
                      r"variants/\1.mutect.snp.snpeff.vcf"),
           r"variants/\1.mutect.snp.annotated.vcf")
def variantAnnotator(infiles, outfile):
    '''Annotate variant file using GATK VariantAnnotator'''
    to_cluster = USECLUSTER
    infile, bamlist, effFile = infiles
    dbsnp = PARAMS["gatk_dbsnp"]
    # the SnpEff annotations produced upstream are merged in via -A SnpEff
    statement = '''GenomeAnalysisTK
                   -T VariantAnnotator
                   -R %(bwa_index_dir)s/%(genome)s.fa
                   -I %(bamlist)s
                   -A SnpEff --snpEffFile %(effFile)s
                   -o %(outfile)s
                   --variant %(infile)s
                   -L %(infile)s
                   --dbsnp %(dbsnp)s
                   -A HaplotypeScore
                   -A MappingQualityRankSumTest
                   -A ReadPosRankSumTest
                   -A AlleleBalanceBySample'''
    P.run()
@follows(annotateVariantsINDELsSNPeff, listOfBAMs)
@transform(indelCaller,
           regex("variants/(\S+)/results/all.somatic.indels.vcf"),
           add_inputs(r"bam/\1.list", r"variants/\1.indels.snpeff.vcf"),
           r"variants/\1.indels.annotated.vcf")
def variantAnnotatorIndels(infiles, outfile):
    '''Annotate variant file using GATK VariantAnnotator'''
    to_cluster = USECLUSTER
    infile, bamlist, effFile = infiles
    # unlike the SNP variant annotator, no --dbsnp is supplied here
    statement = '''GenomeAnalysisTK
                   -T VariantAnnotator
                   -R %(bwa_index_dir)s/%(genome)s.fa
                   -I %(bamlist)s
                   -A SnpEff --snpEffFile %(effFile)s
                   -o %(outfile)s
                   --variant %(infile)s
                   -L %(infile)s
                   -A Coverage
                   -A FisherStrand
                   -A HaplotypeScore
                   -A MappingQualityRankSumTest
                   -A ReadPosRankSumTest
                   -A AlleleBalanceBySample
                   -A RMSMappingQuality'''
    P.run()
######################################################################
# this does not work - insufficient number of indels in mills+
# therefore this task is not a dependency of task full
@transform(variantAnnotatorIndels,
           suffix(".annotated.vcf"),
           ".annotated.recalibrated.vcf")
def variantRecalibrator(infile, outfile):
    '''Create variant recalibration file for indels.

    Not part of the default target: per the comments above, the Mills
    resource contains too few indels for this to succeed.
    '''
    to_cluster = USECLUSTER
    job_memory = PARAMS["gatk_memory"]
    job_threads = 6
    track = P.snip(os.path.basename(outfile), ".annotated.recalibrated.vcf")
    mills = PARAMS["gatk_mills"]
    statement = '''GenomeAnalysisTK
                   -T VariantRecalibrator
                   -R %(bwa_index_dir)s/%(genome)s.fa
                   -input %(infile)s
                   -resource:mills,known=true,training=true,truth=true,prior=12.0
                   %(mills)s
                   -an DP -an MQRankSum -an ReadPosRankSum
                   -mode INDEL
                   -tranche 100.0 -tranche 99.9 -tranche 99.0 -tranche 90.0
                   --maxGaussians 4
                   -recalFile %(outfile)s
                   -tranchesFile variants/%(track)s.tranches
                   -rscriptFile variants/%(track)s.plots.R'''
    P.run()
##############################################################################
# Filter SNPs and INDELs
##############################################################################
@transform(variantAnnotatorIndels,
           suffix(".annotated.vcf"),
           ".annotated.filtered.vcf")
def filterIndels(infile, outfile):
    ''' use SnpSift to filter INDELS using VCF fields'''
    # NOTE(review): the IC threshold reuses filter_indel_rc — confirm this
    # is intentional and not a typo for a separate filter_indel_ic option
    statement = '''cat %(infile)s |
                   java -Xmx2g -jar /ifs/apps/bio/snpEff-3.1/SnpSift.jar filter
                   "(QSI_NT>%(filter_indel_nt)s &
                   IHP<%(filter_indel_ihp)s &
                   RC<%(filter_indel_rc)s &
                   IC<%(filter_indel_rc)s) "
                   > %(outfile)s '''
    P.run()
@transform(variantAnnotator,
           regex("variants/(\S+).mutect.snp.annotated.vcf"),
           r"variants/\1.mutect.snp.annotated.filtered.vcf")
def filterMutect(infile, outfile):
    ''' filter mutect snps using allele frequencies '''
    logfile = outfile.replace(".vcf", ".log")
    min_t_alt = PARAMS["filter_minimum_tumor_allele"]
    min_t_alt_freq = PARAMS["filter_minimum_tumor_allele_frequency"]
    min_n_depth = PARAMS["filter_minimum_normal_depth"]
    max_n_alt_freq = PARAMS["filter_maximum_normal_allele_frequency"]
    min_ratio = PARAMS["filter_minimum_ratio"]
    # NOTE(review): min_t_alt_freq is passed after max_n_alt_freq —
    # confirm against PipelineExome.filterMutect's parameter order
    PipelineExome.filterMutect(
        infile, outfile, logfile,
        PARAMS["sample_control"], PARAMS["sample_tumour"],
        min_t_alt, min_n_depth, max_n_alt_freq,
        min_t_alt_freq, min_ratio)
##############################################################################
# Intersect filtered SNPs and INDELs
##############################################################################
@mkdir("intersection.dir")
@collate((filterIndels, filterMutect),
         regex(r"variants/(\S+)\.(\S+).annotated.filtered.vcf"),
         r"intersection.dir/overlap_\2_heatmap.png")
def intersectHeatmap(infiles, outfile):
    '''Plot a heatmap of the overlap between the filtered variant files.

    (Original docstring mentioned "DE test_ids across quantifiers" and
    appears copied from another pipeline.)
    '''
    PipelineExome.intersectionHeatmap(infiles, outfile)
#########################################################################
#########################################################################
# convert vcf to tsv files and load into database
@transform(filterMutect,
           regex("variants/(\S+).annotated.filtered.vcf"),
           r"variants/\1.annotated.filtered.tsv")
def snpvcfToTable(infile, outfile):
    '''Converts vcf to tab-delimited file'''
    to_cluster = USECLUSTER
    # --showFiltered keeps non-PASS records; -F selects INFO/site fields,
    # -GF per-genotype fields
    statement = '''GenomeAnalysisTK
                   -T VariantsToTable -R %(bwa_index_dir)s/%(genome)s.fa
                   -V %(infile)s --showFiltered --allowMissingData
                   -F CHROM -F POS -F ID -F REF -F ALT -F QUAL -F FILTER
                   -F INFO -F BaseQRankSum
                   -F HaplotypeScore -F MQRankSum -F ReadPosRankSum
                   -F SNPEFF_EFFECT -F SNPEFF_IMPACT -F SNPEFF_FUNCTIONAL_CLASS
                   -F SNPEFF_CODON_CHANGE -F SNPEFF_AMINO_ACID_CHANGE
                   -F SNPEFF_GENE_NAME -F SNPEFF_GENE_BIOTYPE
                   -F SNPEFF_TRANSCRIPT_ID -F SNPEFF_EXON_ID
                   -GF GT -GF AD -GF SS -GF FA -GF AB -GF DP
                   -o %(outfile)s'''
    P.run()
@transform(filterIndels,
           regex("variants/(\S+).annotated.filtered.vcf"),
           r"variants/\1.annotated.filtered.tsv")
def indelvcfToTable(infile, outfile):
    '''Convert the filtered indel vcf to a tab-delimited file.

    Site fields (-F) include the Strelka indel INFO keys; per-genotype
    fields (-GF) include the Strelka tier read counts.
    '''
    to_cluster = USECLUSTER
    # Bug fix: "-F TSQI_NT" was a typo for Strelka's TQSI_NT INFO field
    # (data tier used for the QSI_NT score) and always yielded an empty
    # column under --allowMissingData.
    statement = '''GenomeAnalysisTK
                   -T VariantsToTable -R %(bwa_index_dir)s/%(genome)s.fa
                   -V %(infile)s --showFiltered --allowMissingData
                   -F CHROM -F POS -F ID -F REF -F ALT -F QUAL -F FILTER
                   -F INFO -F BaseQRankSum
                   -F HaplotypeScore -F MQRankSum -F ReadPosRankSum
                   -F SNPEFF_EFFECT -F SNPEFF_IMPACT -F SNPEFF_FUNCTIONAL_CLASS
                   -F SNPEFF_CODON_CHANGE -F SNPEFF_AMINO_ACID_CHANGE
                   -F SNPEFF_GENE_NAME -F SNPEFF_GENE_BIOTYPE
                   -F SNPEFF_TRANSCRIPT_ID -F SNPEFF_EXON_ID
                   -F TQSI -F TQSI_NT -F DP -F IC -F IHP -F NT
                   -F QSI -F QSI_NT -F RC -F RU -F SGT
                   -GF DP -GF DP2 -GF DP50 -GF SUBDP50 -GF TAR -GF TIR -GF TOR
                   -o %(outfile)s'''
    P.run()
@transform([snpvcfToTable,
            indelvcfToTable],
           regex(r"variants/(\S+).annotated.filtered.tsv"),
           r"variants/\1_annotated.load")
def loadVariantAnnotation(infile, outfile):
    '''Load VCF annotations into database.

    Both the SNP and the indel tables are indexed on
    CHROM, POS and SNPEFF_GENE_NAME.
    '''
    # Robustness fix: the original if/elif set identical indices in both
    # branches but left ``indices`` undefined (NameError) for any file
    # matching neither suffix; assign unconditionally instead.
    indices = "CHROM,POS,SNPEFF_GENE_NAME"
    P.load(infile, outfile, options="--add-index=%(indices)s" % locals())
#########################################################################
# Genes of interest
# check this will run in the correct position if option selected
# @active_if(PARAMS["annotation_add_genes_of_interest"] == 1)
# @transform((annotateVariantsSNPsift),
# regex(r"variants/(\S+).haplotypeCaller.snpsift.vcf"),
# r"variants/\1.genes.vcf")
# def findGenes(infile, outfile):
# '''Adds expression "GENE_OF_INTEREST" to the FILTER column of the vcf
# if variant is within a gene of interest as defined in the ini
# file'''
#
# geneList = P.asList(PARAMS["annotation_genes_of_interest"])
# expression = '\'||SNPEFF_GENE_NAME==\''.join(geneList)
# statement = '''GenomeAnalysisTK -T VariantFiltration
# -R %%(bwa_index_dir)s/%%(genome)s.fa
# --variant %(infile)s
# --filterExpression "SNPEFF_GENE_NAME=='%(expression)s'"
# --filterName "GENE_OF_INTEREST" -o %(outfile)s''' % locals()
# P.run()
#########################################################################
#########################################################################
#########################################################################
# vcf statistics - this only summarises the nucleotide changes
# this currently does not provide useful output!
@transform((variantAnnotator,
            variantAnnotatorIndels),
           regex(r"variants/(\S+).vcf"),
           r"variants/\1.vcfstats")
def buildVCFstats(infile, outfile):
    '''Calculate statistics on VCF file'''
    to_cluster = USECLUSTER
    statement = '''vcf-stats %(infile)s
                   > %(outfile)s 2>>%(outfile)s.log;'''
    P.run()
@merge(buildVCFstats, "vcf_stats.load")
def loadVCFstats(infiles, outfile):
    '''Import variant statistics into SQLite.

    Loads the per-file vcf-stats outputs and the aggregate
    ``vcfstats.txt`` table.
    '''
    filenames = " ".join(infiles)
    # NOTE(review): tablename is computed but the statement hard-codes
    # --table=vcf_stats — confirm which is intended.
    tablename = P.toTable(outfile)
    csv2db_options = PARAMS["csv2db_options"]
    E.info("Loading vcf stats...")
    statement = '''cgat vcfstats2db
                   %(filenames)s >> %(outfile)s; '''
    statement += '''cat vcfstats.txt |
                    cgat csv2db %(csv2db_options)s
                    --allow-empty-file --add-index=track --table=vcf_stats
                    >> %(outfile)s; '''
    P.run()
#########################################################################
@transform(runMutect,
           suffix(".mutect.snp.vcf"),
           "_mutect_filtering_summary.tsv")
def summariseFiltering(infile, outfile):
    '''Parse the mutect call-stats file into a filtering summary table.'''
    # The call-stats file sits next to the vcf under a fixed suffix.
    infile = infile.replace(".mutect.snp.vcf", "_call_stats.out")
    PipelineExome.parseMutectCallStats(infile, outfile, submit=True)
@transform(summariseFiltering,
           regex(r"variants/(\S+)_mutect_filtering_summary.tsv"),
           r"variants/\1_mutect_filtering_summary.load")
def loadMutectFilteringSummary(infile, outfile):
    '''Load mutect extended output into database'''
    # NOTE(review): dbh appears unused; connect() may be kept only for its
    # side effect of ensuring the database exists — confirm.
    dbh = connect()
    tablename = P.toTable(outfile)
    statement = '''cat %(infile)s |
                   cgat csv2db
                   --table %(tablename)s --retry --ignore-empty
                   > %(outfile)s'''
    P.run()
#########################################################################
#########################################################################
#########################################################################
@originate("eBio_studies.tsv")
def defineEBioStudies(outfile):
    ''' For the cancer types specified in pipeline.ini, identify the
    relevant studies in eBio '''
    cancer_types = PARAMS["annotation_ebio_cancer_types"]
    # submit=False: runs locally rather than through the cluster.
    PipelineExome.defineEBioStudies(cancer_types, outfile, submit=False)
@transform(defineEBioStudies,
           suffix("eBio_studies.tsv"),
           add_inputs(filterIndels, filterMutect),
           "eBio_studies_gene_frequencies.tsv")
def extractEBioinfo(infiles, outfile):
    '''find the number of mutations identified in previous studies (ebio_ids)
    for the mutated genes in the annotated vcfs'''
    # First input is the studies table; the rest are the filtered vcfs.
    eBio_ids = infiles[0]
    vcfs = infiles[1:]
    PipelineExome.extractEBioinfo(eBio_ids, vcfs, outfile, submit=False)
@transform(extractEBioinfo,
           suffix(".tsv"),
           ".load")
def loadEBioInfo(infile, outfile):
    '''load the frequencies from the eBIO portal, indexed by gene'''
    P.load(infile, outfile, options="--add-index=gene")
#########################################################################
#########################################################################
#########################################################################
# load Network of Cancer Genes table
# parameterise file location:
@originate("cancergenes.load")
def loadNCG(outfile):
    '''Load the Network of Cancer Genes table into the database,
    indexed by gene symbol.'''
    # Table location is parameterised through the ini file.
    infile = PARAMS["cancergenes_table"]
    # infile = "/ifs/projects/proj053/backup/NCG/cancergenes2016.tsv"
    P.load(infile, outfile, options="--add-index=symbol")
#########################################################################
#########################################################################
#########################################################################
# analyse mutational signature of filtered variants
@merge(filterMutect,
       ["variants/mutational_signature.tsv",
        "variants/mutational_signature_table.tsv"])
def mutationalSignature(infiles, outfiles):
    '''Compile the mutational signature of the filtered variants into a
    summary and a per-sample table.'''
    PipelineExome.compileMutationalSignature(
        infiles, outfiles)
@transform(mutationalSignature,
           suffix(".tsv"),
           ".load")
def loadMutationalSignature(infiles, outfile):
    '''Load both mutational-signature outputs into the database.'''
    # Second file loads into a sibling "<name>_table.load" target.
    outfile2 = re.sub(".load", "_table.load", outfile)
    P.load(infiles[0], outfile)
    P.load(infiles[1], outfile2)
#########################################################################
#########################################################################
#########################################################################
@follows(loadManualAnnotations,
         loadMutectFilteringSummary,
         loadMutectExtendedOutput,
         loadVariantAnnotation,
         loadCoverageStats,
         loadPicardRealigenedAlignStats,
         loadPicardAlignStats,
         loadNCG,
         loadMutationalSignature,
         loadEBioInfo,
         intersectHeatmap)
def full():
    '''Meta-target: run the complete pipeline.'''
    pass
@follows(defineEBioStudies)
def test():
    '''Meta-target for quick testing: only the eBio study definition.'''
    pass
@follows(runMutectOnDownsampled,
         runMutectReverse)
def TestMutect():
    '''This target runs function which can be used to assess the chosen
    mutect parameters'''
    # Body intentionally empty: the docstring alone is a valid ruffus target.
# @follows(loadROI,
# loadROI2Gene)
# def loadMetadata():
# pass
@follows(mapReads)
def mapping():
    '''Meta-target: read mapping only.'''
    pass
@follows(loadPicardDuplicateStats,
         loadPicardAlignStats,
         buildCoverageStats,
         loadCoverageStats)
def postMappingQC():
    '''Meta-target: post-mapping quality-control statistics.'''
    pass
@follows(GATKpreprocessing,
         runPicardOnRealigned)
def gatk():
    '''Meta-target: GATK preprocessing and realignment stats.'''
    pass
@follows(runMutect,
         indelCaller)
def callVariants():
    '''Meta-target: SNV (mutect) and indel calling.'''
    pass
@follows(loadVariantAnnotation)
def tabulation():
    '''Meta-target: variant annotation tables.'''
    pass
@follows(buildVCFstats,
         loadVCFstats)
def vcfstats():
    '''Meta-target: VCF statistics build + load.'''
    pass
#########################################################################
#########################################################################
#########################################################################
@follows()
def publish():
    '''publish files.'''
    P.publish_report()
@follows(mkdir("report"))
def build_report():
    '''build report from scratch.'''
    E.info("starting documentation build process from scratch")
    P.run_report(clean=True)
@follows(mkdir("report"))
def update_report():
    '''update report.'''
    E.info("updating documentation")
    P.run_report(clean=False)
def main(argv=None):
    '''Command-line entry point.

    Defaults to sys.argv and returns P.main's exit status (previously the
    status was computed and silently discarded).
    '''
    if argv is None:
        argv = sys.argv
    return P.main(argv)
if __name__ == "__main__":
    # NOTE(review): calls P.main directly rather than the main() wrapper
    # defined above — confirm this is intentional.
    sys.exit(P.main(sys.argv))
| 45,398 | 14,550 |
from __future__ import absolute_import
import six
from django.core.urlresolvers import reverse
from sentry.testutils import APITestCase, SnubaTestCase
from sentry.testutils.helpers.datetime import before_now, iso_format
class ProjectEventDetailsTest(APITestCase, SnubaTestCase):
    """Exercises the project event-details endpoint, including next/previous
    event navigation within a group and environment filtering."""

    def setUp(self):
        super(ProjectEventDetailsTest, self).setUp()
        self.login_as(user=self.user)
        project = self.create_project()

        one_min_ago = iso_format(before_now(minutes=1))
        two_min_ago = iso_format(before_now(minutes=2))
        three_min_ago = iso_format(before_now(minutes=3))
        four_min_ago = iso_format(before_now(minutes=4))

        # Three events in group-1, oldest -> newest:
        # prev_event -> cur_event -> next_event.
        self.prev_event = self.store_event(
            data={"event_id": "a" * 32, "timestamp": four_min_ago, "fingerprint": ["group-1"]},
            project_id=project.id,
        )
        self.cur_event = self.store_event(
            data={"event_id": "b" * 32, "timestamp": three_min_ago, "fingerprint": ["group-1"]},
            project_id=project.id,
        )
        self.next_event = self.store_event(
            data={
                "event_id": "c" * 32,
                "timestamp": two_min_ago,
                "fingerprint": ["group-1"],
                "environment": "production",
                "tags": {"environment": "production"},
            },
            project_id=project.id,
        )

        # Event in different group
        self.store_event(
            data={
                "event_id": "d" * 32,
                "timestamp": one_min_ago,
                "fingerprint": ["group-2"],
                "environment": "production",
                "tags": {"environment": "production"},
            },
            project_id=project.id,
        )

    def test_simple(self):
        # The middle event must link to both its neighbours in the group.
        url = reverse(
            "sentry-api-0-project-event-details",
            kwargs={
                "event_id": self.cur_event.event_id,
                "project_slug": self.cur_event.project.slug,
                "organization_slug": self.cur_event.project.organization.slug,
            },
        )
        response = self.client.get(url, format="json")
        assert response.status_code == 200, response.content
        assert response.data["id"] == six.text_type(self.cur_event.event_id)
        assert response.data["nextEventID"] == six.text_type(self.next_event.event_id)
        assert response.data["previousEventID"] == six.text_type(self.prev_event.event_id)
        assert response.data["groupID"] == six.text_type(self.cur_event.group.id)

    def test_snuba_no_prev(self):
        # The oldest event in the group has no previous event.
        url = reverse(
            "sentry-api-0-project-event-details",
            kwargs={
                "event_id": self.prev_event.event_id,
                "project_slug": self.prev_event.project.slug,
                "organization_slug": self.prev_event.project.organization.slug,
            },
        )
        response = self.client.get(url, format="json")
        assert response.status_code == 200, response.content
        assert response.data["id"] == six.text_type(self.prev_event.event_id)
        assert response.data["previousEventID"] is None
        assert response.data["nextEventID"] == self.cur_event.event_id
        assert response.data["groupID"] == six.text_type(self.prev_event.group.id)

    def test_snuba_with_environment(self):
        url = reverse(
            "sentry-api-0-project-event-details",
            kwargs={
                "event_id": self.cur_event.event_id,
                "project_slug": self.cur_event.project.slug,
                "organization_slug": self.cur_event.project.organization.slug,
            },
        )
        # NOTE(review): the first response is immediately overwritten and
        # the enable_snuba variant is never asserted — confirm intended.
        response = self.client.get(
            url, format="json", data={"enable_snuba": "1", "environment": ["production", "staging"]}
        )
        response = self.client.get(
            url, format="json", data={"environment": ["production", "staging"]}
        )
        assert response.status_code == 200, response.content
        assert response.data["id"] == six.text_type(self.cur_event.event_id)
        assert response.data["previousEventID"] is None
        assert response.data["nextEventID"] == self.next_event.event_id
        # prev_event shares group-1 with cur_event, so this id is the same.
        assert response.data["groupID"] == six.text_type(self.prev_event.group.id)

    def test_ignores_different_group(self):
        # The newest group-1 event must not link to the group-2 event
        # stored after it.
        url = reverse(
            "sentry-api-0-project-event-details",
            kwargs={
                "event_id": self.next_event.event_id,
                "project_slug": self.next_event.project.slug,
                "organization_slug": self.next_event.project.organization.slug,
            },
        )
        response = self.client.get(url, format="json")
        assert response.status_code == 200, response.content
        assert response.data["id"] == six.text_type(self.next_event.event_id)
        assert response.data["nextEventID"] is None
class ProjectEventJsonEndpointTest(APITestCase, SnubaTestCase):
    """Tests the raw-JSON event endpoint, including auth and 404 paths."""

    def setUp(self):
        super(ProjectEventJsonEndpointTest, self).setUp()
        self.login_as(user=self.user)
        self.event_id = "c" * 32
        self.fingerprint = ["group_2"]
        self.min_ago = iso_format(before_now(minutes=1))
        self.event = self.store_event(
            data={
                "event_id": self.event_id,
                "timestamp": self.min_ago,
                "fingerprint": self.fingerprint,
                "user": {"email": self.user.email},
            },
            project_id=self.project.id,
        )
        self.url = reverse(
            "sentry-api-0-event-json",
            kwargs={
                "organization_slug": self.organization.slug,
                "project_slug": self.project.slug,
                "event_id": self.event_id,
            },
        )

    def assert_event(self, data):
        # Shared assertions for the stored fixture event.
        assert data["event_id"] == self.event_id
        assert data["user"]["email"] == self.user.email
        # Only the leading 19 chars are compared (drops sub-second parts).
        assert data["datetime"][:19] == self.min_ago
        assert data["fingerprint"] == self.fingerprint

    def test_simple(self):
        response = self.client.get(self.url, format="json")
        assert response.status_code == 200, response.content
        self.assert_event(response.data)

    def test_event_does_not_exist(self):
        self.url = reverse(
            "sentry-api-0-event-json",
            kwargs={
                "organization_slug": self.organization.slug,
                "project_slug": self.project.slug,
                "event_id": "no" * 16,
            },
        )
        response = self.client.get(self.url, format="json")
        assert response.status_code == 404, response.content
        assert response.data == {"detail": "Event not found"}

    def test_user_unauthorized(self):
        # A freshly created user outside the organization must be denied.
        user = self.create_user()
        self.login_as(user)

        response = self.client.get(self.url, format="json")
        assert response.status_code == 403, response.content
        assert response.data == {"detail": "You do not have permission to perform this action."}

    def test_project_not_associated_with_event(self):
        # Same org but wrong project slug: the event must not resolve.
        project2 = self.create_project(organization=self.organization)
        url = reverse(
            "sentry-api-0-event-json",
            kwargs={
                "organization_slug": self.organization.slug,
                "project_slug": project2.slug,
                "event_id": self.event_id,
            },
        )
        response = self.client.get(url, format="json")
        assert response.status_code == 404, response.content
        assert response.data == {"detail": "Event not found"}
| 7,581 | 2,248 |
from je_auto_control.windows.screen.win32_screen import size
| 61 | 20 |
import reprlib
from django.db import models
class Message(models.Model):
    """A guestbook message left by a visitor.

    (Field verbose names are kept in Chinese for the admin UI.)
    """
    name = models.CharField('用户名', max_length=20)
    email = models.EmailField('邮箱', max_length=200)
    message = models.TextField('留言')
    # Visibility flag: inactive messages are retained but hidden.
    active = models.BooleanField('有效', default=True)
    posted = models.DateTimeField('发布时间', auto_now_add=True)

    def __str__(self):
        # reprlib.repr truncates long message bodies for display.
        return f'{self.name}{reprlib.repr(self.message)}'
| 435 | 161 |
from fractions import Fraction

# Convert a decimal whose final `digits` characters repeat forever into an
# exact fraction, printed as "numerator/denominator".
raw, digits = input().split(' ')
digits = int(digits)
# Count of non-repeating digits after the decimal point.
fixed = len(raw) - raw.index('.') - digits - 1
scale = 10 ** fixed
# Non-repeating part as an exact fraction.
non_repeating = Fraction(int(raw[:-digits].replace('.', '')), scale)
# Repeating tail: d repeating digits contribute value/(10^d - 1), shifted
# past the fixed digits.
repeating = Fraction(int(raw[-digits:]), (10 ** digits - 1) * scale)
total = non_repeating + repeating
print('%d/%d' % (total.numerator, total.denominator))
| 293 | 131 |
import argparse
import asyncio
import logging
import struct
from typing import Dict, Optional
from dnslib.dns import DNSRecord
from aioquic.asyncio import QuicConnectionProtocol, serve
from aioquic.quic.configuration import QuicConfiguration
from aioquic.quic.events import QuicEvent, StreamDataReceived
from aioquic.quic.logger import QuicFileLogger
from aioquic.tls import SessionTicket
class DnsServerProtocol(QuicConnectionProtocol):
    """QUIC protocol handler that proxies DoQ queries to a classic resolver."""

    def quic_event_received(self, event: QuicEvent):
        if isinstance(event, StreamDataReceived):
            # parse query: the payload is length-prefixed (2-byte big-endian)
            length = struct.unpack("!H", bytes(event.data[:2]))[0]
            query = DNSRecord.parse(event.data[2 : 2 + length])

            # perform lookup and serialize answer
            # NOTE(review): relies on the module-level `args` parsed in the
            # __main__ block for the upstream resolver address.
            data = query.send(args.resolver, 53)
            data = struct.pack("!H", len(data)) + data

            # send answer on the same stream and close it
            self._quic.send_stream_data(event.stream_id, data, end_stream=True)
class SessionTicketStore:
    """
    Minimal in-memory store of TLS session tickets, keyed by ticket label.
    """

    def __init__(self) -> None:
        # label -> SessionTicket
        self.tickets: "Dict[bytes, SessionTicket]" = {}

    def add(self, ticket: "SessionTicket") -> None:
        """Remember *ticket* under its own label."""
        self.tickets[ticket.ticket] = ticket

    def pop(self, label: bytes) -> "Optional[SessionTicket]":
        """Return and forget the ticket stored under *label*, or None."""
        return self.tickets.pop(label, None)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="DNS over QUIC server")
    parser.add_argument(
        "--host",
        type=str,
        default="::",
        help="listen on the specified address (defaults to ::)",
    )
    parser.add_argument(
        "--port",
        type=int,
        default=4784,
        help="listen on the specified port (defaults to 4784)",
    )
    parser.add_argument(
        "-k",
        "--private-key",
        type=str,
        help="load the TLS private key from the specified file",
    )
    parser.add_argument(
        "-c",
        "--certificate",
        type=str,
        required=True,
        help="load the TLS certificate from the specified file",
    )
    parser.add_argument(
        "--resolver",
        type=str,
        default="8.8.8.8",
        help="Upstream Classic DNS resolver to use",
    )
    parser.add_argument(
        "--retry",
        action="store_true",
        help="send a retry for new connections",
    )
    parser.add_argument(
        "-q",
        "--quic-log",
        type=str,
        help="log QUIC events to QLOG files in the specified directory",
    )
    parser.add_argument(
        "-v", "--verbose", action="store_true", help="increase logging verbosity"
    )
    args = parser.parse_args()

    logging.basicConfig(
        format="%(asctime)s %(levelname)s %(name)s %(message)s",
        level=logging.DEBUG if args.verbose else logging.INFO,
    )

    # Optional QLOG tracing directory.
    if args.quic_log:
        quic_logger = QuicFileLogger(args.quic_log)
    else:
        quic_logger = None

    # "doq-i03" is the ALPN token for DoQ draft-03.
    configuration = QuicConfiguration(
        alpn_protocols=["doq-i03"],
        is_client=False,
        quic_logger=quic_logger,
    )
    configuration.load_cert_chain(args.certificate, args.private_key)

    ticket_store = SessionTicketStore()
    loop = asyncio.get_event_loop()
    # Start the QUIC server; tickets enable TLS session resumption.
    loop.run_until_complete(
        serve(
            args.host,
            args.port,
            configuration=configuration,
            create_protocol=DnsServerProtocol,
            session_ticket_fetcher=ticket_store.pop,
            session_ticket_handler=ticket_store.add,
            retry=args.retry,
        )
    )
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
# Copyright (c) 2018, Djaodjin Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import unicode_literals
import logging, re, subprocess, io, warnings
from bs4 import BeautifulSoup
from django.conf import settings as django_settings
from django.core.exceptions import ImproperlyConfigured
from django.template import TemplateDoesNotExist
from django.template.exceptions import TemplateSyntaxError
from django.template.response import TemplateResponse
from django.utils.module_loading import import_string
from django.utils import six
from django.utils.functional import cached_property
import weasyprint
from .. import settings
from ..compat import BaseEngine, _dirs_undefined, RemovedInDjango110Warning
from ..helpers import build_absolute_uri
LOGGER = logging.getLogger(__name__)
class PdfTemplateResponse(TemplateResponse):
    """
    Response as PDF content.
    """

    #pylint:disable=too-many-arguments
    def __init__(self, request, template, context=None, content_type=None,
                 status=None, **kwargs):
        # Django 1.9 added (charset=None, using=None) to the prototype.
        # Django 1.10 removed (current_app=None) from the prototype.
        # We do not declare them explicitly but through **kwargs instead
        # so that our prototype is compatible from Django 1.7
        # through to Django 1.10.
        # NOTE(review): the content_type argument is accepted but always
        # overridden with 'application/pdf'.
        super(PdfTemplateResponse, self).__init__(request, template,
            context=context, content_type='application/pdf', status=status,
            **kwargs)

    @property
    def rendered_content(self):
        """
        Converts the HTML content generated from the template
        as a Pdf document on the fly.
        """
        html_content = super(PdfTemplateResponse, self).rendered_content
        # Rewrite site-relative links to absolute URLs so they stay valid
        # inside the generated PDF.
        soup = BeautifulSoup(html_content.encode('utf-8'), 'html.parser')
        for lnk in soup.find_all('a'):
            href = lnk.get('href')
            if href and href.startswith('/'):
                lnk['href'] = build_absolute_uri(self._request, href)
        html_content = soup.prettify()
        cstr = io.BytesIO()
        # NOTE(review): the try/except below re-raises unchanged, so it is
        # effectively a no-op — possibly a leftover debugging hook.
        try:
            doc = weasyprint.HTML(string=html_content)
            doc.write_pdf(cstr)
        except RuntimeError as _:
            raise
        return cstr.getvalue()
class PdfTemplateError(Exception):
    """Raised when filling a PDF form template fails."""
class PdfEngine(BaseEngine):
    """Django template engine that resolves ``.pdf`` form templates."""
    #pylint: disable=no-member

    app_dirname = 'pdf'

    def __init__(self, params):
        params = params.copy()
        options = params.pop('OPTIONS').copy()
        super(PdfEngine, self).__init__(params)
        self.file_charset = options.get(
            'file_charset', django_settings.FILE_CHARSET)
        self.loaders = options.get('loaders', [])

    # This is an ugly way to add the search paths for .pdf template files.
    @cached_property
    def template_loaders(self):
        # Lazily instantiate the configured loaders once per engine.
        return self.get_template_loaders(self.loaders)

    def get_template_loaders(self, template_loaders):
        """Instantiate loader classes from their dotted-path config entries."""
        loaders = []
        for loader in template_loaders:
            # A (path, *args) tuple/list carries constructor arguments.
            if isinstance(loader, (tuple, list)):
                args = list(loader[1:])
                loader = loader[0]
            else:
                args = []
            if isinstance(loader, six.string_types):
                loader_class = import_string(loader)
                if getattr(loader_class, '_accepts_engine_in_init', False):
                    args.insert(0, self)
                loader = loader_class(self, *args)
                if loader is not None:
                    loaders.append(loader)
            else:
                raise ImproperlyConfigured(
                    "Invalid value in template loaders configuration: %r" % loader)
        return loaders

    def find_template(self, template_name, dirs=None, skip=None):
        """Locate *template_name* through the loaders; raise
        TemplateDoesNotExist (with the tried origins) when not found."""
        tried = []
        # if dirs is None:
        #    dirs = self.dirs
        # for search_dir in dirs:
        for loader in self.template_loaders:
            if hasattr(loader, 'get_contents'):
                # From Django 1.9, this is the code that should be executed.
                for origin in loader.get_template_sources(
                        template_name, template_dirs=dirs):
                    if skip is not None and origin in skip:
                        tried.append((origin, 'Skipped'))
                        continue
                    try:
                        contents = loader.get_contents(origin)
                    except TemplateDoesNotExist:
                        tried.append((origin, 'Source does not exist'))
                        continue
                    else:
                        template = Template(
                            contents, origin, origin.template_name)
                        return template, template.origin
            else:
                # This code is there to support Django 1.8 only.
                try:
                    source, template_path = loader.load_template_source(
                        template_name, template_dirs=dirs)
                    origin = self.make_origin(
                        template_path, loader.load_template_source,
                        template_name, dirs)
                    template = Template(source, origin, template_path)
                    return template, template.origin
                except TemplateDoesNotExist:
                    pass
        raise TemplateDoesNotExist(template_name, tried=tried)

    def from_string(self, template_code):
        # PDF templates are file-based forms; in-memory strings make no sense.
        raise TemplateSyntaxError(
            "The from_string() method is not implemented")

    def get_template(self, template_name, dirs=_dirs_undefined):
        """Return a compiled Template for ``*.pdf`` names only; any other
        extension raises TemplateDoesNotExist so other engines get a turn."""
        #pylint:disable=arguments-differ
        if template_name and template_name.endswith('.pdf'):
            if dirs is _dirs_undefined:
                dirs = None
            else:
                warnings.warn(
                    "The dirs argument of get_template is deprecated.",
                    RemovedInDjango110Warning, stacklevel=2)
            template, origin = self.find_template(template_name, dirs)
            if not hasattr(template, 'render'):
                # template needs to be compiled
                template = Template(template, origin, template_name)
            return template
        raise TemplateDoesNotExist(template_name)
class Template(object):
    """
    Fills a PDF form template via the podofo-flatform command-line tool.
    """

    def __init__(self, template_string, origin=None, name=None):
        #pylint:disable=unused-argument
        self.name = name
        self.origin = origin

    def render(self, context=None, request=None):
        """Fill this template's PDF form with *context* values.

        Returns the filled PDF bytes; raises PdfTemplateError when the
        fill command reports an error.
        """
        #pylint:disable=unused-argument
        if self.origin:
            template_path = self.origin.name
        else:
            template_path = self.name
        output, err = self.fill_form(context, template_path)
        if err:
            raise PdfTemplateError(err)
        return output

    @staticmethod
    def fill_form(fields, src, pdf_flatform_bin=None):
        """Run podofo-flatform to fill *fields* into the form at *src*.

        Returns a (pdf_bytes, error) tuple; error is always None here
        because ``check_output`` raises on a non-zero exit status.
        """
        if pdf_flatform_bin is None:
            assert hasattr(settings, 'PDF_FLATFORM_BIN'), "PDF generation"\
                " requires podofo-flatform (https://github.com/djaodjin/podofo-flatform)."\
                " Edit your PDF_FLATFORM_BIN settings accordingly."
            pdf_flatform_bin = settings.PDF_FLATFORM_BIN

        cmd = [pdf_flatform_bin]
        for key, value in six.iteritems(fields):
            if not isinstance(value, six.string_types):
                value = str(value)
            # We substitute non-standard whitespaces here because
            # they interact poorly with the Python utf-8 encoder.
            value = re.sub(r"\s", ' ', value)
            if len(value) > 0:
                # We don't want to end-up with ``--fill key=``
                cmd += ["--fill", '%s=%s' % (key, value)]
        cmd += [src, '-']

        # Build a copy/pastable command line, quoting values that contain
        # shell-sensitive characters, for logging purposes.
        cmdline = cmd[0]
        for param in cmd[1:]:
            try:
                key, value = param.split('=')
                if any(char in value for char in [' ', ';']):
                    value = '"%s"' % value
                cmdline += " %s=%s" % (key, value)
            except ValueError:
                cmdline += " " + param
        # Bug fix: log the quoted cmdline built above; it was previously
        # constructed and then discarded in favour of ' '.join(cmd).
        LOGGER.info("RUN: %s", cmdline)
        return subprocess.check_output(cmd), None
| 9,483 | 2,660 |
# -*- coding: utf-8 -*-
# @Author: Administrator
# @Date: 2018-05-17 11:09:22
# @Last Modified by: Administrator
# @Last Modified time: 2018-05-17 11:23:24
class Dict(dict):
    '''
    Simple dict but also support access as x.y style.

    >>> d1 = Dict()
    >>> d1['x'] = 100
    >>> d1.x
    100
    >>> d1.y = 200
    >>> d1['y']
    200
    >>> d2 = Dict(a=1, b=2, c='3')
    >>> d2.c
    '3'
    >>> d2['empty']
    Traceback (most recent call last):
        ...
    KeyError: 'empty'
    >>> d2.empty
    Traceback (most recent call last):
        ...
    AttributeError: 'Dict' object has no attribute 'empty'
    '''

    def __init__(self, **kw):
        super().__init__(**kw)

    def __getattr__(self, key):
        # Attribute access falls through to item lookup; a missing key is
        # reported as the conventional AttributeError.
        if key in self:
            return self[key]
        raise AttributeError(r"'Dict' object has no attribute '%s'" % key)

    def __setattr__(self, key, value):
        # Attribute assignment is stored as a dict item.
        self[key] = value
def fact(n):
    '''
    Calculate 1*2*3...(n-1)*n

    >>> fact(1)
    1
    >>> fact(10)
    3628800
    >>> fact(-1)
    Traceback (most recent call last):
      File "D:\\programTools\\python\\lib\\doctest.py", line 1330, in __run
        compileflags, 1), test.globs)
      File "<doctest __main__.fact[2]>", line 1, in <module>
        fact(-1)
      File "E:\\localRepositories\\ML\\python\\practices\\docset.py", line 53, in fact
        raise ValueError()
    ValueError
    '''
    if n < 1:
        raise ValueError()
    # Iterative product avoids deep recursion for large n.
    result = 1
    for factor in range(2, n + 1):
        result *= factor
    return result
if __name__=='__main__':
    # Run the doctests embedded in the docstrings above.
    import doctest
    doctest.testmod()
import cv2

# Stream frames from the default camera until 'q' is pressed.
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    # Bug fix: the read flag was ignored, so a failed capture passed None
    # to imshow and crashed; stop cleanly instead.
    if not ret:
        break
    cv2.imshow('frame', frame)
    # waitKey returns -1 or a key code; mask to a byte before comparing.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
def gen(a,j):
    # Recursively fill a[j] and print the array once it is full (Python 2).
    if j>=len(a):
        print a
        return
    i=j
    # NOTE(review): `i` is never incremented inside this loop and `j-=1`
    # only mutates the local copy, so the loop never advances — likely an
    # `i+=1` is missing at the end of the body. Confirm against the
    # original exercise before changing.
    while i<9:
        a[j]=i+1
        gen(a,j+1)
        j-=1
def main():
    # Two placeholder entries; gen() overwrites them with digits.
    a=['0','0']
    #print len(a)
    gen(a,0)

if __name__=='__main__':
    main();
| 231 | 106 |
import ruamel.yaml as yaml
import numpy as np
import matplotlib.pyplot as plt
import MatplotlibSettings
from scipy.interpolate import make_interp_spline, BSpline

# Load data
# NOTE(review): yaml and the scipy spline imports appear unused here;
# MatplotlibSettings is presumably imported for its rcParams side effects.
data = np.loadtxt("FOvsAsy2.dat")

# Two stacked panels sharing the x axis: curves on top, ratio below.
f, (ax1, ax2) = plt.subplots(2, 1, sharex = "all", gridspec_kw = dict(width_ratios = [1], height_ratios = [4, 1]))
plt.subplots_adjust(wspace = 0, hspace = 0)

ax1.set_title(r"\textbf{SIDIS at $\mathcal{O}(\alpha_s)$, $\sqrt{s}=10.5$ GeV}")
ax1.text(0.0002, 0.2, r"\textbf{$Q^2 = 2$ GeV$^2$}", fontsize = 16)
ax1.text(0.0002, 0.1, r"\textbf{$x = 0.1$}", fontsize = 16)
ax1.text(0.0002, 0.05, r"\textbf{$z = 0.2$}", fontsize = 16)
ax1.set(ylabel = r"$\displaystyle\left|\frac{d\sigma}{dy dz dQ dq_T}\right|$")
ax1.set_xscale("log")
ax1.set_yscale("log")
ax1.set_xlim([0.0001, 1])
ax1.set_ylim([0.0001, 10])
# Columns: 0 = qT, 1 = fixed order, 2 = asymptotic.
ax1.plot(data[:, 0], np.absolute(data[:, 1]), color = "red", label = r"\textbf{Fixed order}")
ax1.plot(data[:, 0], np.absolute(data[:, 2]), color = "blue", label = r"\textbf{Asymptotic}")
ax1.plot(data[:, 0], np.absolute(data[:, 1] - data[:, 2]), color = "orange", label = r"\textbf{Difference}")
ax1.legend(fontsize = 20)

ax2.set_xlabel(r"\textbf{$q_T$ [GeV]}")
ax2.set_ylabel(r"\textbf{Ratio}", fontsize = 16)
ax2.set_ylim([0.55, 1.45])
ax2.plot(data[:, 0], np.absolute(data[:, 1] / data[:, 2]), color = "green")
# Reference line at 1 (column divided by itself).
ax2.plot(data[:, 0], np.absolute(data[:, 1] / data[:, 1]), color = "black", ls = "--", lw = 1.5)
ax2.set_xlim([0.0001, 1])

plt.savefig("FOvsAsy2.pdf")
plt.close()
| 1,498 | 724 |
# -*- coding: utf-8 -*-
"""
@author: Adam Reinhold Von Fisher - https://www.linkedin.com/in/adamrvfisher/
"""
#This is a strategy tester with a brute force optimizer.
#Pandas_datareader is deprecated, use YahooGrabber

#Import modules
import numpy as np
from pandas_datareader import data
import random as rand
import pandas as pd
import time as t

#Number of iterations
iterations = range(0, 40000)
#Empty data structures
empty = []
asone = pd.DataFrame()
#Start timer
start = t.time()
#Request data
s = data.DataReader('^GSPC', 'yahoo', start='1/1/1900', end='01/01/2050')
#Calculate log returns
s['LogRet'] = np.log(s['Adj Close']/s['Adj Close'].shift(1))
s['LogRet'] = s['LogRet'].fillna(0)
#For number of iterations
for i in iterations:
    #Generate random SMA window lengths (short, long)
    a = rand.randint(1, 60)
    b = rand.randint(2, 504)
    #Constraint: short window must not exceed long window
    if a > b:
        continue
    #Generate random long-side entry/exit thresholds
    c = (rand.random())/10
    e = (rand.random())/4
    #Constraint: entry threshold below exit threshold
    if c > e:
        continue
    #Generate random short-side entry/exit thresholds
    d = (rand.random())/10
    f = (rand.random())/4
    #Constraint
    if d > f:
        continue
    #Calculate SMAs
    s['a'] = s['Adj Close'].rolling(window=a, center=False).mean()
    s['b'] = s['Adj Close'].rolling(window=b, center=False).mean()
    #SMA spread
    s['a-b'] = s['a'] - s['b']
    #SMA spread in %
    s['Trend'] = s['a-b']/s['Adj Close']
    s['Trend'] = s['Trend'].fillna(0)
    #Directional methodology
    s['Touch'] = np.where(s['Trend'] > c, 1, 0)
    s['Touch'] = np.where(s['Trend'] < -d, -1, s['Touch'])
    s['Sustain'] = np.where(s['Touch'].shift(1) == 1, 1, 0)
    s['Sustain'] = np.where(s['Sustain'].shift(1) == 1, 1,
                            s['Sustain'])
    # NOTE(review): the next line resets the long-side Sustain values
    # computed above (third argument is 0, not s['Sustain']) — confirm
    # whether this overwrite is intended.
    s['Sustain'] = np.where(s['Touch'].shift(1) == -1, -1, 0)
    s['Sustain'] = np.where(s['Sustain'].shift(1) == -1, -1,
                            s['Sustain'])
    s['Sustain'] = np.where(s['Trend'] > e, 0, s['Sustain'])
    s['Sustain'] = np.where(s['Trend'] < -f, 0, s['Sustain'])
    s['Regime'] = s['Touch'] + s['Sustain']
    #Apply position to returns (lagged one bar to avoid lookahead)
    s['Strategy'] = (s['Regime']).shift(1)*s['LogRet']
    s['Strategy'] = s['Strategy'].fillna(0)
    #Ones
    endgains = 1
    endreturns = 1
    #Compound returns
    for m in s['LogRet']:
        slate = endreturns * (1+m)
        endreturns = slate
    for n in s['Strategy']:
        otherslate = endgains * (1+n)
        endgains = otherslate
    #Constraint: strategy must beat buy-and-hold by 20%
    if endreturns * 1.2 > endgains:
        continue
    #Save params and metrics to list
    empty.append(a)
    empty.append(b)
    empty.append(c)
    empty.append(d)
    empty.append(e)
    empty.append(f)
    empty.append(endreturns)
    empty.append(endgains)
    #List to series (bug fix: the assignment operator was missing here,
    #which was a SyntaxError)
    emptyseries = pd.Series(empty)
    #Series to dataframe
    asone[i] = emptyseries.values
    #Clear list
    empty[:] = []
#End timer
end = t.time()
#Metric of choice (row 7 = compounded strategy gains)
z = asone.iloc[7]
#Threshold
w = np.percentile(z, 99)
v = []  #this variable stores the Nth percentile of top params
u = pd.DataFrame()  #this variable stores your params
#For all metrics
for h in z:
    #If greater than threshold
    if h > w:
        #Add to list
        v.append(h)
#For top metrics
for j in v:
    #Get column ID of metric
    r = asone.columns[(asone == j).iloc[7]]
    #Add param set to dataframe
    u = pd.concat([u, asone[r]], axis=1)
#Top metric
y = max(z)
#Column ID of top param set
x = asone.columns[(asone == y).iloc[7]]
#Top param set
print(asone[x])
#Timer stats
print(end-start)
| 3,578 | 1,352 |
'''
Script to parse out the raw text of articles from the NYT Articles Corpus
This script will look for a directory named raw and find any .ta.xml
files inside, parse out the "text" field in the file, strip all newlines and
carriage returns from the file and then write the text out, one article per line
to two files in an 80/20 split named "nyt-articles-test.txt" and
"nyt-articles-train.txt"
'''
import os, json, random
import xml.etree.ElementTree as xml
corpus_location = './raw'
pretraining_output_file_path = './processed/nyt-articles-train.txt'
dev_output_file_path = './processed/nyt-articles-dev.txt'
sampling_output_file_path = './processed/nyt-articles-test.txt'
def clean(text):
    """Flatten *text* to a single line: newlines become spaces, carriage
    returns are dropped, and one trailing newline is appended."""
    flattened = text.replace('\n', ' ')
    return flattened.replace('\r', '') + '\n'
def get_outfile(filename):
    """Randomly route an article to the train/dev/test output file.

    Draws once per call: 90% train, 5% dev, 5% test.
    NOTE(review): the module docstring describes an 80/20 split and the
    *filename* argument is unused — confirm which is current.
    """
    draw = random.random()
    if draw < 0.90:
        return pretraining_output_file_path
    if draw < 0.95:
        return dev_output_file_path
    return sampling_output_file_path
def makedirs(filename):
    ''' Ensure the parent directory of *filename* exists; return *filename*.

    https://stackoverflow.com/a/12517490
    '''
    # Bug fix: errno was referenced below without being imported, so the
    # race-condition branch raised NameError instead of re-raising OSError.
    import errno
    if not os.path.exists(os.path.dirname(filename)):
        try:
            os.makedirs(os.path.dirname(filename))
        except OSError as exc:  # Guard against race condition
            if exc.errno != errno.EEXIST:
                raise
    return filename
if __name__ == '__main__':
    if os.path.exists(corpus_location) and os.path.isdir(corpus_location):
        total = len(os.listdir(corpus_location))
        for index, filename in enumerate(os.listdir(corpus_location)):
            if filename.endswith('.ta.xml'):
                path = os.path.join(corpus_location, filename)
                # Each article is routed randomly to train/dev/test.
                outfile = get_outfile(path)
                with open(path, 'r+') as f:
                    with open(makedirs(outfile), 'a+') as out_f:
                        # NOTE(review): despite the .ta.xml extension the
                        # files are parsed as JSON (and the xml import is
                        # never used) — confirm the corpus format.
                        data = json.load(f)
                        out_f.write(clean(data['text']))
                print('Read in file {0}/{1}: {2}'.format(index, total, path))
| 2,006 | 626 |
'''
@Descripttion:
@Author: BerryBC
@Date: 2020-02-24 23:40:18
@LastEditors: BerryBC
@LastEditTime: 2020-04-29 22:28:49
'''
import json
import Lib.LLearn as LLearn
from channels.generic.websocket import WebsocketConsumer
class wsCreatSklearnModel(WebsocketConsumer):
    """Websocket consumer that drives a sklearn learning run for a client.

    Status codes sent to the client: 1 = connected, 3 = run finished
    (intermediate codes come from LLearn via the callback).
    """

    def funFB2C(self, strMsg, intCode):
        # "Feed Back To Client": send a {msg, code} JSON payload.
        self.send(text_data=json.dumps({
            'msg': strMsg, 'code': intCode
        }))

    def connect(self):
        self.accept()
        self.funFB2C('OK', 1)
        print(' Client Start Sklearn Learn Websocket.')

    def disconnect(self, close_code):
        print(' Learn Websocket disconnected')

    def receive(self, text_data):
        objRevData = json.loads(text_data)
        intCode = objRevData['doCode']
        # doCode 0 starts a learning run; progress is streamed through
        # funFB2C and code 3 signals completion.
        if intCode == 0:
            LLearn.funGoLearn(self.funFB2C)
            self.funFB2C('Done', 3)
| 853 | 322 |
import unittest
from main.core.process_pic import Image
class TestImage(unittest.TestCase):
    """Checks Image dimension accessors against two known hosted images.

    NOTE(review): fetches remote Cloudinary URLs, so the test needs network
    access and depends on the hosted images staying unchanged.
    """

    def test_read(self):
        uris = [
            "https://res.cloudinary.com/dwf6x1ohn/image/upload/v1534347950/bgnppredgmslafb5pkpw.jpg",
            "https://res.cloudinary.com/dwf6x1ohn/image/upload/v1534347979/wptzfdqidfnlyhgt3kti.jpg"
        ]
        # Expected (width, height) for each URI above, in order.
        sizes = [
            (540, 547),
            (259, 194)
        ]
        for (uri, size) in zip(uris, sizes):
            image = Image(uri)
            self.assertEqual(size[0], image.width())
            self.assertEqual(size[1], image.height())
            # size() is defined as width * height.
            self.assertEqual(size[0] * size[1], image.size())
if __name__ == "__main__":
    # Verbosity 2 lists each test case as it runs.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestImage)
    unittest.TextTestRunner(verbosity=2).run(suite)
| 821 | 309 |
# coding: utf-8
from pprint import pformat
from ..utils import to_dict
class Error(object):
    """Data holder for an API error payload (error, description, parameter,
    disabled_by), with dict/str conversion helpers."""

    def __init__(self, error=None, description=None, parameter=None, disabled_by=None):
        self._error = error
        self._description = description
        self._parameter = parameter
        self._disabled_by = disabled_by

    @property
    def error(self):
        return self._error

    @error.setter
    def error(self, error):
        self._error = error

    @property
    def description(self):
        return self._description

    @description.setter
    def description(self, description):
        self._description = description

    @property
    def parameter(self):
        return self._parameter

    @parameter.setter
    def parameter(self, parameter):
        self._parameter = parameter

    @property
    def disabled_by(self):
        return self._disabled_by

    @disabled_by.setter
    def disabled_by(self, disabled_by):
        self._disabled_by = disabled_by

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        return to_dict(self.__dict__)

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Fix: the original accessed other.__dict__ unconditionally, which
        # raised AttributeError for objects without __dict__ (e.g. ints) and
        # could false-positive for unrelated types with equal attributes.
        if not isinstance(other, Error):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 1,670 | 463 |
# SPDX-License-Identifier: Apache-2.0
"""
tfl_nn
"""
from tf2onnx.handler import tfl_op
from tf2onnx.tflite_handlers.tfl_math import separate_fused_activation_function
# pylint: disable=unused-argument,missing-docstring,unused-variable,pointless-string-statement,invalid-name
@tfl_op(["TFL_TRANSPOSE_CONV"], tf_op="Conv2DBackpropInput")
class TflTransposeConv:
    @classmethod
    def to_tf(cls, ctx, node, **kwargs):
        """Rewrite a TFLite TRANSPOSE_CONV node as TF Conv2DBackpropInput."""
        # No need to change 'padding' attribute
        # TFLite stores the strides as two scalar attrs; TF expects a single
        # 4-element 'strides' list in NHWC order.
        stride_h = node.get_attr_int("stride_h")
        stride_w = node.get_attr_int("stride_w")
        node.set_attr("strides", [1, stride_h, stride_w, 1])
        del node.attr["stride_h"]
        del node.attr["stride_w"]
        # Reorder the kernel tensor (input[1]) into TF's expected layout.
        # NOTE(review): perm=[1, 2, 0, 3] differs from the [1, 2, 3, 0] used
        # by the other conv handlers below — presumably intentional for the
        # transpose-conv kernel layout; confirm against the TFLite schema.
        transpose_node = ctx.insert_new_node_on_input(node, "Transpose", node.input[1], name=None, perm=[1, 2, 0, 3])
        transpose_node.skip_conversion = True
        node.set_attr("data_format", "NHWC")
@tfl_op(["TFL_CONV_2D"], tf_op="Conv2D")
class TflConv2D:
    @classmethod
    def to_tf(cls, ctx, node, **kwargs):
        """Rewrite a TFLite CONV_2D node as a TF Conv2D node."""
        # Split off any fused RELU/RELU6/etc. into its own node first.
        separate_fused_activation_function(ctx, node)
        # No need to change 'padding' attribute
        # TFLite stores strides/dilations as scalar attrs; TF expects
        # 4-element NHWC-ordered lists.
        stride_h = node.get_attr_int("stride_h")
        stride_w = node.get_attr_int("stride_w")
        dilation_w_factor = node.get_attr_int("dilation_w_factor")
        dilation_h_factor = node.get_attr_int("dilation_h_factor")
        node.set_attr("strides", [1, stride_h, stride_w, 1])
        node.set_attr("dilations", [1, dilation_h_factor, dilation_w_factor, 1])
        del node.attr["stride_h"]
        del node.attr["stride_w"]
        del node.attr["dilation_h_factor"]
        del node.attr["dilation_w_factor"]
        # Move the kernel (input[1]) into TF's filter layout; the inserted
        # Transpose is marked so later passes leave it untouched.
        transpose_node = ctx.insert_new_node_on_input(node, "Transpose", node.input[1], name=None, perm=[1, 2, 3, 0])
        transpose_node.skip_conversion = True
        node.set_attr("data_format", "NHWC")
@tfl_op(["TFL_AVERAGE_POOL_2D"], tf_op="AvgPool")
@tfl_op(["TFL_MAX_POOL_2D"], tf_op="MaxPool")
class TflAveragePool:
    @classmethod
    def to_tf(cls, ctx, node, **kwargs):
        """Rewrite TFLite average/max pooling as TF AvgPool/MaxPool."""
        separate_fused_activation_function(ctx, node)
        # No need to change 'padding' attribute
        # Scalar stride/filter attrs become 4-element NHWC lists
        # ('ksize' is TF's name for the pooling window).
        stride_h = node.get_attr_int("stride_h")
        stride_w = node.get_attr_int("stride_w")
        filter_height = node.get_attr_int("filter_height")
        filter_width = node.get_attr_int("filter_width")
        node.set_attr("strides", [1, stride_h, stride_w, 1])
        node.set_attr("ksize", [1, filter_height, filter_width, 1])
        del node.attr["stride_h"]
        del node.attr["stride_w"]
        del node.attr["filter_height"]
        del node.attr["filter_width"]
        node.set_attr("data_format", "NHWC")
@tfl_op(["TFL_DEPTHWISE_CONV_2D"], tf_op="DepthwiseConv2dNative")
class TflDepthwiseConv2D:
    @classmethod
    def to_tf(cls, ctx, node, **kwargs):
        """Rewrite a TFLite DEPTHWISE_CONV_2D node as TF DepthwiseConv2dNative."""
        separate_fused_activation_function(ctx, node)
        # No need to change 'padding' or 'depth_multiplier' attributes
        # Scalar stride/dilation attrs become 4-element NHWC lists.
        stride_h = node.get_attr_int("stride_h")
        stride_w = node.get_attr_int("stride_w")
        dilation_w_factor = node.get_attr_int("dilation_w_factor")
        dilation_h_factor = node.get_attr_int("dilation_h_factor")
        node.set_attr("strides", [1, stride_h, stride_w, 1])
        node.set_attr("dilations", [1, dilation_h_factor, dilation_w_factor, 1])
        del node.attr["stride_h"]
        del node.attr["stride_w"]
        del node.attr["dilation_h_factor"]
        del node.attr["dilation_w_factor"]
        # Kernel layout transpose, kept as-is during conversion.
        transpose_node = ctx.insert_new_node_on_input(node, "Transpose", node.input[1], name=None, perm=[1, 2, 3, 0])
        transpose_node.skip_conversion = True
        node.set_attr("data_format", "NHWC")
@tfl_op(["TFL_BATCH_TO_SPACE_ND"], tf_op="BatchToSpaceND")
class TflSlice:
    # NOTE(review): the class name says "Slice" but this maps
    # BATCH_TO_SPACE_ND; the name is cosmetic since registration
    # happens through @tfl_op.
    @classmethod
    def to_tf(cls, ctx, node, **kwargs):
        # Direct 1:1 mapping; no attribute rewriting needed.
        pass
@tfl_op(["TFL_SPACE_TO_BATCH_ND"], tf_op="SpaceToBatchND")
class TFlSpaceToBatchNDOp:
    @classmethod
    def to_tf(cls, ctx, node, **kwargs):
        # Direct 1:1 mapping; no attribute rewriting needed.
        pass
@tfl_op(["TFL_SPACE_TO_DEPTH"], tf_op="SpaceToDepth")
class TFlSpaceToDepthOp:
    @classmethod
    def to_tf(cls, ctx, node, **kwargs):
        # Only the data format needs to be made explicit for TF.
        node.set_attr("data_format", "NHWC")
@tfl_op(["TFL_NON_MAX_SUPPRESSION_V4"], tf_op="NonMaxSuppressionV4")
class TflNonMaxSuppressionV4Op:
    @classmethod
    def to_tf(cls, ctx, node, **kwargs):
        # TFLite always pads the output to max_output_size; make that
        # explicit on the TF node.
        node.set_attr("pad_to_max_output_size", 1)
| 4,390 | 1,696 |
# Entry point: open the application window from the graphics package.
from graphics.render import open_window
open_window()
| 58 | 18 |
import math

# Read the two legs (catetos) of a right triangle and print the hypotenuse.
catetoOposto = float(input('Valor Cateto Oposto: '))
catetoAdjacente = float(input('Valor Cateto adjacente: '))
# math.hypot computes sqrt(a**2 + b**2) in one call.
hipotenusa = math.hypot(catetoOposto, catetoAdjacente)
print(f'O valor da hipotenusa é de {hipotenusa:.2f}')
| 247 | 105 |
from orator import Model
class Incident(Model):
    """Orator ORM model for incidents; relies entirely on Model defaults."""
    pass
| 60 | 20 |
import json
import requests
import pprint
import time
from behave import *
from starlette import status
@when('"{holder}" will have a credential_offer from "{issuer}"')
@then('"{holder}" will have a credential_offer from "{issuer}"')
def step_impl(context, holder: str, issuer: str):
    """Check that *holder* sees exactly one credential offer from *issuer*."""
    userdata = context.config.userdata
    response = requests.get(
        userdata.get("traction_host") + "/tenant/v0/credentials/holder/offer",
        headers=userdata[holder]["auth_headers"],
    )
    assert response.status_code == status.HTTP_200_OK, response.__dict__
    offers = json.loads(response.content)
    assert len(offers) == 1, offers
    expected_contact = userdata[holder]["connections"][issuer]["contact_id"]
    credential = offers[0]["credential"]
    assert credential["contact_id"] == expected_contact
    assert credential["issue_state"] == "offer_received"
    # Stash the offers so the accept step can pick one up.
    userdata[holder]["cred_offers"] = [o["credential"] for o in offers]
@when('"{holder}" will accept credential_offer from "{issuer}"')
@then('"{holder}" will accept credential_offer from "{issuer}"')
def step_impl(context, holder, issuer):
    """Have *holder* accept the first stored credential offer."""
    userdata = context.config.userdata
    cred_issue_id = userdata[holder]["cred_offers"][0]["id"]
    url = (
        userdata.get("traction_host")
        + "/tenant/v0/credentials/holder/accept_offer"
        + "?cred_issue_id="
        + cred_issue_id
    )
    response = requests.post(url, headers=userdata[holder]["auth_headers"])
    assert response.status_code == status.HTTP_200_OK, response.__dict__
    body = json.loads(response.content)
    assert body["credential"]["issue_state"] == "request_sent", body
    # Give the agents a moment to exchange messages before the next step.
    time.sleep(2)
@then('"{holder}" will have a credential')
def step_impl(context, holder):
    """Check that *holder* now holds exactly one credential."""
    userdata = context.config.userdata
    response = requests.get(
        userdata.get("traction_host") + "/tenant/v0/credentials/holder/",
        headers=userdata[holder]["auth_headers"],
    )
    assert response.status_code == status.HTTP_200_OK, response.__dict__
    credentials = json.loads(response.content)
    assert len(credentials) == 1, credentials
| 2,137 | 702 |
# Print a right triangle of '*' with as many rows as the given side length.
side = int(input('enter side: \n'))
for row_length in range(1, side + 1):
    # Each row is the token "* " repeated row_length times
    # (trailing space included, matching the original output).
    print('* ' * row_length)
class my_timer(object):
    """Decorator that prints how long each call to the wrapped function takes."""

    def __init__(self, original_function):
        # Keep a reference so __call__ can delegate and report its name.
        self.original_function = original_function

    def __call__(self, *args, **kwargs):
        import time
        # Fix: use perf_counter, the clock intended for measuring elapsed
        # time (monotonic, high resolution); time.time is wall-clock and
        # can jump when the system clock is adjusted.
        t1 = time.perf_counter()
        result = self.original_function(*args, **kwargs)
        t2 = time.perf_counter()
        print(f"{self.original_function.__name__}() function ran in: {t2 - t1} sec")
        return result
@my_timer
def my_loop():
    """Print the sum of i*i for i in range(10000000), timed by my_timer."""
    # Fix: the original accumulated into a local named `sum`, shadowing the
    # builtin; use the builtin sum() over a generator instead.
    print(sum(i * i for i in range(10000000)))

my_loop()
| 527 | 186 |
#!/usr/bin/python3
import re
from typing import List
import numpy as np
from keras.models import load_model
from action import (add_class_json, add_attribute, create_association, create_inheritance, create_composition,
return_error_to_user)
from data import ADD_WORDS, CONTAINS_WORDS, HAVE_WORDS, ISA_WORDS
from model import predict, getIntent, keyIntent
from npparser import get_chunks, get_NP_subtrees, get_num_nonnested_NP_subtrees, get_noun_from_np
from utils import (first_letter_lowercase, first_letter_uppercase, contains_one_of, get_DT_for_word, is_attribute,
get_detected_keywords, strip_punctuation)
classes_created = [] # Must keep track of this to avoid errors
def process_response_model(user_input: str) -> str:
    """Route *user_input* to the action matching the model-predicted intent,
    falling back to the keyword-based baseline for unknown intents."""
    message_text = strip_punctuation(user_input.lower())
    intent = get_intent(predict(user_input))
    handlers = {
        "add_class": add_class_action,
        "add_attribute": add_attribute_action,
        "create_composition": make_composition,
        "create_association": make_association,
        "create_inheritance": make_inheritance,
    }
    handler = handlers.get(intent)
    if handler is not None:
        return handler(message_text)
    return process_response_baseline(user_input)
# The following three functions call into the same NP parser as the baseline, once the intent is determined.
def add_class_action(message_text):
    """Delegate the "add class" intent to the baseline ADD-keyword handler."""
    return handle_add_kw(message_text)
def make_composition(message_text):
    """Delegate the "create composition" intent to the CONTAIN-keyword handler."""
    return handle_contain_kw(message_text)
def make_inheritance(message_text):
    """Delegate the "create inheritance" intent to the ISA-keyword handler."""
    return handle_isa_kw(message_text)
# Since handle_have_kw tries to guess whether it needs to add an attribute (A student has a name) or an association
# (A student has an address), the logic for the following two functions needs to be specified separately.
def add_attribute_action(message_text):
    """Handle the "add attribute" intent: attach the second noun phrase as an
    attribute of the class named by the first noun phrase."""
    chunks = get_chunks(message_text)
    nps = get_NP_subtrees(chunks)
    n_st = get_num_nonnested_NP_subtrees(chunks)
    if n_st == 0:
        return return_error_to_user("I really don't understand what you meant. Please rephrase.")
    elif n_st == 1:
        # Only one noun phrase: we know the class but not the attribute.
        class_name = get_noun_from_np(nps[0])
        if class_name in classes_created:
            return return_error_to_user(f"What do you want to specify about {class_name}?")
        else:
            dt = get_DT_for_word(class_name)
            return return_error_to_user(f"Are you trying to add a class? Try saying 'Create {dt} {class_name}.'")
    else:
        class_name = get_noun_from_np(nps[0])
        attribute_name = first_letter_lowercase(get_noun_from_np(nps[1]))
        # Bug fix: register the class only when it is NOT already tracked.
        # The original condition was inverted (`in` instead of `not in`),
        # duplicating known classes and never registering new ones;
        # handle_contain_kw shows the intended pattern.
        if class_name not in classes_created:
            classes_created.append(class_name)
        return add_attribute(class_name, attribute_name)
def make_association(message_text):
    """Handle the "create association" intent between the first two noun phrases."""
    chunks = get_chunks(message_text)
    nps = get_NP_subtrees(chunks)
    n_st = get_num_nonnested_NP_subtrees(chunks)
    if n_st == 0:
        return return_error_to_user("I really don't understand what you meant. Please rephrase.")
    elif n_st == 1:
        class_name = get_noun_from_np(nps[0])
        if class_name in classes_created:
            return return_error_to_user(f"What do you want to specify about {class_name}?")
        else:
            dt = get_DT_for_word(class_name)
            return return_error_to_user(f"Are you trying to add a class? Try saying 'Create {dt} {class_name}.'")
    else:
        class1 = get_noun_from_np(nps[0])
        class2 = get_noun_from_np(nps[1])
        # Bug fix: the class1 check was inverted (`in` instead of `not in`);
        # the class2 check just below shows the intended pattern.
        if class1 not in classes_created:
            classes_created.append(class1)
        if class2 not in classes_created:
            classes_created.append(class2)
        return create_association(class1, class2)
def process_response_baseline(user_input: str) -> str:
    """
    Function used to reply with a baseline response based on the Socio model.
    This function assumes valid input.

    Dispatches on how many recognized keywords (ADD/CONTAIN/HAVE/ISA) the
    message contains.
    """
    print("Processing message in baseline mode.")
    message_text = strip_punctuation(user_input.lower())
    detected_keywords = get_detected_keywords(message_text)
    nk = len(detected_keywords)
    if nk == 0:
        return handle_no_kw(message_text)
    elif nk == 1:
        kw = list(detected_keywords.keys())[0]
        if kw == "ADD":
            return handle_add_kw(message_text)
        elif kw == "CONTAIN":
            return handle_contain_kw(message_text)
        elif kw == "HAVE":
            return handle_have_kw(message_text)
        elif kw == "ISA":
            return handle_isa_kw(message_text)
        # Fix: an unrecognized single keyword used to fall through and
        # implicitly return None; route it to the fallback parser instead.
        return process_response_fallback(message_text)
    elif nk == 2:
        if "CONTAIN" in detected_keywords and "ISA" in detected_keywords:  # "can consist of"
            return handle_contain_kw(message_text)
        else:
            print("nk = 2", detected_keywords)
            return process_response_fallback(message_text)
    else:
        # TODO Handle more complex multiple keyword scenarios
        print("nk =", nk, detected_keywords)
        return process_response_fallback(message_text)
def handle_add_kw(message_text: str) -> str:
    """Create a class (one noun phrase) or an attribute (two noun phrases)
    from an ADD-keyword request."""
    chunks = get_chunks(message_text)
    noun_phrases = get_NP_subtrees(chunks)
    np_count = get_num_nonnested_NP_subtrees(chunks)
    if np_count == 0:
        # Echo the user's own verb back in the error when we have it.
        keyword = get_detected_keywords(message_text).get("ADD", "add")
        return return_error_to_user(f"Please specify what you want to {keyword}.")
    if np_count == 1:
        return add_class(get_noun_from_np(noun_phrases[0]))
    if np_count == 2:
        # "Add <attribute> to <class>": attribute comes first, class second.
        target_class = get_noun_from_np(noun_phrases[1])
        attribute = first_letter_lowercase(get_noun_from_np(noun_phrases[0]))
        return add_attribute(target_class, attribute)
    return process_response_fallback(message_text)
def handle_contain_kw(message_text: str) -> str:
    """Handle CONTAIN keywords: add an attribute if the second noun looks
    like one, otherwise create a whole/part composition."""
    chunks = get_chunks(message_text)
    nps = get_NP_subtrees(chunks)
    n_st = get_num_nonnested_NP_subtrees(chunks)
    if n_st < 2:
        return return_error_to_user(
            "I don't get what you meant. If you want to make a composition, specify the two classes.")
    elif n_st == 2:
        first_noun = get_noun_from_np(nps[0])
        second_noun = get_noun_from_np(nps[1])
        # Register the containing class before deciding what the second noun is.
        if first_noun not in classes_created:
            classes_created.append(first_noun)
        if is_attribute(get_noun_from_np(nps[1])):
            return add_attribute(first_noun, first_letter_lowercase(second_noun))
        else:
            whole = first_noun
            part = second_noun
            if part not in classes_created:
                classes_created.append(part)
            return create_composition(whole, part)
    else:
        return process_response_fallback(message_text)
def handle_have_kw(message_text: str) -> str:
    """Handle HAVE keywords: add an attribute ("a student has a name") or an
    association ("a student has an address") depending on the second noun."""
    chunks = get_chunks(message_text)
    nps = get_NP_subtrees(chunks)
    n_st = get_num_nonnested_NP_subtrees(chunks)
    if n_st == 0:
        return return_error_to_user("I really don't understand what you meant. Please rephrase.")
    elif n_st == 1:
        class_name = get_noun_from_np(nps[0])
        if class_name in classes_created:
            return return_error_to_user(f"What do you want to specify about {class_name}?")
        else:
            dt = get_DT_for_word(class_name)
            return return_error_to_user(f"Are you trying to add a class? Try saying 'Create {dt} {class_name}.'")
    else:
        # TODO In the future, also allow multiple attributes ("Student has a name and email").
        # This requires updating the website.
        class_name = get_noun_from_np(nps[0])
        second_noun = get_noun_from_np(nps[1])
        # Bug fix: register the class only when it is NOT already tracked
        # (the original condition was inverted), matching handle_contain_kw.
        if class_name not in classes_created:
            classes_created.append(class_name)
        if is_attribute(second_noun):
            return add_attribute(class_name, first_letter_lowercase(second_noun))
        else:
            if second_noun not in classes_created:
                classes_created.append(second_noun)
            return create_association(class_name, second_noun)
    # (Removed the original trailing fallback return: every branch above returns.)
def handle_isa_kw(message_text: str) -> str:
    """Handle ISA keywords by creating an inheritance between two classes.

    "X serves as Y" / "X plays the role of Y" reverses the child/parent
    order relative to the plain "X is a Y" phrasing.
    """
    chunks = get_chunks(message_text)
    nps = get_NP_subtrees(chunks)
    n_st = get_num_nonnested_NP_subtrees(chunks)
    if n_st < 2:
        return return_error_to_user("If you're trying to create an inheritance, clearly specify both classes.")
    else:
        if ((" serve" in message_text and " as " in message_text) or
                (" play" in message_text and " role" in message_text)):
            child = get_noun_from_np(nps[1])
            parent = get_noun_from_np(nps[0])
        else:
            child = get_noun_from_np(nps[0])
            parent = get_noun_from_np(nps[1])
        if child not in classes_created:
            classes_created.append(child)
        if parent not in classes_created:
            classes_created.append(parent)
        return create_inheritance(child, parent)
    # (Removed the original trailing fallback return: both branches above return.)
def handle_no_kw(message_text: str) -> str:
    """
    Add an association if possible, otherwise create a class.
    """
    chunks = get_chunks(message_text)
    nps = get_NP_subtrees(chunks)
    n_st = get_num_nonnested_NP_subtrees(chunks)
    if n_st == 0:
        return return_error_to_user("I really don't understand what you meant. Please rephrase.")
    elif n_st == 1:
        class_name = get_noun_from_np(nps[0])
        return add_class(class_name)
    elif n_st == 2:
        class1 = get_noun_from_np(nps[0])
        class2 = get_noun_from_np(nps[1])
        if class1 not in classes_created:
            classes_created.append(class1)
        if class2 not in classes_created:
            classes_created.append(class2)
        return create_association(class1, class2)
    # Three or more noun phrases: let the undergrad fallback parser try.
    return process_response_fallback(message_text)
def process_response_fallback(user_input: str) -> str:
    """
    Fallback method from Younes' undergrad project, to be used for the cases not handled by Socio's logic.

    Matches a handful of fixed phrase templates by word position.
    """
    print("Processing request in fallback mode")
    message_text = user_input.lower()
    words = message_text.split(' ')
    # This logic is not always correct, eg "Add attribute in class."
    # Pattern: "<add-verb> a <Class>" — class name assumed two words after the verb.
    if contains_one_of(message_text, ADD_WORDS):
        for i in range(len(words) - 2):
            if words[i] in ADD_WORDS:
                # strip punctuation
                class_name = first_letter_uppercase(strip_punctuation(words[i + 2]))
                return add_class(class_name)
    # Pattern: "<Class> has a <attribute>".
    if "has a" in message_text:
        for i in range(len(words) - 2):
            if words[i] == 'has':
                class_name = first_letter_uppercase(words[i - 1])
                attribute_name = strip_punctuation(words[i + 2])
                return add_attribute(class_name, attribute_name)
    # Pattern: "<Whole> is composed of <Parts>".
    if "is composed of" in message_text:
        for i in range(len(words) - 2):
            if words[i] == "is":
                whole_class_name = first_letter_uppercase(words[i - 1])
                part_class_name = first_letter_uppercase(strip_punctuation(words[i + 3]))
                # assume the plural when part_class_name ends with s
                if part_class_name[-1] == "s":
                    part_class_name = part_class_name[:-1]
                return create_composition(whole_class_name, part_class_name)
    # not very useful, but good for testing
    # Pattern: "<A> is associated with (a|an)? <B>".
    if "is associated with" in message_text:
        for i in range(len(words) - 3):
            if words[i] == "is":
                class_name1 = first_letter_uppercase(words[i - 1])
                if words[i + 3] in ["a", "an"]:
                    class_name2 = words[i + 4]
                else:
                    class_name2 = words[i + 3]
                class_name2 = first_letter_uppercase(strip_punctuation(class_name2))
                return create_association(class_name1, class_name2)
    # Pattern: "<Child> is a <Parent>" — inheritance.
    if "is a" in message_text:
        for i in range(len(words) - 2):
            if words[i] == "is":
                child = first_letter_uppercase(words[i - 1])
                parent = first_letter_uppercase(strip_punctuation(words[i + 2]))
                return create_inheritance(child, parent)
    return return_error_to_user("Sorry, I could not process your request :(")
def get_intent(predicts):
    """Return the intent label with the highest predicted score.

    `predicts` is a batch of model outputs; only the first row is used.
    """
    prediction = predicts[0]
    # Simplification: the original sorted both the labels and the scores in
    # descending order and then took argmax of the sorted scores — which is
    # always index 0. That reduces to a single argmax over the raw scores.
    return np.array(keyIntent)[int(np.argmax(prediction))]
# These functions are kept here since they modify the global state
def add_class(class_name: str) -> str:
    """Create a new class unless one with the same name already exists."""
    # No `global` statement needed: classes_created is only read here,
    # never rebound.
    # NOTE(review): the new class is not appended to classes_created here;
    # verify that registration happens elsewhere (several callers append
    # themselves, but the single-NP paths do not).
    if class_name in classes_created:
        return return_error_to_user(f"{class_name} is already created, so let's not make it again.")
    return add_class_json(class_name)
def reset_classes_created():
    """Forget all tracked classes (e.g. when starting a new diagram)."""
    global classes_created
    classes_created = []
| 12,870 | 4,120 |
#! /usr/bin/python3
import sys
from pennylane import numpy as np
import pennylane as qml
def generating_fourier_state(n_qubits, m):
    """Function which, given the number of qubits and an integer m, returns the circuit and the angles that generate the state
    QFT|m> following the above template.
    Args:
    - n_qubits (int): number of qubits in the circuit.
    - m (int): basis state that we generate. For example, for 'm = 3' and 'n_qubits = 4'
    we would generate the state QFT|0011> (3 in binary is 11).
    Returns:
    - (qml.QNode): circuit used to generate the state.
    - (list[float]): angles that generate the state QFT|m>.
    """
    dev = qml.device("default.qubit", wires=n_qubits)
    @qml.qnode(dev)
    def circuit(angles):
        """This is the quantum circuit that we will use."""
        # QHACK #
        # Add the template of the statement with the angles passed as an argument.
        # Hadamard on every wire builds the uniform superposition; a per-wire
        # RZ then sets the relative phases that should realize QFT|m>.
        for w in range(n_qubits):
            qml.Hadamard(wires=w)
            qml.RZ(angles[w],wires=w)
        # QHACK #
        # We apply QFT^-1 to return to the computational basis.
        # This will help us to see how well we have done.
        qml.adjoint(qml.QFT)(wires=range(n_qubits))
        # We return the probabilities of seeing each basis state.
        return qml.probs(wires=range(n_qubits))
    def error(angles):
        """This function will determine, given a set of angles, how well it approximates
        the desired state. Here it will be necessary to call the circuit to work with these results.
        """
        probs = circuit(angles)
        # QHACK #
        # The return error should be smaller when the state m is more likely to be obtained.
        # Squared distance between the measured distribution and a one-hot
        # target concentrated on basis state m.
        target=np.zeros(2**n_qubits)
        target[m]=1
        loss=np.sum((target-probs)**2)
        return loss
    # QHACK #
    # This subroutine will find the angles that minimize the error function.
    # Do not modify anything from here.
    opt = qml.AdamOptimizer(stepsize=0.8)
    epochs = 5000
    angles = np.zeros(n_qubits, requires_grad=True)
    for epoch in range(epochs):
        # NOTE(review): opt.step is invoked twice per epoch (the next two
        # lines), and only the second result is clipped to [-2pi, 2pi] —
        # presumably intentional, but worth confirming.
        angles = opt.step(error, angles)
        angles = np.clip(opt.step(error, angles), -2 * np.pi, 2 * np.pi)
    return circuit, angles
if __name__ == "__main__":
    # DO NOT MODIFY anything in this code block
    # Input on stdin: "<n_qubits>,<m>"
    inputs = sys.stdin.read().split(",")
    n_qubits = int(inputs[0])
    m = int(inputs[1])
    output = generating_fourier_state(n_qubits, m)
    # Execute the trained circuit once so its tape is populated.
    output[0](output[1])
    dev = qml.device("default.qubit", wires=n_qubits)
    @qml.qnode(dev)
    def check_with_arbitrary_state():
        # Prepare a non-trivial start state, then replay the recorded
        # operations of the trained circuit on top of it.
        for i in range(n_qubits):
            qml.RY(i, wires=i)
        for op in output[0].qtape.operations:
            qml.apply(op)
        return qml.state()
    print(",".join([f"{p.real.round(5)},{p.imag.round(5)}" for p in check_with_arbitrary_state()]))
| 2,890 | 964 |
# # # ####
# import_os.path as os_path
from os import path, makedirs
# #########################
# 1) Python Absoulute path
# ########################
#
# current absolute path
# file_path = r"c:\repos\Library"
# current_file_path = path.abspath(__file__)
# print(current_file_path)
# print(path.dirname(current_file_path))
# print(path.basename(current_file_path))
# Get current directory
current_directory = path.dirname(path.abspath(__file__))
# print(current_directory)
# Concat file path
# NOTE(review): 'jason' (directory, file name, and extension) looks like a
# misspelling of 'json'; kept as-is because it is a runtime path string.
jason_file_path = path.join(
    current_directory, 'test_demo', 'jason_file', 'parse_jason_dat.jason'
)
# if path.exists(jason_file_path):
# print("hello JSON")
#
xml_file_path = path.join(
    current_directory, 'test_demo', 'xml_file', 'parse_xml_data.xml'
)
#
# if path.exists(xml_file_path):
# print("hello XML")
#
# NOTE(review): text_file_path reuses the XML directory and file name —
# likely a copy-paste leftover; confirm the intended text-file location.
text_file_path = path.join(
    current_directory, 'test_demo', 'xml_file', 'parse_xml_data.xml'
)
# print("hello text")
#
# NOTE(review): CSV_file_path also reuses the XML path — same copy-paste
# suspicion as above.
CSV_file_path = path.join(
    current_directory, 'test_demo', 'xml_file', 'parse_xml_data.xml'
)
# print("hello csv")
#
#
# class_09 = path.join(
# current_directory, 'test_demo', 'class_09', 'test_dr', 'whynot dr'
# )
# print(class_09)
#
# Create the parent directory on first run, then (over)write the demo file.
if not path.exists(path.dirname(text_file_path)):
    makedirs(path.dirname(text_file_path))
file_data = "This is my classo9 file, which is created for test purpose,"
with open(text_file_path, 'w+') as text_file:
    text_file.writelines(file_data)
from pprint import pprint
# with open(text_file_path, 'r+') as text_file_read:
# data = text_file_read.readlines()
# pprint(data, width=120/
# if path.exists(text_file_path):
# print("exists")
# with open(text_file_path, 'r+') as text_file_read:
# for line in text_file_read:
# print(line.replace("\n", ''))
def generator_parse_file(file_path):
    """Yield the lines of *file_path* one at a time, without trailing newlines."""
    # Fix: open with 'r' instead of 'r+' — the file is only read, so we
    # should not request (and potentially fail to get) write access.
    with open(file_path, 'r') as text_file:
        for line in text_file:
            yield line.rstrip('\n')
# Demo: stream the file written above and print each line.
for i in generator_parse_file(text_file_path):
    print(i)
#
| 2,004 | 750 |
# Generated by Django 4.0.3 on 2022-03-21 00:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a nullable `size` foreign key from zoo's Animal to animal_size.AnimalSize."""

    dependencies = [
        ('animal_size', '0001_initial'),
        ('zoo', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='animal',
            name='size',
            # SET_NULL + null=True: deleting an AnimalSize keeps the animal
            # and clears the reference; to_field='name' targets the name
            # column instead of the primary key.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='animal_size.animalsize', to_field='name'),
        ),
    ]
| 542 | 192 |
# -*- coding: utf-8 -*-
"""
@author: nicolas.posocco
"""
from .binary_reliability_curve import binary_reliability_curve
def binary_calibration_error_curve(model=None, X=None, Y=None,
                                   kernel=None, bandwidth=None,
                                   positive_scores=None,
                                   positive_scores_for_positive_gt=None,
                                   positive_class_probability=None):
    """Compute the binary reliability curve and expose its score grid.

    All arguments are forwarded unchanged to binary_reliability_curve;
    the returned dict currently contains only the "scores" entry.
    """
    curve = binary_reliability_curve(
        model=model, X=X, Y=Y,
        kernel=kernel, bandwidth=bandwidth,
        positive_scores=positive_scores,
        positive_scores_for_positive_gt=positive_scores_for_positive_gt,
        positive_class_probability=positive_class_probability,
    )
    return {"scores": curve["scores"]}
| 996 | 266 |
"""Timeseries Class"""
from .index import Indexed
from . import general_utils
class Timeseries(Indexed):
    """Parent class for Tidegauge and other timeseries type datasets.

    Currently a pure placeholder: all behaviour is inherited from Indexed;
    common timeseries methods are intended to live here.
    """
    pass
| 217 | 61 |
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to that could be included in `numpy` but aren't.
"""
import numpy as np
# np seed must be in [0, 2**32 - 1] = [0, uint32 max]
SEED_MAX_INCL = np.iinfo(np.uint32).max
# Access default numpy rng in way that is short and sphinx friendly
# (np.random.random is a bound method of the module-level RandomState,
# so __self__ recovers that RandomState instance).
random = np.random.random.__self__
def random_seed(random=random):
    """Draw a random seed compatible with :class:`numpy:numpy.random.RandomState`.

    Parameters
    ----------
    random : :class:`numpy:numpy.random.RandomState`
        Random stream used to draw the new seed.

    Returns
    -------
    seed : int
        Seed for a new random stream in ``[0, 2**32-1)``.
    """
    # The high end is exclusive for np's randint but inclusive for py's
    # randint, so using the inclusive limit works with both. This never
    # produces 2**32-1 itself, which is harmless.
    return random.randint(0, SEED_MAX_INCL)
def shuffle_2d(X, random=random):
    """Shuffle a 2D array in place: row order first, then each row's elements.

    Generalization of :func:`numpy:numpy.random.shuffle` to 2D. No return
    value, matching the convention for in-place operations.

    Parameters
    ----------
    X : :class:`numpy:numpy.ndarray` of shape (n, m)
        Array-like 2D data to shuffle in place.
    random : :class:`numpy:numpy.random.RandomState`
        Random stream to use.
    """
    random.shuffle(X)
    for row in X:
        random.shuffle(row)
def strat_split(X, n_splits, inplace=False, random=random):
    """Make a stratified random split of items.

    Parameters
    ----------
    X : :class:`numpy:numpy.ndarray` of shape (n, m)
        Data to split randomly into groups; each group receives the same
        number +/-1 of elements from every row.
    n_splits : int
        Number of groups to split into.
    inplace : bool
        If true, `X` is modified in place.
    random : :class:`numpy:numpy.random.RandomState`
        Random stream for reproducibility.

    Returns
    -------
    Y : list(:class:`numpy:numpy.ndarray`)
        Stratified split of `X`; a list of arrays since groups may differ
        in length by one.
    """
    # Arguably, this function could go in stats
    assert np.ndim(X) == 2
    assert n_splits > 0
    if not inplace:
        X = np.array(X, copy=True)
    # Randomize within and across rows so the column-major ravel below
    # samples every row evenly into each split.
    shuffle_2d(X, random=random)
    # order="F" interleaves the rows (equivalent to X.T.ravel()).
    splits = np.array_split(np.ravel(X, order="F"), n_splits)
    # Shuffle the resulting groups too; probably unnecessary but harmless.
    shuffle_2d(splits, random=random)
    return splits
def isclose_lte(x, y):
    """Approximate element-wise ``x <= y`` check.

    Analogous to :func:`numpy:numpy.allclose` but for ordering. Shapes of
    all inputs must be broadcast compatible.

    Parameters
    ----------
    x : :class:`numpy:numpy.ndarray`
        Lower side of the ``<=`` check.
    y : :class:`numpy:numpy.ndarray`
        Upper side of the ``<=`` check.

    Returns
    -------
    lte : bool
        True where ``x <= y`` holds at least approximately.
    """
    # np.less_equal keeps the result an np type even for python scalars;
    # OR-ing with isclose forgives tiny floating-point violations.
    return np.less_equal(x, y) | np.isclose(x, y)
def clip_chk(x, lb, ub, allow_nan=False):
    """Clip elements of `x` into ``[lb, ub]`` like :func:`numpy:numpy.clip`,
    but first assert that `x` was already (approximately) within bounds.

    Shapes of all inputs must be broadcast compatible.

    Parameters
    ----------
    x : :class:`numpy:numpy.ndarray`
        Elements to clip.
    lb : :class:`numpy:numpy.ndarray`
        Lower clip limit.
    ub : :class:`numpy:numpy.ndarray`
        Upper clip limit.
    allow_nan : bool
        If true, ``nan`` values in `x` are tolerated.

    Returns
    -------
    x : :class:`numpy:numpy.ndarray`
        `x` with out-of-range values replaced by the nearest bound.
    """
    assert np.all(lb <= ub)  # np.clip does not do this check
    x = np.asarray(x)
    # Asserts rather than exceptions: clip_chk is mostly used internally.
    # nan_ok is False (scalar) when nans are disallowed, so the | below is
    # a no-op in that case.
    nan_ok = np.isnan(x) if allow_nan else False
    assert np.all(isclose_lte(lb, x) | nan_ok)
    assert np.all(isclose_lte(x, ub) | nan_ok)
    return np.clip(x, lb, ub)
def snap_to(x, fixed_val=None):
    """Return `fixed_val` (broadcast to `x`'s shape) after checking `x` is
    approximately equal to it; pass `x` straight through when `fixed_val`
    is None.

    Parameters
    ----------
    x : :class:`numpy:numpy.ndarray`
        Elements to snap.
    fixed_val : :class:`numpy:numpy.ndarray` or None
        Value to snap to; a ValueError is raised if `x` is not close to it.

    Returns
    -------
    fixed_val : :class:`numpy:numpy.ndarray`
        Snapped-to value of `x`.
    """
    if fixed_val is None:
        return x
    # The == test covers discrete dtypes where allclose is not meaningful.
    matches = np.all(x == fixed_val) or np.allclose(x, fixed_val)
    if not matches:
        raise ValueError("Expected fixed value %s, got %s." % (repr(fixed_val), repr(x)))
    return np.broadcast_to(fixed_val, np.shape(x))
def linear_rescale(X, lb0, ub0, lb1, ub1, enforce_bounds=True):
    """Linearly map `X`, bounded by ``[lb0, ub0]``, onto ``[lb1, ub1]``.

    Shapes of all input variables must be broadcast compatible.

    Parameters
    ----------
    X : :class:`numpy:numpy.ndarray`
        Elements to rescale.
    lb0 : :class:`numpy:numpy.ndarray`
        Current lower bound of `X`.
    ub0 : :class:`numpy:numpy.ndarray`
        Current upper bound of `X`.
    lb1 : :class:`numpy:numpy.ndarray`
        Desired lower bound.
    ub1 : :class:`numpy:numpy.ndarray`
        Desired upper bound.
    enforce_bounds : bool
        If True, bounds-check (and slightly clip) the input and the output.
        Not vectorized like the other arguments.

    Returns
    -------
    X : :class:`numpy:numpy.ndarray`
        `X` after the linear rescaling.
    """
    for bound in (lb0, lb1, ub0, ub1):
        assert np.all(np.isfinite(bound))
    assert np.all(lb0 < ub0)
    assert np.all(lb1 <= ub1)
    slope = np.true_divide(ub1 - lb1, ub0 - lb0)
    assert np.all(slope >= 0)
    if enforce_bounds:
        X = clip_chk(X, lb0, ub0)  # This will flag any non-finite X input.
        return clip_chk(slope * (X - lb0) + lb1, lb1, ub1)
    return slope * (X - lb0) + lb1
def argmin_2d(X):
    """Return the ``(row, col)`` index of the minimum of a 2D array."""
    assert X.size > 0, "argmin of empty array not defined"
    # argmin gives a flat index; unravel it back into 2D coordinates.
    flat_index = X.argmin()
    return np.unravel_index(flat_index, X.shape)
def cummin(x_val, x_key):
    """Get the cumulative minimum of `x_val` when ranked according to `x_key`.

    Parameters
    ----------
    x_val : :class:`numpy:numpy.ndarray` of shape (n, d)
        The array to get the cumulative minimum of along axis 0.
    x_key : :class:`numpy:numpy.ndarray` of shape (n, d)
        The array for ranking elements as to what is the minimum.

    Returns
    -------
    c_min : :class:`numpy:numpy.ndarray` of shape (n, d)
        Row i holds, per column, the value whose key is minimal among
        rows 0..i. On key ties the most recent row wins.
    """
    assert x_val.shape == x_key.shape
    assert x_val.ndim == 2
    assert not np.any(np.isnan(x_key)), "cummin not defined for nan key"
    c_min = np.empty_like(x_val)
    n_rows = x_val.shape[0]
    if n_rows == 0:
        return c_min
    # Track the best (key, value) pair per column, row by row; `<=` makes a
    # later row that ties the running-minimum key take over.
    best_key = x_key[0].copy()
    best_val = x_val[0].copy()
    c_min[0] = best_val
    for i in range(1, n_rows):
        improved = x_key[i] <= best_key
        best_key = np.where(improved, x_key[i], best_key)
        best_val = np.where(improved, x_val[i], best_val)
        c_min[i] = best_val
    return c_min
| 8,714 | 2,908 |
from kra import models
def get_containers_summary(container_ids=None):
    """Fetch Container rows joined with per-container resource-usage stats.

    The raw query LEFT JOINs each container with an aggregate over its
    ResourceUsage rows. pass1 computes per-measurement deltas (delta_seconds,
    delta_cpu_m_seconds) via window functions, then totals, maxima and
    time-weighted averages (avg_memory_mi, avg_cpu_m); pass2 re-derives the
    same deltas to compute time-weighted standard deviations
    (stddev_memory_mi, stddev_cpu_m). The extra columns appear as attributes
    on the returned Container objects. Containers with no usage data
    (total_seconds IS NULL) are filtered out.

    Parameters
    ----------
    container_ids : iterable, optional
        When given, restricts the result to containers with these ids.

    Returns
    -------
    RawQuerySet of models.Container.

    NOTE(review): `container_ids` values are interpolated into the SQL via
    str() with no parameter binding — callers must pass trusted integer ids.
    """
    if container_ids is not None:
        container_ids_str = ','.join(str(cid) for cid in container_ids)
        container_filter = f'AND id IN ({container_ids_str})'
    else:
        container_filter = ''
    # Table names and the optional id filter are spliced in with %-formatting;
    # only the trailing WHERE-clause fragment varies per call.
    return models.Container.objects.raw(r"""
        SELECT
            *
        FROM %(container_tblname)s AS c
        LEFT JOIN LATERAL (
            SELECT * FROM (
                SELECT
                    since,
                    till,
                    total_seconds,
                    total_cpu_m_seconds,
                    max_memory_mi,
                    max_cpu_m,
                    total_memory_mi_seconds,
                    (total_memory_mi_seconds / total_seconds) AS avg_memory_mi,
                    (total_cpu_m_seconds / total_seconds) AS avg_cpu_m
                FROM (
                    SELECT
                        *,
                        extract(epoch FROM (till - since)) AS total_seconds
                    FROM (
                        SELECT
                            c.started_at AS since,
                            max(measured_at) AS till,
                            max(cpu_m) AS max_cpu_m,
                            max(cpu_m_seconds) AS total_cpu_m_seconds,
                            max(memory_mi) AS max_memory_mi,
                            sum(delta_memory_mi_seconds) AS total_memory_mi_seconds
                        FROM (
                            SELECT
                                *,
                                memory_mi * delta_seconds AS delta_memory_mi_seconds,
                                delta_cpu_m_seconds / delta_seconds AS cpu_m
                            FROM (
                                SELECT
                                    measured_at,
                                    cpu_m_seconds,
                                    memory_mi,
                                    (
                                        CASE
                                            WHEN lag(measured_at) OVER w IS NOT NULL
                                            THEN extract(epoch FROM (measured_at - lag(measured_at) OVER w))
                                            ELSE extract(epoch FROM (measured_at - c.started_at))
                                        END
                                    ) AS delta_seconds,
                                    (
                                        CASE
                                            WHEN lag(cpu_m_seconds) OVER w IS NOT NULL
                                            THEN cpu_m_seconds - lag(cpu_m_seconds) OVER w
                                            ELSE cpu_m_seconds
                                        END
                                    ) AS delta_cpu_m_seconds
                                FROM %(ru_tblname)s
                                WHERE container_id = c.id
                                WINDOW w AS (ORDER BY measured_at)
                            ) AS pass1q0
                        ) AS pass1q1
                    ) AS pass1q2
                ) AS pass1q3
            ) AS pass1
            LEFT JOIN LATERAL (
                SELECT
                    sqrt(total_stddev_memory_mi2_seconds / total_seconds) AS stddev_memory_mi,
                    sqrt(total_stddev_cpu_m2_seconds / total_seconds) AS stddev_cpu_m
                FROM (
                    SELECT
                        sum(stddev_memory_mi2_seconds) AS total_stddev_memory_mi2_seconds,
                        sum(stddev_cpu_m2_seconds) AS total_stddev_cpu_m2_seconds
                    FROM (
                        SELECT
                            ((memory_mi - avg_memory_mi)^2 * delta_seconds) AS stddev_memory_mi2_seconds,
                            ((delta_cpu_m_seconds / delta_seconds - avg_cpu_m)^2 * delta_seconds) AS stddev_cpu_m2_seconds
                        FROM (
                            SELECT
                                cpu_m_seconds,
                                memory_mi,
                                (
                                    CASE
                                        WHEN lag(measured_at) OVER w IS NOT NULL
                                        THEN extract(epoch FROM (measured_at - lag(measured_at) OVER w))
                                        ELSE extract(epoch FROM (measured_at - c.started_at))
                                    END
                                ) AS delta_seconds,
                                (
                                    CASE
                                        WHEN lag(cpu_m_seconds) OVER w IS NOT NULL
                                        THEN cpu_m_seconds - lag(cpu_m_seconds) OVER w
                                        ELSE cpu_m_seconds
                                    END
                                ) AS delta_cpu_m_seconds
                            FROM %(ru_tblname)s
                            WHERE container_id = c.id
                            WINDOW w AS (ORDER BY measured_at)
                        ) AS pass2q0
                    ) AS pass2q1
                ) AS pass2q2
            ) AS pass2 ON TRUE
        ) AS summary ON TRUE
        WHERE total_seconds IS NOT NULL %(container_filter)s
    """ % {
        'container_tblname': models.Container._meta.db_table,
        'ru_tblname': models.ResourceUsage._meta.db_table,
        'container_filter': container_filter,
    })
| 5,141 | 1,260 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 3 15:47:19 2018
@author: Yann Roussel and Tuan Bui
Editted by: Emine Topcu on Oct 2021
"""
from random import gauss
from Beat_and_glide import Beat_and_glide_base
from Izhikevich_class import Izhikevich_9P, Leaky_Integrator
class Beat_and_glide_with_sigmas(Beat_and_glide_base):
    """Beat-and-glide model variant with extra multiplicative noise sources.

    On top of the base model this adds four gaussian noise standard
    deviations:

    - sigmaD: jitter on the driving stimulus amplitude (getStimulus)
    - sigmaL: jitter on connection ranges (rangeNoiseMultiplier)
    - sigmaP: jitter on every Izhikevich membrane parameter (initNeurons)
    - sigmaW: jitter on synaptic weights (weightNoiseMultiplier)
    """

    # Noise standard deviations; overwritten per instance in __init__.
    sigmaD = 0
    sigmaL = 0
    sigmaP = 0
    sigmaW = 0

    def __init__ (self, stim0 = 2.89, sigma = 0, sigma_LR = 0.1, sigmaD = 0, sigmaL = 0, sigmaP = 0, sigmaW = 0,
                  E_glu = 0, E_gly = -70, cv = 0.80,
                  nMN = 15, ndI6 = 15, nV0v = 15, nV2a = 15, nV1 = 15, nMuscle = 15,
                  R_str = 1.0):
        super().__init__(stim0, sigma, sigma_LR, E_glu, E_gly, cv,
                         nMN, ndI6, nV0v, nV2a, nV1, nMuscle, R_str)
        self.sigmaD = sigmaD
        self.sigmaL = sigmaL
        self.sigmaP = sigmaP
        self.sigmaW = sigmaW

    def _izhikevich_row(self, n, a, b, c, d, vmax, vr, vt, k, Cm, x0, y):
        """Build one rostro-caudal row of `n` Izhikevich_9P neurons.

        Each membrane parameter is independently jittered by a gaussian
        multiplier with s.d. `sigmaP`; the position x is jittered with s.d.
        `sigma`. `y` is -1 for the left side and +1 for the right side.
        The parameter draw order (a, b, c, d, vmax, vr, vt, k, Cm, x) is
        kept fixed so seeded runs reproduce the original draws.
        """
        return [ Izhikevich_9P(a = a*gauss(1, self.sigmaP),
                               b = b*gauss(1, self.sigmaP),
                               c = c*gauss(1, self.sigmaP),
                               d = d*gauss(1, self.sigmaP),
                               vmax = vmax*gauss(1, self.sigmaP),
                               vr = vr*gauss(1, self.sigmaP),
                               vt = vt*gauss(1, self.sigmaP),
                               k = k*gauss(1, self.sigmaP),
                               Cm = Cm*gauss(1, self.sigmaP),
                               dt = self.getdt(),
                               x = x0+1.6*i*gauss(1, self.sigma),
                               y = y) for i in range(n)]

    def initNeurons(self):
        """Create left/right neuron populations with parameter noise.

        Population counts (nMN, ndI6, ...) are presumably set by the base
        class __init__ — TODO confirm against Beat_and_glide_base.
        """
        ## Membrane parameters (a, b, c, d, vmax, vr, vt, k, Cm) per type.
        MN  = dict(a=0.5,  b=0.01,  c=-55, d=100, vmax=10, vr=-65, vt=-58, k=0.5, Cm=20)
        dI6 = dict(a=0.1,  b=0.002, c=-55, d=4,   vmax=10, vr=-60, vt=-54, k=0.3, Cm=10)
        V0v = dict(a=0.01, b=0.002, c=-55, d=2,   vmax=8,  vr=-60, vt=-54, k=0.3, Cm=10)
        V2a = dict(a=0.1,  b=0.002, c=-55, d=4,   vmax=10, vr=-60, vt=-54, k=0.3, Cm=10)
        V1  = V2a  # V1 shares the V2a membrane parameters; only x0 differs.

        self.L_MN  = self._izhikevich_row(self.nMN,  x0=5.0, y=-1, **MN)
        self.R_MN  = self._izhikevich_row(self.nMN,  x0=5.0, y=1,  **MN)
        self.L_dI6 = self._izhikevich_row(self.ndI6, x0=5.1, y=-1, **dI6)
        self.R_dI6 = self._izhikevich_row(self.ndI6, x0=5.1, y=1,  **dI6)
        self.L_V0v = self._izhikevich_row(self.nV0v, x0=5.1, y=-1, **V0v)
        self.R_V0v = self._izhikevich_row(self.nV0v, x0=5.1, y=1,  **V0v)
        self.L_V2a = self._izhikevich_row(self.nV2a, x0=5.1, y=-1, **V2a)
        self.R_V2a = self._izhikevich_row(self.nV2a, x0=5.1, y=1,  **V2a)
        self.L_V1  = self._izhikevich_row(self.nV1,  x0=7.1, y=-1, **V1)
        self.R_V1  = self._izhikevich_row(self.nV1,  x0=7.1, y=1,  **V1)
        # Muscle cells are plain leaky integrators with no parameter noise.
        self.L_Muscle = [ Leaky_Integrator(1.0, 3.0, self.getdt(), 5.0+1.6*i,-1) for i in range(self.nMuscle)]
        self.R_Muscle = [ Leaky_Integrator(1.0, 3.0, self.getdt(), 5.0+1.6*i, 1) for i in range(self.nMuscle)]

    def getStimulus(self, t):
        """Return the noisy drive stimulus, zero during the settling period.

        NOTE(review): the original comment said "first 200 ms" while the
        threshold is t > 2000 — presumably t is in 0.1 ms units; confirm
        against the integration step used by the base class.
        """
        if t > 2000: # Let the initial conditions dissipate first
            return self.stim0 * gauss(1, self.sigmaD)
        return 0

    def rangeNoiseMultiplier(self):
        """Multiplicative gaussian noise applied to connection ranges."""
        return gauss(1, self.sigmaL)

    def weightNoiseMultiplier(self):
        """Multiplicative gaussian noise applied to synaptic weights."""
        return gauss(1, self.sigmaW)
| 9,712 | 3,351 |
# -*- mode: python; coding: utf-8; -*-

# Package version as a tuple; the dotted string form is derived from it so
# the two can never disagree.
VERSION = (1, 3, 3)
__version__ = '{}.{}.{}'.format(*VERSION)

__author__ = 'Joe Vasquez'
__email__ = 'joe.vasquez@gmail.com'
__license__ = 'MIT'
# Precomputed sampling-position tables, ascending fractions in [0, 1).
# The entries appear to be multiples of 1/1225 (= 1/35**2) — presumably
# generated offline; TODO confirm the generator before hand-editing.
pattern_zero=[0.0, 0.027755102041, 0.05387755102, 0.057142857143, 0.078367346939, 0.084897959184, 0.101224489796, 0.111020408163, 0.114285714286, 0.122448979592, 0.135510204082, 0.142040816327, 0.158367346939, 0.16, 0.168163265306, 0.171428571429, 0.176326530612, 0.179591836735, 0.191020408163, 0.192653061224, 0.199183673469, 0.204081632653, 0.215510204082, 0.217142857143, 0.225306122449, 0.228571428571, 0.233469387755, 0.236734693878, 0.24, 0.244897959184, 0.248163265306, 0.249795918367, 0.256326530612, 0.261224489796, 0.272653061224, 0.274285714286, 0.282448979592, 0.285714285714, 0.290612244898, 0.29387755102, 0.297142857143, 0.302040816327, 0.305306122449, 0.30693877551, 0.313469387755, 0.318367346939, 0.329795918367, 0.331428571429, 0.339591836735, 0.342857142857, 0.347755102041, 0.351020408163, 0.354285714286, 0.359183673469, 0.362448979592, 0.364081632653, 0.370612244898, 0.375510204082, 0.38693877551, 0.388571428571, 0.396734693878, 0.4, 0.404897959184, 0.408163265306, 0.411428571429, 0.416326530612, 0.419591836735, 0.421224489796, 0.427755102041, 0.432653061224, 0.444081632653, 0.445714285714, 0.45387755102, 0.457142857143, 0.462040816327, 0.465306122449, 0.468571428571, 0.473469387755, 0.476734693878, 0.478367346939, 0.484897959184, 0.489795918367, 0.501224489796, 0.502857142857, 0.511020408163, 0.514285714286, 0.519183673469, 0.522448979592, 0.525714285714, 0.530612244898, 0.53387755102, 0.535510204082, 0.542040816327, 0.54693877551, 0.558367346939, 0.56, 0.568163265306, 0.571428571429, 0.576326530612, 0.579591836735, 0.582857142857, 0.587755102041, 0.591020408163, 0.592653061224, 0.599183673469, 0.604081632653, 0.615510204082, 0.617142857143, 0.625306122449, 0.628571428571, 0.633469387755, 0.636734693878, 0.64, 0.644897959184, 0.648163265306, 0.649795918367, 0.656326530612, 0.661224489796, 0.672653061224, 0.674285714286, 0.682448979592, 0.685714285714, 0.690612244898, 0.69387755102, 0.697142857143, 0.702040816327, 0.705306122449, 0.70693877551, 
0.713469387755, 0.718367346939, 0.729795918367, 0.731428571429, 0.739591836735, 0.742857142857, 0.747755102041, 0.751020408163, 0.754285714286, 0.759183673469, 0.762448979592, 0.764081632653, 0.770612244898, 0.775510204082, 0.78693877551, 0.788571428571, 0.796734693878, 0.8, 0.804897959184, 0.808163265306, 0.811428571429, 0.816326530612, 0.819591836735, 0.821224489796, 0.827755102041, 0.832653061224, 0.844081632653, 0.845714285714, 0.85387755102, 0.857142857143, 0.862040816327, 0.865306122449, 0.868571428571, 0.873469387755, 0.876734693878, 0.878367346939, 0.884897959184, 0.889795918367, 0.901224489796, 0.902857142857, 0.911020408163, 0.914285714286, 0.919183673469, 0.922448979592, 0.925714285714, 0.930612244898, 0.93387755102, 0.935510204082, 0.942040816327, 0.94693877551, 0.958367346939, 0.96, 0.968163265306, 0.971428571429, 0.976326530612, 0.979591836735, 0.982857142857, 0.987755102041, 0.991020408163, 0.992653061224, 0.999183673469]
pattern_odd=[0.004081632653, 0.015510204082, 0.017142857143, 0.025306122449, 0.028571428571, 0.033469387755, 0.036734693878, 0.04, 0.044897959184, 0.048163265306, 0.049795918367, 0.056326530612, 0.061224489796, 0.072653061224, 0.074285714286, 0.082448979592, 0.085714285714, 0.090612244898, 0.09387755102, 0.097142857143, 0.102040816327, 0.105306122449, 0.10693877551, 0.113469387755, 0.118367346939, 0.129795918367, 0.131428571429, 0.139591836735, 0.142857142857, 0.147755102041, 0.151020408163, 0.154285714286, 0.159183673469, 0.162448979592, 0.164081632653, 0.170612244898, 0.175510204082, 0.18693877551, 0.188571428571, 0.196734693878, 0.2, 0.204897959184, 0.208163265306, 0.211428571429, 0.216326530612, 0.219591836735, 0.221224489796, 0.227755102041, 0.232653061224, 0.244081632653, 0.245714285714, 0.25387755102, 0.257142857143, 0.262040816327, 0.265306122449, 0.268571428571, 0.273469387755, 0.276734693878, 0.278367346939, 0.284897959184, 0.289795918367, 0.301224489796, 0.302857142857, 0.311020408163, 0.314285714286, 0.319183673469, 0.322448979592, 0.325714285714, 0.330612244898, 0.33387755102, 0.335510204082, 0.342040816327, 0.34693877551, 0.358367346939, 0.36, 0.368163265306, 0.371428571429, 0.376326530612, 0.379591836735, 0.382857142857, 0.387755102041, 0.391020408163, 0.392653061224, 0.399183673469, 0.404081632653, 0.415510204082, 0.417142857143, 0.425306122449, 0.428571428571, 0.433469387755, 0.436734693878, 0.44, 0.444897959184, 0.448163265306, 0.449795918367, 0.456326530612, 0.461224489796, 0.472653061224, 0.474285714286, 0.482448979592, 0.485714285714, 0.490612244898, 0.49387755102, 0.497142857143, 0.502040816327, 0.505306122449, 0.50693877551, 0.513469387755, 0.518367346939, 0.529795918367, 0.531428571429, 0.539591836735, 0.542857142857, 0.547755102041, 0.551020408163, 0.554285714286, 0.559183673469, 0.562448979592, 0.564081632653, 0.570612244898, 0.575510204082, 0.58693877551, 0.588571428571, 0.596734693878, 0.6, 0.604897959184, 0.608163265306, 0.611428571429, 
0.616326530612, 0.619591836735, 0.621224489796, 0.627755102041, 0.632653061224, 0.644081632653, 0.645714285714, 0.65387755102, 0.657142857143, 0.662040816327, 0.665306122449, 0.668571428571, 0.673469387755, 0.676734693878, 0.678367346939, 0.684897959184, 0.689795918367, 0.701224489796, 0.702857142857, 0.711020408163, 0.714285714286, 0.719183673469, 0.722448979592, 0.725714285714, 0.730612244898, 0.73387755102, 0.735510204082, 0.742040816327, 0.74693877551, 0.758367346939, 0.76, 0.768163265306, 0.771428571429, 0.776326530612, 0.779591836735, 0.782857142857, 0.787755102041, 0.791020408163, 0.792653061224, 0.799183673469, 0.804081632653, 0.815510204082, 0.817142857143, 0.825306122449, 0.828571428571, 0.833469387755, 0.836734693878, 0.84, 0.844897959184, 0.848163265306, 0.849795918367, 0.856326530612, 0.861224489796, 0.872653061224, 0.874285714286, 0.882448979592, 0.885714285714, 0.890612244898, 0.89387755102, 0.897142857143, 0.902040816327, 0.905306122449, 0.90693877551, 0.913469387755, 0.918367346939, 0.929795918367, 0.931428571429, 0.939591836735, 0.942857142857, 0.947755102041, 0.951020408163, 0.954285714286, 0.959183673469, 0.962448979592, 0.964081632653, 0.970612244898, 0.975510204082, 0.98693877551, 0.988571428571, 0.996734693878]
pattern_even=[0.0, 0.004897959184, 0.008163265306, 0.011428571429, 0.016326530612, 0.019591836735, 0.021224489796, 0.027755102041, 0.032653061224, 0.044081632653, 0.045714285714, 0.05387755102, 0.057142857143, 0.062040816327, 0.065306122449, 0.068571428571, 0.073469387755, 0.076734693878, 0.078367346939, 0.084897959184, 0.089795918367, 0.101224489796, 0.102857142857, 0.111020408163, 0.114285714286, 0.119183673469, 0.122448979592, 0.125714285714, 0.130612244898, 0.13387755102, 0.135510204082, 0.142040816327, 0.14693877551, 0.158367346939, 0.16, 0.168163265306, 0.171428571429, 0.176326530612, 0.179591836735, 0.182857142857, 0.187755102041, 0.191020408163, 0.192653061224, 0.199183673469, 0.204081632653, 0.215510204082, 0.217142857143, 0.225306122449, 0.228571428571, 0.233469387755, 0.236734693878, 0.24, 0.244897959184, 0.248163265306, 0.249795918367, 0.256326530612, 0.261224489796, 0.272653061224, 0.274285714286, 0.282448979592, 0.285714285714, 0.290612244898, 0.29387755102, 0.297142857143, 0.302040816327, 0.305306122449, 0.30693877551, 0.313469387755, 0.318367346939, 0.329795918367, 0.331428571429, 0.339591836735, 0.342857142857, 0.347755102041, 0.351020408163, 0.354285714286, 0.359183673469, 0.362448979592, 0.364081632653, 0.370612244898, 0.375510204082, 0.38693877551, 0.388571428571, 0.396734693878, 0.4, 0.404897959184, 0.408163265306, 0.411428571429, 0.416326530612, 0.419591836735, 0.421224489796, 0.427755102041, 0.432653061224, 0.444081632653, 0.445714285714, 0.45387755102, 0.457142857143, 0.462040816327, 0.465306122449, 0.468571428571, 0.473469387755, 0.476734693878, 0.478367346939, 0.484897959184, 0.489795918367, 0.501224489796, 0.502857142857, 0.511020408163, 0.514285714286, 0.519183673469, 0.522448979592, 0.525714285714, 0.530612244898, 0.53387755102, 0.535510204082, 0.542040816327, 0.54693877551, 0.558367346939, 0.56, 0.568163265306, 0.571428571429, 0.576326530612, 0.579591836735, 0.582857142857, 0.587755102041, 0.591020408163, 0.592653061224, 
0.599183673469, 0.604081632653, 0.615510204082, 0.617142857143, 0.625306122449, 0.628571428571, 0.633469387755, 0.636734693878, 0.64, 0.644897959184, 0.648163265306, 0.649795918367, 0.656326530612, 0.661224489796, 0.672653061224, 0.674285714286, 0.682448979592, 0.685714285714, 0.690612244898, 0.69387755102, 0.697142857143, 0.702040816327, 0.705306122449, 0.70693877551, 0.713469387755, 0.718367346939, 0.729795918367, 0.731428571429, 0.739591836735, 0.742857142857, 0.747755102041, 0.751020408163, 0.754285714286, 0.759183673469, 0.762448979592, 0.764081632653, 0.770612244898, 0.775510204082, 0.78693877551, 0.788571428571, 0.796734693878, 0.8, 0.804897959184, 0.808163265306, 0.811428571429, 0.816326530612, 0.819591836735, 0.821224489796, 0.827755102041, 0.832653061224, 0.844081632653, 0.845714285714, 0.85387755102, 0.857142857143, 0.862040816327, 0.865306122449, 0.868571428571, 0.873469387755, 0.876734693878, 0.878367346939, 0.884897959184, 0.889795918367, 0.901224489796, 0.902857142857, 0.911020408163, 0.914285714286, 0.919183673469, 0.922448979592, 0.925714285714, 0.930612244898, 0.93387755102, 0.935510204082, 0.942040816327, 0.94693877551, 0.958367346939, 0.96, 0.968163265306, 0.971428571429, 0.976326530612, 0.979591836735, 0.982857142857, 0.987755102041, 0.991020408163, 0.992653061224, 0.999183673469]
# Lookup table keyed by pattern_even values: each key maps to a list of
# fractional offsets in [0, 1). Presumably the averaged sub-positions
# associated with that pattern entry — TODO confirm against the consumer
# of this table; generated offline, do not hand-edit.
averages_even={0.0: [0.0], 0.102857142857: [0.2, 0.8], 0.302040816327: [0.5714285714286, 0.4285714285714], 0.068571428571: [0.6, 0.4], 0.192653061224: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.171428571429: [0.0], 0.411428571429: [0.6, 0.4], 0.85387755102: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.468571428571: [0.6, 0.4], 0.089795918367: [0.2857142857143, 0.7142857142857], 0.96: [0.2, 0.8], 0.582857142857: [0.6, 0.4], 0.604081632653: [0.2857142857143, 0.7142857142857], 0.968163265306: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.530612244898: [0.5714285714286, 0.4285714285714], 0.261224489796: [0.2857142857143, 0.7142857142857], 0.076734693878: [0.2571428571429, 0.7428571428571, 0.4571428571429, 0.5428571428571], 0.045714285714: [0.2, 0.8], 0.796734693878: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.697142857143: [0.6, 0.4], 0.249795918367: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.644897959184: [0.5714285714286, 0.4285714285714], 0.236734693878: [0.1428571428571, 0.8571428571429], 0.364081632653: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.811428571429: [0.6, 0.4], 0.592653061224: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.633469387755: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.759183673469: [0.5714285714286, 0.4285714285714], 0.770612244898: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.421224489796: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.925714285714: [0.6, 0.4], 0.70693877551: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.285714285714: [0.0], 0.579591836735: [0.1428571428571, 0.8571428571429], 0.94693877551: [0.2857142857143, 0.7142857142857], 0.478367346939: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.519183673469: 
[0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.032653061224: [0.2857142857143, 0.7142857142857], 0.342857142857: [0.0], 0.987755102041: [0.5714285714286, 0.4285714285714], 0.158367346939: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.935510204082: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.359183673469: [0.5714285714286, 0.4285714285714], 0.587755102041: [0.5714285714286, 0.4285714285714], 0.8: [0.0], 0.4: [0.0], 0.290612244898: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.179591836735: [0.1428571428571, 0.8571428571429], 0.705306122449: [0.2571428571429, 0.7428571428571, 0.4571428571429, 0.5428571428571], 0.489795918367: [0.2857142857143, 0.7142857142857], 0.992653061224: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.114285714286: [0.0], 0.347755102041: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.062040816327: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.56: [0.2, 0.8], 0.636734693878: [0.1428571428571, 0.8571428571429], 0.215510204082: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.911020408163: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.101224489796: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.832653061224: [0.2857142857143, 0.7142857142857], 0.591020408163: [0.4571428571429, 0.2571428571429, 0.5428571428571, 0.7428571428571], 0.674285714286: [0.2, 0.8], 0.976326530612: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.462040816327: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.044081632653: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.747755102041: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.884897959184: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.788571428571: [0.2, 
0.8], 0.868571428571: [0.6, 0.4], 0.685714285714: [0.0], 0.775510204082: [0.2857142857143, 0.7142857142857], 0.819591836735: [0.4571428571429, 0.2571428571429, 0.5428571428571, 0.7428571428571], 0.902857142857: [0.2, 0.8], 0.502857142857: [0.2, 0.8], 0.274285714286: [0.2, 0.8], 0.233469387755: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.125714285714: [0.6, 0.4], 0.64: [0.6, 0.4], 0.473469387755: [0.5714285714286, 0.4285714285714], 0.331428571429: [0.2, 0.8], 0.982857142857: [0.6, 0.4], 0.057142857143: [0.0], 0.873469387755: [0.5714285714286, 0.4285714285714], 0.305306122449: [0.2571428571429, 0.7428571428571, 0.4571428571429, 0.5428571428571], 0.69387755102: [0.1428571428571, 0.8571428571429], 0.416326530612: [0.5714285714286, 0.4285714285714], 0.388571428571: [0.2, 0.8], 0.558367346939: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.30693877551: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.187755102041: [0.5714285714286, 0.4285714285714], 0.362448979592: [0.2571428571429, 0.7428571428571, 0.4571428571429, 0.5428571428571], 0.731428571429: [0.2, 0.8], 0.808163265306: [0.1428571428571, 0.8571428571429], 0.027755102041: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.445714285714: [0.2, 0.8], 0.168163265306: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.419591836735: [0.4571428571429, 0.2571428571429, 0.5428571428571, 0.7428571428571], 0.008163265306: [0.1428571428571, 0.8571428571429], 0.29387755102: [0.1428571428571, 0.8571428571429], 0.78693877551: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.142040816327: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.615510204082: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.501224489796: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.119183673469: [0.6285714285714, 0.7714285714286, 0.2285714285714, 
0.3714285714286], 0.999183673469: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.599183673469: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.225306122449: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.682448979592: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.54693877551: [0.2857142857143, 0.7142857142857], 0.228571428571: [0.0], 0.713469387755: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.199183673469: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.661224489796: [0.2857142857143, 0.7142857142857], 0.525714285714: [0.6, 0.4], 0.764081632653: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.827755102041: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.535510204082: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.672653061224: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.404897959184: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.751020408163: [0.1428571428571, 0.8571428571429], 0.542040816327: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.16: [0.2, 0.8], 0.942040816327: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.05387755102: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.073469387755: [0.5714285714286, 0.4285714285714], 0.889795918367: [0.2857142857143, 0.7142857142857], 0.865306122449: [0.1428571428571, 0.8571428571429], 0.754285714286: [0.6, 0.4], 0.13387755102: [0.2571428571429, 0.7428571428571, 0.4571428571429, 0.5428571428571], 0.351020408163: [0.1428571428571, 0.8571428571429], 0.93387755102: [0.2571428571429, 0.5428571428571, 0.4571428571429, 0.7428571428571], 0.217142857143: [0.2, 0.8], 0.649795918367: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.702040816327: [0.5714285714286, 
0.4285714285714], 0.514285714286: [0.0], 0.204081632653: [0.2857142857143, 0.7142857142857], 0.979591836735: [0.1428571428571, 0.8571428571429], 0.191020408163: [0.2571428571429, 0.7428571428571, 0.4571428571429, 0.5428571428571], 0.804897959184: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.272653061224: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.628571428571: [0.0], 0.465306122449: [0.1428571428571, 0.8571428571429], 0.922448979592: [0.1428571428571, 0.8571428571429], 0.576326530612: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.878367346939: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.329795918367: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.742857142857: [0.0], 0.011428571429: [0.6, 0.4], 0.019591836735: [0.2571428571429, 0.7428571428571, 0.4571428571429, 0.5428571428571], 0.690612244898: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.248163265306: [0.4571428571429, 0.2571428571429, 0.5428571428571, 0.7428571428571], 0.38693877551: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.857142857143: [0.0], 0.901224489796: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.14693877551: [0.2857142857143, 0.7142857142857], 0.457142857143: [0.0], 0.568163265306: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.111020408163: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.176326530612: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.971428571429: [0.0], 0.53387755102: [0.4571428571429, 0.2571428571429, 0.5428571428571, 0.7428571428571], 0.617142857143: [0.2, 0.8], 0.122448979592: [0.1428571428571, 0.8571428571429], 0.919183673469: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.282448979592: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.648163265306: 
[0.2571428571429, 0.7428571428571, 0.4571428571429, 0.5428571428571], 0.135510204082: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.182857142857: [0.6, 0.4], 0.084897959184: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.256326530612: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.408163265306: [0.1428571428571, 0.8571428571429], 0.914285714286: [0.0], 0.021224489796: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.816326530612: [0.5714285714286, 0.4285714285714], 0.476734693878: [0.2571428571429, 0.7428571428571, 0.4571428571429, 0.5428571428571], 0.762448979592: [0.4571428571429, 0.2571428571429, 0.5428571428571, 0.7428571428571], 0.845714285714: [0.2, 0.8], 0.004897959184: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.396734693878: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.876734693878: [0.4571428571429, 0.2571428571429, 0.5428571428571, 0.7428571428571], 0.24: [0.6, 0.4], 0.370612244898: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.065306122449: [0.1428571428571, 0.8571428571429], 0.45387755102: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.571428571429: [0.0], 0.991020408163: [0.4571428571429, 0.2571428571429, 0.5428571428571, 0.7428571428571], 0.930612244898: [0.5714285714286, 0.4285714285714], 0.427755102041: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.318367346939: [0.2857142857143, 0.7142857142857], 0.862040816327: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.313469387755: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.078367346939: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.522448979592: [0.1428571428571, 0.8571428571429], 0.484897959184: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.375510204082: [0.2857142857143, 
0.7142857142857], 0.444081632653: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.339591836735: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.432653061224: [0.2857142857143, 0.7142857142857], 0.729795918367: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.511020408163: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.130612244898: [0.5714285714286, 0.4285714285714], 0.718367346939: [0.2857142857143, 0.7142857142857], 0.297142857143: [0.6, 0.4], 0.244897959184: [0.5714285714286, 0.4285714285714], 0.016326530612: [0.5714285714286, 0.4285714285714], 0.844081632653: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.625306122449: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.821224489796: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.354285714286: [0.6, 0.4], 0.656326530612: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.958367346939: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.739591836735: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571]}
averages_odd={0.562448979592: [0.2571428571429, 0.7428571428571, 0.4571428571429, 0.5428571428571], 0.151020408163: [0.1428571428571, 0.8571428571429], 0.645714285714: [0.2, 0.8], 0.947755102041: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.048163265306: [0.2571428571429, 0.5428571428571, 0.4571428571429, 0.7428571428571], 0.676734693878: [0.2571428571429, 0.7428571428571, 0.4571428571429, 0.5428571428571], 0.044897959184: [0.5714285714286, 0.4285714285714], 0.76: [0.2, 0.8], 0.221224489796: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.265306122449: [0.1428571428571, 0.8571428571429], 0.208163265306: [0.1428571428571, 0.8571428571429], 0.874285714286: [0.2, 0.8], 0.779591836735: [0.1428571428571, 0.8571428571429], 0.322448979592: [0.1428571428571, 0.8571428571429], 0.905306122449: [0.2571428571429, 0.7428571428571, 0.4571428571429, 0.5428571428571], 0.118367346939: [0.2857142857143, 0.7142857142857], 0.988571428571: [0.2, 0.8], 0.551020408163: [0.1428571428571, 0.8571428571429], 0.379591836735: [0.1428571428571, 0.8571428571429], 0.105306122449: [0.2571428571429, 0.7428571428571, 0.4571428571429, 0.5428571428571], 0.65387755102: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.665306122449: [0.1428571428571, 0.8571428571429], 0.529795918367: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.142857142857: [0.0], 0.436734693878: [0.1428571428571, 0.8571428571429], 0.129795918367: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.301224489796: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.644081632653: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.085714285714: [0.0], 0.49387755102: [0.1428571428571, 0.8571428571429], 0.954285714286: [0.6, 0.4], 0.89387755102: [0.1428571428571, 0.8571428571429], 0.358367346939: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.758367346939: 
[0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.539591836735: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.2: [0.0], 0.072653061224: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.951020408163: [0.1428571428571, 0.8571428571429], 0.18693877551: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.415510204082: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.872653061224: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.028571428571: [0.0], 0.513469387755: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.518367346939: [0.2857142857143, 0.7142857142857], 0.897142857143: [0.6, 0.4], 0.684897959184: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.472653061224: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.98693877551: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.25387755102: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.768163265306: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.025306122449: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.147755102041: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.632653061224: [0.2857142857143, 0.7142857142857], 0.244081632653: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.799183673469: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.596734693878: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.311020408163: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.74693877551: [0.2857142857143, 0.7142857142857], 0.284897959184: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.611428571429: [0.6, 0.4], 0.913469387755: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 
0.368163265306: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.204897959184: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.861224489796: [0.2857142857143, 0.7142857142857], 0.570612244898: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.342040816327: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.725714285714: [0.6, 0.4], 0.50693877551: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.017142857143: [0.8, 0.2], 0.425306122449: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.673469387755: [0.5714285714286, 0.4285714285714], 0.975510204082: [0.2857142857143, 0.7142857142857], 0.399183673469: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.289795918367: [0.2857142857143, 0.7142857142857], 0.84: [0.6, 0.4], 0.621224489796: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.791020408163: [0.2571428571429, 0.7428571428571, 0.4571428571429, 0.5428571428571], 0.482448979592: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.787755102041: [0.5714285714286, 0.4285714285714], 0.456326530612: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.34693877551: [0.2857142857143, 0.7142857142857], 0.735510204082: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.097142857143: [0.6, 0.4], 0.139591836735: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.6: [0.0], 0.902040816327: [0.5714285714286, 0.4285714285714], 0.090612244898: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.547755102041: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.404081632653: [0.2857142857143, 0.7142857142857], 0.849795918367: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.714285714286: [0.0], 0.942857142857: [0.0], 0.268571428571: [0.6, 0.4], 0.461224489796: 
[0.2857142857143, 0.7142857142857], 0.964081632653: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.196734693878: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.828571428571: [0.0], 0.325714285714: [0.6, 0.4], 0.776326530612: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.056326530612: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.170612244898: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.382857142857: [0.6, 0.4], 0.273469387755: [0.5714285714286, 0.4285714285714], 0.931428571429: [0.2, 0.8], 0.588571428571: [0.2, 0.8], 0.890612244898: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.049795918367: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.619591836735: [0.2571428571429, 0.7428571428571, 0.4571428571429, 0.5428571428571], 0.44: [0.6, 0.4], 0.330612244898: [0.5714285714286, 0.4285714285714], 0.702857142857: [0.2, 0.8], 0.131428571429: [0.2, 0.8], 0.227755102041: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.497142857143: [0.6, 0.4], 0.387755102041: [0.5714285714286, 0.4285714285714], 0.278367346939: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.04: [0.6, 0.4], 0.036734693878: [0.1428571428571, 0.8571428571429], 0.848163265306: [0.2571428571429, 0.7428571428571, 0.4571428571429, 0.5428571428571], 0.444897959184: [0.5714285714286, 0.4285714285714], 0.604897959184: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.335510204082: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.722448979592: [0.1428571428571, 0.8571428571429], 0.188571428571: [0.2, 0.8], 0.033469387755: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.175510204082: [0.2857142857143, 0.7142857142857], 0.392653061224: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.608163265306: 
[0.1428571428571, 0.8571428571429], 0.162448979592: [0.4571428571429, 0.2571428571429, 0.5428571428571, 0.7428571428571], 0.962448979592: [0.2571428571429, 0.7428571428571, 0.4571428571429, 0.5428571428571], 0.257142857143: [0.0], 0.102040816327: [0.5714285714286, 0.4285714285714], 0.449795918367: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.245714285714: [0.2, 0.8], 0.58693877551: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.314285714286: [0.0], 0.232653061224: [0.2857142857143, 0.7142857142857], 0.662040816327: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.836734693878: [0.1428571428571, 0.8571428571429], 0.219591836735: [0.2571428571429, 0.7428571428571, 0.4571428571429, 0.5428571428571], 0.082448979592: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.701224489796: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.371428571429: [0.0], 0.262040816327: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.882448979592: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.015510204082: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.815510204082: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.428571428571: [0.0], 0.319183673469: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.627755102041: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.929795918367: [0.3142857142857, 0.8857142857143, 0.1142857142857, 0.6857142857143], 0.711020408163: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.485714285714: [0.0], 0.376326530612: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.575510204082: [0.2857142857143, 0.7142857142857], 0.154285714286: [0.6, 0.4], 0.742040816327: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.825306122449: [0.9428571428571, 
0.6571428571429, 0.0571428571429, 0.3428571428571], 0.433469387755: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.689795918367: [0.2857142857143, 0.7142857142857], 0.554285714286: [0.6, 0.4], 0.856326530612: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.939591836735: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.490612244898: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.502040816327: [0.5714285714286, 0.4285714285714], 0.804081632653: [0.2857142857143, 0.7142857142857], 0.211428571429: [0.6, 0.4], 0.668571428571: [0.6, 0.4], 0.970612244898: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.616326530612: [0.5714285714286, 0.4285714285714], 0.918367346939: [0.2857142857143, 0.7142857142857], 0.004081632653: [0.2857142857143, 0.7142857142857], 0.782857142857: [0.6, 0.4], 0.564081632653: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.302857142857: [0.2, 0.8], 0.113469387755: [0.8285714285714, 0.9714285714286, 0.0285714285714, 0.1714285714286], 0.730612244898: [0.5714285714286, 0.4285714285714], 0.276734693878: [0.2571428571429, 0.7428571428571, 0.4571428571429, 0.5428571428571], 0.10693877551: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.159183673469: [0.5714285714286, 0.4285714285714], 0.678367346939: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.996734693878: [0.9428571428571, 0.6571428571429, 0.0571428571429, 0.3428571428571], 0.36: [0.8, 0.2], 0.542857142857: [0.0], 0.844897959184: [0.5714285714286, 0.4285714285714], 0.33387755102: [0.2571428571429, 0.7428571428571, 0.4571428571429, 0.5428571428571], 0.09387755102: [0.1428571428571, 0.8571428571429], 0.792653061224: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.417142857143: [0.2, 0.8], 0.559183673469: [0.5714285714286, 0.4285714285714], 0.657142857143: [0.0], 0.959183673469: 
[0.5714285714286, 0.4285714285714], 0.391020408163: [0.2571428571429, 0.5428571428571, 0.4571428571429, 0.7428571428571], 0.216326530612: [0.5714285714286, 0.4285714285714], 0.90693877551: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143], 0.474285714286: [0.2, 0.8], 0.771428571429: [0.0], 0.505306122449: [0.2571428571429, 0.7428571428571, 0.4571428571429, 0.5428571428571], 0.074285714286: [0.2, 0.8], 0.448163265306: [0.2571428571429, 0.7428571428571, 0.4571428571429, 0.5428571428571], 0.817142857143: [0.2, 0.8], 0.719183673469: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.061224489796: [0.2857142857143, 0.7142857142857], 0.73387755102: [0.2571428571429, 0.7428571428571, 0.4571428571429, 0.5428571428571], 0.885714285714: [0.0], 0.531428571429: [0.2, 0.8], 0.833469387755: [0.6285714285714, 0.7714285714286, 0.2285714285714, 0.3714285714286], 0.164081632653: [0.5142857142857, 0.4857142857143, 0.9142857142857, 0.0857142857143]} | 35,632 | 34,745 |
class Solution:
    """Search a sorted 2-D matrix for a target value (LeetCode "Search a 2D Matrix").

    Assumes each row is sorted ascending and rows are ordered so that every
    element of row i is <= every element of row i+1 (rows may differ in length).
    searchMatrix runs in O(log(rows) + log(row length)).

    Fixes over the original recursive version:
      * no IndexError on empty rows (e.g. matrix == [[]]) — the old code
        indexed row[-1] before its empty-row guard could ever run;
      * the unreachable `len(row) == 0 and x == 0` guard is gone;
      * iterative search, so no recursion-depth concerns on huge inputs.
    """

    def _findRow(self, x):
        """Return the index of the row whose [first, last] span contains x, or -1."""
        lo, hi = 0, len(self.nums) - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            row = self.nums[mid]
            if not row:
                # An empty row gives nothing to bracket the search with;
                # report "not found" instead of crashing on row[-1].
                return -1
            if row[0] <= x <= row[-1]:
                return mid
            if row[-1] > x:
                hi = mid - 1
            else:
                lo = mid + 1
        return -1

    def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
        """Return True if target appears anywhere in matrix, else False."""
        self.nums = matrix  # kept for backward compatibility with the old helpers' state
        rowIndex = self._findRow(target)
        if rowIndex < 0:
            return False
        row = matrix[rowIndex]
        # Plain binary search inside the single candidate row.
        lo, hi = 0, len(row) - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            if row[mid] == target:
                return True
            if row[mid] > target:
                hi = mid - 1
            else:
                lo = mid + 1
        return False
import Gnuplot
import mpi
from Spheral import *
from math import *
import numpy
import os
from SpheralTestUtilities import multiSort
SpheralGnuPlotCache = []
from spheralDimensions import spheralDimensions
dims = spheralDimensions()
#-------------------------------------------------------------------------------
# Define a dummy Gnuplot class, so that non-master processes can silently
# and harmlessly accept Gnuplot commands.
#-------------------------------------------------------------------------------
class fakeGnuplot:
    """Stand-in for Gnuplot.Gnuplot on non-master MPI ranks.

    Every operation silently accepts any arguments and does nothing, so the
    plotting code can be executed unmodified on every rank.
    """
    def __init__(self):
        pass

    def _noop(self, *args, **kwargs):
        # Shared do-nothing implementation for every plotting entry point.
        return None

    __call__ = _noop
    plot = _noop
    replot = _noop
    refresh = _noop
    xlabel = _noop
    ylabel = _noop
    title = _noop
    hardcopy = _noop
def generateNewGnuPlot(persist = False):
    """Return a real Gnuplot window on the master rank, a silent fake elsewhere.

    Honors the GNUTERM environment variable (when set) as the gnuplot terminal.
    """
    if mpi.rank != 0:
        return fakeGnuplot()
    gplot = Gnuplot.Gnuplot(persist = persist)
    term = os.environ.get("GNUTERM")
    if term is not None:
        gplot("set term %s" % term)
    return gplot
#-------------------------------------------------------------------------------
# Since the default Gnuplot.py doesn't support png output, I'll add it here
# myself.
#-------------------------------------------------------------------------------
def pngFile(plot, filename,
            color = 1,
            fontSize = "medium"):
    """Dump the given plot to a png file, then restore the x11 terminal.

    plot     : Gnuplot instance to render.
    filename : output file name; ".png" is appended when missing.
    color    : truthy -> request color output from the png terminal.
    fontSize : gnuplot png font-size keyword.
    """
    if not filename.endswith(".png"):
        filename += ".png"
    terminal = "set terminal png %s" % fontSize
    if color:
        terminal += " color"
    plot(terminal)
    plot.set_string("output", filename)
    plot.refresh()
    # Switch back to interactive output once the file is written.
    plot("set terminal x11")
    plot.set_string("output")
    return
#-------------------------------------------------------------------------------
# Calculate the radial velocity component, given a FieldList of positions
# and a FieldList of velocities.
#-------------------------------------------------------------------------------
def radialVelocityFieldList(positions,
                            velocities):
    """Build a ScalarFieldList holding the radial velocity component v . rhat.

    positions, velocities : FieldLists of matching layout.  The dimensionality
    is deduced from the FieldList type name suffix ("1d", "2d", "3d").
    """
    suffix = type(positions).__name__[-2:]
    radialVelocity = None
    makeField = None
    if suffix == "1d":
        radialVelocity = ScalarFieldList1d()
        makeField = ScalarField1d
    elif suffix == "2d":
        radialVelocity = ScalarFieldList2d()
        makeField = ScalarField2d
    elif suffix == "3d":
        radialVelocity = ScalarFieldList3d()
        makeField = ScalarField3d
    radialVelocity.copyFields()
    # One destination field per source field, bound to the same NodeList.
    for field in positions:
        radialVelocity.appendField(makeField("radial velocity", field.nodeList()))
    assert positions.numFields == velocities.numFields == radialVelocity.numFields
    for i in xrange(positions.numFields):
        rf = positions[i]
        vf = velocities[i]
        out = radialVelocity[i]
        assert rf.numElements == vf.numElements == out.numElements
        for j in xrange(rf.numElements):
            # Project the velocity onto the radial unit vector.
            out[j] = vf[j].dot(rf[j].unitVector())
    return radialVelocity
#-------------------------------------------------------------------------------
# Calculate the azimuthal velocity component, given a FieldList of positions
# and a FieldList of velocities.
#-------------------------------------------------------------------------------
def azimuthalVelocityFieldList(positions,
                               velocities):
    """Build a ScalarFieldList of the azimuthal (tangential) velocity component.

    For each node the position unit vector is rotated 90 degrees in the x-y
    plane ((x, y) -> (-y, x)) and dotted with the velocity.
    """
    suffix = type(positions).__name__[-2:]
    azimuthalVelocity = None
    makeField = None
    if suffix == "1d":
        azimuthalVelocity = ScalarFieldList1d()
        makeField = ScalarField1d
    elif suffix == "2d":
        azimuthalVelocity = ScalarFieldList2d()
        makeField = ScalarField2d
    elif suffix == "3d":
        azimuthalVelocity = ScalarFieldList3d()
        makeField = ScalarField3d
    azimuthalVelocity.copyFields()
    # One destination field per source field, bound to the same NodeList.
    for field in positions:
        azimuthalVelocity.appendField(makeField("azimuthal velocity", field.nodeList()))
    assert positions.numFields == velocities.numFields == azimuthalVelocity.numFields
    for i in xrange(positions.numFields):
        rf = positions[i]
        vf = velocities[i]
        out = azimuthalVelocity[i]
        assert rf.numElements == vf.numElements == out.numElements
        for j in xrange(rf.numElements):
            # Rotate the radial unit vector a quarter turn to get the tangent.
            tangent = rf[j].unitVector()
            tx = tangent.x
            ty = tangent.y
            tangent.x = -ty
            tangent.y = tx
            out[j] = vf[j].dot(tangent)
    return azimuthalVelocity
#-------------------------------------------------------------------------------
# Helper method to determine the angular momentum per node.
#-------------------------------------------------------------------------------
def angularMomentum(mass, position, velocity):
    """Return a flat list of per-node angular momenta m_i (r_i x v_i).

    All three arguments are FieldLists with the same field layout; only
    internal nodes contribute.
    """
    assert mass.numFields == position.numFields == velocity.numFields
    result = []
    for mField, rField, vField in zip(mass, position, velocity):
        n = mField.nodeList().numInternalNodes
        assert (n ==
                rField.nodeList().numInternalNodes ==
                vField.nodeList().numInternalNodes)
        for j in xrange(n):
            result.append(rField[j].cross(vField[j]) * mField[j])
    return result
#-------------------------------------------------------------------------------
# Plot a FieldList
#-------------------------------------------------------------------------------
def plotFieldList(fieldList,
                  xFunction = "%s.x",
                  yFunction = "%s",
                  plotGhosts = False,
                  colorNodeLists = False,
                  plot = None,
                  userXRange = [None, None],
                  userYRange = [None, None],
                  plotStyle = "lines",
                  lineStyle = "linetype -1 linewidth 1 pointtype 4 pointsize 1.0",
                  winTitle = None,
                  lineTitle = "",
                  xlabel = None,
                  ylabel = None,
                  filterFunc = None):
    """Plot the values of a FieldList, gathering data from all MPI ranks to rank 0.

    fieldList      : the FieldList to plot.
    xFunction      : template eval'd per node position to get the abscissa
                     (e.g. "%s.x" -> position.x).
    yFunction      : template eval'd per field value to get the ordinate.
    plotGhosts     : include ghost nodes as well as internal ones.
    colorNodeLists : draw each NodeList's values as a separately colored curve.
    plot           : existing Gnuplot object to draw into; a new one is created
                     (and cached) when None.
    userXRange,
    userYRange     : [min, max] axis limits; None entries are autoscaled.
    plotStyle      : gnuplot curve style ("lines", "points", ...).
    lineStyle      : currently unused (historical; see the commented-out
                     "set linestyle" calls below).
    winTitle,
    xlabel, ylabel : plot annotations, applied only when given.
    lineTitle      : legend label for the data.
    filterFunc     : optional predicate on the node position; nodes for which
                     it returns False are skipped.

    Returns the Gnuplot object (a fakeGnuplot on non-master ranks).
    """
    if plot is None:
        plot = generateNewGnuPlot()
        SpheralGnuPlotCache.append(plot)
    # Default filter accepts every node.
    def nullFilter(pos):
        return True
    if filterFunc is None:
        filterFunc = nullFilter
    # Gather the fieldList info across all processors to process 0.
    globalNumNodes = []
    globalX = []
    globalY = []
    for field in fieldList:
        if plotGhosts:
            xvals = field.nodeList().positions().allValues()
            yvals = field.allValues()
        else:
            xvals = field.nodeList().positions().internalValues()
            yvals = field.internalValues()
        localX = []
        localY = []
        for x, y in zip(xvals, yvals):
            if filterFunc(x):
                # The templates are eval'd against the loop-local names x/y.
                localX.append(eval(xFunction % "x"))
                localY.append(eval(yFunction % "y"))
        n = len(localX)
        # NOTE(review): the "if mpi" fallback looks vestigial -- mpi.rank and
        # mpi.barrier() below are used unconditionally; confirm mpi is always
        # importable here.
        if mpi:
            globalNumNodes.append(mpi.allreduce(n, mpi.SUM))
            globalX.extend(mpi.allreduce(localX, mpi.SUM))
            globalY.extend(mpi.allreduce(localY, mpi.SUM))
        else:
            globalNumNodes.append(n)
            globalX.extend(localX)
            globalY.extend(localY)
    if mpi.rank == 0:
        # Find the total number of nodes.
        totalNumNodes = sum(globalNumNodes)
        assert(len(globalNumNodes) == fieldList.numFields)
        assert(len(globalX) == totalNumNodes)
        assert(len(globalY) == totalNumNodes)
        # Copy the input ranges, since for some reason these seem to have been
        # preserved between calls?  (The defaults are shared mutable lists, so
        # mutating them in place would leak between invocations.)
        xRange = userXRange[:]
        yRange = userYRange[:]
        # Set the line style
        ## plot("set linestyle 1 " + lineStyle)
        # Set the labels.
        if winTitle: plot.title(winTitle)
        if xlabel: plot.xlabel(xlabel)
        if ylabel: plot.ylabel(ylabel)
        # Set the ranges.
        xmin = 1e30
        xmax = -1e30
        ymin = 1e30
        ymax = -1e30
        for x in globalX:
            xmin = min(xmin, x)
            xmax = max(xmax, x)
        for y in globalY:
            ymin = min(ymin, y)
            ymax = max(ymax, y)
        # Degenerate (constant) data still gets a non-empty plotting range.
        if xmin == xmax:
            xmin = xmin - 0.5
            xmax = xmax + 0.5
        if ymin == ymax:
            ymin = ymin - 0.5
            ymax = ymax + 0.5
        if xRange[0] == None: xRange[0] = xmin
        if xRange[1] == None: xRange[1] = xmax
        if yRange[0] == None: yRange[0] = ymin - 0.05*max(1e-5, ymax - ymin)
        if yRange[1] == None: yRange[1] = ymax + 0.05*max(1e-5, ymax - ymin)
        plot("set xrange [%f:%f]" % tuple(xRange))
        plot("set yrange [%f:%f]" % tuple(yRange))
        # Finally, loop over the fields and do the deed.
        assert(len(globalX) == len(globalY))
        if colorNodeLists:
            # One curve per NodeList, each with its own legend entry.
            legendNodeList = {}
            for i in xrange(fieldList.numFields):
                legendNodeList[i] = lineTitle + ": " + fieldList[i].nodeList().name
            cumulativeNumNodes = 0
            for fieldID in xrange(len(globalNumNodes)):
                n = globalNumNodes[fieldID]
                iNodeList = fieldID % fieldList.numFields
                x = numpy.array(globalX[cumulativeNumNodes:
                                        cumulativeNumNodes + n])
                y = numpy.array(globalY[cumulativeNumNodes:
                                        cumulativeNumNodes + n])
                if n:
                    ## plot("set linestyle %i lt %i pt %i" % (iNodeList + 1,
                    ##                                        iNodeList + 1,
                    ##                                        iNodeList + 1))
                    legend = legendNodeList[iNodeList]
                    # Blank the legend after the first use so repeated segments
                    # of the same NodeList don't duplicate the key entry.
                    legendNodeList[iNodeList] = None
                    data = Gnuplot.Data(x, y,
                                        with_ = plotStyle + " lt %i" % iNodeList,
                                        title = legend,
                                        inline = True)
                    plot.replot(data)
                    SpheralGnuPlotCache.append(data)
                cumulativeNumNodes += n
        else:
            # A single curve containing every node.
            x = numpy.array(globalX)
            y = numpy.array(globalY)
            data = Gnuplot.Data(x, y,
                                with_ = plotStyle + " lt -1 pt 3",
                                title = lineTitle,
                                inline = True)
            plot.replot(data)
            SpheralGnuPlotCache.append(data)
            lineTitle = None
    # That's it, return the Gnuplot object.
    mpi.barrier()
    return plot
#-------------------------------------------------------------------------------
# Plot the mass density, velocity, pressure, and smoothing scale for the fluid
# node lists in the given data base. Implicitly assuming 1-D.
#-------------------------------------------------------------------------------
def plotState(thingus,
              plotGhosts = False,
              colorNodeLists = False,
              plotStyle = "points",
              xFunction = "%s.x",
              vecyFunction = "%s.x",
              tenyFunction = "%s.xx ** -1",
              lineTitle = "Simulation",
              filterFunc = None):
    """Plot mass density, velocity, thermal energy, pressure, and smoothing
    scale for a State or DataBase object (implicitly 1-D).

    thingus : either a State{dim} or a DataBase{dim}; the dimension is taken
              from the type name suffix.
    Returns the tuple (rhoPlot, velPlot, epsPlot, PPlot, HPlot).
    """
    dim = type(thingus).__name__[-2:]
    if isinstance(thingus, eval("State%s" % dim)):
        # A State: pull the registered hydro fields.
        rho = thingus.scalarFields(HydroFieldNames.massDensity)
        vel = thingus.vectorFields(HydroFieldNames.velocity)
        eps = thingus.scalarFields(HydroFieldNames.specificThermalEnergy)
        P = thingus.scalarFields(HydroFieldNames.pressure)
        H = thingus.symTensorFields(HydroFieldNames.H)
    else:
        # A DataBase: use the fluid fields, computing pressure on the fly.
        assert isinstance(thingus, eval("DataBase%s" % dim))
        rho = thingus.fluidMassDensity
        vel = thingus.fluidVelocity
        eps = thingus.fluidSpecificThermalEnergy
        P = thingus.newFluidScalarFieldList(0.0, "pressure")
        thingus.fluidPressure(P)
        H = thingus.fluidHfield
    # Options shared by every panel.
    shared = dict(xFunction = xFunction,
                  plotGhosts = plotGhosts,
                  colorNodeLists = colorNodeLists,
                  plotStyle = plotStyle,
                  lineTitle = lineTitle,
                  xlabel = "x",
                  filterFunc = filterFunc)
    rhoPlot = plotFieldList(rho, winTitle = "Mass Density", **shared)
    velPlot = plotFieldList(vel, yFunction = vecyFunction,
                            winTitle = "Velocity", **shared)
    epsPlot = plotFieldList(eps, winTitle = "Specific Thermal Energy", **shared)
    PPlot = plotFieldList(P, winTitle = "Pressure", **shared)
    HPlot = plotFieldList(H, yFunction = tenyFunction,
                          winTitle = "Smoothing scale", **shared)
    return rhoPlot, velPlot, epsPlot, PPlot, HPlot
#-------------------------------------------------------------------------------
# Plot the state vs. radius
#-------------------------------------------------------------------------------
def plotRadialState(dataBase,
                    plotGhosts = False,
                    colorNodeLists = False,
                    lineTitle = "Simulation",
                    filterFunc = None):
    """Plot rho, radial velocity, eps, pressure, and smoothing scale vs. radius.

    Returns the tuple (rhoPlot, velPlot, epsPlot, PPlot, HPlot).
    """
    # Options shared by every panel: abscissa is the radial distance.
    shared = dict(xFunction = "%s.magnitude()",
                  plotGhosts = plotGhosts,
                  colorNodeLists = colorNodeLists,
                  plotStyle = "points",
                  lineTitle = lineTitle,
                  xlabel = "r",
                  filterFunc = filterFunc)
    rhoPlot = plotFieldList(dataBase.fluidMassDensity,
                            winTitle = "Mass density", **shared)
    radialVelocity = radialVelocityFieldList(dataBase.fluidPosition,
                                             dataBase.fluidVelocity)
    velPlot = plotFieldList(radialVelocity,
                            winTitle = " Radial Velocity", **shared)
    epsPlot = plotFieldList(dataBase.fluidSpecificThermalEnergy,
                            winTitle = "Specific Thermal Energy", **shared)
    # Pressure is derived on the fly from the fluid state.
    fluidPressure = dataBase.newFluidScalarFieldList(0.0, "pressure")
    dataBase.fluidPressure(fluidPressure)
    PPlot = plotFieldList(fluidPressure,
                          winTitle = "Pressure", **shared)
    HPlot = plotFieldList(dataBase.fluidHfield,
                          yFunction = "%s.xx**-1",
                          winTitle = "Smoothing scale", **shared)
    return rhoPlot, velPlot, epsPlot, PPlot, HPlot
#-------------------------------------------------------------------------------
# Overplot the answer on results from plotState.
#-------------------------------------------------------------------------------
def plotAnswer(answerObject, time,
               rhoPlot = None,
               velPlot = None,
               epsPlot = None,
               PPlot = None,
               APlot = None,
               HPlot = None,
               x = None):
    """Overlay the analytic solution at the given time on existing plots.

    answerObject must provide solution(time, x) returning one of
    (x, v, u, rho, P, h), (x, v, u, rho, P, A, h), or (x, v, u, rho, P).
    Any plot argument left as None is skipped.
    """
    # The various answer generators return tuples of different lengths;
    # probe the signatures from most common to least.
    try:
        x, v, u, rho, P, h = answerObject.solution(time, x)
        A = None
    except:
        try:
            x, v, u, rho, P, A, h = answerObject.solution(time, x)
        except:
            x, v, u, rho, P = answerObject.solution(time, x)
            A = None
            h = None

    def overplot(target, yvals):
        # Draw one "Solution" curve on an existing plot and cache it.
        curve = Gnuplot.Data(x, yvals,
                             with_="lines lt 7 lw 2",
                             title="Solution",
                             inline = True)
        SpheralGnuPlotCache.append(curve)
        target.replot(curve)

    if rhoPlot is not None:
        overplot(rhoPlot, rho)
    if velPlot is not None:
        overplot(velPlot, v)
    if epsPlot is not None:
        overplot(epsPlot, u)
    if PPlot is not None:
        overplot(PPlot, P)
    if APlot is not None and A:
        overplot(APlot, A)
    if HPlot is not None:
        overplot(HPlot, h)
    return
#-------------------------------------------------------------------------------
# Plot the node positions
#-------------------------------------------------------------------------------
def plotNodePositions2d(thingy,
                        xFunction = "%s.x",
                        yFunction = "%s.y",
                        plotGhosts = False,
                        colorNodeLists = True,
                        colorDomains = False,
                        title = "",
                        style = "points",
                        persist = None):
    """Scatter-plot node positions from a DataBase2d (or list of NodeLists).

    thingy         : a DataBase2d, or directly an iterable of NodeLists.
    xFunction,
    yFunction      : templates eval'd on each position to get plot coordinates.
    plotGhosts     : include ghost nodes.
    colorNodeLists : color the points by NodeList (mutually exclusive with
                     colorDomains).
    colorDomains   : color the points by MPI domain instead.
    title, style   : plot title and gnuplot point style.
    persist        : forwarded to the Gnuplot window constructor.

    Returns the Gnuplot object on rank 0, a fakeGnuplot on all other ranks.
    """
    assert colorNodeLists + colorDomains <= 1
    if isinstance(thingy, DataBase2d):
        nodeLists = thingy.nodeLists()
    else:
        nodeLists = thingy
    # Gather the node positions across all domains.
    # Loop over all the NodeLists.
    xNodes = []
    yNodes = []
    for nodeList in nodeLists:
        if plotGhosts:
            pos = nodeList.positions().allValues()
        else:
            pos = nodeList.positions().internalValues()
        # The templates are eval'd against the loop-local name x.
        xNodes.append([eval(xFunction % "x") for x in pos])
        yNodes.append([eval(yFunction % "x") for x in pos])
    assert len(xNodes) == len(nodeLists)
    assert len(xNodes) == len(yNodes)
    globalXNodes = mpi.gather(xNodes)
    globalYNodes = mpi.gather(yNodes)
    if mpi.rank == 0:
        assert len(globalXNodes) == mpi.procs
        assert len(globalYNodes) == mpi.procs
        xlist, ylist = [], []
        if colorDomains:
            # One point set per MPI domain: flatten each domain's per-NodeList
            # sublists into a single sequence.
            for xDomain, yDomain in zip(globalXNodes, globalYNodes):
                assert len(xDomain) == len(nodeLists)
                assert len(yDomain) == len(nodeLists)
                xlist.append([])
                ylist.append([])
                for xx in xDomain:
                    xlist[-1].extend(xx)
                for yy in yDomain:
                    ylist[-1].extend(yy)
            assert len(xlist) == mpi.procs
            assert len(ylist) == mpi.procs
        elif colorNodeLists:
            # One point set per NodeList: merge the same NodeList slot across
            # all domains.
            for i in xrange(len(nodeLists)):
                xlist.append([])
                ylist.append([])
            for xDomain, yDomain in zip(globalXNodes, globalYNodes):
                assert len(xDomain) == len(nodeLists)
                assert len(yDomain) == len(nodeLists)
                for i in xrange(len(nodeLists)):
                    xlist[i].extend(xDomain[i])
                    ylist[i].extend(yDomain[i])
            assert len(xlist) == len(nodeLists)
            assert len(ylist) == len(nodeLists)
        else:
            # A single undifferentiated point set.
            xlist, ylist = [[]], [[]]
            for xDomain, yDomain in zip(globalXNodes, globalYNodes):
                print len(xDomain), len(nodeLists)
                assert len(xDomain) == len(nodeLists)
                assert len(yDomain) == len(nodeLists)
                for i in xrange(len(nodeLists)):
                    xlist[0].extend(xDomain[i])
                    ylist[0].extend(yDomain[i])
        plot = generateNewGnuPlot(persist = persist)
        plot("set size square")
        # NOTE(review): this assigns over the Gnuplot title *method* rather
        # than calling plot.title(title) -- looks suspicious; confirm intent.
        plot.title = title
        assert len(xlist) == len(ylist)
        for x, y in zip(xlist, ylist):
            data = Gnuplot.Data(x, y,
                                with_ = style,
                                inline = True)
            plot.replot(data)
            SpheralGnuPlotCache.append(data)
        return plot
    else:
        return fakeGnuplot()
#-------------------------------------------------------------------------------
# Plot all the nodes in the given data base, and then color the control/ghost
# nodes of the given boundary condition independently.
#-------------------------------------------------------------------------------
def plotBoundaryNodes(dataBase, boundary):
    """Plot every internal node in the data base together with the given
    boundary condition's control and ghost nodes, each as its own colored
    point set.
    """
    def xyForIDs(nodeList, nodeIDs):
        # (x, y) tuples for the given node indices of one NodeList.
        return [(nodeList.positions()[i].x, nodeList.positions()[i].y)
                for i in nodeIDs]

    internal = []
    control = []
    ghost = []
    for nodeList in dataBase.nodeLists():
        # All internal node positions.
        internal.extend([(r.x, r.y)
                         for r in list(nodeList.positions())[:nodeList.numInternalNodes]])
        # Control and ghost node positions as reported by the boundary.
        control.extend(xyForIDs(nodeList, boundary.controlNodes(nodeList)))
        ghost.extend(xyForIDs(nodeList, boundary.ghostNodes(nodeList)))
    return plotXYTuples([internal, control, ghost])
#-------------------------------------------------------------------------------
# Plot the given sequences of (x,y) pairs, each with a distinct color.
# [ [(x0,y0), (x1,y1), ...],
# [(x0,y0), (x1,y1), ...],
# .
# .
# .
# [(x0,y0), (x1,y1), ...] ]
#-------------------------------------------------------------------------------
def plotXYTuples(listOfXYTuples):
    """Plot each sequence of (x, y) pairs as its own point set.

    Input shape: [ [(x0, y0), (x1, y1), ...], ... ] -- one inner list per
    colored point set.  Returns the Gnuplot object.
    """
    # Scan the overall bounding box.  (Currently informational only -- the
    # computed ranges are not applied to the plot.)
    xmin, ymin, xmax, ymax = findPairMinMax(listOfXYTuples[0])
    for pairs in listOfXYTuples[1:]:
        x0, y0, x1, y1 = findPairMinMax(pairs)
        xmin = min(xmin, x0)
        ymin = min(ymin, y0)
        xmax = max(xmax, x1)
        ymax = max(ymax, y1)
    # Create our plot result.
    plot = generateNewGnuPlot()
    plot("set size square")
    # One data set per input sequence.
    for pairs in listOfXYTuples:
        # dtype=float matches the original zero-initialized float arrays.
        xs = numpy.array([p[0] for p in pairs], dtype=float)
        ys = numpy.array([p[1] for p in pairs], dtype=float)
        data = Gnuplot.Data(xs, ys,
                            with_ = "points",
                            inline = True)
        SpheralGnuPlotCache.append(data)
        plot.replot(data)
    return plot
#-------------------------------------------------------------------------------
# Find the (min, max) of a set of pairs.
#-------------------------------------------------------------------------------
def findPairMinMax(listOfPairs):
    """Return the bounding box (minX, minY, maxX, maxY) of a list of (x, y) pairs.

    An empty input returns the (1e90, 1e90, -1e90, -1e90) sentinels unchanged.
    """
    minX, minY = 1e90, 1e90
    maxX, maxY = -1e90, -1e90
    for pair in listOfPairs:
        px = pair[0]
        py = pair[1]
        if px < minX: minX = px
        if py < minY: minY = py
        if px > maxX: maxX = px
        if py > maxY: maxY = py
    return minX, minY, maxX, maxY
#-------------------------------------------------------------------------------
# Plot the velocity field as a set of arrows.
# This is maintained here for backward compatibility, as a specialization of
# plotVectorField2d.
#-------------------------------------------------------------------------------
def plotVelocityField2d(dataBase,
                        plotGhosts = False,
                        velMultiplier = 1.0,
                        colorNodeLists = False,
                        colorDomains = False,
                        title = ""):
    """Plot the velocity field as arrows.

    Backward-compatibility shim: forwards to plotVectorField2d using the
    DataBase's global velocity FieldList.
    """
    return plotVectorField2d(dataBase,
                             dataBase.globalVelocity,
                             plotGhosts = plotGhosts,
                             vectorMultiplier = velMultiplier,
                             colorNodeLists = colorNodeLists,
                             colorDomains = colorDomains,
                             title = title)
#-------------------------------------------------------------------------------
# Plot the node spacing in 1D.
#-------------------------------------------------------------------------------
def plotNodeSpacing1d(dataBase):
    """Plot the 1D node spacing (gap to the next node) vs. position."""
    pos = dataBase.globalPosition
    xvals = []
    for ifield in xrange(len(pos)):
        field = pos[ifield]
        xvals.extend([field[i].x for i in xrange(field.numInternalElements)])
    # Combine positions from every processor and order them.
    xvals = mpi.allreduce(xvals, mpi.SUM)
    xvals.sort()
    # Gap between consecutive nodes; the final node reuses the last gap.
    deltas = [b - a for a, b in zip(xvals[:-1], xvals[1:])]
    deltas.append(xvals[-1] - xvals[-2])
    plot = generateNewGnuPlot()
    plot.plot(Gnuplot.Data(xvals, deltas, with_="lines"))
    return plot
#-------------------------------------------------------------------------------
# Plot an arbitrary vector field as a set of arrows.
#-------------------------------------------------------------------------------
def plotVectorField2d(dataBase, fieldList,
                      plotGhosts = False,
                      vectorMultiplier = 1.0,
                      colorNodeLists = False,
                      colorDomains = False,
                      title = ""):
    """Plot an arbitrary 2D vector FieldList as a set of arrows.

    dataBase         : DataBase holding the NodeLists.
    fieldList        : vector FieldList (one Field per NodeList).
    plotGhosts       : include ghost nodes when True.
    vectorMultiplier : scale factor applied to the plotted vectors.
    colorNodeLists   : color arrows per NodeList (exclusive with colorDomains).
    colorDomains     : color arrows per MPI domain.
    title            : title for the plot.

    Returns the gnuplot object on MPI rank 0, None on all other ranks.
    """
    assert colorNodeLists + colorDomains <= 1

    # Gather the node positions and vectors across all domains.
    # Loop over all the NodeLists.
    localNumNodes = []
    xNodes = []
    yNodes = []
    vxNodes = []
    vyNodes = []
    for i in xrange(dataBase.numNodeLists):
        nodeList = dataBase.nodeLists()[i]
        assert i < fieldList.numFields
        vectorField = fieldList[i]
        if plotGhosts:
            n = nodeList.numNodes
        else:
            n = nodeList.numInternalNodes
        localNumNodes.append(n)
        xNodes += numpy.array(map(lambda x: x.x, list(nodeList.positions())[:n]))
        yNodes += numpy.array(map(lambda x: x.y, list(nodeList.positions())[:n]))
        vxNodes += numpy.array(map(lambda x: x.x, list(vectorField)[:n]))*vectorMultiplier
        vyNodes += numpy.array(map(lambda x: x.y, list(vectorField)[:n]))*vectorMultiplier
    assert len(xNodes) == len(yNodes) == len(vxNodes) == len(vyNodes)

    numDomainNodes = [len(xNodes)]
    numNodesPerDomain = mpi.gather(numDomainNodes)
    globalNumNodes = mpi.gather(localNumNodes)
    globalXNodes = mpi.gather(xNodes)
    globalYNodes = mpi.gather(yNodes)
    globalVxNodes = mpi.gather(vxNodes)
    globalVyNodes = mpi.gather(vyNodes)

    if mpi.rank == 0:
        plot = generateNewGnuPlot()
        plot("set size square")
        plot.title = title

        if colorDomains:
            # One dataset (and linestyle) per MPI domain.
            cumulativeN = 0
            for domain in xrange(len(numNodesPerDomain)):
                n = numNodesPerDomain[domain]
                x = numpy.array(globalXNodes[cumulativeN:cumulativeN + n])
                y = numpy.array(globalYNodes[cumulativeN:cumulativeN + n])
                vx = numpy.array(globalVxNodes[cumulativeN:cumulativeN + n])
                vy = numpy.array(globalVyNodes[cumulativeN:cumulativeN + n])
                cumulativeN += n
                data = Gnuplot.Data(x, y, vx, vy,
                                    with_ = "vector ls %i" % (domain + 1),
                                    inline = True)
                plot.replot(data)
                SpheralGnuPlotCache.append(data)

        elif colorNodeLists:
            # One dataset (and linestyle) per NodeList.
            cumulativeN = 0
            for i in xrange(len(globalNumNodes)):
                n = globalNumNodes[i]
                if n > 0:
                    iNodeList = i % dataBase.numNodeLists
                    x = numpy.array(globalXNodes[cumulativeN:cumulativeN + n])
                    y = numpy.array(globalYNodes[cumulativeN:cumulativeN + n])
                    vx = numpy.array(globalVxNodes[cumulativeN:cumulativeN + n])
                    vy = numpy.array(globalVyNodes[cumulativeN:cumulativeN + n])
                    cumulativeN += n
                    data = Gnuplot.Data(x, y, vx, vy,
                                        with_ = "vector ls %i" % (iNodeList + 1),
                                        inline = True)
                    plot.replot(data)
                    SpheralGnuPlotCache.append(data)

        else:
            # Single dataset covering everything.
            x = numpy.array(globalXNodes)
            y = numpy.array(globalYNodes)
            vx = numpy.array(globalVxNodes)
            vy = numpy.array(globalVyNodes)
            data = Gnuplot.Data(x, y, vx, vy,
                                with_ = "vector",
                                inline = True)
            plot.replot(data)
            SpheralGnuPlotCache.append(data)

        return plot

    else:
        # Bug fix: the original referenced an undefined name `data` here,
        # raising NameError on every non-root rank.  Non-root ranks have
        # nothing to plot or cache.
        return None
#-------------------------------------------------------------------------------
# Generate a regularly spaced sampling of the given FieldList
# The answer is returned in a 2-D numpy array.
#-------------------------------------------------------------------------------
def gridSample(fieldList,
               zFunction = "%s",
               nx = 100,
               ny = 100,
               xmin = None,
               xmax = None,
               ymin = None,
               ymax = None):
    """Sample the given FieldList on a regular nx x ny grid.

    fieldList : FieldList to sample.
    zFunction : format string applied to the sampled value "z" and passed to
                eval(), e.g. "%s.magnitude()".  NOTE(security): eval executes
                arbitrary code -- only pass trusted strings.
    nx, ny    : grid resolution.
    xmin..ymax: optional sampling bounds; default to the node extents.

    Returns (xValues, yValues, zValues) as 2-D numpy arrays.
    """
    assert nx > 0 and ny > 0

    # Set up our return value arrays.
    xValues = numpy.array([[0.0]*nx]*ny)
    yValues = numpy.array([[0.0]*nx]*ny)
    zValues = numpy.array([[0.0]*nx]*ny)

    # Gather the fieldList info across all processors to process 0.
    localNumNodes = []
    localX = []
    localY = []
    for ifield in xrange(fieldList.numFields):
        field = fieldList[ifield]
        n = field.nodeList().numNodes
        localNumNodes.append(n)
        for r in field.nodeList().positions():
            localX.append(r.x)
            localY.append(r.y)
    globalNumNodes = mpi.gather(localNumNodes)
    globalX = mpi.gather(localX)
    globalY = mpi.gather(localY)

    # If the user did not specify the sampling volume, find the min and
    # max node positions.  (Idiom fix: compare to None with `is`.)
    # NOTE(review): min()/max() raise ValueError on a rank with no local
    # nodes -- confirm all ranks always hold nodes.
    if xmin is None:
        xmin = min(localX)
    if ymin is None:
        ymin = min(localY)
    if xmax is None:
        xmax = max(localX)
    if ymax is None:
        ymax = max(localY)
    xmin = mpi.allreduce(xmin, mpi.MIN)
    ymin = mpi.allreduce(ymin, mpi.MIN)
    xmax = mpi.allreduce(xmax, mpi.MAX)
    ymax = mpi.allreduce(ymax, mpi.MAX)
    assert xmax > xmin
    assert ymax > ymin

    # Figure out the sizes of the bins we're going to be sampling in.
    dx = (xmax - xmin)/nx
    dy = (ymax - ymin)/ny

    # Loop over all the grid sampling positions, and figure out this
    # processor's contribution.
    for iy in xrange(ny):
        for ix in xrange(nx):
            xValues[iy][ix] = xmin + (ix + 0.5)*dx
            yValues[iy][ix] = ymin + (iy + 0.5)*dy
            r = Vector2d(xValues[iy][ix], yValues[iy][ix])
            z = fieldList.sample(r)
            localZ = eval(zFunction % "z")
            globalZ = mpi.reduce(localZ, mpi.SUM)
            if mpi.rank == 0:
                # Parenthesized prints behave identically in Py2 and Py3
                # for a single argument.
                print("%i %i %i %s %g %g" % (mpi.rank, ix, iy, r, z, localZ))
                print("%i %g" % (mpi.rank, globalZ))
                zValues[iy][ix] = globalZ

    return xValues, yValues, zValues
#-------------------------------------------------------------------------------
# Plot the energy history of the given conservation object.
#-------------------------------------------------------------------------------
def plotEHistory(conserve):
    """Plot the total/kinetic/thermal/potential energy histories vs. time.

    Only MPI rank 0 produces a real plot; other ranks get a fakeGnuplot().
    """
    if mpi.rank != 0:
        return fakeGnuplot()
    t = conserve.timeHistory
    series = [(conserve.EHistory, "Total Energy"),
              (conserve.KEHistory, "Kinetic Energy"),
              (conserve.TEHistory, "Thermal Energy"),
              (conserve.EEHistory, "Potential Energy")]
    plot = generateNewGnuPlot()
    cached = []
    for values, label in series:
        curve = Gnuplot.Data(t, values,
                             with_ = "lines",
                             title = label,
                             inline = True)
        plot.replot(curve)
        cached.append(curve)
    plot.replot()
    # Keep the datasets alive for the lifetime of the plot.
    SpheralGnuPlotCache.extend(cached)
    return plot
#-------------------------------------------------------------------------------
# Plot the linear momentum history of the given conservation object.
#-------------------------------------------------------------------------------
def plotpmomHistory(conserve):
    """Plot the x/y/z and total linear momentum histories vs. time.

    Only MPI rank 0 produces a real plot; other ranks get a fakeGnuplot().
    """
    if mpi.rank != 0:
        return fakeGnuplot()
    t = conserve.timeHistory
    p = conserve.pmomHistory
    series = [([v.x for v in p], "x momentum"),
              ([v.y for v in p], "y momentum "),
              ([v.z for v in p], "z momentum"),
              ([v.magnitude() for v in p], "total momentum")]
    plot = generateNewGnuPlot()
    cached = []
    for values, label in series:
        curve = Gnuplot.Data(t, values,
                             with_ = "lines",
                             title = label,
                             inline = True)
        plot.replot(curve)
        cached.append(curve)
    plot.replot()
    # Keep the datasets alive for the lifetime of the plot.
    SpheralGnuPlotCache.extend(cached)
    return plot
#-------------------------------------------------------------------------------
# Plot a polygon.
#-------------------------------------------------------------------------------
def plotPolygon(polygon,
                plotVertices = True,
                plotFacets = True,
                plotNormals = False,
                plotCentroid = False,
                plot = None,
                persist = False,
                plotLabels = True):
    """Plot a polygon's vertices, facets, and optionally its facet normals
    and centroid.  Reuses `plot` when given, otherwise opens a new one.
    """
    # Vertex coordinates.
    px = [v.x for v in polygon.vertices]
    py = [v.y for v in polygon.vertices]

    # Facets as (origin, delta) vectors; normals anchored at facet midpoints.
    fx, fy, fdx, fdy = [], [], [], []
    nx, ny, ndx, ndy = [], [], [], []
    for facet in polygon.facets:
        edge = facet.point2 - facet.point1
        half = edge/2.0
        fx.append(facet.point1.x)
        fy.append(facet.point1.y)
        fdx.append(edge.x)
        fdy.append(edge.y)
        nx.append(facet.point1.x + half.x)
        ny.append(facet.point1.y + half.y)
        ndx.append(facet.normal.x)
        ndy.append(facet.normal.y)

    if plot is None:
        plot = generateNewGnuPlot(persist)

    vlabel = "Vertices" if plotLabels else None
    flabel = "Facets" if plotLabels else None
    nlabel = "Normals" if plotLabels else None

    dataPoints = Gnuplot.Data(px, py,
                              with_ = "points pt 1 ps 2",
                              title = vlabel,
                              inline = True)
    dataFacets = Gnuplot.Data(fx, fy, fdx, fdy,
                              with_ = "vectors",
                              title = flabel,
                              inline = True)
    dataNormals = Gnuplot.Data(nx, ny, ndx, ndy,
                               with_ = "vectors",
                               title = nlabel,
                               inline = True)
    if plotVertices:
        plot.replot(dataPoints)
    if plotFacets:
        plot.replot(dataFacets)
    if plotNormals:
        plot.replot(dataNormals)
    if plotCentroid:
        c = polygon.centroid
        dataCentroid = Gnuplot.Data([c.x], [c.y],
                                    with_ = "points pt 2 ps 2",
                                    title = "Centroid",
                                    inline = True)
        plot.replot(dataCentroid)
    # Keep the datasets (and the plot) alive.
    SpheralGnuPlotCache.extend([dataPoints, dataFacets, dataNormals, plot])
    return plot
#-------------------------------------------------------------------------------
# Plot a PolygonalMesh
#-------------------------------------------------------------------------------
def plotPolygonalMesh(mesh,
                      persist = False):
    """Plot every zone of a PolygonalMesh as a closed line loop, including
    zones held by other MPI domains."""
    # Each zone becomes a closed loop of its node positions.
    zoneLoops = []
    for izone in xrange(mesh.numZones):
        loop = [mesh.node(i).position() for i in mesh.zone(izone).nodeIDs]
        loop.append(loop[0])
        zoneLoops.append(loop)
    assert len(zoneLoops) == mesh.numZones

    p = generateNewGnuPlot(persist)
    # Every rank broadcasts its zones so each plot shows the full mesh.
    for sendProc in xrange(mpi.procs):
        for loop in mpi.bcast(zoneLoops, root=sendProc):
            p.replot(Gnuplot.Data([v.x for v in loop],
                                  [v.y for v in loop],
                                  with_ = "lines lt %i lw 2" % 1,
                                  title = None,
                                  inline = True))
    return p
## edges0 = [(mesh.node(mesh.edge(i).node1ID).position(), mesh.node(mesh.edge(i).node2ID).position())
## for i in xrange(mesh.numEdges)]
## p = generateNewGnuPlot()
## datas = []
## for sendProc in xrange(mpi.procs):
## edges = mpi.bcast(edges0, root=sendProc)
## for edge in edges:
## datas.append(Gnuplot.Data([edge[0].x, edge[1].x], [edge[0].y, edge[1].y],
## with_ = "lines %s" % linetype,
## title = None,
## inline = True))
## p.replot(datas[-1])
## p.datas = datas
## return p
| 42,569 | 11,869 |
# Read an integer from stdin and classify it relative to 10.
a = int(input())
print("small" if a < 10 else "big")
| 77 | 33 |
import json
import base64
import random
import logging
from Crypto.Cipher import AES
from Crypto.Protocol.KDF import PBKDF2
from phe import paillier, EncryptedNumber, PaillierPublicKey
import client.dbhandler as dbhandler
from client.exceptions import WrongPin, UnknownUser
logger = logging.getLogger('client')
# for salting pins of users
SALT = b'=sNmXf\xd6\xefe\xf8\xd0\x10\xe5\xb2\xf3o\x01|\xf3\x99\xbf\xd6\x88\x0c\xb6\x9b\x08\xb3\xac\xf0\xb9g'
def generate_verification_code():
    """
    Generates a list of random numbers which is used to transform
    the fingerprint vector to protect against malicious users
    who have access to the fingerprint data of the user they want to impersonate.
    :return: user verification code (four distinct ints in [1, 254])
    """
    return random.sample(range(1, 255), 4)
def generate_shuffle_code():
    """
    Returns a random four-digit shuffle code in [1000, 9999].
    :return: shuffle code
    """
    return random.randint(1000, 9999)
def enrollment_transform(user_fingerprint, user_vcode, user_shuffle_code):
    """
    Transform a fingerprint vector for enrollment.

    Concatenates the fingerprint with the verification code, appends
    [1, 1, sum(x_i^2), sum(v_i^2)], then shuffles deterministically with
    the user's shuffle code so only the code holder can undo the permutation.

    :param user_fingerprint: fingerprint feature vector
    :param user_vcode: verification code of the user
    :param user_shuffle_code: seed for the deterministic shuffle
    :return: transformed (shuffled) fingerprint vector
    """
    fp_sq = sum(value * value for value in user_fingerprint)
    vc_sq = sum(value * value for value in user_vcode)
    transformed = list(user_fingerprint) + list(user_vcode) + [1, 1, fp_sq, vc_sq]
    random.Random(user_shuffle_code).shuffle(transformed)
    return transformed
def string_encrypt(pin, plaintext):
    """
    AES-CFB encrypt a string under a key derived from the user's pin.
    Used for storing the paillier key pair and verification code of a user.
    :param pin: 4 digit integer string
    :param plaintext: JSON dumps of required data to be encrypted
    :return: (ciphertext bytes, initialization vector)
    """
    # Derive a 256-bit key from the pin and the module-level salt.
    derived_key = PBKDF2(pin, SALT, dkLen=32)
    # CFB mode needs no padding, so the plaintext length is preserved.
    cipher = AES.new(derived_key, AES.MODE_CFB)
    ciphered_bytes = cipher.encrypt(plaintext.encode('utf-8'))
    return ciphered_bytes, cipher.iv
def string_decrypt(pin, iv, ciphertext):
    """
    AES-CFB decrypt a ciphertext given the user's pin and the iv.
    :param pin: 4 digit integer string
    :param iv: initialization vector returned during encryption
    :param ciphertext: encrypted cipher text
    :return: decrypted string, or None when the pin is wrong
    """
    derived_key = PBKDF2(pin, SALT, dkLen=32)
    deciphered_bytes = AES.new(derived_key, AES.MODE_CFB, iv).decrypt(ciphertext)
    try:
        # A wrong pin yields garbage bytes that (almost always) fail to
        # decode as UTF-8, which we treat as an incorrect pin.
        return deciphered_bytes.decode('utf-8')
    except UnicodeDecodeError:
        logger.info(f'Incorrect pin')
        return None
def paillier_encrypt_vector(pub_key, transformed_fingerprint):
    """
    Encrypt every feature of the transformed fingerprint with the
    paillier cryptosystem.
    :param pub_key: paillier public key of the user
    :param transformed_fingerprint: a fingerprint feature vector
    :return: list of encrypted features
    """
    encrypted_fingerprint = [pub_key.encrypt(feature)
                             for feature in transformed_fingerprint]
    # Readable form of each ciphertext, emitted to the debug log only.
    # NOTE(review): this reaches into EncryptedNumber's private attribute;
    # phe exposes a public .ciphertext() method -- confirm before changing.
    serialized = [entry._EncryptedNumber__ciphertext
                  for entry in encrypted_fingerprint]
    logger.debug(json.dumps(serialized, indent=2))
    return encrypted_fingerprint
def store_credentials(user_roll_no, user_pin, user_tid, user_pub_key, user_priv_key, user_vcode, user_shuffle_code):
    """
    Store the user's credentials in the local database, AES-encrypted
    under a key derived from their pin.
    :param user_roll_no: user roll no
    :param user_pin: user 4 digit integer pin
    :param user_tid: user fingerprint id stored on the server
    :param user_pub_key: user paillier public key
    :param user_priv_key: user paillier private key
    :param user_vcode: user verification code
    :param user_shuffle_code: user shuffle code
    """
    # Everything secret goes into one JSON blob that gets encrypted.
    secret_blob = json.dumps({
        'tid': user_tid,
        'vcode': user_vcode,
        'scode': user_shuffle_code,
        'n': user_pub_key.n,
        'p': user_priv_key.p,
        'q': user_priv_key.q,
    })
    ciphertext, iv = string_encrypt(user_pin, secret_blob)

    records = dbhandler.read_data('userdata.json')
    records.append({
        'roll_no': user_roll_no,
        'ciphertext': base64.b64encode(ciphertext).decode('utf-8'),
        'iv': base64.b64encode(iv).decode('utf-8'),
    })
    dbhandler.write_data(records, 'userdata.json')
    logger.info(f'User data stored: {user_roll_no}')
def retrieve_credentials(user_roll_no, user_pin):
    """
    Fetch and decrypt the encrypted user data stored in the database.
    :param user_roll_no: user roll number
    :param user_pin: user pin
    :return: decrypted user data dict
    :raises UnknownUser: when no record exists for user_roll_no
    :raises WrongPin: when the pin fails to decrypt the record
    """
    data = dbhandler.read_data('userdata.json')
    # Locate the record for this user (bug fix: the old flag-based lookup
    # had unreachable `return None` statements after both raises).
    record = next((user for user in data if user['roll_no'] == user_roll_no), None)
    if record is None:
        print(f'Unknown user: {user_roll_no}')
        raise UnknownUser
    ciphertext = base64.b64decode(record['ciphertext'].encode('utf-8'))
    iv = base64.b64decode(record['iv'].encode('utf-8'))
    user_data = string_decrypt(user_pin, iv, ciphertext)
    if not user_data:
        print(f'Incorrect pin: {user_roll_no}')
        raise WrongPin
    return json.loads(user_data)
def verification_transform(user_fingerprint, user_vcode, user_shuffle_code):
    """
    Transform a fingerprint vector for verification.

    Query-side counterpart of enrollment_transform: the concatenated
    entries are scaled by -2 and the trailing constants are ordered
    [sum(y_i^2), sum(v_i^2), 1, 1] before the deterministic shuffle.

    :param user_fingerprint: fingerprint feature vector
    :param user_vcode: verification code of the user
    :param user_shuffle_code: seed for the deterministic shuffle
    :return: transformed fingerprint
    """
    fp_sq = sum(value * value for value in user_fingerprint)
    vc_sq = sum(value * value for value in user_vcode)
    transformed = [-2 * entry for entry in list(user_fingerprint) + list(user_vcode)]
    transformed += [fp_sq, vc_sq, 1, 1]
    random.Random(user_shuffle_code).shuffle(transformed)
    return transformed
| 6,484 | 2,153 |
from .config import config
from .tree import tree
| 50 | 13 |
"""
Implements the Kemeny Rule and various heuristics
"""
import time
import datetime
from itertools import combinations, permutations
from multiprocessing import Pool
import functools
from collections import defaultdict
from matrix import generate_zeros_matrix, matrix_multiplication
NUM_WORKERS = 2
STATIONARY_DISTRIBUTION_ITERATIONS = 1000
def kendall_tau_distance(ranking_a, ranking_b):
    """
    Kendall tau distance between two strict orderings: the number of
    candidate pairs the two rankings order differently.  Candidates are
    assumed to be labelled 1..n.
    """
    num_candidates = len(ranking_a)
    disagreements = 0
    for alt_x, alt_y in combinations(range(1, num_candidates + 1), 2):
        x_before_y_in_a = ranking_a.index(alt_x) < ranking_a.index(alt_y)
        x_before_y_in_b = ranking_b.index(alt_x) < ranking_b.index(alt_y)
        if x_before_y_in_a != x_before_y_in_b:
            disagreements += 1
    return disagreements
def calculate_ranking_score(ranking, profile):
    """
    Kemeny score of a candidate ordering: the summed Kendall tau distance
    to every vote in the profile.
    """
    return sum(kendall_tau_distance(ranking, vote) for vote in profile)
def kemeny_rule(profile, num_workers=1):
    """
    Exact Kemeny rule: score every permutation of the candidates against
    the profile (in a worker pool) and report the minimum-score ranking(s).
    """
    print('\nApplying the Kemeny Rule to the Profile...')
    time_start = time.perf_counter()

    num_candidates = len(profile[0])
    rank_permutations = list(permutations(range(1, num_candidates + 1)))
    scorer = functools.partial(calculate_ranking_score, profile=profile)
    with Pool(num_workers) as worker_pool:
        ranking_scores = worker_pool.map(scorer, rank_permutations)

    best_score = min(ranking_scores)
    print("The winning ranking(s) are as follows: ")
    for ranking, score in zip(rank_permutations, ranking_scores):
        if score == best_score:
            print(", ".join(str(candidate) for candidate in ranking))

    time_elapsed = datetime.timedelta(seconds = (time.perf_counter() - time_start))
    print(f"Applying the Kemeny Rule took {time_elapsed}")
def determine_pairwise_victories(profile):
    """
    Count, for every ordered candidate pair (a, b), how many votes rank
    a above b.
    Returns a defaultdict(int) keyed by (a, b) tuples.
    """
    num_candidates = len(profile[0])
    victories = defaultdict(int)
    for pair in permutations(range(1, num_candidates + 1), 2):
        winner, loser = pair
        for vote in profile:
            if vote.index(winner) < vote.index(loser):
                victories[pair] += 1
    return victories
def create_transition_matrix(pairwise_victories, num_candidates, num_votes, mc_type):
    """
    Build the Markov-chain transition matrix for the requested heuristic.

    For a transition a -> b (a != b) the probability is:
      type 1: 1 / num_candidates if b beats a on at least one ballot, else 0
      type 2: 1 / num_candidates if a strict majority of ballots prefer b, else 0
      type 3: (ballots preferring b to a) / (num_votes * num_candidates)
    The self-transition a -> a absorbs the remaining probability mass so
    each row sums to 1.
    """
    transition_matrix = generate_zeros_matrix(num_candidates, num_candidates)

    # Off-diagonal transition probabilities.
    for first, second in permutations(range(1, num_candidates + 1), 2):
        if mc_type not in (1, 2, 3):
            continue  # unknown type: leave off-diagonal entries at zero
        wins_for_second = pairwise_victories[(second, first)]
        if mc_type == 1:
            probability = 1 / num_candidates if wins_for_second > 0 else 0
        elif mc_type == 2:
            probability = 1 / num_candidates if wins_for_second > (num_votes // 2) else 0
        else:
            probability = wins_for_second / (num_votes * num_candidates)
        transition_matrix[first - 1][second - 1] = probability

    # Self-transition probability completes each row to sum to 1.
    for candidate in range(1, num_candidates + 1):
        row = transition_matrix[candidate - 1]
        row[candidate - 1] = 1 - sum(row)
    return transition_matrix
def markov_heuristic(profile, mc_type):
    """
    Approximate the Kemeny ranking with a Markov-chain heuristic: build
    the MC{1,2,3} transition matrix, iterate it toward its stationary
    distribution, and rank candidates by their probability mass.
    """
    print(f'\nApplying the MC{mc_type} Markov Heuristic to the Profile...')
    time_start = time.perf_counter()

    num_candidates = len(profile[0])
    num_votes = len(profile)
    pairwise_wins = determine_pairwise_victories(profile)
    transition_matrix = create_transition_matrix(pairwise_wins, num_candidates,
                                                 num_votes, mc_type)

    # Repeated multiplication approximates the stationary distribution.
    stationary = transition_matrix.copy()
    for _ in range(STATIONARY_DISTRIBUTION_ITERATIONS):
        stationary = matrix_multiplication(stationary, transition_matrix)

    # Rank candidates by stationary probability, highest first
    # (stable sort, so ties keep candidate-number order).
    ranked = sorted(enumerate(stationary[0], start=1),
                    key=lambda pair: pair[1], reverse=True)
    final_ranking = [candidate for candidate, _ in ranked]

    print("The winning ranking is as follows: ")
    print(", ".join(str(candidate) for candidate in final_ranking))

    time_elapsed = datetime.timedelta(seconds = (time.perf_counter() - time_start))
    print(f"Applying the MC{mc_type} Markov Model took {time_elapsed}")
| 6,622 | 2,055 |
try:
import fiona
from fiona.crs import from_epsg
import utilities.fiona_supported_drivers as fsd
import os
except Exception as e:
print(f"{e}")
quit(1)
def write_spatial(file=None, directory=None, data=None, **meta):
    """Write a sequence of features to a spatial file via fiona.

    Validates the target directory and the driver/crs/schema metadata,
    normalizes schema property types in place, and exits the process on
    any error (matching the module's import-failure handling).
    """
    try:
        if not data:
            raise ValueError(f"No data to write.")
        if not os.path.exists(directory):
            raise ValueError(f"Target directory doesn't exist.")
        # Required metadata, checked in a fixed order so the first missing
        # key raises the same message as before.
        required = {"driver": "Missing driver.",
                    "crs": "Missing CRS.",
                    "schema": "Missing schema."}
        for key, message in required.items():
            if key not in meta:
                raise ValueError(message)
        if meta["driver"] not in fsd.file_extensions:
            raise ValueError(f"Invalid driver.")

        target = os.path.join(directory, f"{file}.{fsd.file_extensions[meta['driver']]}")
        meta["crs"] = from_epsg(meta["crs"])
        # Map generic type names onto fiona's schema vocabulary (in place).
        type_map = {"string": "str", "double": "float"}
        properties = meta["schema"]["properties"]
        for name, kind in properties.items():
            if kind in type_map:
                properties[name] = type_map[kind]

        with fiona.open(target, "w", **meta) as sink:
            for feature in data:
                sink.write(feature)
    except Exception as e:
        print(f"{e}")
        quit(1)
| 1,331 | 406 |
# -*- coding: utf-8 -*-
import pytest
from .utils import QATOUCH_MARK, MissingQatouchData, ExpectedIntegerValue
from .qatouch import QatouchTestResult
__QATOUCH_TEST_RSESULT = None
___Enable_PLUGIN = None
def pytest_addoption(parser):
    """Register the qatouch command-line options and matching ini settings."""
    group = parser.getgroup("QaTouch")

    def register(option, dest, help, default=None, type=None, **kwargs):
        # NOTE(review): `help` is only attached to the ini option, matching
        # the original behavior (the CLI option carries no help text).
        group.addoption(option, dest=dest, default=default, **kwargs)
        parser.addini(dest, default=default, type=type, help=help)

    register(option="--qatouch",
             action="store",
             dest="qatouch",
             default="False",
             help="Enable the qatouch plugin (Set ['True', 'False'])")
    register(option="--qatouch-subdomain",
             action="store",
             dest="qatouch-subdomain",
             help="Your qatouch submodule name (i.e <your_subdomain>.qatouch.com)")
    register(option="--qatouch-api-token",
             action="store",
             dest="qatouch-api-token",
             help="Your qatouch API token")
    register(option="--qatouch-project-key",
             action="store",
             dest="qatouch-project-key",
             help="The qatouch project key")
    register(option="--qatouch-testrun-key",
             action="store",
             dest="qatouch-testrun-key",
             help="The testrun key in qatouch project")
def pytest_configure(config):
    """Register the qatouch marker and, when enabled, build the result collector."""
    config.addinivalue_line("markers", f"{QATOUCH_MARK}(TR): Mark test")

    global ___Enable_PLUGIN
    ___Enable_PLUGIN = (
        str(config.getoption("--qatouch")).lower() == "true"
        or str(config.getini("qatouch")).lower() == "true"
    )
    if not ___Enable_PLUGIN:
        return

    def get_option(option: str):
        # CLI value wins; fall back to the ini file; missing -> error.
        value = config.getoption("--" + option) or config.getini(option)
        if value is None:
            raise MissingQatouchData(
                f"The option ['--'{option}] or the ini option[{option}] not set"
            )
        return value

    global __QATOUCH_TEST_RSESULT
    __QATOUCH_TEST_RSESULT = QatouchTestResult(
        domain=get_option("qatouch-subdomain"),
        api_token=get_option("qatouch-api-token"),
        project_key=get_option("qatouch-project-key"),
        testrun_key=get_option("qatouch-testrun-key"),
    )
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    """Record marked tests: call phase always, setup/teardown only on failure."""
    outcome = yield
    report = outcome.get_result()
    marker = item.get_closest_marker(QATOUCH_MARK)
    if not (__QATOUCH_TEST_RSESULT and marker):
        return
    if report.when == "call":
        __add_test(marker, report)
    elif report.when in ("setup", "teardown") and report.outcome != "passed":
        __add_test(marker, report)
def pytest_sessionfinish():
    """At session end, push the collected results to qatouch and reset state."""
    global __QATOUCH_TEST_RSESULT
    if not (___Enable_PLUGIN and __QATOUCH_TEST_RSESULT):
        return
    __QATOUCH_TEST_RSESULT.push_results_to_qatouch()
    __QATOUCH_TEST_RSESULT = None
def __add_test(qa_marker, test_result):
    """Validate the marker's TR id and queue the test outcome for upload.

    :param qa_marker: the qatouch marker attached to the test item
    :param test_result: the pytest report whose outcome is recorded
    :raises MissingQatouchData: when the marker carries no TR keyword
    :raises ExpectedIntegerValue: when TR is not an int
    """
    if "TR" not in qa_marker.kwargs:
        raise MissingQatouchData(f"Expected to have a TR and its value, but not found")
    tr_value = qa_marker.kwargs["TR"]
    if not isinstance(tr_value, int):
        # Fixed typos in the original message ("bug insted").
        raise ExpectedIntegerValue(
            f"Expected the TR value to be a valid integer value but instead got {tr_value} of type {type(tr_value)}"
        )
    __QATOUCH_TEST_RSESULT.push_testcase_to_results(
        testcase_id=tr_value, testcase_status=test_result.outcome
    )
| 3,536 | 1,209 |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/111_models.ROCKET.ipynb (unless otherwise specified).
__all__ = ['RocketClassifier', 'load_rocket', 'RocketRegressor']
# Cell
import sklearn
from sklearn.linear_model import RidgeClassifierCV, RidgeCV
from sklearn.metrics import make_scorer
from ..imports import *
from ..data.external import *
from .layers import *
warnings.filterwarnings("ignore", category=FutureWarning)
# Cell
class RocketClassifier(sklearn.pipeline.Pipeline):
    """Time series classification using ROCKET features and a linear classifier"""

    def __init__(self, num_kernels=10_000, normalize_input=True, random_state=None,
                 alphas=np.logspace(-3, 3, 7), normalize_features=True, memory=None, verbose=False, scoring=None, class_weight=None, **kwargs):
        """
        RocketClassifier is recommended for up to 10k time series.
        For a larger dataset, you can use ROCKET (in Pytorch).
        scoring = None --> defaults to accuracy.

        Rocket args:
            num_kernels     : int, number of random convolutional kernels (default 10,000)
            normalize_input : boolean, whether or not to normalise the input time series per instance (default True)
            random_state    : Optional random seed (default None)
        """
        try:
            import sktime
            from sktime.transformations.panel.rocket import Rocket
        except ImportError:
            print("You need to install sktime to be able to use RocketClassifier")
            # Bug fix: fail fast here instead of falling through to a
            # confusing NameError on `Rocket` below.
            raise
        self.steps = [('rocket', Rocket(num_kernels=num_kernels, normalise=normalize_input, random_state=random_state)),
                      ('ridgeclassifiercv', RidgeClassifierCV(alphas=alphas, normalize=normalize_features, scoring=scoring,
                                                              class_weight=class_weight, **kwargs))]
        store_attr()
        self._validate_steps()

    def __repr__(self):
        return f'Pipeline(steps={self.steps.copy()})'

    def save(self, fname='Rocket', path='./models'):
        """Pickle this pipeline to <path>/<fname>.pkl."""
        path = Path(path)
        filename = path/fname
        # Bug fix: the target path previously ignored `filename` and wrote
        # to a literal '(unknown).pkl'.
        with open(f'{filename}.pkl', 'wb') as output:
            pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)
# Cell
def load_rocket(fname='Rocket', path='./models'):
    """Load a pickled Rocket pipeline from <path>/<fname>.pkl.

    :param fname: base file name (without the .pkl extension)
    :param path: directory containing the pickle
    :return: the unpickled object
    """
    filename = Path(path)/fname
    # Bug fix: the file path previously ignored `filename` and tried to
    # read a literal '(unknown).pkl'.
    with open(f'{filename}.pkl', 'rb') as source:
        return pickle.load(source)
# Cell
class RocketRegressor(sklearn.pipeline.Pipeline):
    """Time series regression using ROCKET features and a linear regressor"""

    def __init__(self, num_kernels=10_000, normalize_input=True, random_state=None,
                 alphas=np.logspace(-3, 3, 7), normalize_features=True, memory=None, verbose=False, scoring=None, **kwargs):
        """
        RocketRegressor is recommended for up to 10k time series.
        For a larger dataset, you can use ROCKET (in Pytorch).
        scoring = None --> defaults to r2.

        Args:
            num_kernels     : int, number of random convolutional kernels (default 10,000)
            normalize_input : boolean, whether or not to normalise the input time series per instance (default True)
            random_state    : Optional random seed (default None)
        """
        try:
            import sktime
            from sktime.transformations.panel.rocket import Rocket
        except ImportError:
            print("You need to install sktime to be able to use RocketRegressor")
            # Bug fix: fail fast here instead of falling through to a
            # confusing NameError on `Rocket` below.
            raise
        self.steps = [('rocket', Rocket(num_kernels=num_kernels, normalise=normalize_input, random_state=random_state)),
                      ('ridgecv', RidgeCV(alphas=alphas, normalize=normalize_features, scoring=scoring, **kwargs))]
        store_attr()
        self._validate_steps()

    def __repr__(self):
        return f'Pipeline(steps={self.steps.copy()})'

    def save(self, fname='Rocket', path='./models'):
        """Pickle this pipeline to <path>/<fname>.pkl."""
        path = Path(path)
        filename = path/fname
        # Bug fix: the target path previously ignored `filename` and wrote
        # to a literal '(unknown).pkl'.
        with open(f'{filename}.pkl', 'wb') as output:
            pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)
"""Language selector handler

Todo:
    * Use internationalization
    * Add more languages
"""
# English UI strings. Keys map either to a message string or to a
# {choice_number: prompt} dict consumed by the menu code.
english = {
    "INTRO_MESSAGE": "Welcome to CheatSheet Maker",
    "MAIN_MENU_OPTIONS": {1: "Create sheet",
                          2: "Export (NOT CODED YET)",
                          3: "Help (NOT CODED YET)",
                          4: "Exit",
                          },
    "MENU_MESSAGE": "Type the number to choose your option.",
    "CONFIG_SHEET_MESSAGE1": "Building the basic layout... answer the next questions.",
    # grammar fix: "How many columns your sheet will have?"
    "CONFIG_SHEET_MESSAGE2": "How many columns will your sheet have?",
    "CONFIG_SHEET_MESSAGE3": "Which color style do you prefer?",
    "CONFIG_SHEET_OPTIONS1": {1: "What is your sheet title? ('CheatSheet' is added automatically)"
                              },
    "CONFIG_SHEET_OPTIONS2": {1: "1 main column",
                              2: "2 main columns",
                              3: "3 main columns"
                              },
    "CONFIG_SHEET_OPTIONS3": {1: "Orange",
                              2: "Black and white",
                              3: "Red",
                              4: "Yellow",
                              5: "Green",
                              6: "Blue",
                              },
    "HEADER_MESSAGE": "Building the header... answer the next questions.",
    "HEADER_OPTIONS": {1: "What is the author name?"
                       },
    "FOOTER_MESSAGE": "Building the footer... answer the next questions.",
    "FOOTER_OPTIONS1": {1: "What is the author picture url?"
                        },
    "FOOTER_OPTIONS2": {1: "What is the author website url? (use http://)"
                        },
    "FOOTER_OPTIONS3": {1: "What is the sponsor name?"
                        },
    # typo fix: "webite" -> "website"
    "FOOTER_OPTIONS4": {1: "What is the sponsor website url? (use http://)"
                        },
    "BLOCK_MESSAGE": "Building the blocks... answer the next questions.",
    "BLOCK_OPTIONS": {1: "Create text block",
                      2: "Create block with rows",
                      0: "Done"
                      },
    "BLOCK_ROWS_MESSAGE1": "Building block with rows... answer the next questions.",
    "BLOCK_ROWS_MESSAGE2": "In what main column do you want to build it?",
    "BLOCK_ROWS_OPTIONS1": {1: "What is the title of the block?"
                            },
    "BLOCK_ROWS_OPTIONS2": {1: "How many rows does it have?"
                            },
    "BLOCK_ROWS_OPTIONS3": {1: "What is the text of each row? (text row1. # text row2. # text row3)"
                            },
    "TEXT_BLOCK_MESSAGE": "Building text block... answer the next questions.",
    "TEXT_BLOCK_EXTRA": "main column",
    "TEXT_BLOCK_OPTIONS1": {1: "What is the title of the block?"
                            },
    "TEXT_BLOCK_OPTIONS2": {1: "What is the text for the block (use <br> for new line or any html tag for formatting)"
                            },
    # NOTE(review): "cheasheet-maker" in the URL may be the real repo slug — left as-is.
    "END_MESSAGE": "Thanks for using CheatSheet Maker. Feel free to share your ideas at http://github.com/cosme12/cheasheet-maker",
    "EXIT_MESSAGE": "Press any key to exit",
    "INVALID_INPUT_MESSAGE": "Invalid input. Try again.",
}
# Spanish UI strings; keys mirror the `english` dict exactly.
espanol = {
    "INTRO_MESSAGE": "Bienvenido a CheatSheet Maker",
    "MAIN_MENU_OPTIONS": {1: "Crear hoja",
                          2: "Exportar (NOT CODED YET)",
                          3: "Ayuda (NOT CODED YET)",
                          4: "Salir",
                          },
    "MENU_MESSAGE": "Escribe el numero para elegir tu opcion",
    # typo fix: "Cosntruyendo" -> "Construyendo"
    "CONFIG_SHEET_MESSAGE1": "Construyendo la estructura basica... responde las siguientes preguntas.",
    "CONFIG_SHEET_MESSAGE2": "Cuantas columnas tiene tu hoja?",
    "CONFIG_SHEET_MESSAGE3": "Que color de estilo prefieres?",
    "CONFIG_SHEET_OPTIONS1": {1: "Cual es el titulo de tu hoja? ('CheatSheet' se agrega automaticamente)"
                              },
    "CONFIG_SHEET_OPTIONS2": {1: "1 columna principal",
                              2: "2 columnas principales",
                              3: "3 columnas principales"
                              },
    "CONFIG_SHEET_OPTIONS3": {1: "Naranja",
                              2: "Negro y Blanco",
                              3: "Rojo",
                              4: "Amarillo",
                              5: "Verde",
                              6: "Azul",
                              },
    # typo fix: "Cosntruyendo" -> "Construyendo"
    "HEADER_MESSAGE": "Construyendo el encabezado... contesta las siguientes preguntas.",
    "HEADER_OPTIONS": {1: "Cual es el nombre del autor?"
                       },
    "FOOTER_MESSAGE": "Construyendo el pie de pagina... contesta las siguientes preguntas.",
    "FOOTER_OPTIONS1": {1: "Cual es la url de la imagen del autor?"
                        },
    "FOOTER_OPTIONS2": {1: "Cual es la url del sitio web del autor? (use http://)"
                        },
    "FOOTER_OPTIONS3": {1: "Cual es el nombre del sponsor?"
                        },
    "FOOTER_OPTIONS4": {1: "Cual es la url del sitio web del sponsor? (use http://)"
                        },
    "BLOCK_MESSAGE": "Construyendo los bloques... contesta las siguientes preguntas.",
    "BLOCK_OPTIONS": {1: "Crear bloque de texto",
                      2: "Crear bloque con filas",
                      0: "Fin"
                      },
    "BLOCK_ROWS_MESSAGE1": "Construyendo bloque con filas... contesta las siguientes preguntas.",
    # typo fix: "construilo" -> "construirlo"
    "BLOCK_ROWS_MESSAGE2": "En que columna principal quieres construirlo?",
    "BLOCK_ROWS_OPTIONS1": {1: "Cual es el titulo del bloque?"
                            },
    "BLOCK_ROWS_OPTIONS2": {1: "Cuantas filas tiene?"
                            },
    "BLOCK_ROWS_OPTIONS3": {1: "Cual es el texto de cada fila? (texto fila1. # texto fila2. # texto fila3.)"
                            },
    "TEXT_BLOCK_MESSAGE": "Construyendo bloque de texto... contesta las siguientes preguntas.",
    "TEXT_BLOCK_EXTRA": "columna principal",
    "TEXT_BLOCK_OPTIONS1": {1: "Cual es el titulo del bloque?"
                            },
    "TEXT_BLOCK_OPTIONS2": {1: "Cual es el texto para el bloque? (usa <br> para nueva linea o cualquier html tag para dar formato)"
                            },
    "END_MESSAGE": "Gracias por utilizar CheatSheet Maker. Comparte tus ideas en http://github.com/cosme12/cheasheet-maker",
    "EXIT_MESSAGE": "Presiona cualquier tecla para salir",
    # typo fix: "Pruba" -> "Prueba"
    "INVALID_INPUT_MESSAGE": "Entrada invalida. Prueba otra vez.",
}
import pytest
import datetime
@pytest.fixture
def freeze_datetime(monkeypatch):
    """Patch datetime.datetime so now() returns a controllable fixed timestamp.

    Returns the FrozenDateTime class; call `.freeze(ts)` to move the clock.
    """
    original_datetime = datetime.datetime

    class FrozenDateTimeMeta(type):
        """Meta class making isinstance() accept both real and frozen datetimes."""
        def __instancecheck__(self, instance):
            return isinstance(instance, (original_datetime, FrozenDateTime))

    # Bug fix: the class previously set the Python-2-only `__metaclass__`
    # attribute, which Python 3 ignores, so the custom __instancecheck__
    # never took effect. Use the Python 3 `metaclass=` syntax instead.
    class FrozenDateTime(datetime.datetime, metaclass=FrozenDateTimeMeta):
        """Use freeze method to control result of datetime.datetime.now()."""

        @classmethod
        def freeze(cls, freezing_timestamp):
            """Freeze time at freezing_timestamp."""
            cls.frozen_time = freezing_timestamp

        @classmethod
        def now(cls, tz=None):
            """Return the frozen time."""
            return cls.frozen_time

    monkeypatch.setattr(datetime, 'datetime', FrozenDateTime)
    # start frozen at the real current time
    FrozenDateTime.freeze(original_datetime.now())
    return FrozenDateTime
| 1,007 | 274 |
from flask import Flask, redirect, url_for
from markupsafe import escape

# Minimal Flask app that serves a static icon from the root URL.
app = Flask(__name__)

@app.route('/')
def index():
    """Serve static/icon.png at the site root."""
    # Printed for debugging only: shows the URL Flask generates for the asset.
    print(url_for('static', filename='icon.png'))
    return app.send_static_file('icon.png')
import wrapper
import tensorflow as tf
from tensorflow.core.example import example_pb2
from StringIO import StringIO
from PIL import Image
from matplotlib.pyplot import imshow, show
import numpy as np
# Legacy Python 2 script (uses StringIO and tf.python_io): runs a pretrained
# model frame-by-frame over the JPEG images stored in one TFRecord file.
a = wrapper.Wrapper('discrete_tcnn1','./data/pretrained_models/discrete_tcnn1/model.ckpt-126001.bestmodel', 20)
example = example_pb2.Example()
in_file = './data/tfrecord_release/tfrecords/b1c9c847-3bda4659.tfrecords'
count = 0
for example_serialized in tf.python_io.tf_record_iterator(in_file):
    example.ParseFromString(example_serialized)
    feature_map = example.features.feature
    # raw JPEG bytes are stored under the 'image/encoded' feature key
    encoded = feature_map['image/encoded'].bytes_list.value
    print(count)
    count += 1
    # decode JPEG bytes via PIL (StringIO works on bytes only in Python 2)
    file_jpgdata = StringIO(encoded[0])
    dt = Image.open(file_jpgdata)
    imshow(np.asarray(dt))
    # feed the decoded frame to the model and print its prediction
    print(a.observe_a_frame(np.asarray(dt)))
import torch
from torch import nn
import torchvision.utils as vutils
import numpy as np
from focal_loss import FocalLoss
from Param import *
from utils import weights_init
from net import PoseNet
from PIL import ImageFile
# Let PIL load images with truncated file data instead of raising an error.
ImageFile.LOAD_TRUNCATED_IMAGES = True
def fit(data, mask, Net, optimizer, criterion, max_norm=0):
    """Run a single optimization step of `Net` on one batch.

    Args:
        data: batch from the image dataloader; data[0] is the image tensor.
        mask: batch from the mask dataloader; mask[0] is the target tensor.
        Net: the model being trained (returns (heat_maps, output)).
        optimizer: optimizer over Net.parameters().
        criterion: per-channel loss function.
        max_norm: if > 0, clip the gradient norm to this value.

    Returns:
        The detached scalar loss tensor.
    """
    img = data[0].to(device)
    heat_maps, output = Net(img)
    # sum the loss over every output channel against the same mask
    loss = 0
    for i in range(output.shape[1]):
        loss += criterion(output[:, i], mask[0].to(device))
    optimizer.zero_grad()
    loss.backward()
    # Bug fix: gradient clipping previously referenced undefined globals
    # (Encoder/Decoder -> NameError) and ran AFTER optimizer.step(), where it
    # has no effect. Clip the trained model's gradients before stepping.
    if max_norm > 0:
        torch.nn.utils.clip_grad_norm_(Net.parameters(), max_norm)
    optimizer.step()
    loss.detach_()
    return loss
def train(dataloader, dataloader_mask, print_epoch=batch_size, verbose=False):
    """Train a PoseNet on paired (image, mask) dataloaders.

    Relies on module-level hyperparameters imported from Param
    (image_size, nstack, oup_dim, bn, increase, lr, num_epochs, device,
    batch_size).

    Args:
        dataloader: yields image batches.
        dataloader_mask: yields matching mask batches (iterated in lockstep).
        print_epoch: log the loss every `print_epoch` batches.
        verbose: print batch tensor shapes when True.

    Returns:
        (losses, img_list, heat_map_out, model): per-batch loss history,
        grids of periodic model outputs, stacked last-epoch heat-map grids,
        and the trained network.
    """
    assert image_size == 256  # this configuration assumes 256x256 inputs
    model = PoseNet(nstack, image_size, oup_dim, bn, increase).to(device)
    #if initialize_weights:
    #    model.apply(weights_init)
    #criterion = nn.MSELoss()
    criterion = FocalLoss(gamma=2)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=1e-5)
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('number of params ', n_parameters)
    print("Starting Training Loop...")
    losses = []
    img_list = []
    heat_maps_list = []
    # For each epoch
    for epoch in range(num_epochs):
        torch.cuda.empty_cache()
        model.train()
        # For each batch in the dataloader
        for i, (data, mask) in enumerate(zip(dataloader, dataloader_mask), 0):
            if verbose: print(data[0].shape)
            if verbose: print(data[1].shape)
            recons_loss = fit(data, mask, model, optimizer, criterion)
            # Output training stats
            if i % print_epoch == 0:
                print('[%d/%d][%d/%d]\tLoss: %.4f'
                      % (epoch+1, num_epochs, i, len(dataloader), recons_loss.item()))
            # Save Losses for plotting later
            losses.append(recons_loss.item())
            # Check how the generator is doing by saving G's output on fixed_noise
            if (i % 500 == 0) or ((epoch == num_epochs-1) and (i == len(dataloader)-1)):
                with torch.no_grad():
                    heat_maps, img_out = model(data[0].to(device))
                    img_out = img_out.detach().cpu()
                    heat_maps = heat_maps.detach().cpu()
                # keep a grid of the first channel of up to 10 outputs
                img_list.append(vutils.make_grid(img_out[0:10,0], nrow=5, normalize=True))
                if epoch == (num_epochs-1):
                    # on the final epoch, collect a grid per heat-map channel
                    for qq in range(heat_maps.shape[2]):
                        heat_maps_list.append(vutils.make_grid(heat_maps[0:5,nstack-1,qq].unsqueeze(1), nrow=5, normalize=True, padding=5, pad_value=1).permute(1,2,0))
    heat_map_out = np.vstack(heat_maps_list)
    return losses, img_list, heat_map_out, model
| 2,989 | 1,054 |
import io
import os
import os.path
from os import listdir
from os.path import isfile, join
import numpy as np
import tensorflow as tf
from PIL import Image
import horovod.tensorflow as hvd
from model import DCGAN
from utils import pp, visualize, show_all_variables
# TF1-style command-line flags for the DCGAN trainer.
flags = tf.app.flags
flags.DEFINE_integer("epoch", 25, "Epoch to train [25]")
flags.DEFINE_float("learning_rate", 0.0002, "Learning rate of for adam [0.0002]")
flags.DEFINE_float("beta1", 0.5, "Momentum term of adam [0.5]")
flags.DEFINE_float("train_size", np.inf, "The size of train images [np.inf]")
flags.DEFINE_integer("batch_size", None, "The size of batch images [64]")
flags.DEFINE_integer("grid_height", 8, "Grid Height")
flags.DEFINE_integer("grid_width", 8, "Grid Width")
flags.DEFINE_integer("input_height", None, "The size of image to use (will be center cropped). [108]")
flags.DEFINE_integer("input_width", None, "The size of image to use (will be center cropped). If None, same value as input_height [None]")
flags.DEFINE_integer("output_height", None, "The size of the output images to produce [64]")
flags.DEFINE_integer("output_width", None, "The size of the output images to produce. If None, same value as output_height [None]")
flags.DEFINE_string("dataset", "celebA", "The name of dataset [celebA, mnist, lsun]")
flags.DEFINE_string("input_fname_pattern", "*.jpg", "Glob pattern of filename of input images [*]")
flags.DEFINE_string("checkpoint_dir", "checkpoint", "Directory name to save the checkpoints [checkpoint]")
flags.DEFINE_string("sample_dir", "samples", "Directory name to save the image samples [samples]")
flags.DEFINE_integer("sample_rate", None, "If == 5, it will take a sample image every 5 iterations")
flags.DEFINE_boolean("train", False, "True for training, False for testing [False]")
flags.DEFINE_boolean("crop", False, "True for training, False for testing [False]")
flags.DEFINE_boolean("visualize", False, "True for visualizing, False for nothing [False]")
flags.DEFINE_integer("generate_test_images", 100, "Number of images to generate during test. [100]")
flags.DEFINE_integer("nbr_of_layers_d", 5, "Number of layers in Discriminator")
flags.DEFINE_integer("nbr_of_layers_g", 5, "Number of layers in Generator")
flags.DEFINE_boolean("use_checkpoints", True, "Save and load checkpoints")
FLAGS = flags.FLAGS
# default batch_size
# Derive batch_size from the sample grid dimensions when not given explicitly.
if FLAGS.batch_size is None and FLAGS.grid_height is not None and FLAGS.grid_width is not None:
    batch_size = FLAGS.grid_height * FLAGS.grid_width
elif FLAGS.batch_size is not None:
    batch_size = FLAGS.batch_size
else:
    raise Exception('grid_height/grid_width or batch_size must be provided')
# default size parameters
input_width = FLAGS.input_width
input_height = FLAGS.input_height
output_width = FLAGS.output_width
output_height = FLAGS.output_height
if (input_height is None and input_width is None) or (output_height is None and output_width is None):
    # Infer image dimensions from the first file of the dataset directory.
    data_path = 'data/' + FLAGS.dataset
    first_image = [f for f in listdir(data_path) if isfile(join(data_path, f))][0]
    # Bug fix: `open(...).read()` leaked the file handle; use a context manager.
    with open(data_path + '/' + first_image, "rb") as image_file:
        image_data = image_file.read()
    image = Image.open(io.BytesIO(image_data))
    rgb_im = image.convert('RGB')
    input_width = rgb_im.size[0]
    output_width = rgb_im.size[0]
    input_height = rgb_im.size[1]
    output_height = rgb_im.size[1]
def main(_):
    """Entry point: build a DCGAN and either train it or run test/visualization.

    Uses module-level `FLAGS`, `batch_size`, and the inferred
    input/output dimensions; initializes Horovod for multi-GPU runs.
    """
    pp.pprint(flags.FLAGS.__flags)
    hvd.init()
    # width defaults to height when only one of the two was provided
    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height
    if FLAGS.use_checkpoints and not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    # sample directory name encodes the generator/discriminator depths
    sample_dir = FLAGS.sample_dir + "_g" + str(FLAGS.nbr_of_layers_g) + "_d" + str(FLAGS.nbr_of_layers_d)
    if not os.path.exists(sample_dir):
        os.makedirs(sample_dir)
    #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth=True
    # pin each Horovod worker to its own local GPU
    run_config.gpu_options.visible_device_list = str(hvd.local_rank())
    with tf.Session(config=run_config) as sess:
        if FLAGS.dataset == 'mnist':
            # mnist is class-conditional (y_dim=10)
            dcgan = DCGAN(
                sess,
                input_width=input_width,
                input_height=input_height,
                output_width=output_width,
                output_height=output_height,
                grid_height=FLAGS.grid_height,
                grid_width=FLAGS.grid_width,
                batch_size=batch_size,
                sample_num=batch_size,
                y_dim=10,
                z_dim=FLAGS.generate_test_images,
                dataset_name=FLAGS.dataset,
                input_fname_pattern=FLAGS.input_fname_pattern,
                crop=FLAGS.crop,
                checkpoint_dir=FLAGS.checkpoint_dir,
                sample_dir=sample_dir,
                nbr_of_layers_d=FLAGS.nbr_of_layers_d,
                nbr_of_layers_g=FLAGS.nbr_of_layers_g,
                use_checkpoints=FLAGS.use_checkpoints)
        else:
            dcgan = DCGAN(
                sess,
                input_width=input_width,
                input_height=input_height,
                output_width=output_width,
                output_height=output_height,
                grid_height=FLAGS.grid_height,
                grid_width=FLAGS.grid_width,
                batch_size=batch_size,
                sample_num=batch_size,
                z_dim=FLAGS.generate_test_images,
                dataset_name=FLAGS.dataset,
                input_fname_pattern=FLAGS.input_fname_pattern,
                crop=FLAGS.crop,
                checkpoint_dir=FLAGS.checkpoint_dir,
                sample_dir=sample_dir,
                sample_rate=FLAGS.sample_rate,
                nbr_of_layers_d=FLAGS.nbr_of_layers_d,
                nbr_of_layers_g=FLAGS.nbr_of_layers_g,
                use_checkpoints=FLAGS.use_checkpoints)
        show_all_variables()
        if FLAGS.train:
            dcgan.train(FLAGS)
        else:
            # test mode requires an existing checkpoint
            if not dcgan.load(FLAGS.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")
        # to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
        #                 [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
        #                 [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
        #                 [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
        #                 [dcgan.h4_w, dcgan.h4_b, None])
        # Below is codes for visualization
        OPTION = 1
        visualize(sess, dcgan, FLAGS, batch_size, OPTION)

if __name__ == '__main__':
    tf.app.run()
| 6,308 | 2,257 |
from OktaEventCollector import ReqParams, Client, Request, GetEvents, Method
import pytest
# Shared test fixtures: a GetEvents instance wired to a dummy client/request.
req_params = ReqParams(since='', sortOrder='ASCENDING', limit='5')
request = Request(method=Method.GET, url='https://testurl.com', headers={}, params=req_params)
client = Client(request)
get_events = GetEvents(client)
# sample events (uuid only) used by the deduplication tests
id1 = {'uuid': 'a5b57ec5febb'}
id2 = {'uuid': 'a5b57ec5fecc'}
id3 = {'uuid': 'a12f3c5d77f3'}
id4 = {'uuid': 'a12f3c5dxxxx'}
class MockResponse:
    """Minimal stand-in for an HTTP response: exposes only `.json()`.

    The `data` attribute is public on purpose — tests mutate it directly
    to change what the next `json()` call returns.
    """

    def __init__(self, data):
        # payload handed back verbatim by json()
        self.data = data

    def json(self):
        """Return the stored payload, mimicking requests.Response.json()."""
        return self.data
@pytest.mark.parametrize("events,ids,result", [
    # an event whose uuid is in `ids` is dropped
    ([id1, id2, id3], ['a12f3c5d77f3'], [id1, id2]),
    # no uuid matches -> nothing removed
    ([id1, id2, id3], ['a12f3c5dxxxx'], [id1, id2, id3]),
    # empty input stays empty
    ([], ['a12f3c5d77f3'], []),
    # multiple matches removed, order of survivors preserved
    ([{'uuid': 0}, {'uuid': 1}, {'uuid': 2}, {'uuid': 3}, {'uuid': 4}, {'uuid': 5}, {'uuid': 6}, {'uuid': 7},
      {'uuid': 8}, {'uuid': 9}], [0, 4, 7, 9],
     [{'uuid': 1}, {'uuid': 2}, {'uuid': 3}, {'uuid': 5}, {'uuid': 6}, {'uuid': 8}])])
def test_remove_duplicates(events, ids, result):
    """remove_duplicates drops every event whose uuid appears in `ids`."""
    assert get_events.remove_duplicates(events, ids) == result
@pytest.mark.parametrize("events,result", [
    # strictly increasing timestamps: only the newest uuid is kept
    ([{'published': '2022-04-17T12:31:36.667',
       'uuid': '1d0844b6-3148-11ec-9027-a5b57ec5faaa'},
      {'published': '2022-04-17T12:32:36.667',
       'uuid': '1d0844b6-3148-11ec-9027-a5b57ec5fbbb'},
      {'published': '2022-04-17T12:33:36.667',
       'uuid': '1d0844b6-3148-11ec-9027-a5b57ec5fccc'}],
     {'after': '2022-04-17T12:33:36.667000', 'ids': ['1d0844b6-3148-11ec-9027-a5b57ec5fccc']}),
    # two events share the newest timestamp: both uuids are kept as dedup ids
    ([{'published': '2022-04-17T12:31:36.667',
       'uuid': '1d0844b6-3148-11ec-9027-a5b57ec5faaa'},
      {'published': '2022-04-17T12:32:36.667',
       'uuid': '1d0844b6-3148-11ec-9027-a5b57ec5fbbb'},
      {'published': '2022-04-17T12:32:36.667',
       'uuid': '1d0844b6-3148-11ec-9027-a5b57ec5fccc'}], {'after': '2022-04-17T12:32:36.667000',
                                                          'ids': ['1d0844b6-3148-11ec-9027-a5b57ec5fccc',
                                                                  '1d0844b6-3148-11ec-9027-a5b57ec5fbbb']})])
def test_get_last_run(events, result):
    """get_last_run returns the newest timestamp and the uuids published at it."""
    assert get_events.get_last_run(events) == result
@pytest.mark.parametrize("time", ['2022-04-17T12:32:36.667)'])
def test_set_since_value(time):
    """set_since_value stores the given timestamp verbatim on the request params."""
    # NOTE(review): the trailing ')' in the parametrized value looks like a typo,
    # but the test only checks pass-through so it still holds — confirm intent.
    req_params.set_since_value(time)
    assert req_params.since == time
def test_make_api_call(mocker):
    """make_api_call returns exactly the payload of the (mocked) client call."""
    mock_res = MockResponse([{1}, {1}, {1}, {1}, {1}])
    mocker.patch.object(client, 'call', return_value=mock_res)
    assert get_events.make_api_call() == [{1}, {1}, {1}, {1}, {1}]
    # a second call with a larger payload is also passed through unchanged
    mock_res.data = [{1}, {1}, {1}, {1}, {1}, {1}, {1}, {1}, {1}, {1}]
    assert get_events.make_api_call() == [{1}, {1}, {1}, {1}, {1}, {1}, {1}, {1}, {1}, {1}]
| 2,768 | 1,381 |
# Copyright 2021 The NetKet Authors - All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import jax
import jax.numpy as jnp
import netket as nk
@pytest.mark.parametrize(
    "cusp_exponent", [pytest.param(None, id="cusp=None"), pytest.param(5, id="cusp=5")]
)
@pytest.mark.parametrize(
    "L",
    [
        pytest.param(1.0, id="1D"),
        pytest.param((1.0, 1.0), id="2D-Square"),
        pytest.param((1.0, 0.5), id="2D-Rectangle"),
    ],
)
def test_deepsets(cusp_exponent, L):
    """DeepSetRelDistance output must be invariant under particle permutation."""
    hilb = nk.hilbert.Particle(N=2, L=L, pbc=True)
    sdim = len(hilb.extent)
    # two particles at opposite coordinates, plus a rolled (permuted) copy
    x = jnp.hstack([jnp.ones(4), -jnp.ones(4)]).reshape(1, -1)
    xp = jnp.roll(x, sdim)
    ds = nk.models.DeepSetRelDistance(
        hilbert=hilb,
        cusp_exponent=cusp_exponent,
        layers_phi=2,
        layers_rho=2,
        features_phi=(10, 10),
        features_rho=(10, 1),
    )
    p = ds.init(jax.random.PRNGKey(42), x)
    # permutation symmetry: identical outputs for x and its permuted copy
    assert jnp.allclose(ds.apply(p, x), ds.apply(p, xp))
def test_deepsets_error():
    """Invalid DeepSetRelDistance configurations must raise at init time."""
    hilb = nk.hilbert.Particle(N=2, L=1.0, pbc=True)
    sdim = len(hilb.extent)
    x = jnp.hstack([jnp.ones(4), -jnp.ones(4)]).reshape(1, -1)
    xp = jnp.roll(x, sdim)
    # layers count (3) inconsistent with the features tuples (length 2)
    with pytest.raises(ValueError):
        ds = nk.models.DeepSetRelDistance(
            hilbert=hilb,
            layers_phi=3,
            layers_rho=3,
            features_phi=(10, 10),
            features_rho=(10, 1),
        )
        p = ds.init(jax.random.PRNGKey(42), x)
    # last rho feature must be 1 (scalar output)
    with pytest.raises(AssertionError):
        ds = nk.models.DeepSetRelDistance(
            hilbert=hilb,
            layers_phi=2,
            layers_rho=2,
            features_phi=(10, 10),
            features_rho=(10, 2),
        )
        p = ds.init(jax.random.PRNGKey(42), x)
    # non-periodic hilbert space is rejected
    with pytest.raises(ValueError):
        ds = nk.models.DeepSetRelDistance(
            hilbert=nk.hilbert.Particle(N=2, L=1.0, pbc=False),
            layers_phi=2,
            layers_rho=2,
            features_phi=(10, 10),
            features_rho=(10, 2),
        )
        p = ds.init(jax.random.PRNGKey(42), x)
| 2,563 | 1,021 |
from tkinter import *

# Tiny Tk demo: a single button that imports a module on first click.
root = Tk()
b = Button(root)
b['text'] = 'test'

def test(event):
    """Left-click handler for the button."""
    # NOTE(review): `import test` imports a module named "test" (shadowing this
    # function inside the handler); presumably the module runs something as an
    # import side effect on the first click only — confirm the intended target.
    import test

b.bind('<Button-1>', test)
b.pack()
root.mainloop()
from collections import defaultdict

# Puzzle input: the starting numbers for the memory game (AoC 2020 day 15).
starting_numbers = [16, 12, 1, 0, 15, 7, 11]
def solver(rounds, numbers=(16, 12, 1, 0, 15, 7, 11)):
    """Play the memory game (AoC 2020 day 15) and return the number spoken
    on turn `rounds`.

    Rules: after the starting numbers, each turn speaks 0 if the previous
    number was new, otherwise the gap between its last two occurrences.

    Args:
        rounds: 1-based turn to report; must be >= len(numbers).
        numbers: starting numbers. Defaults to the original puzzle input so
            existing `solver(r)` callers behave exactly as before; passing a
            different sequence generalizes the function.
    """
    last_spoken = {}                  # number -> turn it was last spoken (lags one turn for the current number)
    times_spoken = defaultdict(int)   # number -> how many times it has been spoken
    for turn, value in enumerate(numbers, start=1):
        last_spoken[value] = turn
        times_spoken[value] += 1
    current = numbers[-1]
    turn = len(numbers)
    while turn != rounds:
        turn += 1
        previous = current
        if times_spoken[previous] <= 1:
            current = 0  # previous number had never been spoken before
        else:
            current = turn - 1 - last_spoken[previous]
        times_spoken[current] += 1
        # record `previous`'s turn only now, so the gap above used its prior occurrence
        last_spoken[previous] = turn - 1
    return current
if __name__ == '__main__':
    # Part Two iterates 30M rounds and takes noticeably longer than Part One.
    print(f'Part One: {solver(2020)}')
    print(f'Part Two: {solver(30000000)}')
| 803 | 314 |
# Author: Sergey Chaban <sergey.chaban@gmail.com>
import sys
import hou
import os
import imp
import re
import inspect
from math import *
from array import array
import xcore
import xhou
try: xrange
except: xrange = range
def writeBits(bw, bits, nbits):
    """Write the low `nbits` bits of integer `bits` to writer `bw`.

    Bytes are emitted little-endian (least-significant byte first); the
    final byte is zero-padded when nbits is not a multiple of 8.

    Args:
        bw: binary writer exposing writeU8().
        bits: integer bit accumulator.
        nbits: number of valid bits in `bits`; 0 writes nothing.
    """
    nbytes = (nbits + 7) // 8  # ceil(nbits / 8), without the external xcore helper
    wk = bits
    for _ in range(nbytes):
        bw.writeU8(wk & 0xFF)
        wk >>= 8
class ImgPlane:
    """One plane ("r", "g", "b" or "a") of a Houdini COP image, with optional
    lossless bit-stream compression of the float pixel data."""

    def __init__(self, ximg, name, rawFlg = not True):
        # NOTE: the default `not True` evaluates to False, i.e. compression on.
        self.ximg = ximg
        self.name = name
        self.nameId = ximg.strLst.add(name)
        # alpha comes from the "A" plane; colors from one component of "C"
        if name == "a":
            self.data = ximg.cop.allPixels("A")
        else:
            self.data = ximg.cop.allPixels("C", xhou.getRGBComponentName(ximg.cop, name))
        # a plane is constant when every pixel equals the first one
        ref = self.data[0]
        self.constFlg = True
        for val in self.data:
            if val != ref:
                self.constFlg = False
                break
        self.compress(rawFlg)

    def compress(self, rawFlg):
        """Choose the storage format for this plane:
        0 = constant (single float), -1 = raw floats, 1 = packed bit stream."""
        self.minVal = min(self.data)
        self.maxVal = max(self.data)
        # offset values so negatives become non-negative before bit tricks
        self.valOffs = self.minVal
        if self.valOffs > 0: self.valOffs = 0
        self.bitCnt = 0
        self.bits = 0
        self.minTZ = 32  # minimum trailing-zero count over all pixel bit patterns
        if self.constFlg:
            self.format = 0
            return
        if rawFlg:
            self.format = -1
            return
        self.format = 1
        # pass 1: trailing zero bits common to every value can be dropped
        for fval in self.data:
            fval -= self.valOffs
            ival = xcore.getBitsF32(fval) & ((1<<31)-1)  # mask out the sign bit
            self.minTZ = min(self.minTZ, xcore.ctz32(ival))
        # pass 2: hash-table prediction + xor residual coding, rows bottom-up
        tblSize = 1 << 8
        tbl = [0 for i in xrange(tblSize)]
        pred = 0
        hash = 0
        nlenBits = 5  # width of the per-value residual-length field
        w = self.ximg.w
        h = self.ximg.h
        for y in xrange(h):
            for x in xrange(w):
                idx = (h-1-y)*w + x  # bottom-up row order
                fval = self.data[idx] - self.valOffs
                ival = xcore.getBitsF32(fval) & ((1<<31)-1)
                ival >>= self.minTZ
                xor = ival ^ pred  # residual against the table prediction
                tbl[hash] = ival
                hash = ival >> 21
                hash &= tblSize - 1
                pred = tbl[hash]
                xlen = 0
                if xor: xlen = xcore.bitLen32(xor)
                # emit [xlen : nlenBits][xor : xlen] into the accumulator
                dat = xlen
                if xlen: dat |= (xor & ((1<<xlen)-1)) << nlenBits
                self.bits |= dat << self.bitCnt
                self.bitCnt += nlenBits + xlen

    def writeInfo(self, bw):
        """Write this plane's fixed-size (0x20-byte) header; the data offset
        at +00 is back-patched later by the exporter."""
        bw.writeU32(0) # +00 -> data
        self.ximg.writeStrId16(bw, self.nameId) # +04
        bw.writeU8(self.minTZ) # +06
        bw.writeI8(self.format) # +07
        bw.writeF32(self.minVal) # +08
        bw.writeF32(self.maxVal) # +0C
        bw.writeF32(self.valOffs) # +10
        bw.writeU32(self.bitCnt) # +14
        bw.writeU32(0) # +18 reserved0
        bw.writeU32(0) # +1C reserved1

    def writeData(self, bw):
        """Write the plane payload in the format selected by compress()."""
        if self.format == 0:
            bw.writeF32(self.data[0])  # constant plane: single float
        elif self.format == 1:
            writeBits(bw, self.bits, self.bitCnt)  # packed bit stream
        else:
            # raw floats, bottom-up rows to match the compressed ordering
            w = self.ximg.w
            h = self.ximg.h
            for y in xrange(h):
                for x in xrange(w):
                    idx = (h-1-y)*w + x
                    bw.writeF32(self.data[idx])
class ImgExporter(xcore.BaseExporter):
    """Exports a Houdini COP image to the custom "XIMG" container format."""

    def __init__(self):
        xcore.BaseExporter.__init__(self)
        self.sig = "XIMG"  # 4-byte file signature

    def build(self, copPath, rawFlg = True):
        """Capture resolution and the r/g/b/a planes of the COP node at `copPath`."""
        self.copPath = copPath
        self.nameId, self.pathId = self.strLst.addNameAndPath(copPath)
        self.cop = hou.node(copPath)
        self.w = self.cop.xRes()
        self.h = self.cop.yRes()
        self.planes = {}
        self.addPlane("r", rawFlg)
        self.addPlane("g", rawFlg)
        self.addPlane("b", rawFlg)
        self.addPlane("a", rawFlg)

    def addPlane(self, name, rawFlg = True):
        # builds and (optionally) compresses the plane immediately
        self.planes[name] = ImgPlane(self, name, rawFlg)

    def writeHead(self, bw, top):
        """Write the image header fields; the plane-info offset is patched later."""
        npln = len(self.planes)
        bw.writeU32(self.w) # +20
        bw.writeU32(self.h) # +24
        bw.writeU32(npln) # +28
        self.patchPos = bw.getPos()
        bw.writeI32(0) # +2C -> info

    def writeData(self, bw, top):
        """Write all plane headers then each payload, back-patching offsets."""
        plnLst = []
        for plnName in self.planes: plnLst.append(self.planes[plnName])
        npln = len(plnLst)
        bw.align(0x10)
        infoTop = bw.getPos()
        bw.patch(self.patchPos, bw.getPos() - top) # -> info
        for i in xrange(npln):
            plnLst[i].writeInfo(bw)
        for i, pln in enumerate(plnLst):
            bw.align(4)
            # patch the +00 data offset inside this plane's 0x20-byte header
            bw.patch(infoTop + (i*0x20), bw.getPos() - top)
            xcore.dbgmsg("Saving plane " + pln.name)
            pln.writeData(bw)

    def save(self, outPath):
        xcore.BaseExporter.save(self, outPath)
| 3,987 | 2,081 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import shutil
from collections import namedtuple
from ansible.parsing.dataloader import DataLoader
from ansible.vars.manager import VariableManager
from ansible.inventory.manager import InventoryManager
from ansible.playbook.play import Play
from ansible.executor.task_queue_manager import TaskQueueManager
import ansible.constants as C
from django.shortcuts import render, HttpResponse
from .models import Host, Group, Module, Args
def mainpage(request):
    """Render the landing page."""
    return render(request, 'webansi/mainpage.html')
def index(request):
    """Render the host-information page."""
    return render(request, 'webansi/hostinfo.html')
def addhosts(request):
    """Register a host (and its group) on POST, then list all groups' hosts."""
    if request.method == 'POST':
        group_name = request.POST.get('group')
        ip_addr = request.POST.get('ipaddr')
        hostname = request.POST.get('hostname')
        # get_or_create returns (obj, created); we only need the object
        group_obj = Group.objects.get_or_create(group=group_name)[0]
        Host.objects.get_or_create(hostname=hostname, ipaddr=ip_addr, group=group_obj)
    # map each Group to the hostnames it contains, e.g.
    # {<Group webservers>: ['node1', 'node2'], ...}
    host_info = {grp: [h.hostname for h in grp.host_set.all()]
                 for grp in Group.objects.all()}
    return render(request, 'webansi/addhosts.html', {'host_info': host_info})
def addmodules(request):
    """Register an Ansible module (with args) on POST, then list all modules."""
    if request.method == 'POST':
        module_name = request.POST.get('module')
        module_args = request.POST.get('args')
        # get_or_create returns (obj, created); we only need the object
        module_obj = Module.objects.get_or_create(mod_name=module_name)[0]
        Args.objects.get_or_create(mod_args=module_args, mod=module_obj)
    # map each Module to the list of its stored argument strings
    mod_info = {mod: [a.mod_args for a in mod.args_set.all()]
                for mod in Module.objects.all()}
    return render(request, 'webansi/addmodules.html', {'mod_info': mod_info})
def exec_task(dest, mod, args):
    """Run a single Ansible module against `dest` via the TaskQueueManager API.

    Args:
        dest: host pattern — an IP/hostname or a group name.
        mod: Ansible module name (e.g. 'shell').
        args: module argument string.

    Returns None; task results are not collected (fire-and-forget).
    """
    Options = namedtuple('Options',
                         ['connection', 'module_path', 'forks', 'become', 'become_method', 'become_user', 'check', 'diff'])
    options = Options(connection='smart', module_path=['/to/mymodules'], forks=10, become=None, become_method=None,
                      become_user=None, check=False, diff=False)
    loader = DataLoader()
    passwords = dict()
    # inventory is produced by a dynamic-inventory script
    inventory = InventoryManager(loader=loader, sources=['ansicfg/dhosts.py'])
    variable_manager = VariableManager(loader=loader, inventory=inventory)
    # ad-hoc one-task play built in memory
    play_source = dict(
        name="Ansible Play",
        hosts=dest,
        gather_facts='no',
        tasks=[
            dict(action=dict(module=mod, args=args), register='shell_out'),
        ]
    )
    play = Play().load(play_source, variable_manager=variable_manager, loader=loader)
    tqm = None
    try:
        tqm = TaskQueueManager(
            inventory=inventory,
            variable_manager=variable_manager,
            loader=loader,
            options=options,
            passwords=passwords,
        )
        result = tqm.run(play)
    finally:
        # always tear down workers and remove Ansible's temporary directory
        if tqm is not None:
            tqm.cleanup()
        shutil.rmtree(C.DEFAULT_LOCAL_TMP, True)
def tasks(request):
if request.method == 'POST':
ip = request.POST.get('ipaddr')
group = request.POST.get('group')
mod = request.POST.get('module')
args = request.POST.get('args')
print ip, group, mod, args
if ip:
dest = ip
else:
dest = group
exec_task(dest, mod, args)
hosts = list(Host.objects.all())
groups = list(Group.objects.all())
mod_info = {}
mods = Module.objects.all()
for m in mods:
argss = []
for args in m.args_set.all():
argss.append(args.mod_args)
mod_info[m] = argss
result = {'hosts': hosts, 'groups': groups, 'mods': mods, 'args': args, 'mod_info': mod_info}
return render(request, 'webansi/tasks.html', result)
| 4,000 | 1,282 |
# !/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2020/4/26 20:25
# @author : Mo
# @function: constant of token-symbol and hyper-parameters-default
from macadam.conf.path_config import path_model_dir
from typing import Dict
import os
# Supported embedding backends.
EMBEDDING_TYPE = ["ROBERTA","ELECTRA","RANDOM","ALBERT",
                  "XLNET","NEZHA","GPT2","WORD","BERT", "MIX"]
# symbol of common token
MASK = "[MASK]"
CLS = "[CLS]"
SEP = "[SEP]"
PAD = "[PAD]"
UNK = "[UNK]"
BOS = "[BOS]"
EOS = "[EOS]"
WC = "[WC]"
# task of macadam
SL = "SL" # sequence-labeling(ner, pos, tag)
TC = "TC" # text-classification
RE = "RE" # relation-extraction
# hyper_parameters of deep-learning, include sharing, embed, graph, train, save and data
# Default hyper-parameters, grouped into sharing / embed / graph / train / save / data.
hyper_parameters_default = {
    "sharing": {
        "length_max": None,    # max sentence length; None -> inferred from the 95th percentile of the data (long BERT inputs are slow and may OOM; 20-50 recommended)
        "embed_size": 768,     # embedding dimension: 768 for BERT, 300 for word2vec, smaller for chars
        "vocab_size": None,    # vocabulary size; can be left unset and updated from the corpus
        "trainable": True,     # whether the embedding layer is fine-tuned (dynamic) or frozen (static)
        "task": None,          # "SL" (sequence-labeling), "TC" (text-classification) or "RE" (relation-extraction)
        "token_type": "CHAR",  # smallest unit: "CHAR", "WORD" or "NGRAM" (pre-tokenize the corpus for word2vec)
        "embed_type": "BERT",  # embedding backend: "WORD", "RANDOM", "BERT", "ALBERT", "ROBERTA", "NEZHA", "XLNET", "ELECTRA", "GPT2"
        "gpu_memory_fraction": 0.6,  # GPU memory fraction, 0-1
    },
    "embed": {
        "layer_idx": [-2],        # which BERT layer outputs to use: -1..-12 or 0..11, e.g. 0, 1, 11, -1, -2, -12
        "path_embed": None,       # path to an external embedding model (word2vec, bert, ...)
        "merge_type": "concat",   # how to merge BERT layer outputs: "concat", "add", "pool-max", "pool-avg", "multi"
        "application": "encode",  # bert4keras downstream mode: "encode", "lm", "unilm", ...
        "length_first": None,     # max length of the first sentence (longer -> truncate, shorter -> pad)
        "length_second": None,    # max length of the second sentence (longer -> truncate, shorter -> pad)
        "xlnet_embed": {"attention_type": "bi",
                        "memory_len": 0,
                        "target_len": 5},  # keras-xlnet specific parameters
    },
    "graph": {
        "filters_size": [3, 4, 5],  # convolution kernel sizes, 1-10
        "filters_num": 300,         # number of filters (text-cnn: 300-600)
        "rnn_type": None,           # RNN flavor: "LSTM", "GRU" or "Bidirectional-GRU"
        "rnn_unit": 256,            # RNN hidden size, multiple of 8 (64, 128, 256, 512, 768, ...)
        "dropout": 0.5,             # dropout probability, 0-1
        "activate_mid": "tanh",     # hidden activation: "relu", "tanh" or "sigmoid"
        "activate_end": "softmax",  # final-layer activation (classification/NER head)
        "use_onehot": True,         # whether labels are one-hot encoded
        "use_crf": False,           # whether to add a CRF layer (task="sl", sequence labeling)
        "loss": None,               # loss function, e.g. "categorical_crossentropy"
        "metrics": "accuracy",      # metric used to keep the best model (loss, acc, f1, ...)
        "optimizer": "Adam",        # one of ["Adam", "Radam", "RAdam,Lookahead"]
        "optimizer_extend": [
            "gradient_accumulation",
            "piecewise_linear_lr",
            "layer_adaptation",
            "lazy_optimization",
            "weight_decay",  # bug fix: was "]weight_decay" with a stray bracket
            "lookahead"],  # optimizer extensions: ["gradient_accumulation", "piecewise_linear_lr",
                           # "layer_adaptation", "lazy_optimization", "weight_decay", "lookahead"]
    },
    "train": {
        "learning_rate": 1e-3,  # key hyper-parameter: ~1e-3 for word2vec, 5e-5 or 2e-5 for BERT
        "decay_rate": 0.999,    # multiplicative LR decay: lr = lr * rate
        "decay_step": 1000,     # decay the LR every N steps
        "batch_size": 32,       # too small -> slow/unstable convergence; too large -> worse generalization
        "early_stop": 6,        # stop after N epochs without metric improvement
        "epochs": 20,           # maximum number of training epochs
        "label": None,          # number of classes; None -> inferred automatically
        "is_training": True,    # train vs predict (controls whether the optimizer is loaded)
    },
    "save": {
        # "path_hyper_parameters": None,  # path of the hyper-parameter file
        "path_model_dir": None,   # model directory (save_best_only=True, save_weights_only=True)
        "path_model_info": None,  # all hyper-parameters, stored in model_info.json
        "path_fineture": None,    # fine-tuned embedding file (char/word/bert vectors)
    },
    "data": {
        "train_data": None,  # training data
        "val_data": None,    # validation data
    },
}
class Config:
    """Configuration holder: merges user-supplied hyper-parameters over the
    module-level defaults and exposes the resolved values as attributes."""

    def __init__(self, hyper_parameters: Dict = None):
        """
        Init of hyper_parameters and build_embed.
        Args:
            hyper_parameters: hyper_parameters of all, which contains "sharing", "embed", "graph", "train", "save" and "data".
        Returns:
            None
        """
        import copy  # local import so the module-level import block stays untouched

        # A mutable default argument ({}) would be shared across calls; use
        # None as the default and normalize here instead.
        hyper_parameters = hyper_parameters or {}
        # Deep-copy the defaults so per-instance updates never mutate the
        # shared module-level hyper_parameters_default dict (the original
        # code updated that global in place).
        self.hyper_parameters = copy.deepcopy(self.get_hyper_parameters_default())
        # Merge only the keys the caller supplied; unknown top-level keys are
        # stored as-is instead of raising KeyError.
        for key, value in hyper_parameters.items():
            if key in self.hyper_parameters and isinstance(value, dict):
                self.hyper_parameters[key].update(value)
            else:
                self.hyper_parameters[key] = value
        self.params_sharing = self.hyper_parameters.get("sharing", {})
        self.params_embed = self.hyper_parameters.get("embed", {})
        self.params_graph = self.hyper_parameters.get("graph", {})
        self.params_train = self.hyper_parameters.get("train", {})
        self.params_save = self.hyper_parameters.get("save", {})
        self.params_data = self.hyper_parameters.get("data", {})
        # params of sharing
        self.gpu_memory_fraction = self.params_sharing.get("gpu_memory_fraction", 0.60)  # GPU memory fraction to use
        self.embed_type = self.params_sharing.get("embed_type", "RANDOM")  # embedding type
        self.token_type = self.params_sharing.get("token_type", "CHAR")  # tokenization granularity
        self.task = self.params_sharing.get("task", None)  # downstream task
        self.length_max = self.params_sharing.get("length_max", None)  # max sequence length
        self.vocab_size = self.params_sharing.get("vocab_size", None)  # vocabulary size
        self.embed_size = self.params_sharing.get("embed_size", None)  # embedding dimension
        self.trainable = self.params_sharing.get("trainable", True)  # whether embeddings are fine-tuned
        # params of embed
        self.layer_idx = self.params_embed.get("layer_idx", [])  # encoder layers to extract
        self.path_embed = self.params_embed.get("path_embed", None)  # pretrained embedding path
        self.merge_type = self.params_embed.get("merge_type", "concat")  # how extracted layers are merged
        self.length_first = self.params_embed.get("length_first", None)  # max length of first sentence
        self.length_second = self.params_embed.get("length_second", None)  # max length of second sentence
        self.xlnet_embed = self.params_embed.get("xlnet_embed", {})  # xlnet-specific settings
        self.attention_type = self.params_embed.get("attention_type", "bi")
        # NOTE(review): memory_len/target_len are read from the top level of
        # "embed" here, but the defaults dict nests them inside "xlnet_embed"
        # -- confirm which location callers actually populate.
        self.memory_len = self.params_embed.get("memory_len", 128)
        self.target_len = self.params_embed.get("target_len", 128)
        # params of graph
        self.filters_size = self.params_graph.get("filters_size", [3, 4, 5])  # conv kernel sizes
        self.filters_num = self.params_graph.get("filters_num", 300)  # number of conv filters
        self.rnn_type = self.params_graph.get("rnn_type", None)  # "LSTM", "GRU" or "Bidirectional-GRU"
        self.rnn_unit = self.params_graph.get("rnn_unit", 256)  # RNN hidden units
        self.dropout = self.params_graph.get("dropout", 0.5)  # dropout probability
        self.activate_mid = self.params_graph.get("activate_mid", "tanh")  # hidden-layer activation
        self.activate_end = self.params_graph.get("activate_end", "softmax")  # output-layer activation
        self.use_onehot = self.params_graph.get("use_onehot", True)  # one-hot encode labels
        self.use_crf = self.params_graph.get("use_crf", False)  # CRF head for sequence labeling
        # The loss default depends on the label encoding chosen above.
        self.loss = self.params_graph.get("loss", "categorical_crossentropy" if self.use_onehot
                                          else "sparse_categorical_crossentropy")
        self.metrics = self.params_graph.get("metrics", "accuracy")  # metric used for checkpointing
        self.optimizer = self.params_graph.get("optimizer", "Adam").upper()  # normalized to upper case
        self.optimizer_extend = self.params_graph.get("optimizer_extend", [])  # optimizer add-ons
        # params of train
        self.learning_rate = self.params_train.get("learning_rate", 5e-5)
        self.decay_rate = self.params_train.get("decay_rate", 0.999)  # multiplicative lr decay
        self.decay_step = self.params_train.get("decay_step", 32000)  # decay every N steps
        self.early_stop = self.params_train.get("early_stop", 6)  # early-stopping patience (epochs)
        self.batch_size = self.params_train.get("batch_size", 32)
        self.epochs = self.params_train.get("epochs", 20)
        self.label = self.params_train.get("label", None)  # number of classes (None = auto)
        self.is_training = self.params_train.get("is_training", True)  # train vs predict mode
        # params of save
        self.path_model_dir = self.params_save.get("path_model_dir", path_model_dir)
        # self.path_model_info = self.params_save.get("path_model_info", None)
        self.path_fineture = self.params_save.get("path_fineture", None)  # fine-tuned embedding path
        # params of data
        self.train_data = self.params_data.get("train_data", None)
        self.val_data = self.params_data.get("val_data", None)
        # Fixed ids of the special tokens in the vocabulary.
        self.token_dict = {PAD: 0, UNK: 1,
                           CLS: 2, SEP: 3,
                           BOS: 4, EOS: 5,
                           MASK: 6, WC: 7
                           }
        # Create the model save directory (recursively) if it does not exist.
        if not self.path_model_dir: self.path_model_dir = path_model_dir
        if not os.path.exists(self.path_model_dir):
            os.makedirs(self.path_model_dir)

    def get_hyper_parameters_default(self) -> Dict:
        """
        Get hyper_parameters of default.
        Args:
            None
        Returns:
            Dict: the shared module-level default dict; callers should copy
            it before mutating, as __init__ does.
        """
        return hyper_parameters_default
| 9,698 | 4,052 |
# -*- coding: utf-8 -*-
"""
@date: 2020/12/30 下午9:36
@file: test_mobilenetv3_backbone.py
@author: zj
@description:
"""
import torch
from zcls.model.backbones.mobilenet.mobilenetv3_backbone import MobileNetV3Backbone
def test_mobilenet_v3_backbone():
    """Forward a dummy batch through MobileNetV3Backbone and verify the
    output feature-map shape."""
    inputs = torch.randn(1, 3, 224, 224)
    backbone = MobileNetV3Backbone(
        in_channels=3,
        base_channels=16,
        out_channels=960,
        width_multiplier=1.,
        round_nearest=8,
        reduction=4,
        attention_type='SqueezeAndExcitationBlock2D',
        conv_layer=None,
        norm_layer=None,
        act_layer=None,
    )
    print(backbone)
    features = backbone(inputs)
    print(features.shape)
    # 224x224 input reduced 32x -> 7x7 spatial, with 960 output channels.
    assert features.shape == (1, 960, 7, 7)
if __name__ == '__main__':
    # Allow running this test directly as a script (outside pytest).
    test_mobilenet_v3_backbone()
| 786 | 336 |
import os, re, subprocess
import numpy as np
from spyci import spyci
from PySpice.Spice.NgSpice.Shared import NgSpiceShared
from analog_sim.spice.generic import GenericSpiceInterface
class NgSpiceInterface(GenericSpiceInterface):
    '''
    Interface for driving the ngspice simulator, either through the
    libngspice shared library or the command-line executable.
    '''

    def __init__(self, verbose=True, netlist_path=None, pdk_path=None):
        '''
        Instantiate the object.

        verbose:      enable verbose output.
        netlist_path: accepted for interface compatibility; not used here.
        pdk_path:     accepted for interface compatibility; not used here.
        '''
        # Simulator settings; 'shared' selects the libngspice backend
        # instead of launching the command-line executable.
        simulator_settings = {'executable': 'ngspice',
                              # 'shared' : True,
                              'shared': False,
                              'silent': False}
        self.config = {}
        self.config['simulator'] = simulator_settings
        self.config['verbose'] = verbose

        # create an ngspice shared object
        self.ngspice = NgSpiceShared.new_instance()
    def run_simulation(self, new_instance=True, outputs=None):
        '''
        Run the simulation, either via the shared libngspice instance or by
        launching the ngspice executable in batch mode, then read back the
        results.

        new_instance: in shared mode, re-source the netlist before running.
        outputs:      optional list of result names; when given, each
                      "rundir/spiceinterface_temp_<name>.raw" file is read
                      back individually instead of the single raw file.
        '''
        # pre-create the file locations
        # NOTE(review): run_dir/temp_netlist/temp_result/temp_log are not set
        # in this class -- presumably provided by GenericSpiceInterface;
        # confirm against the base class.
        netlist_path = self.run_dir + '/' + self.temp_netlist
        raw_path = self.run_dir + '/' + self.temp_result
        log_path = self.run_dir + '/' + self.temp_log
        # run ngspice
        if self.config['simulator']['shared']:
            # destroy previous run data
            self.ngspice.destroy()
            # self.ngspice.exec_command("reset")
            # self.ngspice.reset()
            # load the netlist into the shared ngspice instance
            if new_instance:
                self.ngspice.source(netlist_path)
            # run the simulation
            # NOTE(review): suppress_stdout_stderr is not imported in this
            # module's visible imports -- confirm it is provided elsewhere,
            # otherwise silent shared-mode runs raise NameError.
            if self.config['simulator']['silent']:
                with suppress_stdout_stderr():
                    self.ngspice.run()
            else:
                self.ngspice.run()
            # save the outputs in ASCII format so they can be parsed later
            self.ngspice.exec_command("set filetype=ascii")
            self.ngspice.exec_command("write %s" % raw_path)
        else:
            # set the output format to ascii required by spyci
            os.environ["SPICE_ASCIIRAWFILE"] = "1"
            self.result_type = 'ascii'
            # run the simulation through command line: batch mode (-b),
            # raw results file (-r), log file (-o)
            bash_command = "ngspice -b -r %s -o %s %s" % (raw_path, log_path, netlist_path)
            process = subprocess.Popen(bash_command.split(), stdout=subprocess.PIPE)
            output, error = process.communicate()
        # check if error occured by scanning the simulator log
        with open(log_path) as f:
            sim_log = f.read()
            if 'fatal' in sim_log or 'aborted' in sim_log:
                # print the log wrapped in ANSI red escape codes so the
                # failure is obvious on the console
                print('\033[91m')
                print('-'*150)
                print('ERROR IN SIMULATION:')
                print(sim_log)
                print('-'*150)
                print('\033[0m')
        # read in the results of the simulation
        if outputs:
            self.simulation_data = {}
            for output in outputs:
                # NOTE(review): path is hard-coded to "rundir/..." rather
                # than built from self.run_dir -- confirm they always match.
                self.read_results("rundir/spiceinterface_temp_"+output+".raw", output)
        else:
            self.read_results(raw_path)
def netlist_voltage_pwl(self, name, voltage, negative='0', dc=0):
'''
Write a netlist line for a DC PWL source
'''
return 'V' + name + ' ' + name + ' ' + negative + ' dc %f ' % dc + 'pwl ( ' + voltage + ' )'
def netlist_temperature(self, temperature):
'''
Set the temperature
'''
# form the include line
line = '.option TEMP=%s' % temperature
return line
def netlist_control_block(self, control_block):
'''
Set a control block
'''
# form the include line
line = '.control\n'
line += control_block + '\n'
line += '.endc'
return line
def netlist_sim_tran(self, final_time, initial_step=-1, use_intitial_conditions=False):
'''
Define a transient simulation
TRAN <initial step value> <final time value>
'''
# if the rise and fall is not set then default to 1/50 of the period
if initial_step < 0:
initial_step = final_time/1000
# form the transient instruction
line = '.tran %s %s' % (self.unit_format(initial_step), self.unit_format(final_time))
if use_intitial_conditions:
line += ' uic'
return line | 4,348 | 1,266 |
# JSON Schema used to validate the sites configuration document.
# Fix: "items" must live INSIDE the "sites" array schema; previously it was a
# sibling property named "items", so the array elements were never validated.
SITES_JSON_SCHEMA = {
    "$schema": "http://json-schema.org/draft-07/schema#",
    "type": "object",
    "properties": {
        "sites": {
            "type": "array",
            # every element of "sites" must match the site definition below
            "items": {"$ref": "#/$defs/site"}
        }
    },
    "$defs": {
        "site": {
            "type": "object",
            "required": ["url"],
            "properties": {
                "url": {
                    "type": "string",
                    "description": "Website URL"
                },
                "pattern": {
                    "type": "string",
                    "description": ("Python-compatible RegEx pattern to be "
                                    "used to validate website content")
                }
            }
        }
    }
}
| 728 | 200 |
from .base import Trait

# Package version.
__version__ = "0.0.1"
# Public API of the package.
__all__ = ("Trait",)
| 68 | 31 |
"""
whois.py - Willie Whois module
Copyright 2014, Ellis Percival (Flyte) willie@failcode.co.uk
Licensed under the Eiffel Forum License 2.
http://willie.dftba.net
A module to enable Willie to perform WHOIS lookups on nicknames.
This can either be to have Willie perform lookups on behalf of
other people, or can be imported and used by other modules.
"""
from willie.module import commands, event, rule
from time import sleep
from datetime import datetime, timedelta
# Cached WHOIS entries older than this are purged by _clear_old_entries().
AGE_THRESHOLD = timedelta(days=1)
class Whois(object):
    """A single parsed WHOIS (numeric 311) reply.

    Stores the nick/ident/host/realname fields from the server response,
    plus the time at which the reply was received.
    """

    def __init__(self, data):
        # data is the 311 numeric's argument list:
        # (target, nick, ident, host, '*', realname)
        target, self.nick, self.ident, self.host, star, self.name = data
        self.datetime = datetime.now()

    def __repr__(self):
        return '%s(nick=%r, ident=%r, host=%r, name=%r, datetime=%r)' % (
            self.__class__.__name__, self.nick, self.ident,
            self.host, self.name, self.datetime)

    def __str__(self):
        return '%s!%s@%s * %s' % (self.nick, self.ident,
                                  self.host, self.name)

    def set_chans(self, trigger):
        # Channel list delivered later by the 319 numeric.
        self.chans = trigger
class WhoisFailed(Exception):
    """Raised when a WHOIS lookup cannot be completed."""
def setup(bot):
bot.memory['whois'] = {}
def check_setup(bot):
    """Ensure the WHOIS cache exists even if setup() never ran."""
    bot.memory.setdefault('whois', {})
def _clear_old_entries(bot):
    """
    Purge cached WHOIS entries older than AGE_THRESHOLD from the
    bot's memory.
    """
    cutoff = datetime.now() - AGE_THRESHOLD
    stale = [nick for nick, entry in bot.memory['whois'].items()
             if entry.datetime < cutoff]
    for nick in stale:
        bot.memory['whois'].pop(nick, None)
def send_whois(bot, nick):
    """
    Ask the IRC server for WHOIS information on *nick*.
    """
    # Raw IRC command; the reply is handled asynchronously by the
    # numeric-event handlers in this module.
    bot.write(['WHOIS', nick])
def get_whois(bot, nick):
    """
    Waits for the response to be put into the bot's
    memory by the receiving thread (polling, up to ~20 seconds).

    Returns the cached Whois object, or None when the server did not
    reply in time or replied with 401 (no such nickname).
    """
    check_setup(bot)
    i = 0
    # Poll up to 10 times, 2 seconds apart, for a reply handler to store
    # a result under the lowercased nick.
    while nick.lower() not in bot.memory['whois'] and i < 10:
        i += 1
        sleep(2)
    if nick.lower() not in bot.memory['whois']:
        # No reply arrived within the timeout.
        return
        #raise WhoisFailed('No reply from server')
    elif bot.memory['whois'][nick.lower()] is None:
        # A stored None is the 401 marker (no such nickname); drop it so it
        # does not linger in the cache.
        try:
            del bot.memory['whois'][nick.lower()]
        except KeyError:
            pass
        #raise WhoisFailed('No such nickname')
    # A little housekeeping
    _clear_old_entries(bot)
    try:
        return bot.memory['whois'][nick.lower()]
    except KeyError:
        return None
def whois(bot, nick):
    """
    Perform a fresh WHOIS lookup: sends the WHOIS command to the server,
    then waits for the response to be put into the bot's memory by the
    receiving thread.

    Returns the Whois object, or None when the lookup failed.
    """
    # Remove any cached entry first so that we get the latest data.
    check_setup(bot)
    try:
        # Cache entries are stored keyed by the lowercased nick (see
        # whois_found_reply), so the stale entry must be deleted by its
        # lowercased key as well -- the original deleted by the raw nick
        # and missed mixed-case nicknames.
        del bot.memory['whois'][nick.lower()]
    except KeyError:
        pass
    send_whois(bot, nick)
    return get_whois(bot, nick)
@event('311')
@rule(r'.*')
def whois_found_reply(bot, trigger):
    """
    Listens for successful WHOIS responses (numeric 311) and saves
    them to the bot's memory, keyed by the lowercased nickname.
    """
    check_setup(bot)
    # trigger.args is (target, nick, ident, host, '*', realname).
    nick = trigger.args[1]
    bot.memory['whois'][nick.lower()] = Whois(trigger.args)
@event('319')
@rule(r'.*')
def whois_chan_list(bot, trigger):
    """
    Handles the 319 numeric (WHOIS channel list) and attaches the channel
    list to the cached Whois entry for that nick.
    """
    nick = trigger.args[1]
    # Cache keys are lowercased (see whois_found_reply); the original
    # membership test used the raw nick and so always "missed" mixed-case
    # nicks.  Give the 311 handler a moment to store the entry first.
    if nick.lower() not in bot.memory['whois']:
        sleep(3)
    entry = bot.memory['whois'].get(nick.lower())
    # Skip quietly if the entry never arrived (or is the 401 None marker)
    # instead of raising KeyError/AttributeError in the event thread.
    if entry is not None:
        entry.set_chans(trigger)
@event('401')
@rule(r'.*')
def whois_not_found_reply(bot, trigger):
    """
    Listens for unsuccessful WHOIS responses (numeric 401) and saves
    None to the bot's memory so that the initial
    whois function is aware that the lookup failed.
    """
    check_setup(bot)
    nick = trigger.args[1]
    # get_whois() looks entries up by the lowercased nick, so the failure
    # marker must be stored (and later removed) under the same key -- the
    # original used the raw nick and the marker was never seen.
    bot.memory['whois'][nick.lower()] = None
    print("Encountered 401")
    # Give the initiating whois function time to see
    # that the lookup has failed, then remove the None.
    sleep(5)
    try:
        del bot.memory['whois'][nick.lower()]
    except KeyError:
        pass
@commands('whois')
def display_whois(bot, trigger):
    """PM's you the chans the nick is in."""
    nick = trigger.group().split()[1]
    try:
        w = whois(bot, nick)
        sleep(3)
        bot.msg(trigger.nick,
                '%s is on the following chans: %s' % (w.nick, w.chans))
    # Narrowed from a bare except: a failed lookup surfaces here as an
    # AttributeError (w is None, or the entry never received a channel
    # list); other exceptions should propagate, not be silently eaten.
    except AttributeError:
        bot.msg(trigger.nick,
                '%s could not be found'
                % (nick))
| 4,404 | 1,474 |
from setuptools import setup, find_packages
from cmsplugin_scripts import __version__
# Trove classifiers describing this package on PyPI.
CLASSIFIERS = [
    'Development Status :: 3 - Alpha',
    'Environment :: Web Environment',
    'Framework :: Django',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Topic :: Communications',
    'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Message Boards',
    'Topic :: Internet :: WWW/HTTP :: Site Management',
    'Programming Language :: Python :: 2.7',
]
# Read the long description up front so the file handle is closed promptly;
# the original passed open('README.rst').read() inline, leaking the handle.
with open('README.rst') as readme:
    LONG_DESCRIPTION = readme.read()

setup(
    name='cmsplugin-scripts',
    version=__version__,
    description='Django CMS plugin for script tag injection',
    author='Anton Egorov',
    author_email='anton.egoroff@gmail.com',
    url='https://github.com/satyrius/cmsplugin-scripts',
    license='MIT',
    long_description=LONG_DESCRIPTION,
    classifiers=CLASSIFIERS,
    platforms=['OS Independent'],
    packages=find_packages(),
    include_package_data=True,
    install_requires=[
        'django-cms',
    ],
    tests_require=['tox>=1.8'],
    zip_safe=False,
)
| 1,188 | 374 |
import os
import json
if __name__ == '__main__':
    # Rewrite absolute source paths in the Code Climate coverage report so
    # they become relative to the 'Dtect' project directory.
    with open('coverage/codeclimate.json', 'r') as f:
        summary = json.load(f)

    for entry in summary['source_files']:
        name = entry['name']
        # Only absolute paths need rewriting.  The original referenced
        # 'path' outside this guard, raising NameError when the first
        # entry was not absolute and silently reusing a stale path for
        # later entries otherwise.
        if not name.startswith('/'):
            continue
        fields = name.replace('//', '/').split(os.sep)
        if 'Dtect' not in fields:
            # No project-root component: leave the entry untouched instead
            # of crashing on os.path.join() with no arguments.
            continue
        # Keep 'Dtect' and every component after it.
        entry['name'] = os.path.join(*fields[fields.index('Dtect'):])

    with open('coverage/codeclimate.json', 'w') as f:
        json.dump(summary, f)
| 698 | 218 |