max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
stats/types.py | TravelChain/golos-ql | 5 | 6621251 | <filename>stats/types.py
import graphene
from graphene.relay import Node
from graphene_mongo import MongoengineObjectType
from post.models import CommentModel
from stats.models import DGPModel
class GDB(MongoengineObjectType):
class Meta:
description = 'dynamic_global_properties'
model = DGPModel
interfaces = (Node,)
class BlockChain(graphene.ObjectType):
dynamic_global_properties = graphene.Field(GDB)
def resolve_dynamic_global_properties(self, info):
return DGPModel.objects.first()
class PostStats(graphene.ObjectType):
posts_count = graphene.Int()
total_payout = graphene.Int(category=graphene.String())
def resolve_posts_count(self, info):
return CommentModel.objects(depth=0).count()
def resolve_total_payout(self, info, category=None):
qs = CommentModel.objects(depth=0)
if category:
qs = qs.filter(category=category)
return qs.sum('total_payout_value')
class Stats(graphene.ObjectType):
blockchain = graphene.Field(BlockChain)
posts = graphene.Field(PostStats)
def resolve_posts(self, info):
return PostStats()
def resolve_blockchain(self, info):
return BlockChain()
| <filename>stats/types.py
import graphene
from graphene.relay import Node
from graphene_mongo import MongoengineObjectType
from post.models import CommentModel
from stats.models import DGPModel
class GDB(MongoengineObjectType):
class Meta:
description = 'dynamic_global_properties'
model = DGPModel
interfaces = (Node,)
class BlockChain(graphene.ObjectType):
dynamic_global_properties = graphene.Field(GDB)
def resolve_dynamic_global_properties(self, info):
return DGPModel.objects.first()
class PostStats(graphene.ObjectType):
posts_count = graphene.Int()
total_payout = graphene.Int(category=graphene.String())
def resolve_posts_count(self, info):
return CommentModel.objects(depth=0).count()
def resolve_total_payout(self, info, category=None):
qs = CommentModel.objects(depth=0)
if category:
qs = qs.filter(category=category)
return qs.sum('total_payout_value')
class Stats(graphene.ObjectType):
blockchain = graphene.Field(BlockChain)
posts = graphene.Field(PostStats)
def resolve_posts(self, info):
return PostStats()
def resolve_blockchain(self, info):
return BlockChain()
| none | 1 | 2.335878 | 2 | |
warp_transducer/pytorch_binding/warprnnt_pytorch/__init__.py | qq1418381215/caat | 14 | 6621252 | <reponame>qq1418381215/caat<gh_stars>10-100
import torch
from torch.autograd import Function
from torch.nn import Module
from .warp_rnnt import *
from .rnnt import rnnt_loss,RNNTLoss
from .delay_transducer import delay_transducer_loss, DelayTLoss
__all__ = ['rnnt_loss', 'RNNTLoss','delay_transducer_loss', 'DelayTLoss']
| import torch
from torch.autograd import Function
from torch.nn import Module
from .warp_rnnt import *
from .rnnt import rnnt_loss,RNNTLoss
from .delay_transducer import delay_transducer_loss, DelayTLoss
__all__ = ['rnnt_loss', 'RNNTLoss','delay_transducer_loss', 'DelayTLoss'] | none | 1 | 1.805727 | 2 | |
data_wrangling/createcsv.py | puntofisso/EUTwinnings | 0 | 6621253 | <reponame>puntofisso/EUTwinnings
import urllib.request, json
import time
import csv
from scipy import spatial
from numpy import dot
from numpy.linalg import norm
import pandas as pd
import numpy as np
from sklearn import preprocessing
# creates csv from globaldictionary and list of nuts
def createCSV():
csvfile = open("basicdata.tsv", "w")
global globaldict
thisline=f'code|level|name|nuts0|nuts1|nuts2|pop3|pop2|pop1|pop0|density|fertility|popchange|womenratio|gdppps|gva|medianage\n'
csvfile.write(thisline)
fileHandle = open('nutsrelations.psv', 'r')
for line in fileHandle:
fields = line.split('|')
# RS|REPUBLIKA SRBIJA /РЕПУБЛИКА СРБИЈА|0|RS|NUTS1|NUTS2|NUTS3
code=fields[0]
name=fields[1]
level=fields[2]
nuts0=fields[3]
nuts1=fields[4]
nuts2=fields[5]
if (level == "0"):
pass
try:
# get dictionaries for this area and its ancestors
dictionary0 = globaldict[code]
# data about this nuts
pop3 = ""
pop2 = ""
pop1 = ""
pop0 = dictionary0.get('population2019','N/A')
density = dictionary0.get('density2018_nuts3','N/A')
fertility = dictionary0.get('fertility2018_nuts3', 'N/A')
popchange = dictionary0.get('populationchange2018_nuts3', 'N/A')
womenratio = dictionary0.get('womenper100men2019_nuts3', 'N/A')
gdppps = dictionary0.get('gdpPps2017_nuts3', 'N/A')
gva = dictionary0.get('gva2017basicprices_nuts3', 'N/A')
medianage = dictionary0.get('medianage2019_nuts3', 'N/A')
# data about containing nuts - no container nuts
#thisline = f'{code}|{level}|"{name}"|{nuts0}|{nuts1}|{nuts2}|{pop3}|{pop2}|{pop1}|{pop0}|{density}|{fertility}|{popchange}|{womenratio}|{gdppps}|{gva}|{medianage}\n'
#csvfile.write(thisline)
except Exception:
# DO SOMETHING ABOIUT MISSING DATA
thisline = f'{code}|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR\n'
csvfile.write(thisline)
elif (level == "1"):
pass
try:
# get dictionaries for this area and its ancestors
dictionary0 = globaldict[nuts0]
dictionary1 = globaldict[code]
# data about this nuts
pop3 = ""
pop2 = ""
pop1 = dictionary1.get('population2019_nuts3','N/A')
density = dictionary1.get('density2018_nuts3','N/A')
fertility = dictionary1.get('fertility2018_nuts3', 'N/A')
popchange = dictionary1.get('populationchange2018_nuts3', 'N/A')
womenratio = dictionary1.get('womenper100men2019_nuts3', 'N/A')
gdppps = dictionary1.get('gdpPps2017_nuts3', 'N/A')
gva = dictionary1.get('gva2017basicprices_nuts3', 'N/A')
medianage = dictionary1.get('medianage2019_nuts3', 'N/A')
# data about containing nuts
pop0 = dictionary0.get('population2019','N/A')
#thisline = f'{code}|{level}|"{name}"|{nuts0}|{nuts1}|{nuts2}|{pop3}|{pop2}|{pop1}|{pop0}|{density}|{fertility}|{popchange}|{womenratio}|{gdppps}|{gva}|{medianage}\n'
#csvfile.write(thisline)
except Exception:
# DO SOMETHING ABOIUT MISSING DATA
thisline = f'{code}|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR\n'
csvfile.write(thisline)
elif (level == "2"):
pass
try:
# get dictionaries for this area and its ancestors
dictionary0 = globaldict[nuts0]
dictionary1 = globaldict[nuts1]
dictionary2 = globaldict[code]
# data about this nuts
pop3 = ""
pop2 = dictionary2.get('population2019_nuts3','N/A')
density = dictionary2.get('density2018_nuts3','N/A')
fertility = dictionary2.get('fertility2018_nuts3', 'N/A')
popchange = dictionary2.get('populationchange2018_nuts3', 'N/A')
womenratio = dictionary2.get('womenper100men2019_nuts3', 'N/A')
gdppps = dictionary2.get('gdpPps2017_nuts3', 'N/A')
gva = dictionary2.get('gva2017basicprices_nuts3', 'N/A')
medianage = dictionary2.get('medianage2019_nuts3', 'N/A')
# data about containing nuts
pop1 = dictionary1.get('population2019', 'N/A')
pop0 = dictionary0.get('population2019','N/A')
#thisline = f'{code}|{level}|"{name}"|{nuts0}|{nuts1}|{nuts2}|{pop3}|{pop2}|{pop1}|{pop0}|{density}|{fertility}|{popchange}|{womenratio}|{gdppps}|{gva}|{medianage}\n'
#csvfile.write(thisline)
except Exception:
# DO SOMETHING ABOIUT MISSING DATA
thisline = f'{code}|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR\n'
csvfile.write(thisline)
elif (level=="3"):
try:
# get dictionaries for this area and its ancestors
dictionary3 = globaldict[code]
dictionary0 = globaldict[nuts0]
dictionary1 = globaldict[nuts1]
dictionary2 = globaldict[nuts2]
# data about this nuts
pop3 = dictionary3.get('population2019_nuts3','N/A')
density = dictionary3.get('density2018_nuts3','N/A')
fertility = dictionary3.get('fertility2018_nuts3', 'N/A')
popchange = dictionary3.get('populationchange2018_nuts3', 'N/A')
womenratio = dictionary3.get('womenper100men2019_nuts3', 'N/A')
gdppps = dictionary3.get('gdpPps2017_nuts3', 'N/A')
gva = dictionary3.get('gva2017basicprices_nuts3', 'N/A')
medianage = dictionary3.get('medianage2019_nuts3', 'N/A')
# data about containing nuts
pop2 = dictionary2.get('population2019', 'N/A')
pop1 = dictionary1.get('population2019', 'N/A')
pop0 = dictionary0.get('population2019','N/A')
thisline = f'{code}|{level}|"{name}"|{nuts0}|{nuts1}|{nuts2}|{pop3}|{pop2}|{pop1}|{pop0}|{density}|{fertility}|{popchange}|{womenratio}|{gdppps}|{gva}|{medianage}\n'
csvfile.write(thisline)
except Exception:
# DO SOMETHING ABOIUT MISSING DATA
thisline = f'{code}|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR\n'
print(thisline)
# csvfile.write(thisline)
else:
# Error?
print("Level does not exist")
fileHandle.close()
csvfile.close()
def getOrCalculate(valuename, dictionary, dictionary3, dictionary2, dictionary1, dictionary0, method):
# try and get it from level-3 dictionary
try:
val = dictionary[valuename]
return val
except Exception:
if (method=='globalmean'):
# calculate average from entire column
df[valuename] = pd.to_numeric(df[valuename], errors='coerce')
return df[valuename].mean()
elif (method=='copy'):
pass
else:
print("in else")
pass
def fixData():
# data fixes
df = pd.read_csv('basicdata.tsv', sep='|', header='infer')
# df = df.replace('N/A',np.NaN)
# df = df.replace('NONE',np.NaN)
df['gdppps'] = pd.to_numeric(df['gdppps'], errors='coerce')
df['gdppps'] = df['gdppps'].fillna(df['gdppps'].mean())
df['gva'] = pd.to_numeric(df['gva'], errors='coerce')
df['gva'] = df['gva'].fillna(df['gva'].mean())
df['medianage'] = pd.to_numeric(df['medianage'], errors='coerce')
df['medianage'] = df['medianage'].fillna(df['medianage'].mean())
df['womenratio'] = pd.to_numeric(df['womenratio'], errors='coerce')
df['womenratio'] = df['womenratio'].fillna(df['womenratio'].mean())
# TODO this is wrong - needs fixing in createCSV because population should be an average of the container
df['pop2'] = pd.to_numeric(df['pop2'], errors='coerce')
df['pop2'] = df['pop2'].fillna(df['womenratio'].mean())
df['pop1'] = pd.to_numeric(df['womenratio'], errors='coerce')
df['pop1'] = df['womenratio'].fillna(df['womenratio'].mean())
# DON'T NORMALISE THESE COLUMNS
# code|level|name|nuts0|nuts1|nuts2|
# NORMALISE THESE COLUMNS
# pop3|pop2|pop1|pop0|density|fertility|popchange|womenratio|gdppps|gva|medianage
# Save non-normalised data
df.to_csv('basicdata.tsv', sep='|', index=False)
for columnname in ['pop3','pop2','pop1', 'pop0', 'density', 'fertility', 'popchange', 'womenratio', 'gdppps', 'gva', 'medianage']:
df[columnname] = pd.to_numeric(df[columnname], errors='coerce')
x = df[[columnname]].values.astype(float)
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
df[columnname] = x_scaled
# Save normalised data
df.to_csv('basicdataNORM.tsv', sep='|', index=False)
#x.to_csv('test.csv')
with open("globaldict.json", "r") as read_file: globaldict = json.load(read_file)
createCSV()
fixData()
| import urllib.request, json
import time
import csv
from scipy import spatial
from numpy import dot
from numpy.linalg import norm
import pandas as pd
import numpy as np
from sklearn import preprocessing
# creates csv from globaldictionary and list of nuts
def createCSV():
csvfile = open("basicdata.tsv", "w")
global globaldict
thisline=f'code|level|name|nuts0|nuts1|nuts2|pop3|pop2|pop1|pop0|density|fertility|popchange|womenratio|gdppps|gva|medianage\n'
csvfile.write(thisline)
fileHandle = open('nutsrelations.psv', 'r')
for line in fileHandle:
fields = line.split('|')
# RS|REPUBLIKA SRBIJA /РЕПУБЛИКА СРБИЈА|0|RS|NUTS1|NUTS2|NUTS3
code=fields[0]
name=fields[1]
level=fields[2]
nuts0=fields[3]
nuts1=fields[4]
nuts2=fields[5]
if (level == "0"):
pass
try:
# get dictionaries for this area and its ancestors
dictionary0 = globaldict[code]
# data about this nuts
pop3 = ""
pop2 = ""
pop1 = ""
pop0 = dictionary0.get('population2019','N/A')
density = dictionary0.get('density2018_nuts3','N/A')
fertility = dictionary0.get('fertility2018_nuts3', 'N/A')
popchange = dictionary0.get('populationchange2018_nuts3', 'N/A')
womenratio = dictionary0.get('womenper100men2019_nuts3', 'N/A')
gdppps = dictionary0.get('gdpPps2017_nuts3', 'N/A')
gva = dictionary0.get('gva2017basicprices_nuts3', 'N/A')
medianage = dictionary0.get('medianage2019_nuts3', 'N/A')
# data about containing nuts - no container nuts
#thisline = f'{code}|{level}|"{name}"|{nuts0}|{nuts1}|{nuts2}|{pop3}|{pop2}|{pop1}|{pop0}|{density}|{fertility}|{popchange}|{womenratio}|{gdppps}|{gva}|{medianage}\n'
#csvfile.write(thisline)
except Exception:
# DO SOMETHING ABOIUT MISSING DATA
thisline = f'{code}|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR\n'
csvfile.write(thisline)
elif (level == "1"):
pass
try:
# get dictionaries for this area and its ancestors
dictionary0 = globaldict[nuts0]
dictionary1 = globaldict[code]
# data about this nuts
pop3 = ""
pop2 = ""
pop1 = dictionary1.get('population2019_nuts3','N/A')
density = dictionary1.get('density2018_nuts3','N/A')
fertility = dictionary1.get('fertility2018_nuts3', 'N/A')
popchange = dictionary1.get('populationchange2018_nuts3', 'N/A')
womenratio = dictionary1.get('womenper100men2019_nuts3', 'N/A')
gdppps = dictionary1.get('gdpPps2017_nuts3', 'N/A')
gva = dictionary1.get('gva2017basicprices_nuts3', 'N/A')
medianage = dictionary1.get('medianage2019_nuts3', 'N/A')
# data about containing nuts
pop0 = dictionary0.get('population2019','N/A')
#thisline = f'{code}|{level}|"{name}"|{nuts0}|{nuts1}|{nuts2}|{pop3}|{pop2}|{pop1}|{pop0}|{density}|{fertility}|{popchange}|{womenratio}|{gdppps}|{gva}|{medianage}\n'
#csvfile.write(thisline)
except Exception:
# DO SOMETHING ABOIUT MISSING DATA
thisline = f'{code}|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR\n'
csvfile.write(thisline)
elif (level == "2"):
pass
try:
# get dictionaries for this area and its ancestors
dictionary0 = globaldict[nuts0]
dictionary1 = globaldict[nuts1]
dictionary2 = globaldict[code]
# data about this nuts
pop3 = ""
pop2 = dictionary2.get('population2019_nuts3','N/A')
density = dictionary2.get('density2018_nuts3','N/A')
fertility = dictionary2.get('fertility2018_nuts3', 'N/A')
popchange = dictionary2.get('populationchange2018_nuts3', 'N/A')
womenratio = dictionary2.get('womenper100men2019_nuts3', 'N/A')
gdppps = dictionary2.get('gdpPps2017_nuts3', 'N/A')
gva = dictionary2.get('gva2017basicprices_nuts3', 'N/A')
medianage = dictionary2.get('medianage2019_nuts3', 'N/A')
# data about containing nuts
pop1 = dictionary1.get('population2019', 'N/A')
pop0 = dictionary0.get('population2019','N/A')
#thisline = f'{code}|{level}|"{name}"|{nuts0}|{nuts1}|{nuts2}|{pop3}|{pop2}|{pop1}|{pop0}|{density}|{fertility}|{popchange}|{womenratio}|{gdppps}|{gva}|{medianage}\n'
#csvfile.write(thisline)
except Exception:
# DO SOMETHING ABOIUT MISSING DATA
thisline = f'{code}|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR\n'
csvfile.write(thisline)
elif (level=="3"):
try:
# get dictionaries for this area and its ancestors
dictionary3 = globaldict[code]
dictionary0 = globaldict[nuts0]
dictionary1 = globaldict[nuts1]
dictionary2 = globaldict[nuts2]
# data about this nuts
pop3 = dictionary3.get('population2019_nuts3','N/A')
density = dictionary3.get('density2018_nuts3','N/A')
fertility = dictionary3.get('fertility2018_nuts3', 'N/A')
popchange = dictionary3.get('populationchange2018_nuts3', 'N/A')
womenratio = dictionary3.get('womenper100men2019_nuts3', 'N/A')
gdppps = dictionary3.get('gdpPps2017_nuts3', 'N/A')
gva = dictionary3.get('gva2017basicprices_nuts3', 'N/A')
medianage = dictionary3.get('medianage2019_nuts3', 'N/A')
# data about containing nuts
pop2 = dictionary2.get('population2019', 'N/A')
pop1 = dictionary1.get('population2019', 'N/A')
pop0 = dictionary0.get('population2019','N/A')
thisline = f'{code}|{level}|"{name}"|{nuts0}|{nuts1}|{nuts2}|{pop3}|{pop2}|{pop1}|{pop0}|{density}|{fertility}|{popchange}|{womenratio}|{gdppps}|{gva}|{medianage}\n'
csvfile.write(thisline)
except Exception:
# DO SOMETHING ABOIUT MISSING DATA
thisline = f'{code}|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR|ERROR\n'
print(thisline)
# csvfile.write(thisline)
else:
# Error?
print("Level does not exist")
fileHandle.close()
csvfile.close()
def getOrCalculate(valuename, dictionary, dictionary3, dictionary2, dictionary1, dictionary0, method):
# try and get it from level-3 dictionary
try:
val = dictionary[valuename]
return val
except Exception:
if (method=='globalmean'):
# calculate average from entire column
df[valuename] = pd.to_numeric(df[valuename], errors='coerce')
return df[valuename].mean()
elif (method=='copy'):
pass
else:
print("in else")
pass
def fixData():
# data fixes
df = pd.read_csv('basicdata.tsv', sep='|', header='infer')
# df = df.replace('N/A',np.NaN)
# df = df.replace('NONE',np.NaN)
df['gdppps'] = pd.to_numeric(df['gdppps'], errors='coerce')
df['gdppps'] = df['gdppps'].fillna(df['gdppps'].mean())
df['gva'] = pd.to_numeric(df['gva'], errors='coerce')
df['gva'] = df['gva'].fillna(df['gva'].mean())
df['medianage'] = pd.to_numeric(df['medianage'], errors='coerce')
df['medianage'] = df['medianage'].fillna(df['medianage'].mean())
df['womenratio'] = pd.to_numeric(df['womenratio'], errors='coerce')
df['womenratio'] = df['womenratio'].fillna(df['womenratio'].mean())
# TODO this is wrong - needs fixing in createCSV because population should be an average of the container
df['pop2'] = pd.to_numeric(df['pop2'], errors='coerce')
df['pop2'] = df['pop2'].fillna(df['womenratio'].mean())
df['pop1'] = pd.to_numeric(df['womenratio'], errors='coerce')
df['pop1'] = df['womenratio'].fillna(df['womenratio'].mean())
# DON'T NORMALISE THESE COLUMNS
# code|level|name|nuts0|nuts1|nuts2|
# NORMALISE THESE COLUMNS
# pop3|pop2|pop1|pop0|density|fertility|popchange|womenratio|gdppps|gva|medianage
# Save non-normalised data
df.to_csv('basicdata.tsv', sep='|', index=False)
for columnname in ['pop3','pop2','pop1', 'pop0', 'density', 'fertility', 'popchange', 'womenratio', 'gdppps', 'gva', 'medianage']:
df[columnname] = pd.to_numeric(df[columnname], errors='coerce')
x = df[[columnname]].values.astype(float)
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
df[columnname] = x_scaled
# Save normalised data
df.to_csv('basicdataNORM.tsv', sep='|', index=False)
#x.to_csv('test.csv')
with open("globaldict.json", "r") as read_file: globaldict = json.load(read_file)
createCSV()
fixData() | en | 0.565689 | # creates csv from globaldictionary and list of nuts # RS|REPUBLIKA SRBIJA /РЕПУБЛИКА СРБИЈА|0|RS|NUTS1|NUTS2|NUTS3 # get dictionaries for this area and its ancestors # data about this nuts # data about containing nuts - no container nuts #thisline = f'{code}|{level}|"{name}"|{nuts0}|{nuts1}|{nuts2}|{pop3}|{pop2}|{pop1}|{pop0}|{density}|{fertility}|{popchange}|{womenratio}|{gdppps}|{gva}|{medianage}\n' #csvfile.write(thisline) # DO SOMETHING ABOIUT MISSING DATA # get dictionaries for this area and its ancestors # data about this nuts # data about containing nuts #thisline = f'{code}|{level}|"{name}"|{nuts0}|{nuts1}|{nuts2}|{pop3}|{pop2}|{pop1}|{pop0}|{density}|{fertility}|{popchange}|{womenratio}|{gdppps}|{gva}|{medianage}\n' #csvfile.write(thisline) # DO SOMETHING ABOIUT MISSING DATA # get dictionaries for this area and its ancestors # data about this nuts # data about containing nuts #thisline = f'{code}|{level}|"{name}"|{nuts0}|{nuts1}|{nuts2}|{pop3}|{pop2}|{pop1}|{pop0}|{density}|{fertility}|{popchange}|{womenratio}|{gdppps}|{gva}|{medianage}\n' #csvfile.write(thisline) # DO SOMETHING ABOIUT MISSING DATA # get dictionaries for this area and its ancestors # data about this nuts # data about containing nuts # DO SOMETHING ABOIUT MISSING DATA # csvfile.write(thisline) # Error? # try and get it from level-3 dictionary # calculate average from entire column # data fixes # df = df.replace('N/A',np.NaN) # df = df.replace('NONE',np.NaN) # TODO this is wrong - needs fixing in createCSV because population should be an average of the container # DON'T NORMALISE THESE COLUMNS # code|level|name|nuts0|nuts1|nuts2| # NORMALISE THESE COLUMNS # pop3|pop2|pop1|pop0|density|fertility|popchange|womenratio|gdppps|gva|medianage # Save non-normalised data # Save normalised data #x.to_csv('test.csv') | 2.892831 | 3 |
brownbags/management/commands/load_csv.py | openkawasaki/brownbag-django | 2 | 6621254 | <reponame>openkawasaki/brownbag-django
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import time
import environ
#import config.settings.local as settings
import traceback
import logging
logger = logging.getLogger('init')
import pandas as pd
import numpy as np
import math
import copy
#-------------------------------------------
def filesave(filename, data_list):
try:
basedir = os.path.dirname(os.path.abspath(__file__))
outdir = os.path.join(basedir, "csvdata")
os.makedirs(outdir, exist_ok=True)
outputname = os.path.join(outdir, filename)
utils.write_dict_csv(outputname, data_list)
except Exception as e:
logger.error('filesave() error = {}'.format(e))
traceback.print_exc()
raise Exception(e)
#-------------------------------------------
def main(filename):
try:
pass
except:
logger.error("error end")
#-------------------------------------------
# TestCase
#-------------------------------------------
from django.core.management import call_command
from django.test import TestCase
# https://django-testing-docs.readthedocs.io/en/latest/basic_unittests.html
class CommandsTestCase(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_mycommand(self):
" Test my custom command."
args = []
opts = {}
call_command('init_facility', *args, **opts)
#-------------------------------------------
# Command
#-------------------------------------------
from django.core.management.base import BaseCommand, CommandError
#-------------------------------------------
class Command(BaseCommand):
"""
コマンド:init_facilities
"""
#args = '<param_1 param_2 ...>'
help = 'HELP'
# コマンドライン引数を指定
#def add_arguments(self, parser):
# parser.add_argument('hoge', nargs='+', type=int)
def handle(self, *args, **options):
try:
hoges = options['hoge']
for hoge in hoges:
main(hoge)
except:
logger.error("error end")
"""
#-------------------------------------------
if __name__ == "__main__":
argv = sys.argv # コマンドライン引数を格納したリストの取得
argc = len(argv) # 引数の個数
""" | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import time
import environ
#import config.settings.local as settings
import traceback
import logging
logger = logging.getLogger('init')
import pandas as pd
import numpy as np
import math
import copy
#-------------------------------------------
def filesave(filename, data_list):
try:
basedir = os.path.dirname(os.path.abspath(__file__))
outdir = os.path.join(basedir, "csvdata")
os.makedirs(outdir, exist_ok=True)
outputname = os.path.join(outdir, filename)
utils.write_dict_csv(outputname, data_list)
except Exception as e:
logger.error('filesave() error = {}'.format(e))
traceback.print_exc()
raise Exception(e)
#-------------------------------------------
def main(filename):
try:
pass
except:
logger.error("error end")
#-------------------------------------------
# TestCase
#-------------------------------------------
from django.core.management import call_command
from django.test import TestCase
# https://django-testing-docs.readthedocs.io/en/latest/basic_unittests.html
class CommandsTestCase(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_mycommand(self):
" Test my custom command."
args = []
opts = {}
call_command('init_facility', *args, **opts)
#-------------------------------------------
# Command
#-------------------------------------------
from django.core.management.base import BaseCommand, CommandError
#-------------------------------------------
class Command(BaseCommand):
"""
コマンド:init_facilities
"""
#args = '<param_1 param_2 ...>'
help = 'HELP'
# コマンドライン引数を指定
#def add_arguments(self, parser):
# parser.add_argument('hoge', nargs='+', type=int)
def handle(self, *args, **options):
try:
hoges = options['hoge']
for hoge in hoges:
main(hoge)
except:
logger.error("error end")
"""
#-------------------------------------------
if __name__ == "__main__":
argv = sys.argv # コマンドライン引数を格納したリストの取得
argc = len(argv) # 引数の個数
""" | ja | 0.158192 | #!/usr/bin/env python # -*- coding: utf-8 -*- #import config.settings.local as settings #------------------------------------------- #------------------------------------------- #------------------------------------------- # TestCase #------------------------------------------- # https://django-testing-docs.readthedocs.io/en/latest/basic_unittests.html #------------------------------------------- # Command #------------------------------------------- #------------------------------------------- コマンド:init_facilities #args = '<param_1 param_2 ...>' # コマンドライン引数を指定 #def add_arguments(self, parser): # parser.add_argument('hoge', nargs='+', type=int) #------------------------------------------- if __name__ == "__main__": argv = sys.argv # コマンドライン引数を格納したリストの取得 argc = len(argv) # 引数の個数 | 2.222958 | 2 |
nipo/tests/populate.py | Ahmad4z/nipo | 0 | 6621255 | #This file will eventually be removed. It is here to test functionality that interfaces with objects that havent been created or functionality that requires a populated DB (for this we use nipo_test which is a mirror of nipo but whose contents are used for testing purposes)
#Documentation for creating an instance of the mapped classes is very well done at https://docs.sqlalchemy.org/en/latest/orm/tutorial.html#create-an-instance-of-the-mapped-class
from nipo import test_session, get_logger
from nipo.db.schema import Module, Student, Course, Venue, User, PrivilegeLevel
from nipo.attendance import ModuleAttendance, get_student_attendance
from datetime import datetime
import random
logger = get_logger("nipo_populate")
session = test_session
session_engine = session.get_bind()
conn_details = session_engine.url
sd = [datetime(2029,4,30,10,30)]
sd.append(datetime(2029,5,7,10,30))
sd.append(datetime(2029,5,8,10,30))
sd.append(datetime(2029,5,9,10,30))
sd.append(datetime(2029,5,10,10,30))
sd.append(datetime(2029,5,11,10,30))
def populate_testdb():
'''Populate the test db with test info'''
logger.info("Populating DB >>{}<< with dummy data for integration testing".format(conn_details))
venue1 = Venue(code = "H7009", name = "Hall 7 Rm 9", capacity = 20)
venue2 = Venue(code = "EMB101", name = "Eng MAin Building 101" , capacity = 30)
venue3 = Venue(code = "4A", name = "Form 4A", capacity = 60)
venue4 = Venue(code = "SHAC", name = "<NAME>-Cziffra" , capacity = 35 )
venue5 = Venue(code = "PLMM", name = "<NAME>" , capacity = 40)
venues = [venue1, venue2, venue3, venue4, venue5]
course1 = Course(uid = "TIE18", name = "Telecommunications and Information Engineering 2018")
course2 = Course(uid = "EMT18", name = "Mechatronic Engineering 2018")
course3 = Course(uid = "EPE17", name = "Electrical Power Systems Engineering 2018")
course4 = Course(uid = "CSS17", name = "Computer Science 2018")
courses = [course1, course2, course3, course4]
student1 = Student(name = "<NAME>", course_uid ="TIE18" )
student2 = Student(name = "<NAME>", course_uid ="TIE18" )
student3 = Student(name = "<NAME>", course_uid ="TIE18" )
student4 = Student(name = "<NAME>", course_uid ="TIE18" )
student5 = Student(name = "<NAME>", course_uid ="TIE18" )
student6 = Student(name = "<NAME>", course_uid ="TIE18" )
student7 = Student(name = "<NAME>", course_uid ="TIE18" )
student8 = Student(name = "<NAME>", course_uid ="TIE18" )
student9 = Student(name = "<NAME>", course_uid ="TIE18" )
student10 = Student(name = "<NAME>", course_uid ="TIE18" )
students = [student1, student2, student3, student4, student5, student6, \
student7, student8, student9, student10]
module1 = Module(code = "ETI001", name = "Telecommunications", venue_code ="H7009" , course_code = "TIE18",attendance = None)
module2 = Module(code = "ETI002", name = "Information systems", venue_code = "EMB101", course_code = "TIE18",attendance = None)
module3 = Module(code = "ETI003", name = "Making phonecalls", venue_code ="4A" , course_code = "TIE18",attendance = None)
module4 = Module(code = "ETI004", name = "Receiving phonecalls", venue_code = "SHAC", course_code = "TIE18",attendance = None)
module5 = Module(code = "ETI005", name = "Writing phone reviews", venue_code = "PLMM", course_code = "TIE18",attendance = None)
modules = [module1, module2, module3, module4, module5]
admin_user = User(username = "mcflyhalf", email = '<EMAIL>', name= '<NAME>', password_hash='<PASSWORD>', authenticated= False, active = True, privilege=PrivilegeLevel.admin.name)
admin_user.set_password('<PASSWORD>')
logger.debug("Created most dummy data for DB >>{}<< for integration testing. Attempting to persist the data...".format(conn_details)) #TODO:conn_details gives too much info. reduce to only give dbname
for venue in venues:
session.add(venue)
for course in courses:
session.add(course)
for student in students:
session.add(student)
for module in modules:
session.add(module)
session.add(admin_user)
session.commit()
logger.info("Persisted most dummy data for DB >>{}<< for integration testing. ".format(conn_details))
#------------------STUDENT USER CREATION------------------#
logger.debug("Creating student user dummy data for DB >>{}<< for integration testing.".format(conn_details))
students = session.query(Student).limit(20).all()
users = []
for student in students:
stud_fname = student.name.split()[0].lower()
stud_lname = student.name.split()[1].lower()
stud_username = stud_fname[0]+stud_lname
stud_email = stud_username + '@n<EMAIL>'
stud_privilege = PrivilegeLevel.student.name
stud_user = User(username= stud_username,\
email= stud_email,\
name= student.name,\
privilege= stud_privilege,
active= True,\
authenticated= False,\
student_id= student.id)
stud_user.set_password('<PASSWORD>')
session.add(stud_user)
logger.debug("Created user dummy data for DB >>{}<< for integration testing. Attempting to persist the data...".format(conn_details))
session.commit()
logger.info("Persisted all dummy data for DB >>{}<< for integration testing. ".format(conn_details))
#------------------STAFF USER CREATION------------------#
logger.debug("Creating staff users for DB >>{}<< for integration testing.".format(conn_details))
staff_users = []
staff_user = User(username = "stafflyhalf", email = '<EMAIL>', name= 'Staff Flyhalf', password_hash='<PASSWORD>', authenticated= False, active = True, privilege=PrivilegeLevel.staff.name)
staff_user.set_password('<PASSWORD>')
staff_users.append(staff_user)
staff_user = User(username = "starflyhalf", email = '<EMAIL>', name= 'Star Flyhalf', password_hash='<PASSWORD>', authenticated= False, active = True, privilege=PrivilegeLevel.staff.name)
staff_user.set_password('<PASSWORD>')
staff_users.append(staff_user)
for user in staff_users:
session.add(user)
session.commit()
logger.debug("Created staff users dummy data for DB >>{}<< for integration testing. Attempting to persist the data...".format(conn_details))
session.commit()
logger.info("Persisted all dummy data for DB >>{}<< for integration testing. ".format(conn_details))
module_code = "ETI001"
student_id = 6
logger.info("Creating dummy attendance record for module >>{}<<".format(module_code))
mod = ModuleAttendance(module_code,session)
logger.debug("On creation, attendance record for {} is \n {}".format(module_code,mod.getAttendance()))
for d in sd:
mod.createClassSession(d)
logger.debug("On creation of class session, attendance record for {} is \n {}".format(module_code ,mod.getAttendance()))
for d in sd:
for studID in range(len(students)):
mod.updateAttendance(studID+1, d, present=random.choice([True,True,False,True,True,True])) #Skew attendance towards presence
logger.debug("After marking some students present, attendance record is \n {}".format(mod.getAttendance()))
att = get_student_attendance(4,mod.getAttendance())
logger.debug("The attendance for 1 Student is :\n {}".format(att))
logger.info("Created dummy attendance record for module >>{}<<".format(module_code)) | #This file will eventually be removed. It is here to test functionality that interfaces with objects that havent been created or functionality that requires a populated DB (for this we use nipo_test which is a mirror of nipo but whose contents are used for testing purposes)
#Documentation for creating an instance of the mapped classes is very well done at https://docs.sqlalchemy.org/en/latest/orm/tutorial.html#create-an-instance-of-the-mapped-class
from nipo import test_session, get_logger
from nipo.db.schema import Module, Student, Course, Venue, User, PrivilegeLevel
from nipo.attendance import ModuleAttendance, get_student_attendance
from datetime import datetime
import random
logger = get_logger("nipo_populate")
session = test_session
session_engine = session.get_bind()
conn_details = session_engine.url
sd = [datetime(2029,4,30,10,30)]
sd.append(datetime(2029,5,7,10,30))
sd.append(datetime(2029,5,8,10,30))
sd.append(datetime(2029,5,9,10,30))
sd.append(datetime(2029,5,10,10,30))
sd.append(datetime(2029,5,11,10,30))
def populate_testdb():
'''Populate the test db with test info'''
logger.info("Populating DB >>{}<< with dummy data for integration testing".format(conn_details))
venue1 = Venue(code = "H7009", name = "Hall 7 Rm 9", capacity = 20)
venue2 = Venue(code = "EMB101", name = "Eng MAin Building 101" , capacity = 30)
venue3 = Venue(code = "4A", name = "Form 4A", capacity = 60)
venue4 = Venue(code = "SHAC", name = "<NAME>-Cziffra" , capacity = 35 )
venue5 = Venue(code = "PLMM", name = "<NAME>" , capacity = 40)
venues = [venue1, venue2, venue3, venue4, venue5]
course1 = Course(uid = "TIE18", name = "Telecommunications and Information Engineering 2018")
course2 = Course(uid = "EMT18", name = "Mechatronic Engineering 2018")
course3 = Course(uid = "EPE17", name = "Electrical Power Systems Engineering 2018")
course4 = Course(uid = "CSS17", name = "Computer Science 2018")
courses = [course1, course2, course3, course4]
student1 = Student(name = "<NAME>", course_uid ="TIE18" )
student2 = Student(name = "<NAME>", course_uid ="TIE18" )
student3 = Student(name = "<NAME>", course_uid ="TIE18" )
student4 = Student(name = "<NAME>", course_uid ="TIE18" )
student5 = Student(name = "<NAME>", course_uid ="TIE18" )
student6 = Student(name = "<NAME>", course_uid ="TIE18" )
student7 = Student(name = "<NAME>", course_uid ="TIE18" )
student8 = Student(name = "<NAME>", course_uid ="TIE18" )
student9 = Student(name = "<NAME>", course_uid ="TIE18" )
student10 = Student(name = "<NAME>", course_uid ="TIE18" )
students = [student1, student2, student3, student4, student5, student6, \
student7, student8, student9, student10]
module1 = Module(code = "ETI001", name = "Telecommunications", venue_code ="H7009" , course_code = "TIE18",attendance = None)
module2 = Module(code = "ETI002", name = "Information systems", venue_code = "EMB101", course_code = "TIE18",attendance = None)
module3 = Module(code = "ETI003", name = "Making phonecalls", venue_code ="4A" , course_code = "TIE18",attendance = None)
module4 = Module(code = "ETI004", name = "Receiving phonecalls", venue_code = "SHAC", course_code = "TIE18",attendance = None)
module5 = Module(code = "ETI005", name = "Writing phone reviews", venue_code = "PLMM", course_code = "TIE18",attendance = None)
modules = [module1, module2, module3, module4, module5]
admin_user = User(username = "mcflyhalf", email = '<EMAIL>', name= '<NAME>', password_hash='<PASSWORD>', authenticated= False, active = True, privilege=PrivilegeLevel.admin.name)
admin_user.set_password('<PASSWORD>')
logger.debug("Created most dummy data for DB >>{}<< for integration testing. Attempting to persist the data...".format(conn_details)) #TODO:conn_details gives too much info. reduce to only give dbname
for venue in venues:
session.add(venue)
for course in courses:
session.add(course)
for student in students:
session.add(student)
for module in modules:
session.add(module)
session.add(admin_user)
session.commit()
logger.info("Persisted most dummy data for DB >>{}<< for integration testing. ".format(conn_details))
#------------------STUDENT USER CREATION------------------#
logger.debug("Creating student user dummy data for DB >>{}<< for integration testing.".format(conn_details))
students = session.query(Student).limit(20).all()
users = []
for student in students:
stud_fname = student.name.split()[0].lower()
stud_lname = student.name.split()[1].lower()
stud_username = stud_fname[0]+stud_lname
stud_email = stud_username + '@n<EMAIL>'
stud_privilege = PrivilegeLevel.student.name
stud_user = User(username= stud_username,\
email= stud_email,\
name= student.name,\
privilege= stud_privilege,
active= True,\
authenticated= False,\
student_id= student.id)
stud_user.set_password('<PASSWORD>')
session.add(stud_user)
logger.debug("Created user dummy data for DB >>{}<< for integration testing. Attempting to persist the data...".format(conn_details))
session.commit()
logger.info("Persisted all dummy data for DB >>{}<< for integration testing. ".format(conn_details))
#------------------STAFF USER CREATION------------------#
logger.debug("Creating staff users for DB >>{}<< for integration testing.".format(conn_details))
staff_users = []
staff_user = User(username = "stafflyhalf", email = '<EMAIL>', name= 'Staff Flyhalf', password_hash='<PASSWORD>', authenticated= False, active = True, privilege=PrivilegeLevel.staff.name)
staff_user.set_password('<PASSWORD>')
staff_users.append(staff_user)
staff_user = User(username = "starflyhalf", email = '<EMAIL>', name= 'Star Flyhalf', password_hash='<PASSWORD>', authenticated= False, active = True, privilege=PrivilegeLevel.staff.name)
staff_user.set_password('<PASSWORD>')
staff_users.append(staff_user)
for user in staff_users:
session.add(user)
session.commit()
logger.debug("Created staff users dummy data for DB >>{}<< for integration testing. Attempting to persist the data...".format(conn_details))
session.commit()
logger.info("Persisted all dummy data for DB >>{}<< for integration testing. ".format(conn_details))
module_code = "ETI001"
student_id = 6
logger.info("Creating dummy attendance record for module >>{}<<".format(module_code))
mod = ModuleAttendance(module_code,session)
logger.debug("On creation, attendance record for {} is \n {}".format(module_code,mod.getAttendance()))
for d in sd:
mod.createClassSession(d)
logger.debug("On creation of class session, attendance record for {} is \n {}".format(module_code ,mod.getAttendance()))
for d in sd:
for studID in range(len(students)):
mod.updateAttendance(studID+1, d, present=random.choice([True,True,False,True,True,True])) #Skew attendance towards presence
logger.debug("After marking some students present, attendance record is \n {}".format(mod.getAttendance()))
att = get_student_attendance(4,mod.getAttendance())
logger.debug("The attendance for 1 Student is :\n {}".format(att))
logger.info("Created dummy attendance record for module >>{}<<".format(module_code)) | en | 0.82654 | #This file will eventually be removed. It is here to test functionality that interfaces with objects that havent been created or functionality that requires a populated DB (for this we use nipo_test which is a mirror of nipo but whose contents are used for testing purposes) #Documentation for creating an instance of the mapped classes is very well done at https://docs.sqlalchemy.org/en/latest/orm/tutorial.html#create-an-instance-of-the-mapped-class Populate the test db with test info #TODO:conn_details gives too much info. reduce to only give dbname #------------------STUDENT USER CREATION------------------# #------------------STAFF USER CREATION------------------# #Skew attendance towards presence | 2.463476 | 2 |
HOUDINI/Library/Utils/NNUtils.py | CTPLab/AutoCI | 5 | 6621256 | <gh_stars>1-10
import json
import torch
import torch.nn as nn
from typing import Dict, Tuple, List
from HOUDINI.Library.NN import NetMLP, NetDO, NetCNN
def get_nn_from_params_dict(uf: Dict) -> Tuple[nn.Module, List]:
    """Build the neural network that realises an unknown function (uf).

    Args:
        uf: parameter dictionary describing the candidate network; its
            'type' key selects the architecture ('MLP', 'DO' or 'CONV').

    Returns:
        A ``(module, parameters)`` pair, where ``parameters`` is the list
        of trainable tensors of the freshly built module.

    Raises:
        NotImplementedError: if ``uf['type']`` names an unknown architecture.
    """
    kind = uf['type']
    if kind == 'MLP':
        net = NetMLP(uf['name'], uf['input_dim'], uf['output_dim'], uf['dt_name'])
    elif kind == 'DO':
        net = NetDO(uf['name'], uf['input_dim'], uf['dt_name'])
    elif kind == 'CONV':
        net = NetCNN(uf['name'], uf['input_dim'], uf['output_dim'])
    else:
        raise NotImplementedError()
    # Optionally warm-start from previously saved weights.
    if uf.get('initialize_from') is not None:
        net.load(uf['initialize_from'])
    if torch.cuda.is_available():
        net.cuda()
    net.params_dict = uf
    return net, list(net.parameters())
def create_and_load(directory: str,
                    name: str,
                    new_name: str = None) -> nn.Module:
    """Rebuild a trained neural network for an unknown function from disk.

    Args:
        directory: folder holding ``<name>.json`` (hyper-parameters) and
            ``<name>.pth`` (weights).
        name: name the function was saved under.
        new_name: optional new name for the restored function; defaults
            to ``name``.

    Returns:
        The restored module, switched to eval mode.

    Raises:
        NotImplementedError: if the stored output activation is unsupported.
    """
    target_name = name if new_name is None else new_name
    with open('{}/{}.json'.format(directory, name)) as fp:
        params = json.load(fp)
    params['name'] = target_name
    # The JSON file stores the activation as a string; map it back to the
    # actual callable / module expected by the network constructors.
    activation = params['output_activation']
    if activation == 'None':
        params['output_activation'] = None
    elif activation == 'sigmoid':
        params['output_activation'] = torch.sigmoid
    elif activation == 'softmax':
        params['output_activation'] = nn.Softmax(dim=1)
    else:
        raise NotImplementedError()
    net, _ = get_nn_from_params_dict(params)
    net.load('{}/{}.pth'.format(directory, name))
    net.eval()
    return net
| import json
import torch
import torch.nn as nn
from typing import Dict, Tuple, List
from HOUDINI.Library.NN import NetMLP, NetDO, NetCNN
def get_nn_from_params_dict(uf: Dict) -> Tuple[nn.Module, List]:
"""Instantiate the unkown function (uf) required
by the high-order functions with a neural network
Args:
uf: the dict of unknown function storing
the parameters for the nn candidate
"""
new_nn = None
if uf['type'] == 'MLP':
new_nn = NetMLP(uf['name'],
uf['input_dim'],
uf['output_dim'],
uf['dt_name'])
elif uf['type'] == 'DO':
new_nn = NetDO(uf['name'],
uf['input_dim'],
uf['dt_name'])
elif uf['type'] == 'CONV':
new_nn = NetCNN(uf['name'],
uf['input_dim'],
uf['output_dim'],)
else:
raise NotImplementedError()
if 'initialize_from' in uf and uf['initialize_from'] is not None:
new_nn.load(uf['initialize_from'])
if torch.cuda.is_available():
new_nn.cuda()
new_nn.params_dict = uf
c_trainable_parameters = list(new_nn.parameters())
return new_nn, c_trainable_parameters
def create_and_load(directory: str,
name: str,
new_name: str = None) -> nn.Module:
"""Instantiate an unkown function (uf) required
by the high-order functions with a trained neural network
Args:
directory: directory to the saved weights of an NN
name: name of the unknown function
new_name: the new name of the unknown function
"""
if new_name is None:
new_name = name
with open('{}/{}.json'.format(directory, name)) as json_data:
params_dict = json.load(json_data)
params_dict['name'] = new_name
if params_dict['output_activation'] == 'None':
params_dict['output_activation'] = None
elif params_dict['output_activation'] == 'sigmoid':
params_dict['output_activation'] = torch.sigmoid
elif params_dict['output_activation'] == 'softmax':
params_dict['output_activation'] = nn.Softmax(dim=1)
else:
raise NotImplementedError()
new_fn, _ = get_nn_from_params_dict(params_dict)
new_fn.load('{}/{}.pth'.format(directory, name))
new_fn.eval()
return new_fn | en | 0.714178 | Instantiate the unkown function (uf) required by the high-order functions with a neural network Args: uf: the dict of unknown function storing the parameters for the nn candidate Instantiate an unkown function (uf) required by the high-order functions with a trained neural network Args: directory: directory to the saved weights of an NN name: name of the unknown function new_name: the new name of the unknown function | 2.473825 | 2 |
catkin_ws/src/machine_learning/src/fotos.py | EnzoBassano/Software | 0 | 6621257 | #!/usr/bin/env python
import rospy #importar ros para python
from sensor_msgs.msg import Image
import cv2 as cv
from cv_bridge import CvBridge
from std_msgs.msg import String, Int32 # importar mensajes de ROS tipo String y tipo Int32
from geometry_msgs.msg import Twist # importar mensajes de ROS tipo geometry / Twist
class Template(object):
    """ROS node object: subscribes to the Duckiebot's rectified camera
    topic and saves every 20th received frame to disk as a JPEG."""

    def __init__(self, args):
        super(Template, self).__init__()
        self.args = args
        self.contador = 0  # frame counter: keep 1 frame out of every 20
        self.bridge = CvBridge()
        # Subscribe LAST: rospy may deliver messages as soon as the
        # Subscriber exists, so everything the callback touches (bridge,
        # counter) must already be initialised.  The original registered
        # the subscriber before creating the bridge.
        self.subscriber = rospy.Subscriber(
            "/duckiebot/camera_node/image/rect", Image, self.callback)

    def callback(self, msg):
        """Convert the incoming ROS image and store every 20th frame
        under /home/duckiebot/patos/, named by the current ROS time."""
        image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
        filename = str(rospy.get_time()) + ".jpg"
        if self.contador % 20 == 0:
            cv.imwrite("/home/duckiebot/patos/" + filename, image)
        self.contador += 1
def main():
    """Initialise the ROS node, build the subscriber object and spin."""
    rospy.init_node('test')   # create and register the node
    node = Template('args')   # subscription starts inside the constructor
    rospy.spin()              # block so subscriber callbacks keep firing
main()
| #!/usr/bin/env python
import rospy #importar ros para python
from sensor_msgs.msg import Image
import cv2 as cv
from cv_bridge import CvBridge
from std_msgs.msg import String, Int32 # importar mensajes de ROS tipo String y tipo Int32
from geometry_msgs.msg import Twist # importar mensajes de ROS tipo geometry / Twist
class Template(object):
def __init__(self, args):
self.contador=0
super(Template, self).__init__()
self.args = args
self.subscriber = rospy.Subscriber("/duckiebot/camera_node/image/rect",Image,self.callback)
self.bridge = CvBridge()
def callback(self,msg):
image = self.bridge.imgmsg_to_cv2(msg,"bgr8")
filename = str(rospy.get_time()) + ".jpg"
if (self.contador%20==0):
cv.imwrite("/home/duckiebot/patos/"+filename,image)
self.contador+=1
#def publicar(self):
#def callback(self,msg):
def main():
rospy.init_node('test') #creacion y registro del nodo!
obj = Template('args') # Crea un objeto del tipo Template, cuya definicion se encuentra arriba
#objeto.publicar() #llama al metodo publicar del objeto obj de tipo Template
rospy.spin() #funcion de ROS que evita que el programa termine - se debe usar en Subscribers
if __name__ =='__main__':
main()
| es | 0.7933 | #!/usr/bin/env python #importar ros para python # importar mensajes de ROS tipo String y tipo Int32 # importar mensajes de ROS tipo geometry / Twist #def publicar(self): #def callback(self,msg): #creacion y registro del nodo! # Crea un objeto del tipo Template, cuya definicion se encuentra arriba #objeto.publicar() #llama al metodo publicar del objeto obj de tipo Template #funcion de ROS que evita que el programa termine - se debe usar en Subscribers | 2.657505 | 3 |
curso_hector/17-funcionalidades-avanzadas/funcion_map.py | corahama/python | 1 | 6621258 | <gh_stars>1-10
a = [1,2,3,4,5]
b = [6,7,8,9,10]
c = [11,12,13,14,15]
# print(list(map(lambda x,y,z: x*y*z, a,b,c)))
class Persona:
    """Simple value object holding a person's name and age."""

    def __init__(self, nombre, edad):
        self.nombre = nombre
        self.edad = edad

    def __str__(self):
        return "{} de {} años.".format(self.nombre, self.edad)

    # Printing a *list* of Persona uses __repr__, not __str__, so the
    # sample's print(list(map(...))) showed default object reprs.  Alias
    # repr to the same readable form (backward compatible).
    __repr__ = __str__
personas = (
Persona("Fernando", 22),
Persona("Laura", 21),
Persona("Axel", 17),
Persona("Angel", 14)
)
print(list(map(lambda persona:Persona(persona.nombre,persona.edad+1), personas)))
| a = [1,2,3,4,5]
b = [6,7,8,9,10]
c = [11,12,13,14,15]
# print(list(map(lambda x,y,z: x*y*z, a,b,c)))
class Persona:
def __init__(self, nombre, edad):
self.nombre = nombre
self.edad = edad
def __str__(self):
return "{} de {} años.".format(self.nombre, self.edad)
personas = (
Persona("Fernando", 22),
Persona("Laura", 21),
Persona("Axel", 17),
Persona("Angel", 14)
)
print(list(map(lambda persona:Persona(persona.nombre,persona.edad+1), personas))) | en | 0.07262 | # print(list(map(lambda x,y,z: x*y*z, a,b,c))) | 3.601932 | 4 |
1046.py | OmangRawat/Leetcode | 0 | 6621259 | <gh_stars>0
"""
---> Last Stone Weight
---> Easy
"""
import bisect
import heapq
class Solution:
    """LeetCode 1046 - Last Stone Weight.

    Repeatedly smash the two heaviest stones; a smash of weights x <= y
    leaves a stone of weight y - x (possibly 0).  Return the weight of the
    last remaining stone, or 0 if none survive.

    Fixes over the original: debug print() calls removed, empty input no
    longer raises IndexError, and sol2 no longer mutates the caller's list.
    """

    def lastStoneWeight(self, stones) -> int:
        """Max-heap approach via a min-heap of negated weights, O(n log n)."""
        heap = [-weight for weight in stones]
        heapq.heapify(heap)
        while len(heap) > 1 and heap[0] != 0:
            # Pop the two most-negative entries (the two largest stones);
            # their difference is pushed back, still negated.
            heapq.heappush(heap, heapq.heappop(heap) - heapq.heappop(heap))
        return -heap[0] if heap else 0

    def lastStoneWeight_sol2(self, stones) -> int:
        """Sorted-list approach using bisect.insort to keep order after
        each smash.  Works on a sorted copy so the input is untouched."""
        remaining = sorted(stones)
        while len(remaining) > 1:
            # pop() twice yields largest then second largest.
            bisect.insort(remaining, remaining.pop() - remaining.pop())
        return remaining[0] if remaining else 0
return stones[0]
in_stones = [2, 7, 4, 1, 8, 1]
a = Solution()
print(a.lastStoneWeight(in_stones))
print(a.lastStoneWeight_sol2(in_stones))
"""
Approach 1:
Make a min heap of negative o weights i.e. somehow same max heap, get the 2 top elements check for diff and append it
back if their is something remaining till len smaller than 1 and heap[0] != 0
Approach 2:
Use bisect.insort after sorting the array, take top 2 elements subtract and add it back, it will add the new element
properly so that the resultant array still remains sorted
Reference - https://leetcode.com/problems/last-stone-weight/discuss/294956/JavaC%2B%2BPython-Priority-Queue
"""
| """
---> Last Stone Weight
---> Easy
"""
import bisect
import heapq
class Solution:
def lastStoneWeight(self, stones) -> int:
heap = [-x for x in stones]
heapq.heapify(heap)
while len(heap) > 1 and heap[0] != 0:
heapq.heappush(heap, heapq.heappop(heap) - heapq.heappop(heap))
print(heap)
return -heap[0]
def lastStoneWeight_sol2(self, stones) -> int:
stones.sort()
while len(stones) > 1:
bisect.insort(stones, stones.pop() - stones.pop())
print(stones)
return stones[0]
in_stones = [2, 7, 4, 1, 8, 1]
a = Solution()
print(a.lastStoneWeight(in_stones))
print(a.lastStoneWeight_sol2(in_stones))
"""
Approach 1:
Make a min heap of negative o weights i.e. somehow same max heap, get the 2 top elements check for diff and append it
back if their is something remaining till len smaller than 1 and heap[0] != 0
Approach 2:
Use bisect.insort after sorting the array, take top 2 elements subtract and add it back, it will add the new element
properly so that the resultant array still remains sorted
Reference - https://leetcode.com/problems/last-stone-weight/discuss/294956/JavaC%2B%2BPython-Priority-Queue
""" | en | 0.762184 | ---> Last Stone Weight ---> Easy Approach 1: Make a min heap of negative o weights i.e. somehow same max heap, get the 2 top elements check for diff and append it back if their is something remaining till len smaller than 1 and heap[0] != 0 Approach 2: Use bisect.insort after sorting the array, take top 2 elements subtract and add it back, it will add the new element properly so that the resultant array still remains sorted Reference - https://leetcode.com/problems/last-stone-weight/discuss/294956/JavaC%2B%2BPython-Priority-Queue | 3.431826 | 3 |
spconv/pytorch/tables.py | xmyqsh/spconv | 0 | 6621260 | <filename>spconv/pytorch/tables.py<gh_stars>0
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.autograd import Function
#from torch.nn import Module
from spconv.pytorch.modules import SparseModule
from spconv.pytorch.core import SparseConvTensor
from typing import List
class JoinTable(SparseModule):
    """Concatenate the feature tensors of several SparseConvTensors that
    share the same index set (channel-wise join)."""

    def forward(self, input: List[SparseConvTensor]):
        features = torch.cat([tensor.features for tensor in input], 1)
        first = input[0]
        output = SparseConvTensor(features, first.indices,
                                  first.spatial_shape, first.batch_size,
                                  first.grid, first.voxel_num,
                                  first.indice_dict)
        # NOTE(review): bookkeeping is copied from input[1] rather than
        # input[0] -- confirm upstream that this asymmetry is intended.
        output.benchmark_record = input[1].benchmark_record
        output.thrust_allocator = input[1].thrust_allocator
        return output

    def input_spatial_size(self, out_size):
        # Channel concatenation does not change the spatial extent.
        return out_size
class AddTable(SparseModule):
    """Element-wise sum of the feature tensors of several
    SparseConvTensors that share one index set."""

    def forward(self, input: List[SparseConvTensor]):
        first = input[0]
        output = SparseConvTensor(sum(t.features for t in input),
                                  first.indices, first.spatial_shape,
                                  first.batch_size, first.grid,
                                  first.voxel_num, first.indice_dict)
        # NOTE(review): bookkeeping is taken from input[1], not input[0] --
        # confirm upstream that this is intended.
        output.benchmark_record = input[1].benchmark_record
        output.thrust_allocator = input[1].thrust_allocator
        return output

    def input_spatial_size(self, out_size):
        # Summation preserves the spatial extent.
        return out_size
class ConcatTable(SparseModule):
    """Apply every registered sub-module to the same input and collect
    the results in a list (torch7-style ConcatTable)."""

    def forward(self, input):
        outputs = []
        for module in self._modules.values():
            outputs.append(module(input))
        return outputs

    def add(self, module):
        """Append a sub-module under the next numeric key; returns self
        so calls can be chained."""
        self._modules[str(len(self._modules))] = module
        return self

    def input_spatial_size(self, out_size):
        # Delegate to the first branch; all branches see the same input.
        return self._modules['0'].input_spatial_size(out_size)
| <filename>spconv/pytorch/tables.py<gh_stars>0
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.autograd import Function
#from torch.nn import Module
from spconv.pytorch.modules import SparseModule
from spconv.pytorch.core import SparseConvTensor
from typing import List
class JoinTable(SparseModule): # Module):
def forward(self, input: List[SparseConvTensor]):
output = SparseConvTensor(torch.cat([i.features for i in input], 1),
input[0].indices, input[0].spatial_shape,
input[0].batch_size, input[0].grid,
input[0].voxel_num, input[0].indice_dict)
output.benchmark_record = input[1].benchmark_record
output.thrust_allocator = input[1].thrust_allocator
return output
def input_spatial_size(self, out_size):
return out_size
class AddTable(SparseModule): # Module):
def forward(self, input: List[SparseConvTensor]):
output = SparseConvTensor(sum([i.features for i in input]),
input[0].indices, input[0].spatial_shape,
input[0].batch_size, input[0].grid,
input[0].voxel_num, input[0].indice_dict)
output.benchmark_record = input[1].benchmark_record
output.thrust_allocator = input[1].thrust_allocator
return output
def input_spatial_size(self, out_size):
return out_size
class ConcatTable(SparseModule): # Module):
def forward(self, input):
return [module(input) for module in self._modules.values()]
def add(self, module):
self._modules[str(len(self._modules))] = module
return self
def input_spatial_size(self, out_size):
return self._modules['0'].input_spatial_size(out_size)
| en | 0.80548 | # Copyright 2021 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #from torch.nn import Module # Module): # Module): # Module): | 2.179085 | 2 |
python/testData/refactoring/introduceVariable/backslash.after.py | jnthn/intellij-community | 2 | 6621261 | def f(x):
a = x.foo.bar
return a.baz() | def f(x):
a = x.foo.bar
return a.baz() | none | 1 | 1.673176 | 2 | |
9_functions/10_documentingFunctions.py | qaidjohar/PythonCourse | 0 | 6621262 | <reponame>qaidjohar/PythonCourse
def exponent(num, power = 2):
"""This is the exponent documentation\n\nUsage: exponent(number, power)\nExample: exponent(5,2)."""
return num ** power
print(exponent.__doc__)
exponent() | def exponent(num, power = 2):
"""This is the exponent documentation\n\nUsage: exponent(number, power)\nExample: exponent(5,2)."""
return num ** power
print(exponent.__doc__)
exponent() | en | 0.303624 | This is the exponent documentation\n\nUsage: exponent(number, power)\nExample: exponent(5,2). | 3.738832 | 4 |
python/Latin Numerals To English.py | TechieHelper/Codewars | 0 | 6621263 | <filename>python/Latin Numerals To English.py
# A program to turn Latin numerals into English Characters
data = "VI"
total = 0
def string_flip(flipData):
    """Return a reversed copy of *flipData* (a string or other sliceable
    sequence).

    Simplified: the original built the reverse via ``[dataLength::-1]``,
    which relies on an out-of-range start index; a plain ``[::-1]`` slice
    reverses the whole sequence directly.
    """
    return flipData[::-1]
data = string_flip(data)
data = list(data)
for i in range(10):
data.append(0)
print(data)
i = 0
if data[i] == "I":
if data[i+1] == "I":
if data[i+2] == "I":
total += 3
else:
total += 2
else:
total += 1
else:
total += 0
i = total
if data[i] == "V":
if data[i+1] == "I":
total += 4
i += 2
else:
total += 5
i += 1
else:
pass
# LOOP INSIDE 99!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
print(i)
print(total)
| <filename>python/Latin Numerals To English.py
# A program to turn Latin numerals into English Characters
data = "VI"
total = 0
def string_flip(flipData):
dataLength = len(flipData)
dataFlip = flipData[dataLength::-1]
flipData = dataFlip
return flipData
data = string_flip(data)
data = list(data)
for i in range(10):
data.append(0)
print(data)
i = 0
if data[i] == "I":
if data[i+1] == "I":
if data[i+2] == "I":
total += 3
else:
total += 2
else:
total += 1
else:
total += 0
i = total
if data[i] == "V":
if data[i+1] == "I":
total += 4
i += 2
else:
total += 5
i += 1
else:
pass
# LOOP INSIDE 99!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
print(i)
print(total)
| en | 0.688635 | # A program to turn Latin numerals into English Characters # LOOP INSIDE 99!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! | 3.574398 | 4 |
newssimilarity/segment_sim/tf_idf.py | imackerracher/NewsSimilarity | 0 | 6621264 | from newssimilarity.segment_sim.segment_similarity_measurement import SegmentSimMeasurement
from nltk.corpus import stopwords
from scipy import spatial
import math
import nltk
class TfIdf(SegmentSimMeasurement):
    """Cosine similarity between two text segments in tf-idf space."""

    def __init__(self, token_dict, segment_list, source_segment, target_segment):
        """
        :param token_dict: all the tokens in the corpus as a dictionary
            with their frequencies
        :param segment_list: list of dictionaries containing all segments
        :param source_segment: first of the two segments being compared
        :param target_segment: second of the two segments being compared
        """
        self.token_dict = token_dict
        self.token_list = [w for w in token_dict]
        self.segment_list = segment_list
        self.source_segment = source_segment
        self.target_segment = target_segment
        self.stop = set(stopwords.words('english'))

    def segment_token_dict(self, tokens):
        """Count the frequency of every non-stop-word token.

        :param tokens: all the tokens of one segment
        :return: dict mapping token -> frequency
        """
        counts = {}
        for token in tokens:
            if token not in self.stop:
                counts[token] = counts.get(token, 0) + 1
        return counts

    def tf(self, token, segment_token_dict, length_segment):
        """Relative term frequency of *token* within one segment.

        :param token: token whose frequency is looked up
        :param segment_token_dict: token -> frequency map for the segment
        :param length_segment: number of (non-stop-word) tokens in the segment
        """
        return segment_token_dict[token] / length_segment

    def containing(self, token):
        """Number of segments in the corpus that contain *token*."""
        return sum(1 for dic in self.segment_list if token in dic)

    def idf(self, token):
        """Inverse document frequency of *token*.

        Fixed: the original computed ``log(N) / (1 + df)`` due to an
        operator-precedence slip; the standard smoothed definition is
        ``log(N / (1 + df))``.

        :return: idf score for the token
        """
        number_segments = len(self.segment_list)
        return math.log(number_segments / (1 + self.containing(token)))

    def tf_idf(self, segment):
        """tf-idf vector of *segment* over the corpus vocabulary.

        :param segment: segment object; its ``text`` attribute is tokenised
        :return: list of tf-idf scores, one per corpus token
        """
        segment_tokens = [t.lower() for t in nltk.word_tokenize(segment.text)
                          if t.lower() not in self.stop]
        segment_length = len(segment_tokens)
        token_counts = self.segment_token_dict(segment_tokens)
        vector = []
        for token in self.token_list:
            if token.lower() in segment_tokens:
                # NOTE(review): tf is computed on the lower-cased token while
                # idf receives the original casing -- confirm the casing of
                # segment_list keys upstream.
                score = (self.tf(token.lower(), token_counts, segment_length)
                         * self.idf(token))
                vector.append(score)
            else:
                vector.append(0)
        return vector

    def calculate_similarity(self, cosine=True):
        """Cosine similarity between source and target segment vectors.

        :param cosine: unused; kept for interface compatibility.
        :return: similarity in [-1, 1] (1 - cosine distance)
        """
        source_vector = self.tf_idf(self.source_segment)
        target_vector = self.tf_idf(self.target_segment)
        return 1 - spatial.distance.cosine(source_vector, target_vector)
| from newssimilarity.segment_sim.segment_similarity_measurement import SegmentSimMeasurement
from nltk.corpus import stopwords
from scipy import spatial
import math
import nltk
class TfIdf(SegmentSimMeasurement):
def __init__(self, token_dict, segment_list, source_segment, target_segment):
"""
:param token_dict: all the tokens in the corpus as a dictionary with the frequencies
:param segment_list: list of dictionaries containing all segments
:param source_segment: The 2 segments that are being compared
:param target_segment:
"""
self.token_dict = token_dict
self.token_list = [w for w in token_dict]
self.segment_list = segment_list
self.source_segment = source_segment
self.target_segment = target_segment
self.stop = set(stopwords.words('english'))
def segment_token_dict(self, tokens):
"""
Calculate the frequencies of all tokens apart from stop words
:param tokens: All the tokens from an segment
:return: Dictionary with the tokens and their frequencies
"""
token_dict = {}
for token in tokens:
if token not in self.stop:
if token in token_dict:
token_dict[token] += 1
else:
token_dict[token] = 1
return token_dict
def tf(self, token, segment_token_dict, length_segment):
"""
Term frequency
:param token: Token, that gets counted
:param segment_dict: dictionary with all the tokens and their frequencies for the segment
:return:
"""
return segment_token_dict[token] / length_segment
def containing(self, token):
"""
Number of segments that contain the token
:param token:
:return:
"""
return sum([1 for dic in self.segment_list if token in dic])
def idf(self, token):
"""
Inverse document frequency
:param token: Token that gets input, to calculate idf score
:return: Idf score for token
"""
# the number of segments in the corpus
number_segments = len(self.segment_list)
return math.log(number_segments) / (1 + self.containing(token))
def tf_idf(self, segment):
"""
Calculate the tf-idf value for the segment
:param segment: segment that gets input
:return: vector score for all the tokens
"""
segment_tokens = [token.lower() for token in nltk.word_tokenize(segment.text) if token.lower() not in self.stop]
segment_length = len(segment_tokens)
segment_token_dict = self.segment_token_dict(segment_tokens)
vector = []
for token in self.token_list:
if token.lower() in segment_tokens:
tf = self.tf(token.lower(), segment_token_dict, segment_length)
idf = self.idf(token)
vector.append(tf*idf)
else:
vector.append(0)
return vector
def calculate_similarity(self, cosine=True):
"""
Calculate the tf-idf score between source an target segment
:return:
"""
source_vector = self.tf_idf(self.source_segment)
target_vector = self.tf_idf(self.target_segment)
result = 1 - spatial.distance.cosine(source_vector, target_vector)
return result
| en | 0.789175 | :param token_dict: all the tokens in the corpus as a dictionary with the frequencies :param segment_list: list of dictionaries containing all segments :param source_segment: The 2 segments that are being compared :param target_segment: Calculate the frequencies of all tokens apart from stop words :param tokens: All the tokens from an segment :return: Dictionary with the tokens and their frequencies Term frequency :param token: Token, that gets counted :param segment_dict: dictionary with all the tokens and their frequencies for the segment :return: Number of segments that contain the token :param token: :return: Inverse document frequency :param token: Token that gets input, to calculate idf score :return: Idf score for token # the number of segments in the corpus Calculate the tf-idf value for the segment :param segment: segment that gets input :return: vector score for all the tokens Calculate the tf-idf score between source an target segment :return: | 3.200418 | 3 |
doAllGraphs.py | zerosquadron/grove-weather-pi | 1 | 6621265 | <gh_stars>1-10
#
# calculate all graphs
#
# SwitchDoc Labs March 30, 2015
import sys
sys.path.append('/home/pi/SDL_Pi_GroveWeatherPi/graphs')
# Check for user imports
try:
import conflocal as config
except ImportError:
import config
import TemperatureHumidityGraph
import PowerCurrentGraph
import PowerVoltageGraph
import BarometerLightningGraph
def doAllGraphs():
if (config.enable_MySQL_Logging == True):
BarometerLightningGraph.BarometerLightningGraph('test', 10, 0)
TemperatureHumidityGraph.TemperatureHumidityGraph('test', 10, 0)
PowerCurrentGraph.PowerCurrentGraph('test', 10, 0)
PowerVoltageGraph.PowerVoltageGraph('test', 10, 0)
| #
# calculate all graphs
#
# SwitchDoc Labs March 30, 2015
import sys
sys.path.append('/home/pi/SDL_Pi_GroveWeatherPi/graphs')
# Check for user imports
try:
import conflocal as config
except ImportError:
import config
import TemperatureHumidityGraph
import PowerCurrentGraph
import PowerVoltageGraph
import BarometerLightningGraph
def doAllGraphs():
if (config.enable_MySQL_Logging == True):
BarometerLightningGraph.BarometerLightningGraph('test', 10, 0)
TemperatureHumidityGraph.TemperatureHumidityGraph('test', 10, 0)
PowerCurrentGraph.PowerCurrentGraph('test', 10, 0)
PowerVoltageGraph.PowerVoltageGraph('test', 10, 0) | en | 0.818805 | # # calculate all graphs # # SwitchDoc Labs March 30, 2015 # Check for user imports | 1.901204 | 2 |
qcloudsdkcvm/DeleteKeyPairRequest.py | f3n9/qcloudcli | 0 | 6621266 | # -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
class DeleteKeyPairRequest(Request):
def __init__(self):
super(DeleteKeyPairRequest, self).__init__(
'cvm', 'qcloudcliV1', 'DeleteKeyPair', 'cvm.api.qcloud.com')
def get_keyIds(self):
return self.get_params().get('keyIds')
def set_keyIds(self, keyIds):
self.add_param('keyIds', keyIds)
| # -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
class DeleteKeyPairRequest(Request):
def __init__(self):
super(DeleteKeyPairRequest, self).__init__(
'cvm', 'qcloudcliV1', 'DeleteKeyPair', 'cvm.api.qcloud.com')
def get_keyIds(self):
return self.get_params().get('keyIds')
def set_keyIds(self, keyIds):
self.add_param('keyIds', keyIds)
| en | 0.769321 | # -*- coding: utf-8 -*- | 1.985665 | 2 |
photos/views.py | careymwarabu/Gallery | 0 | 6621267 | <reponame>careymwarabu/Gallery<filename>photos/views.py
from django.shortcuts import render
from django.http import HttpResponse
from .models import Image, ImageCategory, ImageLocation
from django.core.exceptions import ObjectDoesNotExist
# Create your views here.
def index(request):
images = Image.objects.all()
categories = ImageCategory.objects.all()
locations = ImageLocation.objects.all()
return render(request, 'index.html', {"images": images, "categories": categories, "locations": locations})
def search_results(request):
if 'category' in request.GET and request.GET["category"]:
search_term = request.GET.get("category")
print(search_term)
try:
categories = ImageCategory.objects.get(name=search_term)
searched_images = Image.search_image(categories)
print(searched_images)
return render(request, 'search.html', {'images': searched_images})
except ObjectDoesNotExist:
message = "No images found"
categories = ImageCategory.objects.all()
return render(request, "search.html", {"message": message, "categories": categories})
else:
message = "You haven't searched for any term"
return render(request, 'search.html', {'message': message})
def view_image(request, image_id):
try:
image = Image.objects.get(id=image_id)
return render(request, 'image.html', {'image': image})
except ObjectDoesNotExist:
message = 'Sorry, we could not find what you are looking for'
return render(request, 'image.html', {'message': message})
def get_category(request, category_id):
category = ImageCategory.objects.get(id=category_id)
image = Image.search_image(category)
return render(request, 'search.html', {'images': image})
def get_location(request,location_id):
location = ImageLocation.objects.get(id=location_id)
image = Image.search_by_location(location)
return render(request, 'search.html', {'images': image})
| from django.shortcuts import render
from django.http import HttpResponse
from .models import Image, ImageCategory, ImageLocation
from django.core.exceptions import ObjectDoesNotExist
# Create your views here.
def index(request):
images = Image.objects.all()
categories = ImageCategory.objects.all()
locations = ImageLocation.objects.all()
return render(request, 'index.html', {"images": images, "categories": categories, "locations": locations})
def search_results(request):
if 'category' in request.GET and request.GET["category"]:
search_term = request.GET.get("category")
print(search_term)
try:
categories = ImageCategory.objects.get(name=search_term)
searched_images = Image.search_image(categories)
print(searched_images)
return render(request, 'search.html', {'images': searched_images})
except ObjectDoesNotExist:
message = "No images found"
categories = ImageCategory.objects.all()
return render(request, "search.html", {"message": message, "categories": categories})
else:
message = "You haven't searched for any term"
return render(request, 'search.html', {'message': message})
def view_image(request, image_id):
try:
image = Image.objects.get(id=image_id)
return render(request, 'image.html', {'image': image})
except ObjectDoesNotExist:
message = 'Sorry, we could not find what you are looking for'
return render(request, 'image.html', {'message': message})
def get_category(request, category_id):
category = ImageCategory.objects.get(id=category_id)
image = Image.search_image(category)
return render(request, 'search.html', {'images': image})
def get_location(request,location_id):
location = ImageLocation.objects.get(id=location_id)
image = Image.search_by_location(location)
return render(request, 'search.html', {'images': image}) | en | 0.968116 | # Create your views here. | 2.370521 | 2 |
identipy/peptide_centric.py | comcon1/identipy | 0 | 6621268 | <reponame>comcon1/identipy
import numpy as np
from string import punctuation
from collections import defaultdict
import operator as op
from bisect import bisect
from pyteomics import parser, mass, fasta, auxiliary as aux, mgf, mzml
from . import scoring, utils
import logging
logger = logging.getLogger(__name__)
try:
from pyteomics import cmass
except ImportError:
# logger.warning('cmass could not be imported')
cmass = mass
try:
# import pyximport; pyximport.install()
from .cutils import theor_spectrum
except:
logger.info('Cython modules were not loaded...')
from .utils import theor_spectrum
from .utils import reshape_theor_spectrum
spectra = {}
titles = {}
best_res = {}
nmasses = {}
t2s = {}
charges = {}
def prepare_peptide_processor(fname, settings):
global spectra
global nmasses
global titles
global t2s
global charges
global best_res
best_res = {}
maxcharges = {}
fcharge = settings.getint('scoring', 'maximum fragment charge')
ch_range = range(settings.getint('search', 'minimum charge'),
1 + settings.getint('search', 'maximum charge'))
for c in ch_range:
maxcharges[c] = max(1, min(fcharge, c-1) if fcharge else c-1)
params = {}
params['maxpeaks'] = settings.getint('scoring', 'maximum peaks')
params['minpeaks'] = settings.getint('scoring', 'minimum peaks')
params['dynrange'] = settings.getfloat('scoring', 'dynamic range')
params['acc'] = settings.getfloat('search', 'product accuracy')
params['min_mz'] = settings.getfloat('search', 'product minimum m/z')
params.update(utils._charge_params(settings))
params['dacc'] = settings.getfloat('input', 'deisotoping mass tolerance')
params['deisotope'] = settings.getboolean('input', 'deisotope')
params['tags'] = utils.get_tags(settings.get('output', 'tags'))
if not spectra:
logger.info('Reading spectra ...')
for spec in utils.iterate_spectra(fname):
ps = utils.preprocess_spectrum(spec, params)
if ps is not None:
ttl = utils.get_title(ps)
t2s[ttl] = ps
for m, c in utils.neutral_masses(ps, params):
effc = maxcharges[c]
nmasses.setdefault(effc, []).append(m)
spectra.setdefault(effc, []).append(ps)
titles.setdefault(effc, []).append(ttl)
charges.setdefault(effc, []).append(c)
ps.setdefault('nm', {})[c] = m
logger.info('%s spectra pass quality criteria.', sum(map(len, spectra.itervalues())))
for c in list(spectra):
i = np.argsort(nmasses[c])
nmasses[c] = np.array(nmasses[c])[i]
spectra[c] = np.array(spectra[c])[i]
titles[c] = np.array(titles[c])[i]
charges[c] = np.array(charges[c])[i]
else:
logger.info('Reusing %s spectra from previous run.', sum(map(len, spectra.itervalues())))
utils.set_mod_dict(settings)
mods = settings.get('modifications', 'variable')
maxmods = settings.getint('modifications', 'maximum variable mods')
leg = settings.get('misc', 'legend')
punct = set(punctuation)
nmods = [(p, mod[1], mod[2]) for p, mod in leg.iteritems() if p in punct]
aa_mass = utils.get_aa_mass(settings)
score = utils.import_(settings.get('scoring', 'score'))
try:
score_fast_name = settings.get('scoring', 'score') + '_fast'
if score_fast_name == 'identipy.scoring.RNHS_fast':
try:
from cutils import RNHS_fast as score_fast
except:
score_fast = utils.import_(settings.get('scoring', 'score') + '_fast')
else:
score_fast = utils.import_(settings.get('scoring', 'score') + '_fast')
except Exception as e:
score_fast = False
logging.debug('No fast score imported: %s', e)
acc_l = settings.getfloat('search', 'precursor accuracy left')
acc_r = settings.getfloat('search', 'precursor accuracy right')
acc_frag = settings.getfloat('search', 'product accuracy')
frag_unit = settings.get('search', 'product accuracy unit')
if frag_unit == 'ppm':
acc_frag_ppm = settings.getfloat('search', 'product accuracy ppm')
else:
acc_frag_ppm = False
try:
fast_first_stage = settings.getint('misc', 'fast first stage')
except:
fast_first_stage = 0
unit = settings.get('search', 'precursor accuracy unit')
rel = utils.relative(unit)
if settings.has_option('scoring', 'condition'):
cond = settings.get('scoring', 'condition')
else:
cond = None
if isinstance(cond, str) and cond.strip():
cond = utils.import_(cond)
score = utils.import_(settings.get('scoring', 'score'))
return {'rel': rel, 'aa_mass': aa_mass, 'acc_l': acc_l, 'acc_r': acc_r, 'acc_frag': acc_frag, 'acc_frag_ppm': acc_frag_ppm,
'unit': unit, 'nmods': nmods, 'maxmods': maxmods, 'fast first stage': fast_first_stage,
'sapime': utils.get_shifts_and_pime(settings),
'cond': cond, 'score': score, 'score_fast': score_fast,
'settings': settings}
def peptide_processor_iter_isoforms(peptide, **kwargs):
nmods, maxmods = op.itemgetter('nmods', 'maxmods')(kwargs)
if nmods and maxmods:
out = []
for form in utils.custom_isoforms(peptide, variable_mods=nmods, maxmods=maxmods, snp=kwargs['snp']):
res = peptide_processor(form, **kwargs)
if res:
out.append(res)
if out:
return out
else:
res = peptide_processor(peptide, **kwargs)
if res:
return [res, ]
def peptide_processor(peptide, **kwargs):
if kwargs['snp']:
if 'snp' not in peptide:
seqm = peptide
aachange_pos = False
snp_label = 'wild'
else:
tmp = peptide.split('snp')
seqm = tmp[0] + tmp[1].split('at')[0].split('to')[-1] + tmp[2]
aachange_pos = len(tmp[0]) + 1
snp_label = tmp[1]
else:
seqm = peptide
aachange_pos = False
snp_label = False
nterm_mass = kwargs.get('nterm_mass')
cterm_mass = kwargs.get('cterm_mass')
m = utils.custom_mass(seqm, aa_mass=kwargs['aa_mass'], nterm_mass = nterm_mass, cterm_mass = cterm_mass)
# m = cmass.fast_mass(seqm, aa_mass=kwargs['aa_mass']) + (nterm_mass - 1.007825) + (cterm_mass - 17.002735)
rel = kwargs['rel']
acc_l = kwargs['acc_l']
acc_r = kwargs['acc_r']
settings = kwargs['settings']
shifts_and_pime = kwargs['sapime']
theor = {}
theoretical_set = {}
cand_idx = {}
stored_value = False
if rel:
dm_l = acc_l * m / 1.0e6
dm_r = acc_r * m / 1.0e6
for c in spectra:
if not rel:
dm_l = acc_l * c
dm_r = acc_r * c
idx = set()
for shift in shifts_and_pime:
start = nmasses[c].searchsorted(m + shift - dm_l)
end = nmasses[c].searchsorted(m + shift + dm_r)
if end - start:
idx.update(range(start, end))
if kwargs['cond']:
idx2 = set()
for i in idx:
cond_val, stored_value = kwargs['cond'](spectra[c][i], seqm, settings, stored_value)
if cond_val:
idx2.add(i)
idx = idx2
if idx:
cand_idx[c] = idx
theor[c], theoretical_set[c] = theor_spectrum(seqm, maxcharge=c, aa_mass=kwargs['aa_mass'], reshape=False,
acc_frag=kwargs['acc_frag'], nterm_mass = nterm_mass,
cterm_mass = cterm_mass, nm=m)
reshaped = False
results = []
for fc, ind in cand_idx.iteritems():
reshaped = False
for i in ind:
s = spectra[fc][i]
# st = utils.get_title(s)
st = titles[fc][i]
if kwargs['score_fast']:
hf = kwargs['score_fast'](s['fastset'], s['idict'], theoretical_set[fc], kwargs['min_matched'])
if hf[0]:
if -hf[1] <= best_res.get(st, 0):
if kwargs['fast first stage']:
sc = hf[1]
score = {'match': [], 'sumI': 1, 'dist': [], 'total_matched': 999}
else:
if not reshaped:
theor[fc] = reshape_theor_spectrum(theor[fc])
reshaped = True
score = kwargs['score'](s, theor[fc], kwargs['acc_frag'], kwargs['acc_frag_ppm'], position=aachange_pos)#settings.getfloat('search', 'product accuracy ppm')) # FIXME (?)
sc = score.pop('score')
if -sc <= best_res.get(st, 0) and score.pop('total_matched') >= kwargs['min_matched']:
results.append((sc, st, score, m, charges[fc][i], snp_label))
else:
# st = utils.get_title(s)
if not reshaped:
theor[fc] = reshape_theor_spectrum(theor[fc])
reshaped = True
score = kwargs['score'](s, theor[fc], kwargs['acc_frag'], kwargs['acc_frag_ppm'], position=aachange_pos)#settings.getfloat('search', 'product accuracy ppm')) # FIXME (?)
sc = score.pop('score')
if -sc <= best_res.get(st, 0) and score.pop('total_matched') >= kwargs['min_matched']:
results.append((sc, st, score, m, charges[fc][i], snp_label))
# results.sort(reverse=True, key=op.itemgetter(0))
# results = np.array(results, dtype=[('score', np.float32), ('title', np.str_, 30), ('spectrum', np.object_), ('info', np.object_)])
if results:
return seqm, results
# return seqm, []
def process_peptides(fname, settings):
spec_results = defaultdict(dict)
peps = utils.peptide_gen(settings)
kwargs = prepare_peptide_processor(fname, settings)
func = peptide_processor_iter_isoforms
kwargs['min_matched'] = settings.getint('output', 'minimum matched')
kwargs['snp'] = settings.getint('search', 'snp')
kwargs['nterm_mass'] = settings.getfloat('modifications', 'protein nterm cleavage')
kwargs['cterm_mass'] = settings.getfloat('modifications', 'protein cterm cleavage')
kwargs['qsize'] = settings.getint('performance', 'out queue size')
logger.info('Running the search ...')
n = settings.getint('performance', 'processes')
leg = {}
if settings.has_option('misc', 'legend'):
leg = settings.get('misc', 'legend')
for y in utils.multimap(n, func, peps, **kwargs):
for x in y:
if x[1] is not None:
peptide, result = x
for score, spec_t, info, m, c, snp_label in result:
spec_results[spec_t]['spectrum'] = t2s[spec_t]
top_scores = spec_results[spec_t].setdefault('top_scores', 0)
if -score <= top_scores:
best_res[spec_t] = -score
info['pep_nm'] = m
info['charge'] = c
spec_results[spec_t]['top_scores'] = -score
spec_results[spec_t]['sequences'] = peptide
spec_results[spec_t]['info'] = info
spec_results[spec_t]['snp_label'] = snp_label
# spec_results[spec_t].setdefault('scores', []).append(score) FIXME write histogram
#
# top_seqs = spec_results[spec_t].setdefault('sequences', '')
# top_info = spec_results[spec_t].setdefault('info', [])
#
# i = bisect(top_scores, -score)
# if nc is None or i < nc:
# top_scores.insert(i, -score)
# top_seqs.insert(i, peptide)
# top_info.insert(i, info)
# if nc is not None and len(top_scores) > nc:
# top_scores.pop()
# top_seqs.pop()
# top_info.pop()
maxlen = settings.getint('search', 'peptide maximum length')
dtype = np.dtype([('score', np.float64),
('seq', np.str_, maxlen), ('note', np.str_, 1),
('charge', np.int8), ('info', np.object_), ('sumI', np.float64), ('fragmentMT', np.float64), ('snp_label', np.str_, 15)])
for spec_name, val in spec_results.iteritems():
s = val['spectrum']
c = []
evalues = []
score = val['top_scores']
# for idx, score in enumerate(val['top_scores']):
mseq = val['sequences']#[idx]
seq = mseq
info = val['info']#[idx]
for x in set(mseq).intersection(punctuation):
repl = leg[x][1]
if repl == '-':
repl = ''
seq = seq.replace(x, repl)
pnm = info['pep_nm']
c.append((-score, mseq, 't' if seq in utils.seen_target else 'd',
info['charge'], info, info.pop('sumI'), np.median(info.pop('dist')), val['snp_label']))
c[-1][4]['mzdiff'] = {'Da': s['nm'][info['charge']] - pnm}
c[-1][4]['mzdiff']['ppm'] = 1e6 * c[-1][4]['mzdiff']['Da'] / pnm
evalues.append(-1./score if -score else 1e6)
c = np.array(c, dtype=dtype)
yield {'spectrum': s, 'candidates': c, 'e-values': evalues}
| import numpy as np
from string import punctuation
from collections import defaultdict
import operator as op
from bisect import bisect
from pyteomics import parser, mass, fasta, auxiliary as aux, mgf, mzml
from . import scoring, utils
import logging
logger = logging.getLogger(__name__)
try:
from pyteomics import cmass
except ImportError:
# logger.warning('cmass could not be imported')
cmass = mass
try:
# import pyximport; pyximport.install()
from .cutils import theor_spectrum
except:
logger.info('Cython modules were not loaded...')
from .utils import theor_spectrum
from .utils import reshape_theor_spectrum
spectra = {}
titles = {}
best_res = {}
nmasses = {}
t2s = {}
charges = {}
def prepare_peptide_processor(fname, settings):
global spectra
global nmasses
global titles
global t2s
global charges
global best_res
best_res = {}
maxcharges = {}
fcharge = settings.getint('scoring', 'maximum fragment charge')
ch_range = range(settings.getint('search', 'minimum charge'),
1 + settings.getint('search', 'maximum charge'))
for c in ch_range:
maxcharges[c] = max(1, min(fcharge, c-1) if fcharge else c-1)
params = {}
params['maxpeaks'] = settings.getint('scoring', 'maximum peaks')
params['minpeaks'] = settings.getint('scoring', 'minimum peaks')
params['dynrange'] = settings.getfloat('scoring', 'dynamic range')
params['acc'] = settings.getfloat('search', 'product accuracy')
params['min_mz'] = settings.getfloat('search', 'product minimum m/z')
params.update(utils._charge_params(settings))
params['dacc'] = settings.getfloat('input', 'deisotoping mass tolerance')
params['deisotope'] = settings.getboolean('input', 'deisotope')
params['tags'] = utils.get_tags(settings.get('output', 'tags'))
if not spectra:
logger.info('Reading spectra ...')
for spec in utils.iterate_spectra(fname):
ps = utils.preprocess_spectrum(spec, params)
if ps is not None:
ttl = utils.get_title(ps)
t2s[ttl] = ps
for m, c in utils.neutral_masses(ps, params):
effc = maxcharges[c]
nmasses.setdefault(effc, []).append(m)
spectra.setdefault(effc, []).append(ps)
titles.setdefault(effc, []).append(ttl)
charges.setdefault(effc, []).append(c)
ps.setdefault('nm', {})[c] = m
logger.info('%s spectra pass quality criteria.', sum(map(len, spectra.itervalues())))
for c in list(spectra):
i = np.argsort(nmasses[c])
nmasses[c] = np.array(nmasses[c])[i]
spectra[c] = np.array(spectra[c])[i]
titles[c] = np.array(titles[c])[i]
charges[c] = np.array(charges[c])[i]
else:
logger.info('Reusing %s spectra from previous run.', sum(map(len, spectra.itervalues())))
utils.set_mod_dict(settings)
mods = settings.get('modifications', 'variable')
maxmods = settings.getint('modifications', 'maximum variable mods')
leg = settings.get('misc', 'legend')
punct = set(punctuation)
nmods = [(p, mod[1], mod[2]) for p, mod in leg.iteritems() if p in punct]
aa_mass = utils.get_aa_mass(settings)
score = utils.import_(settings.get('scoring', 'score'))
try:
score_fast_name = settings.get('scoring', 'score') + '_fast'
if score_fast_name == 'identipy.scoring.RNHS_fast':
try:
from cutils import RNHS_fast as score_fast
except:
score_fast = utils.import_(settings.get('scoring', 'score') + '_fast')
else:
score_fast = utils.import_(settings.get('scoring', 'score') + '_fast')
except Exception as e:
score_fast = False
logging.debug('No fast score imported: %s', e)
acc_l = settings.getfloat('search', 'precursor accuracy left')
acc_r = settings.getfloat('search', 'precursor accuracy right')
acc_frag = settings.getfloat('search', 'product accuracy')
frag_unit = settings.get('search', 'product accuracy unit')
if frag_unit == 'ppm':
acc_frag_ppm = settings.getfloat('search', 'product accuracy ppm')
else:
acc_frag_ppm = False
try:
fast_first_stage = settings.getint('misc', 'fast first stage')
except:
fast_first_stage = 0
unit = settings.get('search', 'precursor accuracy unit')
rel = utils.relative(unit)
if settings.has_option('scoring', 'condition'):
cond = settings.get('scoring', 'condition')
else:
cond = None
if isinstance(cond, str) and cond.strip():
cond = utils.import_(cond)
score = utils.import_(settings.get('scoring', 'score'))
return {'rel': rel, 'aa_mass': aa_mass, 'acc_l': acc_l, 'acc_r': acc_r, 'acc_frag': acc_frag, 'acc_frag_ppm': acc_frag_ppm,
'unit': unit, 'nmods': nmods, 'maxmods': maxmods, 'fast first stage': fast_first_stage,
'sapime': utils.get_shifts_and_pime(settings),
'cond': cond, 'score': score, 'score_fast': score_fast,
'settings': settings}
def peptide_processor_iter_isoforms(peptide, **kwargs):
nmods, maxmods = op.itemgetter('nmods', 'maxmods')(kwargs)
if nmods and maxmods:
out = []
for form in utils.custom_isoforms(peptide, variable_mods=nmods, maxmods=maxmods, snp=kwargs['snp']):
res = peptide_processor(form, **kwargs)
if res:
out.append(res)
if out:
return out
else:
res = peptide_processor(peptide, **kwargs)
if res:
return [res, ]
def peptide_processor(peptide, **kwargs):
if kwargs['snp']:
if 'snp' not in peptide:
seqm = peptide
aachange_pos = False
snp_label = 'wild'
else:
tmp = peptide.split('snp')
seqm = tmp[0] + tmp[1].split('at')[0].split('to')[-1] + tmp[2]
aachange_pos = len(tmp[0]) + 1
snp_label = tmp[1]
else:
seqm = peptide
aachange_pos = False
snp_label = False
nterm_mass = kwargs.get('nterm_mass')
cterm_mass = kwargs.get('cterm_mass')
m = utils.custom_mass(seqm, aa_mass=kwargs['aa_mass'], nterm_mass = nterm_mass, cterm_mass = cterm_mass)
# m = cmass.fast_mass(seqm, aa_mass=kwargs['aa_mass']) + (nterm_mass - 1.007825) + (cterm_mass - 17.002735)
rel = kwargs['rel']
acc_l = kwargs['acc_l']
acc_r = kwargs['acc_r']
settings = kwargs['settings']
shifts_and_pime = kwargs['sapime']
theor = {}
theoretical_set = {}
cand_idx = {}
stored_value = False
if rel:
dm_l = acc_l * m / 1.0e6
dm_r = acc_r * m / 1.0e6
for c in spectra:
if not rel:
dm_l = acc_l * c
dm_r = acc_r * c
idx = set()
for shift in shifts_and_pime:
start = nmasses[c].searchsorted(m + shift - dm_l)
end = nmasses[c].searchsorted(m + shift + dm_r)
if end - start:
idx.update(range(start, end))
if kwargs['cond']:
idx2 = set()
for i in idx:
cond_val, stored_value = kwargs['cond'](spectra[c][i], seqm, settings, stored_value)
if cond_val:
idx2.add(i)
idx = idx2
if idx:
cand_idx[c] = idx
theor[c], theoretical_set[c] = theor_spectrum(seqm, maxcharge=c, aa_mass=kwargs['aa_mass'], reshape=False,
acc_frag=kwargs['acc_frag'], nterm_mass = nterm_mass,
cterm_mass = cterm_mass, nm=m)
reshaped = False
results = []
for fc, ind in cand_idx.iteritems():
reshaped = False
for i in ind:
s = spectra[fc][i]
# st = utils.get_title(s)
st = titles[fc][i]
if kwargs['score_fast']:
hf = kwargs['score_fast'](s['fastset'], s['idict'], theoretical_set[fc], kwargs['min_matched'])
if hf[0]:
if -hf[1] <= best_res.get(st, 0):
if kwargs['fast first stage']:
sc = hf[1]
score = {'match': [], 'sumI': 1, 'dist': [], 'total_matched': 999}
else:
if not reshaped:
theor[fc] = reshape_theor_spectrum(theor[fc])
reshaped = True
score = kwargs['score'](s, theor[fc], kwargs['acc_frag'], kwargs['acc_frag_ppm'], position=aachange_pos)#settings.getfloat('search', 'product accuracy ppm')) # FIXME (?)
sc = score.pop('score')
if -sc <= best_res.get(st, 0) and score.pop('total_matched') >= kwargs['min_matched']:
results.append((sc, st, score, m, charges[fc][i], snp_label))
else:
# st = utils.get_title(s)
if not reshaped:
theor[fc] = reshape_theor_spectrum(theor[fc])
reshaped = True
score = kwargs['score'](s, theor[fc], kwargs['acc_frag'], kwargs['acc_frag_ppm'], position=aachange_pos)#settings.getfloat('search', 'product accuracy ppm')) # FIXME (?)
sc = score.pop('score')
if -sc <= best_res.get(st, 0) and score.pop('total_matched') >= kwargs['min_matched']:
results.append((sc, st, score, m, charges[fc][i], snp_label))
# results.sort(reverse=True, key=op.itemgetter(0))
# results = np.array(results, dtype=[('score', np.float32), ('title', np.str_, 30), ('spectrum', np.object_), ('info', np.object_)])
if results:
return seqm, results
# return seqm, []
def process_peptides(fname, settings):
spec_results = defaultdict(dict)
peps = utils.peptide_gen(settings)
kwargs = prepare_peptide_processor(fname, settings)
func = peptide_processor_iter_isoforms
kwargs['min_matched'] = settings.getint('output', 'minimum matched')
kwargs['snp'] = settings.getint('search', 'snp')
kwargs['nterm_mass'] = settings.getfloat('modifications', 'protein nterm cleavage')
kwargs['cterm_mass'] = settings.getfloat('modifications', 'protein cterm cleavage')
kwargs['qsize'] = settings.getint('performance', 'out queue size')
logger.info('Running the search ...')
n = settings.getint('performance', 'processes')
leg = {}
if settings.has_option('misc', 'legend'):
leg = settings.get('misc', 'legend')
for y in utils.multimap(n, func, peps, **kwargs):
for x in y:
if x[1] is not None:
peptide, result = x
for score, spec_t, info, m, c, snp_label in result:
spec_results[spec_t]['spectrum'] = t2s[spec_t]
top_scores = spec_results[spec_t].setdefault('top_scores', 0)
if -score <= top_scores:
best_res[spec_t] = -score
info['pep_nm'] = m
info['charge'] = c
spec_results[spec_t]['top_scores'] = -score
spec_results[spec_t]['sequences'] = peptide
spec_results[spec_t]['info'] = info
spec_results[spec_t]['snp_label'] = snp_label
# spec_results[spec_t].setdefault('scores', []).append(score) FIXME write histogram
#
# top_seqs = spec_results[spec_t].setdefault('sequences', '')
# top_info = spec_results[spec_t].setdefault('info', [])
#
# i = bisect(top_scores, -score)
# if nc is None or i < nc:
# top_scores.insert(i, -score)
# top_seqs.insert(i, peptide)
# top_info.insert(i, info)
# if nc is not None and len(top_scores) > nc:
# top_scores.pop()
# top_seqs.pop()
# top_info.pop()
maxlen = settings.getint('search', 'peptide maximum length')
dtype = np.dtype([('score', np.float64),
('seq', np.str_, maxlen), ('note', np.str_, 1),
('charge', np.int8), ('info', np.object_), ('sumI', np.float64), ('fragmentMT', np.float64), ('snp_label', np.str_, 15)])
for spec_name, val in spec_results.iteritems():
s = val['spectrum']
c = []
evalues = []
score = val['top_scores']
# for idx, score in enumerate(val['top_scores']):
mseq = val['sequences']#[idx]
seq = mseq
info = val['info']#[idx]
for x in set(mseq).intersection(punctuation):
repl = leg[x][1]
if repl == '-':
repl = ''
seq = seq.replace(x, repl)
pnm = info['pep_nm']
c.append((-score, mseq, 't' if seq in utils.seen_target else 'd',
info['charge'], info, info.pop('sumI'), np.median(info.pop('dist')), val['snp_label']))
c[-1][4]['mzdiff'] = {'Da': s['nm'][info['charge']] - pnm}
c[-1][4]['mzdiff']['ppm'] = 1e6 * c[-1][4]['mzdiff']['Da'] / pnm
evalues.append(-1./score if -score else 1e6)
c = np.array(c, dtype=dtype)
yield {'spectrum': s, 'candidates': c, 'e-values': evalues} | en | 0.294962 | # logger.warning('cmass could not be imported') # import pyximport; pyximport.install() # m = cmass.fast_mass(seqm, aa_mass=kwargs['aa_mass']) + (nterm_mass - 1.007825) + (cterm_mass - 17.002735) # st = utils.get_title(s) #settings.getfloat('search', 'product accuracy ppm')) # FIXME (?) # st = utils.get_title(s) #settings.getfloat('search', 'product accuracy ppm')) # FIXME (?) # results.sort(reverse=True, key=op.itemgetter(0)) # results = np.array(results, dtype=[('score', np.float32), ('title', np.str_, 30), ('spectrum', np.object_), ('info', np.object_)]) # return seqm, [] # spec_results[spec_t].setdefault('scores', []).append(score) FIXME write histogram # # top_seqs = spec_results[spec_t].setdefault('sequences', '') # top_info = spec_results[spec_t].setdefault('info', []) # # i = bisect(top_scores, -score) # if nc is None or i < nc: # top_scores.insert(i, -score) # top_seqs.insert(i, peptide) # top_info.insert(i, info) # if nc is not None and len(top_scores) > nc: # top_scores.pop() # top_seqs.pop() # top_info.pop() # for idx, score in enumerate(val['top_scores']): #[idx] #[idx] | 2.028716 | 2 |
workers/custodian.py | UphillD/edgebench | 3 | 6621269 | <reponame>UphillD/edgebench<gh_stars>1-10
# Edgebench Platform
# Worker Scripts
# Custodian Module
#
# Starts, monitors and maintains a combination of applications
#
# Data Structures:
# Pandas DataFrames:
# task_matrix: [ 'z', 'w', 'D', 'Start Timestamp', 'Predicted Duration' ]
# Types: [ int, int, int, flt, flt ]
# Stores information for tasks currently running
#
# state_matrix: [ 'ID', 'App', 'State', 'z' ]
# Types: [ int, str, str, int ]
# Stores information for the state of machine instances
#
# Queue File: z,w,D.que
# Information: Task ID, Task Type, Task Deadline
import pandas as pd
import pickle as pkl
from glob import glob
from os import chdir, path, remove, system
from paho.mqtt import publish
from shutil import copy
from subprocess import Popen, DEVNULL
from sys import argv
from tabulate import tabulate
from time import sleep, time
from config import *
from shared import *
# Print help message when invoked incorrectly.
# NOTE(review): argv includes the script name, so a correct invocation has
# len(argv) == 3.  The original guard tested `len(argv) != 1`, which printed
# the help text even for valid invocations and made the elif unreachable;
# fixed to `!= 3`.
if len(argv) != 3:
    print('Please provide the proper arguments!')
    print('')
    print('Usage: python3 custodian.py <platform> <app combo>')
    print(' where <app combo> is the number of instances')
    print(' of every app, in the form of a,b,c,d')
    print('')

# Initializer
# args: custodian.py <platform> <app combo>
elif len(argv) == 3:
    # Grab the platform & set the app profile
    platform = argv[1]
    # Grab the application a,b,c,d combination, turn it into a list
    combo = list(map(int, argv[2].split(',')))

    # Initialize the task matrix & store it
    remove_matrix(workdir + '/task_matrix.pkl')
    task_matrix = pd.DataFrame(columns=['z', 'w', 'D', 'Start Timestamp', 'Predicted Duration'])
    task_matrix.set_index('z', inplace=True)
    write_matrix(task_matrix, workdir + '/task_matrix.pkl')

    # Initialize the state matrix & store it too
    state_matrix = pd.DataFrame(columns=['ID', 'App', 'State', 'z'])
    state_matrix.set_index('ID', inplace=True)
    state_matrix = state_matrix.astype(str)
    remove_matrix(workdir + '/state_matrix.pkl')

    # Delete any leftover queue files
    for f in glob(workdir + '/*.que'):
        remove(f)

    # Switch to the root edgebench folder to properly launch the docker images
    chdir(rootdir)

    # Initialize the app counter
    k = 1
    # Loop through the 4 applications
    for i in range(0, 4):
        # Loop through the number of instances for each application
        for j in range(combo[i]):
            # Launch the docker image through a python subprocess
            Popen(['./entrypoint.sh', platform, 'listen', apps[i], str(k)], stdout=DEVNULL)
            # Add a new entry in the state matrix
            state_matrix.loc[k] = [ apps[i], 'idle', 0 ]
            k += 1

    # Switch back to the working directory
    chdir(workdir)
    # Store the updated state matrix
    write_matrix(state_matrix, workdir + '/state_matrix.pkl')

    # Use generator function for progress bar (see shared.py)
    stage = 0  # progress bar stage
    gen = print_progress_bar()

    # inf loop, exit with CTRL+C
    while True:
        # Print logos, matrices and other information
        sleep(0.1)
        system('clear')
        print_logo('edgebench')
        print_logo('custodian', -1, 'PURPLE')
        print('')
        print('')
        print('\t ⚒ TASKS ⚒')
        print('')
        print(tabulate(task_matrix.drop(['Start Timestamp', 'Predicted Duration'], 1), ['z', 'App'], tablefmt='fancy_grid'))
        print('')
        print('')
        print('\t ⛯ STATES ⛯')
        print('')
        print(tabulate(state_matrix, ['ID', 'App', 'State', 'z'], tablefmt='fancy_grid'))
        print('')
        print('')
        next(gen)
        print('')

        #################################
        ### Check 1 : Completed Tasks ###
        #################################
        # Look through every machine instance
        for index, row in state_matrix.iterrows():
            # Check if instance is labeled as running, but the indicator file is gone
            if row['State'] == 'running' and not path.isfile(workdir + '/app_' + str(index) + '/exec.tmp'):
                # Grab finish timestamp
                et = round(time(), 3)
                # Get ID of task
                z = row['z']

                # Update the state matrix
                state_matrix.at[index, 'State'] = 'idle'
                state_matrix.at[index, 'z'] = 0
                write_matrix(state_matrix, workdir + '/state_matrix.pkl')

                # Drop the task row from the task matrix
                task_matrix = read_matrix(workdir + '/task_matrix.pkl')
                st = task_matrix.loc[z, 'Start Timestamp']
                # NOTE(review): renamed from `pd`, which shadowed the pandas import.
                pred_dur = task_matrix.loc[z, 'Predicted Duration']
                task_matrix.drop(z, axis=0, inplace=True)

                current_tasks = len(task_matrix)
                tasks_weighted = []
                for i in range(4):
                    tasks_weighted.append(sum_mask_numpy(task_matrix, i))

                # Update the rest of the tasks
                for index_tm, row_tm in task_matrix.iterrows():
                    w_tm = row_tm['w']
                    # Calculate times
                    done_t = time() - row_tm['Start Timestamp']
                    total_t = row_tm['Predicted Duration']
                    remaining_percentage = 1 - ( done_t / total_t )
                    # Calculate remaining time and total predicted duration.
                    # NOTE(review): originally called calculate_time(w, ...);
                    # `w` is the type of the *new* task from Check 2 and is
                    # undefined on the first pass -- each remaining task must
                    # be re-estimated with its own type `w_tm`.
                    remaining_t = calculate_time(int(w_tm), current_tasks, tasks_weighted, remaining_percentage)
                    duration = done_t + remaining_t
                    task_matrix.at[index_tm, 'Predicted Duration'] = duration

                # Store the updated task matrix
                write_matrix(task_matrix, workdir + '/task_matrix.pkl')

                # ✍ Log: Execution
                # (Task ID, Execution Start Timestamp, Execution Finish Timestamp, Predicted Duration)
                payload = make_payload(z, st, et, pred_dur)
                publish.single('edgebench/log/execution', payload, qos=1, hostname=broker)

        ###########################
        ### Check 2 : New Tasks ###
        ###########################
        # Look for queue files in workdir
        new_tasks = glob('*.que')
        if len(new_tasks) > 0:
            # Create new_task list with task information
            new_task = new_tasks[0][:-4].split(',')
            # Grab task information
            z = int(new_task[0])
            w = int(new_task[1])
            D = int(new_task[2])

            # Check state matrix to find available machine instance
            for index, row in state_matrix.iterrows():
                if row['App'] == apps[w] and row['State'] == 'idle':
                    task_matrix = read_matrix(workdir + '/task_matrix.pkl')
                    # Delete queue file
                    remove(new_tasks[0])

                    # Create task table with weights
                    current_tasks = len(task_matrix)
                    tasks_weighted = []
                    for i in range(4):
                        tasks_weighted.append(sum_mask_numpy(task_matrix, i))
                    tasks_weighted[w] = tasks_weighted[w] + 1

                    # Update the rest of the tasks
                    for index_tm, row_tm in task_matrix.iterrows():
                        w_tm = row_tm['w']
                        # Calculate times
                        done_t = time() - row_tm['Start Timestamp']
                        total_t = row_tm['Predicted Duration']
                        # NOTE(review): was `1 + (done_t / total_t)`; the
                        # fraction of work *remaining* is `1 -`, matching the
                        # identical computation in Check 1.
                        remaining_per = 1 - ( done_t / total_t )
                        # Calculate remaining time and total predicted duration
                        remaining_t = calculate_time(int(w_tm), current_tasks + 1, tasks_weighted, remaining_per)
                        duration = round(done_t + remaining_t, 2)
                        task_matrix.at[index_tm, 'Predicted Duration'] = duration

                    # Calculate predicted duration
                    duration = calculate_time(w, current_tasks + 1, tasks_weighted)

                    # Add new row with new task information in task_matrix
                    st = round(time(), 3)
                    task_matrix.loc[z] = [ w, D, st, duration ]
                    write_matrix(task_matrix, workdir + '/task_matrix.pkl')

                    # Grab task name and appropriate payload
                    task_name, task_payload = categorize_task(w)

                    # Update state matrix
                    state_matrix.at[index, 'State'] = 'running'
                    state_matrix.at[index, 'z'] = z
                    write_matrix(state_matrix, workdir + '/state_matrix.pkl')
                    machine = index

                    # Start task
                    copy(payloaddir + '/' + task_name + '/' + task_payload,
                         workdir + '/app_' + str(machine) + '/' + task_payload)
                    break
| # Edgebench Platform
# Worker Scripts
# Custodian Module
#
# Starts, monitors and maintains a combination of applications
#
# Data Structures:
# Pandas DataFrames:
# task_matrix: [ 'z', 'w', 'D', 'Start Timestamp', 'Predicted Duration' ]
# Types: [ int, int, int, flt, flt ]
# Stores information for tasks currently running
#
# state_matrix: [ 'ID', 'App', 'State', 'z' ]
# Types: [ int, str, str, int ]
# Stores information for the state of machine instances
#
# Queue File: z,w,D.que
# Information: Task ID, Task Type, Task Deadline
import pandas as pd
import pickle as pkl
from glob import glob
from os import chdir, path, remove, system
from paho.mqtt import publish
from shutil import copy
from subprocess import Popen, DEVNULL
from sys import argv
from tabulate import tabulate
from time import sleep, time
from config import *
from shared import *
# Print help message
if len(argv) != 1:
print('Please provide the proper arguments!')
print('')
print('Usage: python3 custodian.py <platform> <app combo>')
print(' where <app combo> is the number of instances')
print(' of every app, in the form of a,b,c,d')
print('')
# Initializer
# args: custodian.py <platform> <app combo>
elif len(argv) == 3:
# Grab the platform & set the app profile
platform = argv[1]
# Grab the application a,b,c,d combination, turn it into a list
combo = list(map(int, argv[2].split(',')))
# Initialize the task matrix & store it
remove_matrix(workdir + '/task_matrix.pkl')
task_matrix = pd.DataFrame(columns=['z', 'w', 'D', 'Start Timestamp', 'Predicted Duration'])
task_matrix.set_index('z', inplace=True)
write_matrix(task_matrix, workdir + '/task_matrix.pkl')
# Initialize the state matrix & store it too
state_matrix = pd.DataFrame(columns=['ID', 'App', 'State', 'z'])
state_matrix.set_index('ID', inplace=True)
state_matrix = state_matrix.astype(str)
remove_matrix(workdir + '/state_matrix.pkl')
# Delete any leftover queue files
for f in glob(workdir + '/*.que'):
remove(f)
# Switch to the root edgebench folder to properly launch the docker images
chdir(rootdir)
# Initialize the app counter
k = 1
# Loop through the 4 applications
for i in range(0, 4):
# Loop through the number of instances for each application
for j in range(combo[i]):
# Launch the docker image through a python subprocess
Popen(['./entrypoint.sh', platform, 'listen', apps[i], str(k)], stdout=DEVNULL)
# Add a new entry in the state matrix
state_matrix.loc[k] = [ apps[i], 'idle', 0 ]
k += 1
# Switch back to the working directory
chdir(workdir)
# Store the updated state matrix
write_matrix(state_matrix, workdir + '/state_matrix.pkl')
# Use generator function for progress bar (see shared.py)
stage = 0 # progress bar stage
gen = print_progress_bar()
# inf loop, exit with CTRL+C
while True:
# Print logos, matrices and other information
sleep(0.1)
system('clear')
print_logo('edgebench')
print_logo('custodian', -1, 'PURPLE')
print('')
print('')
print('\t ⚒ TASKS ⚒')
print('')
print(tabulate(task_matrix.drop(['Start Timestamp', 'Predicted Duration'], 1), ['z', 'App'], tablefmt='fancy_grid'))
print('')
print('')
print('\t ⛯ STATES ⛯')
print('')
print(tabulate(state_matrix, ['ID', 'App', 'State', 'z'], tablefmt='fancy_grid'))
print('')
print('')
next(gen)
print('')
#################################
### Check 1 : Completed Tasks ###
#################################
# Look through every machine instance
for index, row in state_matrix.iterrows():
# Check if instance is labeled as running, but the indicator file is gone
if row['State'] == 'running' and not path.isfile(workdir + '/app_' + str(index) + '/exec.tmp'):
# Grab finish timestamp
et = round(time(), 3)
# Get ID of task
z = row['z']
# Update the state matrix
state_matrix.at[index, 'State'] = 'idle'
state_matrix.at[index, 'z'] = 0
write_matrix(state_matrix, workdir + '/state_matrix.pkl')
# Drop the task row from the task matrix
task_matrix = read_matrix(workdir + '/task_matrix.pkl')
st = task_matrix.loc[z, 'Start Timestamp']
pd = task_matrix.loc[z, 'Predicted Duration']
task_matrix.drop(z, axis=0, inplace=True)
current_tasks = len(task_matrix)
tasks_weighted = []
for i in range(4):
tasks_weighted.append(sum_mask_numpy(task_matrix, i))
# Update the rest of the tasks
for index_tm, row_tm in task_matrix.iterrows():
w_tm = row_tm['w']
# Calculate times
done_t = time() - row_tm['Start Timestamp']
total_t = row_tm['Predicted Duration']
remaining_percentage = 1 - ( done_t / total_t )
# Calculate remaining time and total predicted duration
remaining_t = calculate_time(w, current_tasks, tasks_weighted, remaining_percentage)
duration = done_t + remaining_t
task_matrix.at[index_tm, 'Predicted Duration'] = duration
# Store the updated task matrix
write_matrix(task_matrix, workdir + '/task_matrix.pkl')
# ✍ Log: Execution
# (Task ID, Execution Start Timestamp, Execution Finish Timestamp, Predicted Duration)
payload = make_payload(z, st, et, pd)
publish.single('edgebench/log/execution', payload, qos=1, hostname=broker)
###########################
### Check 2 : New Tasks ###
###########################
# Look for queue files in workdir
new_tasks = glob('*.que')
if len(new_tasks) > 0:
# Create new_task list with task information
new_task = new_tasks[0][:-4].split(',')
# Grab task information
z = int(new_task[0])
w = int(new_task[1])
D = int(new_task[2])
# Check state matrix to find available machine instance
for index, row in state_matrix.iterrows():
if row['App'] == apps[w] and row['State'] == 'idle':
task_matrix = read_matrix(workdir + '/task_matrix.pkl')
# Delete queue file
remove(new_tasks[0])
# Create task table with weights
current_tasks = len(task_matrix)
tasks_weighted = []
for i in range(4):
tasks_weighted.append(sum_mask_numpy(task_matrix, i))
tasks_weighted[w] = tasks_weighted[w] + 1
# Update the rest of the tasks
for index_tm, row_tm in task_matrix.iterrows():
w_tm = row_tm['w']
# Calculate times
done_t = time() - row_tm['Start Timestamp']
total_t = row_tm['Predicted Duration']
remaining_per = 1 + ( done_t / total_t )
# Calculate remaining time and total predicted duration
remaining_t = calculate_time(int(w_tm), current_tasks + 1, tasks_weighted, remaining_per)
duration = round(done_t + remaining_t, 2)
task_matrix.at[index_tm, 'Predicted Duration'] = duration
# Calculate predicted duration
duration = calculate_time(w, current_tasks + 1, tasks_weighted)
# Add new row with new task information in task_matrix
st = round(time(), 3)
task_matrix.loc[z] = [ w, D, st, duration ]
write_matrix(task_matrix, workdir + '/task_matrix.pkl')
# Grab task name and appropriate payload
task_name, task_payload = categorize_task(w)
# Update state matrix
state_matrix.at[index, 'State'] = 'running'
state_matrix.at[index, 'z'] = z
write_matrix(state_matrix, workdir + '/state_matrix.pkl')
machine = index
# Start task
copy(payloaddir + '/' + task_name + '/' + task_payload,
workdir + '/app_' + str(machine) + '/' + task_payload)
break | en | 0.688526 | # Edgebench Platform # Worker Scripts # Custodian Module # # Starts, monitors and maintains a combination of applications # # Data Structures: # Pandas DataFrames: # task_matrix: [ 'z', 'w', 'D', 'Start Timestamp', 'Predicted Duration' ] # Types: [ int, int, int, flt, flt ] # Stores information for tasks currently running # # state_matrix: [ 'ID', 'App', 'State', 'z' ] # Types: [ int, str, str, int ] # Stores information for the state of machine instances # # Queue File: z,w,D.que # Information: Task ID, Task Type, Task Deadline # Print help message # Initializer # args: custodian.py <platform> <app combo> # Grab the platform & set the app profile # Grab the application a,b,c,d combination, turn it into a list # Initialize the task matrix & store it # Initialize the state matrix & store it too # Delete any leftover queue files # Switch to the root edgebench folder to properly launch the docker images # Initialize the app counter # Loop through the 4 applications # Loop through the number of instances for each application # Launch the docker image through a python subprocess # Add a new entry in the state matrix # Switch back to the working directory # Store the updated state matrix # Use generator function for progress bar (see shared.py) # progress bar stage # inf loop, exit with CTRL+C # Print logos, matrices and other information ################################# ### Check 1 : Completed Tasks ### ################################# # Look through every machine instance # Check if instance is labeled as running, but the indicator file is gone # Grab finish timestamp # Get ID of task # Update the state matrix # Drop the task row from the task matrix # Update the rest of the tasks # Calculate times # Calculate remaining time and total predicted duration # Store the updated task matrix # ✍ Log: Execution # (Task ID, Execution Start Timestamp, Execution Finish Timestamp, Predicted Duration) ########################### ### Check 2 : New Tasks ### 
########################### # Look for queue files in workdir # Create new_task list with task information # Grab task information # Check state matrix to find available machine instance # Delete queue file # Create task table with weights # Update the rest of the tasks # Calculate times # Calculate remaining time and total predicted duration # Calculate predicted duration # Add new row with new task information in task_matrix # Grab task name and appropriate payload # Update state matrix # Start task | 2.319859 | 2 |
rxnebm/proposer/gln_openretro/test.py | coleygroup/rxn-ebm | 5 | 6621270 | <gh_stars>1-10
import argparse
import logging
import os
import sys
from datetime import datetime
from gln.common.cmd_args import cmd_args as gln_args
from models.gln_model.gln_tester import GLNTester
try:
from models.transformer_model.transformer_tester import TransformerTester
from onmt.bin.translate import _get_parser
except Exception as e:
print(e)
from rdkit import RDLogger
def parse_args():
    """Build the CLI parser and return ``(known_args, unknown_args)``.

    Uses ``parse_known_args()`` so flags handled by model-specific parsers
    elsewhere (e.g. OpenNMT's own parser) pass through untouched.
    """
    parser = argparse.ArgumentParser("test.py")
    parser.add_argument("--test_all_ckpts", help="whether to test all checkpoints",
                        action="store_true")
    # Every remaining option is a plain string flag defaulting to "".
    string_options = [
        ("--model_name", "model name"),
        ("--data_name", "name of dataset, for easier reference"),
        ("--log_file", "log file"),
        ("--config_file", "model config file (optional)"),
        ("--train_file", "train SMILES file"),
        ("--val_file", "validation SMILES files"),
        ("--test_file", "test SMILES files"),
        ("--processed_data_path", "output path for processed data"),
        ("--model_path", "model output path"),
        ("--test_output_path", "test output path"),
    ]
    for flag, help_text in string_options:
        parser.add_argument(flag, help=help_text, type=str, default="")
    return parser.parse_known_args()
def test_main(args):
    """Simplified interface for testing only.

    For actual downstream usage, use the respective proposer class instead.
    Builds the tester matching ``args.model_name``, runs it, then exits the
    process.
    """
    os.makedirs(args.test_output_path, exist_ok=True)

    if args.model_name == "gln":
        # Overwrite the default gln_args with the runtime flag.
        gln_args.test_all_ckpts = args.test_all_ckpts
        tester = GLNTester(
            model_name="gln",
            model_args=gln_args,
            model_config={},
            data_name=args.data_name,
            raw_data_files=[args.train_file, args.val_file, args.test_file],
            processed_data_path=args.processed_data_path,
            model_path=args.model_path,
            test_output_path=args.test_output_path,
        )
    elif args.model_name == "transformer":
        # Adapted from onmt.bin.translate.main(): reuse OpenNMT's own parser,
        # then patch in the runtime arguments.
        opt, _unknown = _get_parser().parse_known_args()
        opt.config = args.config_file
        opt.log_file = args.log_file
        tester = TransformerTester(
            model_name="transformer",
            model_args=opt,
            model_config={},
            data_name=args.data_name,
            raw_data_files=[],
            processed_data_path=args.processed_data_path,
            model_path=args.model_path,
            test_output_path=args.test_output_path,
        )
    else:
        raise ValueError(f"Model {args.model_name} not supported!")

    logging.info("Start testing")
    tester.test()
    logging.info('Finished testing')

    sys.exit()
if __name__ == "__main__":
    args, unknown = parse_args()

    # Logger setup: timestamped file under ./logs/test plus stdout echo.
    RDLogger.DisableLog("rdApp.warning")
    os.makedirs("./logs/test", exist_ok=True)
    timestamp = datetime.strftime(datetime.now(), "%y%m%d-%H%Mh")
    args.log_file = f"./logs/test/{args.log_file}.{timestamp}"

    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    file_handler = logging.FileHandler(args.log_file)
    file_handler.setLevel(logging.INFO)
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setLevel(logging.INFO)
    logger.addHandler(file_handler)
    logger.addHandler(stream_handler)

    # test interface
    test_main(args)
| import argparse
import logging
import os
import sys
from datetime import datetime
from gln.common.cmd_args import cmd_args as gln_args
from models.gln_model.gln_tester import GLNTester
try:
from models.transformer_model.transformer_tester import TransformerTester
from onmt.bin.translate import _get_parser
except Exception as e:
print(e)
from rdkit import RDLogger
def parse_args():
parser = argparse.ArgumentParser("test.py")
parser.add_argument("--test_all_ckpts", help="whether to test all checkpoints", action="store_true")
parser.add_argument("--model_name", help="model name", type=str, default="")
parser.add_argument("--data_name", help="name of dataset, for easier reference", type=str, default="")
parser.add_argument("--log_file", help="log file", type=str, default="")
parser.add_argument("--config_file", help="model config file (optional)", type=str, default="")
parser.add_argument("--train_file", help="train SMILES file", type=str, default="")
parser.add_argument("--val_file", help="validation SMILES files", type=str, default="")
parser.add_argument("--test_file", help="test SMILES files", type=str, default="")
parser.add_argument("--processed_data_path", help="output path for processed data", type=str, default="")
parser.add_argument("--model_path", help="model output path", type=str, default="")
parser.add_argument("--test_output_path", help="test output path", type=str, default="")
return parser.parse_known_args()
def test_main(args):
"""Simplified interface for testing only. For actual usage downstream use the respective proposer class"""
os.makedirs(args.test_output_path, exist_ok=True)
if args.model_name == "gln":
# Overwrite default gln_args with runtime args
gln_args.test_all_ckpts = args.test_all_ckpts
tester = GLNTester(
model_name="gln",
model_args=gln_args,
model_config={},
data_name=args.data_name,
raw_data_files=[args.train_file, args.val_file, args.test_file],
processed_data_path=args.processed_data_path,
model_path=args.model_path,
test_output_path=args.test_output_path
)
elif args.model_name == "transformer":
# adapted from onmt.bin.translate.main()
parser = _get_parser()
opt, _unknown = parser.parse_known_args()
# update runtime args
opt.config = args.config_file
opt.log_file = args.log_file
tester = TransformerTester(
model_name="transformer",
model_args=opt,
model_config={},
data_name=args.data_name,
raw_data_files=[],
processed_data_path=args.processed_data_path,
model_path=args.model_path,
test_output_path=args.test_output_path
)
else:
raise ValueError(f"Model {args.model_name} not supported!")
logging.info("Start testing")
tester.test()
logging.info('Finished testing')
sys.exit()
if __name__ == "__main__":
args, unknown = parse_args()
# logger setup
RDLogger.DisableLog("rdApp.warning")
os.makedirs("./logs/test", exist_ok=True)
dt = datetime.strftime(datetime.now(), "%y%m%d-%H%Mh")
args.log_file = f"./logs/test/{args.log_file}.{dt}"
logger = logging.getLogger()
logger.setLevel(logging.INFO)
fh = logging.FileHandler(args.log_file)
fh.setLevel(logging.INFO)
sh = logging.StreamHandler(sys.stdout)
sh.setLevel(logging.INFO)
logger.addHandler(fh)
logger.addHandler(sh)
# test interface
test_main(args) | en | 0.544068 | Simplified interface for testing only. For actual usage downstream use the respective proposer class # Overwrite default gln_args with runtime args # adapted from onmt.bin.translate.main() # update runtime args # logger setup # test interface | 2.334249 | 2 |
bin/python/metrices_animation.py | liran121211/NeuralNetwork-From-Scratch-Java | 0 | 6621271 | import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
import random
import pandas as pd
def animate(i, data_1, data_2, line1_fig, line2_fig):
    """FuncAnimation callback: reveal the first ``i + 1`` rows of each series.

    Both lines read the ``values`` column of their DataFrame; returns the
    updated artists so blitting works.
    """
    upto = int(i + 1)
    visible_1 = data_1.iloc[:upto]
    visible_2 = data_2.iloc[:upto]
    line1_fig.set_data(visible_1.index, visible_1['values'])
    line2_fig.set_data(visible_2.index, visible_2['values'])
    return (line1_fig, line2_fig)
def create_animation(model_type, data_1, data_2):
    """Plot both metric series and animate them growing epoch by epoch.

    ``model_type`` is accepted for interface compatibility but not used.
    Blocks until the matplotlib window is closed.
    """
    fig = plt.figure()
    plt.title('Accuracy & Loss', fontsize=15)       # main title
    plt.xlabel('Epochs', fontsize=20)               # x-axis label
    plt.ylabel('Loss VS Accuracy', fontsize=15)     # y-axis label
    # Axis limits span the union of both series.
    x_lo = min(data_1.index.min(), data_2.index.min())
    x_hi = max(data_1.index.max(), data_2.index.max())
    y_lo = min(data_1.values.min(), data_2.values.min())
    y_hi = max(data_1.values.max(), data_2.values.max())
    plt.xlim(x_lo, x_hi)
    plt.ylim(y_lo, y_hi)
    line_acc, = plt.plot([], [], 'o-', label='Train Accuracy', color='b', markevery=[-1])
    line_loss, = plt.plot([], [], 'o-', label='Train Loss', color='r', markevery=[-1])
    plt.legend(loc='center right', fontsize='medium')
    # Keep a reference to the animation so it is not garbage-collected.
    ani = animation.FuncAnimation(fig, animate, fargs=(data_1, data_2, line_acc, line_loss),
                                  repeat=True, interval=50, repeat_delay=50)
    plt.show()
# create datasets
def init():
    """Load the accuracy/loss CSV logs and launch the animation.

    Exits with status -1 if either log file is missing.
    """
    try:
        accuracy_log = pd.read_csv('bin\\metrices\\accuracy_logs.csv')
        loss_log = pd.read_csv('bin\\metrices\\loss_logs.csv')
    except FileNotFoundError:
        print("Animation Failed!, Files are missing...")
        exit(-1)
    # Normalize both frames back to a fresh RangeIndex.
    for frame in (accuracy_log, loss_log):
        frame.reset_index(inplace=True)
        frame.drop('index', axis=1, inplace=True)
    create_animation('test', accuracy_log, loss_log)
if __name__ == "__main__":
init() | import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
import random
import pandas as pd
def animate(i, data_1, data_2, line1_fig, line2_fig):
temp1 = data_1.iloc[:int(i+1)]
temp2 = data_2.iloc[:int(i+1)]
line1_fig.set_data(temp1.index, temp1['values']) # (values) column
line2_fig.set_data(temp2.index, temp2['values']) # (values) column
return (line1_fig, line2_fig)
def create_animation(model_type, data_1, data_2):
fig = plt.figure() # init fig
plt.title(f'Accuracy & Loss', fontsize=15) # Main Title
plt.xlabel('Epochs', fontsize=20) # Bottom Title
plt.ylabel('Loss VS Accuracy', fontsize=15) # Y Label
plt.xlim(min(data_1.index.min(), data_2.index.min()), max(data_1.index.max(), data_2.index.max())) # set min-max range of x-axis
plt.ylim(min(data_1.values.min(), data_2.values.min()), max(data_1.values.max(), data_2.values.max())) # set min-max range of y-axis
l1_fig, = plt.plot([], [], 'o-', label='Train Accuracy', color='b', markevery=[-1])
l2_fig, = plt.plot([], [], 'o-', label='Train Loss', color='r', markevery=[-1])
plt.legend(loc='center right', fontsize='medium')
ani = animation.FuncAnimation(fig, animate, fargs=(data_1, data_2, l1_fig, l2_fig), repeat=True, interval=50, repeat_delay=50)
plt.show()
# create datasets
def init():
try:
data_1 = pd.read_csv('bin\\metrices\\accuracy_logs.csv')
data_2 = pd.read_csv('bin\\metrices\\loss_logs.csv')
except FileNotFoundError:
print("Animation Failed!, Files are missing...")
exit(-1)
data_1.reset_index(inplace=True)
data_1.drop('index', axis=1, inplace=True)
data_2.reset_index(inplace=True)
data_2.drop('index', axis=1, inplace=True)
create_animation('test', data_1, data_2)
if __name__ == "__main__":
init() | en | 0.399169 | # (values) column # (values) column # init fig # Main Title # Bottom Title # Y Label # set min-max range of x-axis # set min-max range of y-axis # create datasets | 3.379778 | 3 |
src/opencmiss/importer/base.py | OpenCMISS-Bindings/opencmiss.importer | 0 | 6621272 | import os.path
def valid(inputs, description):
    """Check that *inputs* matches *description*.

    ``inputs`` is either a single value or a list of values; ``description``
    mirrors that shape.  Any entry whose description declares a ``mimetype``
    key is expected to be a path to an existing file.

    Returns True when every file-backed input exists (and, for lists, the
    shapes line up), False otherwise.
    """
    if isinstance(inputs, list):
        # A list of inputs requires a matching list of descriptions.
        if not isinstance(description, list):
            return False
        if len(inputs) != len(description):
            return False
        # Pair each input with its description entry.
        for entry, desc in zip(inputs, description):
            if "mimetype" in desc and not os.path.isfile(entry):
                return False
    elif "mimetype" in description and not os.path.isfile(inputs):
        return False
    return True
| import os.path
def valid(inputs, description):
if type(inputs) == list:
if type(inputs) != type(description):
return False
if len(inputs) != len(description):
return False
for index, input_ in enumerate(inputs):
if "mimetype" in description[index]:
if not os.path.isfile(input_):
return False
else:
if "mimetype" in description:
if not os.path.isfile(inputs):
return False
return True
| none | 1 | 2.80774 | 3 | |
tests/__init__.py | kueda/underfoot | 4 | 6621273 | <gh_stars>1-10
# Does this need to be a module?
| # Does this need to be a module? | en | 0.871671 | # Does this need to be a module? | 1.072006 | 1 |
partlist.py | insomniacslk/partlist | 0 | 6621274 | <filename>partlist.py
#!/usr/bin/env python
# Author: <NAME> <<EMAIL>>
# License: 3-clause BSD
# This simple script fetches a partition list and saves it as JSON and as a C
# function.
# Usage: ./partitions.py
#
import re
import json
import urllib
import collections
partitions_url = 'http://www.win.tue.nl/~aeb/partitions/partition_types-1.html'
partlist_url = 'https://github.com/insomniacslk/partlist'
rx = re.compile(r'^<DT><B>(?P<code>[0-9a-f]{2}) (?P<name>.+)</B><DD>$')
def fetch_partitions():
    """Download the raw HTML partition-type table and return it as a string."""
    print('Fetching {}'.format(partitions_url))
    response = urllib.urlopen(partitions_url)
    return response.read()
def parse_partitions(data):
    """Parse the HTML table dump into ``{code: [name, ...]}``.

    Each relevant line looks like ``<DT><B>0b W95 FAT32</B><DD>``; the
    two-digit hex code becomes an int key and the name is appended, since a
    single partition code can have several known uses.
    """
    print('Parsing partitions')
    # Compiled locally so the function does not depend on module-level state.
    entry_rx = re.compile(r'^<DT><B>(?P<code>[0-9a-f]{2}) (?P<name>.+)</B><DD>$')
    partitions = collections.defaultdict(list)
    for line in data.splitlines():
        match = entry_rx.match(line)
        if match:
            partitions[int(match.group('code'), 16)].append(match.group('name'))
    return partitions
def simple_quote(s):
    """Backslash-escape double and single quotes for embedding in C source."""
    escaped = s.replace('"', '\\"')
    return escaped.replace("'", "\\'")
def to_json(partitions):
    """Dump the partition map to ``partitions.json`` in the working directory."""
    with open('partitions.json', 'w') as out:
        json.dump(partitions, out, indent=4)
    print('Saved to partitions.json')
def to_c(partitions):
    """Write ``partitions.c`` containing a ``get_partition_type()`` function.

    The generated C function maps a one-byte partition id to a human-readable
    name; ids with several known uses get the names joined with commas.
    """
    with open('partitions.c', 'w') as fd:
        fd.write('/* Generated with partlist <{url}> */\n'.format(
            url=partlist_url))
        fd.write('/* Original data source: {url} */\n'.format(
            url=partitions_url))
        fd.write('const char *get_partition_type(unsigned char ptype) {\n')
        fd.write('\n')
        fd.write(' switch (ptype) {\n')
        # .items() instead of the Python-2-only .iteritems(), so the script
        # also works under Python 3 (identical behavior on Python 2).
        for part_id, part_names in partitions.items():
            joined_names = simple_quote(', '.join(part_names))
            fd.write(' case {part_id}:\n'.format(part_id=part_id))
            fd.write(' return "{part_names}";\n'.format(
                part_names=joined_names))
        fd.write(' default:\n')
        fd.write(' return "Unknown partition type";\n')
        fd.write(' }\n')
        fd.write('}\n')
    print('Saved to partitions.c')
def main():
    """Fetch, parse, and export the partition table in both formats."""
    raw_html = fetch_partitions()
    partitions = parse_partitions(raw_html)
    to_json(partitions)
    to_c(partitions)


if __name__ == '__main__':
    main()
| <filename>partlist.py
#!/usr/bin/env python
# Author: <NAME> <<EMAIL>>
# License: 3-clause BSD
# This simple script fetches a partition list and saves it as JSON and as a C
# function.
# Usage: ./partitions.py
#
import re
import json
import urllib
import collections
partitions_url = 'http://www.win.tue.nl/~aeb/partitions/partition_types-1.html'
partlist_url = 'https://github.com/insomniacslk/partlist'
rx = re.compile(r'^<DT><B>(?P<code>[0-9a-f]{2}) (?P<name>.+)</B><DD>$')
def fetch_partitions():
print('Fetching {}'.format(partitions_url))
return urllib.urlopen(partitions_url).read()
def parse_partitions(data):
print('Parsing partitions')
partitions = collections.defaultdict(list)
for line in data.splitlines():
match = rx.match(line)
if match:
mdict = match.groupdict()
code = int(mdict['code'], 16)
name = mdict['name']
partitions[code].append(name)
return partitions
def simple_quote(s):
return s.replace('"', '\\"').replace("'", "\\'")
def to_json(partitions):
with open('partitions.json', 'w') as fd:
json.dump(partitions, fd, indent=4)
print('Saved to partitions.json')
def to_c(partitions):
with open('partitions.c', 'w') as fd:
fd.write('/* Generated with partlist <{url}> */\n'.format(
url=partlist_url))
fd.write('/* Original data source: {url} */\n'.format(
url=partitions_url))
fd.write('const char *get_partition_type(unsigned char ptype) {\n')
fd.write('\n')
fd.write(' switch (ptype) {\n')
for part_id, part_names in partitions.iteritems():
part_names = simple_quote(', '.join(part_names))
fd.write(' case {part_id}:\n'.format(part_id=part_id))
fd.write(' return "{part_names}";\n'.format(
part_names=part_names))
fd.write(' default:\n')
fd.write(' return "Unknown partition type";\n')
fd.write(' }\n')
fd.write('}\n')
print('Saved to partitions.c')
def main():
data = fetch_partitions()
partitions = parse_partitions(data)
to_json(partitions)
to_c(partitions)
if __name__ == '__main__':
main()
| en | 0.752641 | #!/usr/bin/env python # Author: <NAME> <<EMAIL>> # License: 3-clause BSD # This simple script fetches a partition list and saves it as JSON and as a C # function. # Usage: ./partitions.py # | 2.735548 | 3 |
app/external_systems/identification_system.py | tamayonauta/contact-directory | 0 | 6621275 | from .data import PERSONAL_DATA
class IdentificationSystem:
    """Facade over the external identification records (``PERSONAL_DATA``)."""

    @classmethod
    def get_personal_data(cls, data):
        """Return the personal-data record matching ``data``, or None.

        ``data`` must be a non-empty mapping containing an ``id_number`` key;
        anything else yields None.
        """
        return cls._get_personal_data(data)

    @classmethod
    def _get_personal_data(cls, data):
        # Reject empty payloads and payloads without an id_number.
        if not data or "id_number" not in data:
            return None
        # First record with a matching id_number wins; None when absent.
        return next(
            (record for record in PERSONAL_DATA
             if record['id_number'] == data['id_number']),
            None,
        )
| from .data import PERSONAL_DATA
class IdentificationSystem:
@classmethod
def get_personal_data(cls, data):
personal_data = cls._get_personal_data(data)
return personal_data
@classmethod
def _get_personal_data(cls, data):
if not len(data) or "id_number" not in data:
return None
for personal_data in PERSONAL_DATA:
if personal_data['id_number'] == data['id_number']:
return personal_data
return None
| none | 1 | 3.011604 | 3 | |
tuxtvicons.py | i026e/Python-playlist-editor | 0 | 6621276 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 27 09:56:31 2016
@author: pavel
"""
import os
from sys import argv
import mconfig as conf
OUTPUT_FILE = "add.txt" #/usr/share/freetuxtv/tv_channels.xml
pattern = """
<tvchannel name="{channel_name}">
<logo_filename>{logo_name}</logo_filename>
</tvchannel>
"""
def get_icons(folder):
icons = {}
for f_name in os.listdir(folder):
path = os.path.join(folder, f_name)
if os.path.isfile(path):
channel_name = os.path.splitext( os.path.basename(f_name))[0]
icons[channel_name] = f_name
return icons
def main(*args):
icons = get_icons(conf.ICONS_FOLDER)
with open(OUTPUT_FILE, "w") as output:
for channel_name, f_name in icons.items():
s = pattern.format(channel_name=channel_name, logo_name = f_name)
output.write(s)
print(args)
if __name__ == "__main__":
# execute only if run as a script
main(argv)
| #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 27 09:56:31 2016
@author: pavel
"""
import os
from sys import argv
import mconfig as conf
OUTPUT_FILE = "add.txt" #/usr/share/freetuxtv/tv_channels.xml
pattern = """
<tvchannel name="{channel_name}">
<logo_filename>{logo_name}</logo_filename>
</tvchannel>
"""
def get_icons(folder):
icons = {}
for f_name in os.listdir(folder):
path = os.path.join(folder, f_name)
if os.path.isfile(path):
channel_name = os.path.splitext( os.path.basename(f_name))[0]
icons[channel_name] = f_name
return icons
def main(*args):
icons = get_icons(conf.ICONS_FOLDER)
with open(OUTPUT_FILE, "w") as output:
for channel_name, f_name in icons.items():
s = pattern.format(channel_name=channel_name, logo_name = f_name)
output.write(s)
print(args)
if __name__ == "__main__":
# execute only if run as a script
main(argv)
| en | 0.53707 | #!/usr/bin/python3 # -*- coding: utf-8 -*- Created on Wed Apr 27 09:56:31 2016 @author: pavel #/usr/share/freetuxtv/tv_channels.xml <tvchannel name="{channel_name}"> <logo_filename>{logo_name}</logo_filename> </tvchannel> # execute only if run as a script | 2.873078 | 3 |
hello.py | feat7/machine-learning-hello-world | 0 | 6621277 | # Load libraries
import pandas
from pandas.tools.plotting import scatter_matrix
import matplotlib.pyplot as plt
from sklearn import model_selection
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
# Load dataset
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
dataset = pandas.read_csv(url, names=names)
# Docs for pandas.read_csv
# url -> csv file = buffer/file path
# names -> list of columns to be used (set their names)
# retruns -> DataFrame or TextParser
# shape
# print(dataset.shape)
# returns -> tuple representing the dimension of the DataFrame
# head
# print(dataset.head(20))
# return -> first n rows
# descriptions
# print(dataset.describe())
# returns -> describes the DataFrame except NaN values (use include='all' to get for NaN too)
# class distribution
# print(dataset.groupby('class').size())
# by='class' can be string, dict, etc. string of some column name can be passed here. (it is 'class in this particular code')
# returns -> GroupBy obj
# metplotlib stuff to draw graphs
# box and whisker plots
# dataset.plot(kind='box', subplots=True, layout=(2,2), sharex=False, sharey=False)
# plt.show()
# histograms
# dataset.hist()
# plt.show()
# scatter plot matrix
# scatter_matrix(dataset)
# plt.show()
# Split-out validation dataset
array = dataset.values
X = array[:,0:4]
Y = array[:,4]
validation_size = 0.20
seed = 7
X_train, X_validation, Y_train, Y_validation = model_selection.train_test_split(X, Y, test_size=validation_size, random_state=seed)
# Test options and evaluation metric
seed = 7
scoring = 'accuracy'
# Spot Check Algorithms
# models = []
# models.append(('LR', LogisticRegression()))
# models.append(('LDA', LinearDiscriminantAnalysis()))
# models.append(('KNN', KNeighborsClassifier()))
# models.append(('CART', DecisionTreeClassifier()))
# models.append(('NB', GaussianNB()))
# models.append(('SVM', SVC()))
# # evaluate each model in turn
# results = []
# names = []
# for name, model in models:
# kfold = model_selection.KFold(n_splits=10, random_state=seed)
# cv_results = model_selection.cross_val_score(model, X_train, Y_train, cv=kfold, scoring=scoring)
# results.append(cv_results)
# names.append(name)
# msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
# print(msg)
# Compare Algorithms
# fig = plt.figure()
# fig.suptitle('Algorithm Comparison')
# ax = fig.add_subplot(111)
# plt.boxplot(results)
# ax.set_xticklabels(names)
# plt.show()
# Make predictions on validation dataset
knn = KNeighborsClassifier()
knn.fit(X_train, Y_train)
predictions = knn.predict(X_validation)
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
| # Load libraries
import pandas
from pandas.tools.plotting import scatter_matrix
import matplotlib.pyplot as plt
from sklearn import model_selection
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
# Load dataset
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
dataset = pandas.read_csv(url, names=names)
# Docs for pandas.read_csv
# url -> csv file = buffer/file path
# names -> list of columns to be used (set their names)
# retruns -> DataFrame or TextParser
# shape
# print(dataset.shape)
# returns -> tuple representing the dimension of the DataFrame
# head
# print(dataset.head(20))
# return -> first n rows
# descriptions
# print(dataset.describe())
# returns -> describes the DataFrame except NaN values (use include='all' to get for NaN too)
# class distribution
# print(dataset.groupby('class').size())
# by='class' can be string, dict, etc. string of some column name can be passed here. (it is 'class in this particular code')
# returns -> GroupBy obj
# metplotlib stuff to draw graphs
# box and whisker plots
# dataset.plot(kind='box', subplots=True, layout=(2,2), sharex=False, sharey=False)
# plt.show()
# histograms
# dataset.hist()
# plt.show()
# scatter plot matrix
# scatter_matrix(dataset)
# plt.show()
# Split-out validation dataset
array = dataset.values
X = array[:,0:4]
Y = array[:,4]
validation_size = 0.20
seed = 7
X_train, X_validation, Y_train, Y_validation = model_selection.train_test_split(X, Y, test_size=validation_size, random_state=seed)
# Test options and evaluation metric
seed = 7
scoring = 'accuracy'
# Spot Check Algorithms
# models = []
# models.append(('LR', LogisticRegression()))
# models.append(('LDA', LinearDiscriminantAnalysis()))
# models.append(('KNN', KNeighborsClassifier()))
# models.append(('CART', DecisionTreeClassifier()))
# models.append(('NB', GaussianNB()))
# models.append(('SVM', SVC()))
# # evaluate each model in turn
# results = []
# names = []
# for name, model in models:
# kfold = model_selection.KFold(n_splits=10, random_state=seed)
# cv_results = model_selection.cross_val_score(model, X_train, Y_train, cv=kfold, scoring=scoring)
# results.append(cv_results)
# names.append(name)
# msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
# print(msg)
# Compare Algorithms
# fig = plt.figure()
# fig.suptitle('Algorithm Comparison')
# ax = fig.add_subplot(111)
# plt.boxplot(results)
# ax.set_xticklabels(names)
# plt.show()
# Make predictions on validation dataset
knn = KNeighborsClassifier()
knn.fit(X_train, Y_train)
predictions = knn.predict(X_validation)
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
| en | 0.421561 | # Load libraries # Load dataset # Docs for pandas.read_csv # url -> csv file = buffer/file path # names -> list of columns to be used (set their names) # retruns -> DataFrame or TextParser # shape # print(dataset.shape) # returns -> tuple representing the dimension of the DataFrame # head # print(dataset.head(20)) # return -> first n rows # descriptions # print(dataset.describe()) # returns -> describes the DataFrame except NaN values (use include='all' to get for NaN too) # class distribution # print(dataset.groupby('class').size()) # by='class' can be string, dict, etc. string of some column name can be passed here. (it is 'class in this particular code') # returns -> GroupBy obj # metplotlib stuff to draw graphs # box and whisker plots # dataset.plot(kind='box', subplots=True, layout=(2,2), sharex=False, sharey=False) # plt.show() # histograms # dataset.hist() # plt.show() # scatter plot matrix # scatter_matrix(dataset) # plt.show() # Split-out validation dataset # Test options and evaluation metric # Spot Check Algorithms # models = [] # models.append(('LR', LogisticRegression())) # models.append(('LDA', LinearDiscriminantAnalysis())) # models.append(('KNN', KNeighborsClassifier())) # models.append(('CART', DecisionTreeClassifier())) # models.append(('NB', GaussianNB())) # models.append(('SVM', SVC())) # # evaluate each model in turn # results = [] # names = [] # for name, model in models: # kfold = model_selection.KFold(n_splits=10, random_state=seed) # cv_results = model_selection.cross_val_score(model, X_train, Y_train, cv=kfold, scoring=scoring) # results.append(cv_results) # names.append(name) # msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std()) # print(msg) # Compare Algorithms # fig = plt.figure() # fig.suptitle('Algorithm Comparison') # ax = fig.add_subplot(111) # plt.boxplot(results) # ax.set_xticklabels(names) # plt.show() # Make predictions on validation dataset | 3.131496 | 3 |
py/model.py | Enigmatisms/NeRF | 1 | 6621278 | <gh_stars>1-10
#-*-coding:utf-8-*-
"""
NeRF network details. To be finished ...
"""
import torch
from torch import nn
from torch.nn import functional as F
from apex import amp
from py.nerf_helper import makeMLP, positional_encoding
# import tinycudann as tcnn
# This module is shared by coarse and fine network, with no need to modify
class NeRF(nn.Module):
@staticmethod
def init_weight(m):
if isinstance(m, nn.Linear):
nn.init.trunc_normal_(m.weight, std=.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def __init__(self, position_flevel, direction_flevel, cat_origin = True) -> None:
super().__init__()
self.position_flevel = position_flevel
self.direction_flevel = direction_flevel
extra_width = 3 if cat_origin else 0
module_list = makeMLP(60 + extra_width, 256)
for _ in range(3):
module_list.extend(makeMLP(256, 256))
self.lin_block1 = nn.Sequential(*module_list) # MLP before skip connection
self.lin_block2 = nn.Sequential(
*makeMLP(316 + extra_width, 256),
*makeMLP(256, 256), *makeMLP(256, 256)
)
self.bottle_neck = nn.Sequential(*makeMLP(256, 256, None))
self.opacity_head = nn.Sequential( # authors said that ReLU is used here
*makeMLP(256, 1)
)
self.rgb_layer = nn.Sequential(
*makeMLP(280 + extra_width, 128),
*makeMLP(128, 3, nn.Sigmoid())
)
self.cat_origin = cat_origin
self.apply(self.init_weight)
def loadFromFile(self, load_path:str, use_amp = False, opt = None):
save = torch.load(load_path)
save_model = save['model']
model_dict = self.state_dict()
state_dict = {k:v for k, v in save_model.items()}
model_dict.update(state_dict)
self.load_state_dict(model_dict)
if not opt is None:
opt.load_state_dict(save['optimizer'])
if use_amp:
amp.load_state_dict(save['amp'])
print("NeRF Model loaded from '%s'"%(load_path))
# for coarse network, input is obtained by sampling, sampling result is (ray_num, point_num, 9), (depth) (ray_num, point_num)
# TODO: fine-network输入的point_num是192,会产生影响吗?
def forward(self, pts:torch.Tensor, encoded_pt:torch.Tensor = None) -> torch.Tensor:
position_dim, direction_dim = 6 * self.position_flevel, 6 * self.direction_flevel
if not encoded_pt is None:
encoded_x = encoded_pt
else:
encoded_x = positional_encoding(pts[:, :, :3], self.position_flevel)
rotation = pts[:, :, 3:6].reshape(-1, 3)
rotation = rotation / rotation.norm(dim = -1, keepdim = True)
encoded_r = positional_encoding(rotation, self.direction_flevel)
encoded_x = encoded_x.view(pts.shape[0], pts.shape[1], position_dim)
encoded_r = encoded_r.view(pts.shape[0], pts.shape[1], direction_dim)
if self.cat_origin:
encoded_x = torch.cat((pts[:, :, :3], encoded_x), -1)
encoded_r = torch.cat((rotation.view(pts.shape[0], pts.shape[1], -1), encoded_r), -1)
tmp = self.lin_block1(encoded_x)
encoded_x = torch.cat((encoded_x, tmp), dim = -1)
encoded_x = self.lin_block2(encoded_x)
opacity = self.opacity_head(encoded_x)
encoded_x = self.bottle_neck(encoded_x)
rgb = self.rgb_layer(torch.cat((encoded_x, encoded_r), dim = -1))
return torch.cat((rgb, opacity), dim = -1) # output (ray_num, point_num, 4)
# rays is of shape (ray_num, 6)
@staticmethod
def coarseFineMerge(rays:torch.Tensor, c_zvals:torch.Tensor, f_zvals:torch.Tensor) -> torch.Tensor:
zvals = torch.cat((f_zvals, c_zvals), dim = -1)
zvals, _ = torch.sort(zvals, dim = -1)
sample_pnum = f_zvals.shape[1] + c_zvals.shape[1]
# Use sort depth to calculate sampled points
pts = rays[...,None,:3] + rays[...,None,3:] * zvals[...,:,None]
# depth * ray_direction + origin (this should be further tested)
return torch.cat((pts, rays[:, 3:].unsqueeze(-2).repeat(1, sample_pnum, 1)), dim = -1), zvals # output is (ray_num, coarse_pts num + fine pts num, 6)
"""
This function is important for inverse transform sampling, since for every ray
we will have 64 normalized weights (summing to 1.) for inverse sampling
"""
@staticmethod
def getNormedWeight(opacity:torch.Tensor, depth:torch.Tensor) -> torch.Tensor:
delta:torch.Tensor = torch.cat((depth[:, 1:] - depth[:, :-1], torch.FloatTensor([1e10]).repeat((depth.shape[0], 1)).cuda()), dim = -1)
# print(opacity.shape, depth[:, 1:].shape, raw_delta.shape, delta.shape)
mult:torch.Tensor = torch.exp(-F.relu(opacity) * delta)
alpha:torch.Tensor = 1. - mult
# fusion requires normalization, rgb output should be passed through sigmoid
weights = alpha * torch.cumprod(torch.cat([torch.ones((alpha.shape[0], 1)).cuda(), mult + 1e-10], -1), -1)[:, :-1]
return weights
# depth shape: (ray_num, point_num)
# need the norm of rays, shape: (ray_num, point_num)
@staticmethod
def render(rgbo:torch.Tensor, depth:torch.Tensor, ray_dirs:torch.Tensor) -> torch.Tensor:
depth = depth * (ray_dirs.norm(dim = -1, keepdim = True))
rgb:torch.Tensor = rgbo[..., :3] # shape (ray_num, pnum, 3)
opacity:torch.Tensor = rgbo[..., -1] # 1e-5 is used for eliminating numerical instability
weights = NeRF.getNormedWeight(opacity, depth)
weighted_rgb:torch.Tensor = weights[:, :, None] * rgb
return torch.sum(weighted_rgb, dim = -2), weights # output (ray_num, 3) and (ray_num, point_num)
if __name__ == "__main__":
print("Hello NeRF world!")
| #-*-coding:utf-8-*-
"""
NeRF network details. To be finished ...
"""
import torch
from torch import nn
from torch.nn import functional as F
from apex import amp
from py.nerf_helper import makeMLP, positional_encoding
# import tinycudann as tcnn
# This module is shared by coarse and fine network, with no need to modify
class NeRF(nn.Module):
@staticmethod
def init_weight(m):
if isinstance(m, nn.Linear):
nn.init.trunc_normal_(m.weight, std=.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def __init__(self, position_flevel, direction_flevel, cat_origin = True) -> None:
super().__init__()
self.position_flevel = position_flevel
self.direction_flevel = direction_flevel
extra_width = 3 if cat_origin else 0
module_list = makeMLP(60 + extra_width, 256)
for _ in range(3):
module_list.extend(makeMLP(256, 256))
self.lin_block1 = nn.Sequential(*module_list) # MLP before skip connection
self.lin_block2 = nn.Sequential(
*makeMLP(316 + extra_width, 256),
*makeMLP(256, 256), *makeMLP(256, 256)
)
self.bottle_neck = nn.Sequential(*makeMLP(256, 256, None))
self.opacity_head = nn.Sequential( # authors said that ReLU is used here
*makeMLP(256, 1)
)
self.rgb_layer = nn.Sequential(
*makeMLP(280 + extra_width, 128),
*makeMLP(128, 3, nn.Sigmoid())
)
self.cat_origin = cat_origin
self.apply(self.init_weight)
def loadFromFile(self, load_path:str, use_amp = False, opt = None):
save = torch.load(load_path)
save_model = save['model']
model_dict = self.state_dict()
state_dict = {k:v for k, v in save_model.items()}
model_dict.update(state_dict)
self.load_state_dict(model_dict)
if not opt is None:
opt.load_state_dict(save['optimizer'])
if use_amp:
amp.load_state_dict(save['amp'])
print("NeRF Model loaded from '%s'"%(load_path))
# for coarse network, input is obtained by sampling, sampling result is (ray_num, point_num, 9), (depth) (ray_num, point_num)
# TODO: fine-network输入的point_num是192,会产生影响吗?
def forward(self, pts:torch.Tensor, encoded_pt:torch.Tensor = None) -> torch.Tensor:
position_dim, direction_dim = 6 * self.position_flevel, 6 * self.direction_flevel
if not encoded_pt is None:
encoded_x = encoded_pt
else:
encoded_x = positional_encoding(pts[:, :, :3], self.position_flevel)
rotation = pts[:, :, 3:6].reshape(-1, 3)
rotation = rotation / rotation.norm(dim = -1, keepdim = True)
encoded_r = positional_encoding(rotation, self.direction_flevel)
encoded_x = encoded_x.view(pts.shape[0], pts.shape[1], position_dim)
encoded_r = encoded_r.view(pts.shape[0], pts.shape[1], direction_dim)
if self.cat_origin:
encoded_x = torch.cat((pts[:, :, :3], encoded_x), -1)
encoded_r = torch.cat((rotation.view(pts.shape[0], pts.shape[1], -1), encoded_r), -1)
tmp = self.lin_block1(encoded_x)
encoded_x = torch.cat((encoded_x, tmp), dim = -1)
encoded_x = self.lin_block2(encoded_x)
opacity = self.opacity_head(encoded_x)
encoded_x = self.bottle_neck(encoded_x)
rgb = self.rgb_layer(torch.cat((encoded_x, encoded_r), dim = -1))
return torch.cat((rgb, opacity), dim = -1) # output (ray_num, point_num, 4)
# rays is of shape (ray_num, 6)
@staticmethod
def coarseFineMerge(rays:torch.Tensor, c_zvals:torch.Tensor, f_zvals:torch.Tensor) -> torch.Tensor:
zvals = torch.cat((f_zvals, c_zvals), dim = -1)
zvals, _ = torch.sort(zvals, dim = -1)
sample_pnum = f_zvals.shape[1] + c_zvals.shape[1]
# Use sort depth to calculate sampled points
pts = rays[...,None,:3] + rays[...,None,3:] * zvals[...,:,None]
# depth * ray_direction + origin (this should be further tested)
return torch.cat((pts, rays[:, 3:].unsqueeze(-2).repeat(1, sample_pnum, 1)), dim = -1), zvals # output is (ray_num, coarse_pts num + fine pts num, 6)
"""
This function is important for inverse transform sampling, since for every ray
we will have 64 normalized weights (summing to 1.) for inverse sampling
"""
@staticmethod
def getNormedWeight(opacity:torch.Tensor, depth:torch.Tensor) -> torch.Tensor:
delta:torch.Tensor = torch.cat((depth[:, 1:] - depth[:, :-1], torch.FloatTensor([1e10]).repeat((depth.shape[0], 1)).cuda()), dim = -1)
# print(opacity.shape, depth[:, 1:].shape, raw_delta.shape, delta.shape)
mult:torch.Tensor = torch.exp(-F.relu(opacity) * delta)
alpha:torch.Tensor = 1. - mult
# fusion requires normalization, rgb output should be passed through sigmoid
weights = alpha * torch.cumprod(torch.cat([torch.ones((alpha.shape[0], 1)).cuda(), mult + 1e-10], -1), -1)[:, :-1]
return weights
# depth shape: (ray_num, point_num)
# need the norm of rays, shape: (ray_num, point_num)
@staticmethod
def render(rgbo:torch.Tensor, depth:torch.Tensor, ray_dirs:torch.Tensor) -> torch.Tensor:
depth = depth * (ray_dirs.norm(dim = -1, keepdim = True))
rgb:torch.Tensor = rgbo[..., :3] # shape (ray_num, pnum, 3)
opacity:torch.Tensor = rgbo[..., -1] # 1e-5 is used for eliminating numerical instability
weights = NeRF.getNormedWeight(opacity, depth)
weighted_rgb:torch.Tensor = weights[:, :, None] * rgb
return torch.sum(weighted_rgb, dim = -2), weights # output (ray_num, 3) and (ray_num, point_num)
if __name__ == "__main__":
print("Hello NeRF world!") | en | 0.843133 | #-*-coding:utf-8-*- NeRF network details. To be finished ... # import tinycudann as tcnn # This module is shared by coarse and fine network, with no need to modify # MLP before skip connection # authors said that ReLU is used here # for coarse network, input is obtained by sampling, sampling result is (ray_num, point_num, 9), (depth) (ray_num, point_num) # TODO: fine-network输入的point_num是192,会产生影响吗? # output (ray_num, point_num, 4) # rays is of shape (ray_num, 6) # Use sort depth to calculate sampled points # depth * ray_direction + origin (this should be further tested) # output is (ray_num, coarse_pts num + fine pts num, 6) This function is important for inverse transform sampling, since for every ray we will have 64 normalized weights (summing to 1.) for inverse sampling # print(opacity.shape, depth[:, 1:].shape, raw_delta.shape, delta.shape) # fusion requires normalization, rgb output should be passed through sigmoid # depth shape: (ray_num, point_num) # need the norm of rays, shape: (ray_num, point_num) # shape (ray_num, pnum, 3) # 1e-5 is used for eliminating numerical instability # output (ray_num, 3) and (ray_num, point_num) | 2.12963 | 2 |
tests/core/test_serializer_list.py | hugosenari/dbus_curio | 0 | 6621279 | <filename>tests/core/test_serializer_list.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for `dbus_curio.core.serializer.serialize_list` function.
"""
import sys
import unittest
from dbus_curio.core.serializer import serialize_list
class TestSerializerList(unittest.TestCase):
def test_000_list_int(self):
signature = b'n'
expected = b''.join([b'\x04\x00\x00\x00',
b'\x01\x00',
b'\x02\x03'])
target = [1, 770]
actual = b''.join(serialize_list(target, signature))
self.assertEqual(expected, actual)
def test_001_list_str(self):
signature = b's'
expected = b''.join([b'\x17\x00\x00\x00',
b'\x05\x00\x00\x00Hello\x00\x00\x00',
b'\x06\x00\x00\x00World!\x00'])
target = ['Hello', "World!"]
actual = b''.join(serialize_list(target, signature))
self.assertEqual(expected, actual) | <filename>tests/core/test_serializer_list.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for `dbus_curio.core.serializer.serialize_list` function.
"""
import sys
import unittest
from dbus_curio.core.serializer import serialize_list
class TestSerializerList(unittest.TestCase):
def test_000_list_int(self):
signature = b'n'
expected = b''.join([b'\x04\x00\x00\x00',
b'\x01\x00',
b'\x02\x03'])
target = [1, 770]
actual = b''.join(serialize_list(target, signature))
self.assertEqual(expected, actual)
def test_001_list_str(self):
signature = b's'
expected = b''.join([b'\x17\x00\x00\x00',
b'\x05\x00\x00\x00Hello\x00\x00\x00',
b'\x06\x00\x00\x00World!\x00'])
target = ['Hello', "World!"]
actual = b''.join(serialize_list(target, signature))
self.assertEqual(expected, actual) | en | 0.282827 | #!/usr/bin/env python # -*- coding: utf-8 -*- Tests for `dbus_curio.core.serializer.serialize_list` function. | 2.7798 | 3 |
lib/Flask-ACL/flask_acl/globals.py | mikeboers/Spoon | 4 | 6621280 | <reponame>mikeboers/Spoon<gh_stars>1-10
import functools
import werkzeug as wz
from flask import current_app
# Proxy to the current app's AuthManager
current_auth = wz.local.LocalProxy(lambda: current_app.auth_manager)
| import functools
import werkzeug as wz
from flask import current_app
# Proxy to the current app's AuthManager
current_auth = wz.local.LocalProxy(lambda: current_app.auth_manager) | en | 0.882306 | # Proxy to the current app's AuthManager | 1.752477 | 2 |
python/ray/rllib/RL/envs/target_tracking/belief_tracker.py | christopher-hsu/ray | 1 | 6621281 | """Belief Trackers
KFbelief : Belief Update using Kalman Filter
UKFbelief : Belief Update using Unscented Kalman Filter using filterpy library
"""
import numpy as np
import envs.env_utils as util
from numpy import linalg as LA
import pdb
from filterpy.kalman import JulierSigmaPoints, UnscentedKalmanFilter, ExtendedKalmanFilter
class KFbelief(object):
    """
    Kalman-filter belief over a single target's state.

    The motion model is linear (x' = A x + process noise); the range-bearing
    measurement is nonlinear, so the update step linearizes it around the
    predicted state (EKF-style Jacobian).

    state : current target state estimate (length ``dim``)
    x : agent (observer) state passed to ``update``
    z : observation (r, alpha) = (range, bearing)
    """
    def __init__(self, dim, limit, dim_z=2, A=None, W=None,
                    obs_noise_func=None, collision_func=None):
        """
        dim : dimension of state
        limit : An array of two vectors. limit[0] = minimum values for the
            state, limit[1] = maximum values for the state
        dim_z : dimension of observation
        A : state transition matrix (identity when omitted)
        W : state noise matrix (zero when omitted)
        obs_noise_func : observation noise matrix function of z
        collision_func : collision checking function
        """
        self.dim = dim
        self.limit = limit
        self.A = A if A is not None else np.eye(dim)
        self.W = np.zeros((dim, dim)) if W is None else W
        self.obs_noise_func = obs_noise_func
        self.collision_func = collision_func

    def reset(self, init_state, init_cov):
        """Re-initialize the belief with a state and an isotropic covariance."""
        self.state = init_state
        self.cov = np.eye(self.dim) * init_cov

    def update(self, observed, z_t, x_t):
        """
        Run one predict(+update) cycle.

        observed : whether the target was detected this step; when False only
            the prediction is applied.
        z_t : measurement (r, alpha); used only when ``observed`` is True.
        x_t : agent state used to form the predicted measurement.
        """
        # --- Prediction ---
        x_pred = self.A @ self.state
        P_pred = self.A @ self.cov @ self.A.T + self.W

        # --- Measurement update ---
        if observed:
            r_pred, alpha_pred, diff_pred = util.relative_measure(x_pred, x_t)
            dx, dy = diff_pred[0], diff_pred[1]
            if self.dim == 2:
                # Jacobian of (r, alpha) w.r.t. (x, y).
                Hmat = np.array([[dx, dy],
                                 [-dy / r_pred, dx / r_pred]]) / r_pred
            elif self.dim == 4:
                # Jacobian of (r, alpha) w.r.t. (x, y, xdot, ydot); the
                # velocity components do not enter the measurement.
                Hmat = np.array([[dx, dy, 0.0, 0.0],
                                 [-dy / r_pred, dx / r_pred, 0.0, 0.0]]) / r_pred
            else:
                raise ValueError('target dimension for KF must be either 2 or 4')
            innov = z_t - np.array([r_pred, alpha_pred])
            # Keep the bearing innovation wrapped into the principal interval.
            innov[1] = util.wrap_around(innov[1])
            S = Hmat @ P_pred @ Hmat.T + self.obs_noise_func((r_pred, alpha_pred))
            K = P_pred @ Hmat.T @ LA.inv(S)  # Kalman gain
            P_new = (np.eye(self.dim) - K @ Hmat) @ P_pred
            x_new = x_pred + K @ innov
        else:
            P_new = P_pred
            x_new = x_pred

        # Commit only non-diverged covariances; clip the state to the map
        # extent and keep the previous state if the new one collides.
        if LA.det(P_new) < 1e6:
            self.cov = P_new
        if not self.collision_func(x_new[:2]):
            self.state = np.clip(x_new, self.limit[0], self.limit[1])
class UKFbelief(object):
    """
    Unscented Kalman Filter belief tracker built on filterpy's
    UnscentedKalmanFilter.

    Angle-valued components of the state (heading) and observation (bearing)
    are averaged with circular means and differenced with wrap-around
    residuals so the filter behaves consistently across the +/-pi boundary.
    """
    def __init__(self, dim, limit, dim_z=2, fx=None, W=None, obs_noise_func=None,
                collision_func=None, sampling_period=0.5, kappa=1):
        """
        dim : dimension of state
        ***Assuming dim==3: (x,y,theta), dim==4: (x,y,xdot,ydot), dim==5: (x,y,theta,v,w)
        limit : An array of two vectors. limit[0] = minimum values for the state,
            limit[1] = maximum value for the state
        dim_z : dimension of observation,
        fx : x_tp1 = fx(x_t, dt), state dynamic function
        W : state noise matrix
        obs_noise_func : observation noise matrix function of z
        collision_func : collision checking function
        kappa : sigma-point spread parameter for JulierSigmaPoints
        sampling_period : dt passed to the UKF / dynamics function
        """
        self.dim = dim
        self.limit = limit
        self.W = W if W is not None else np.zeros((self.dim, self.dim))
        self.obs_noise_func = obs_noise_func
        self.collision_func = collision_func

        def hx(y, agent_state, measure_func=util.relative_measure):
            # Measurement model: map a sigma-point state ``y`` to the
            # predicted (range, bearing) observation seen from ``agent_state``.
            r_pred, alpha_pred, _ = measure_func(y, agent_state)
            return np.array([r_pred, alpha_pred])

        def x_mean_fn_(sigmas, Wm):
            # Weighted mean of the state sigma points. The heading component
            # (index 2 for dim 3 and dim 5) is a circular quantity, so it is
            # averaged via atan2 of weighted sin/cos sums rather than a plain
            # weighted average.
            # NOTE(review): for any other dim this returns None, which would
            # make filterpy's mean invalid — presumably only dim 3/5 are used
            # with this class; confirm against callers.
            if dim == 3:
                x = np.zeros(dim)
                sum_sin, sum_cos = 0., 0.
                for i in range(len(sigmas)):
                    s = sigmas[i]
                    x[0] += s[0] * Wm[i]
                    x[1] += s[1] * Wm[i]
                    sum_sin += np.sin(s[2])*Wm[i]
                    sum_cos += np.cos(s[2])*Wm[i]
                x[2] = np.arctan2(sum_sin, sum_cos)
                return x
            elif dim == 5:
                x = np.zeros(dim)
                sum_sin, sum_cos = 0., 0.
                for i in range(len(sigmas)):
                    s = sigmas[i]
                    x[0] += s[0] * Wm[i]
                    x[1] += s[1] * Wm[i]
                    x[3] += s[3] * Wm[i]
                    x[4] += s[4] * Wm[i]
                    sum_sin += np.sin(s[2])*Wm[i]
                    sum_cos += np.cos(s[2])*Wm[i]
                x[2] = np.arctan2(sum_sin, sum_cos)
                return x
            else:
                return None

        def z_mean_fn_(sigmas, Wm):
            # Weighted mean of measurement sigma points: plain mean for the
            # range, circular mean for the bearing.
            x = np.zeros(dim_z)
            sum_sin, sum_cos = 0., 0.
            for i in range(len(sigmas)):
                s = sigmas[i]
                x[0] += s[0] * Wm[i]
                sum_sin += np.sin(s[1])*Wm[i]
                sum_cos += np.cos(s[1])*Wm[i]
            x[1] = np.arctan2(sum_sin, sum_cos)
            return x

        def residual_x_(x, xp):
            """
            x : state, [x, y, theta]
            xp : predicted state

            Wraps the heading difference into the principal interval.
            NOTE(review): returns None for dims other than 3/5 (see
            x_mean_fn_ note above).
            """
            if dim == 3 or dim == 5:
                r_x = x - xp
                r_x[2] = util.wrap_around(r_x[2])
                return r_x
            else:
                return None

        def residual_z_(z, zp):
            """
            z : observation, [r, alpha]
            zp : predicted observation

            Wraps the bearing difference into the principal interval.
            """
            r_z = z - zp
            r_z[1] = util.wrap_around(r_z[1])
            return r_z

        sigmas = JulierSigmaPoints(n=dim, kappa=kappa)
        self.ukf = UnscentedKalmanFilter(dim, dim_z, sampling_period, fx=fx, hx=hx,
                    points=sigmas, x_mean_fn=x_mean_fn_, z_mean_fn=z_mean_fn_,
                    residual_x=residual_x_, residual_z=residual_z_)

    def reset(self, init_state, init_cov):
        # (Re-)initialize both the exposed belief and the internal filter.
        self.state = init_state
        self.cov = init_cov*np.eye(self.dim)
        self.ukf.x = self.state
        self.ukf.P = self.cov
        self.ukf.Q = self.W # process noise matrix

    def update(self, observed, z_t, x_t, u_t=None):
        """
        Run one UKF predict(+update) cycle.

        observed : whether the target was detected; when False only the
            prediction step runs.
        z_t : measurement (r, alpha); used only when ``observed`` is True.
        x_t : agent state, forwarded to the measurement model ``hx``.
        u_t : optional control input forwarded to the dynamics ``fx``.
        """
        # Kalman Filter Update
        self.ukf.predict(u=u_t)
        if observed:
            # Evaluate the measurement-noise matrix at the predicted
            # measurement before running the correction step.
            r_pred, alpha_pred, _ = util.relative_measure(self.ukf.x, x_t)
            self.ukf.update(z_t, R=self.obs_noise_func((r_pred, alpha_pred)), agent_state=x_t)

        cov_new = self.ukf.P
        state_new = self.ukf.x
        # Commit only non-diverged covariances (determinant gate); clip the
        # state to the map extent and skip it on collision.
        # NOTE(review): the clipped state is stored on ``self.state`` but the
        # internal filter state ``self.ukf.x`` / ``self.ukf.P`` is left
        # untouched, so gating/clipping does not affect the next prediction —
        # confirm this is intended.
        if LA.det(cov_new) < 1e6:
            self.cov = cov_new
        if not(self.collision_func(state_new[:2])):
            self.state = np.clip(state_new, self.limit[0], self.limit[1])
| """Belief Trackers
KFbelief : Belief Update using Kalman Filter
UKFbelief : Belief Update using Unscented Kalman Filter using filterpy library
"""
import numpy as np
import envs.env_utils as util
from numpy import linalg as LA
import pdb
from filterpy.kalman import JulierSigmaPoints, UnscentedKalmanFilter, ExtendedKalmanFilter
class KFbelief(object):
    """
    Kalman-filter belief over a single target's state for the target
    tracking problem.

    The motion model is linear (x' = A x + noise) while the range-bearing
    observation is nonlinear, so the update step linearizes the measurement
    model around the predicted state (EKF-style Jacobian ``Hmat``).

    state : current target state estimate (length ``dim``)
    x : agent (observer) state passed to ``update``
    z : observation (r, alpha) = (range, bearing)
    """
    def __init__(self, dim, limit, dim_z=2, A=None, W=None,
                    obs_noise_func=None, collision_func=None):
        """
        dim : dimension of state
        limit : An array of two vectors. limit[0] = minimum values for the state,
            limit[1] = maximum value for the state
        dim_z : dimension of observation,
        A : state transition matrix (defaults to identity, i.e. a static target)
        W : state noise matrix (defaults to zero process noise)
        obs_noise_func : observation noise matrix function of z
        collision_func : collision checking function
        """
        self.dim = dim
        self.limit = limit
        self.A = np.eye(self.dim) if A is None else A
        self.W = W if W is not None else np.zeros((self.dim, self.dim))
        self.obs_noise_func = obs_noise_func
        self.collision_func = collision_func

    def reset(self, init_state, init_cov):
        # (Re-)initialize the belief with a state and an isotropic covariance.
        self.state = init_state
        self.cov = init_cov*np.eye(self.dim)

    def update(self, observed, z_t, x_t):
        """
        Run one predict(+update) cycle.

        observed : whether the target was detected this step; when False only
            the prediction is applied.
        z_t : measurement (r, alpha); used only when ``observed`` is True.
        x_t : agent state used to form the predicted measurement.
        """
        # Kalman Filter Prediction and Update
        # Prediction
        state_predicted = np.matmul(self.A, self.state)
        cov_predicted = np.matmul(np.matmul(self.A, self.cov), self.A.T)+ self.W
        # Update
        if observed:
            # Predicted measurement and the position difference used to build
            # the measurement Jacobian.
            r_pred, alpha_pred, diff_pred = util.relative_measure(state_predicted, x_t)
            if self.dim == 2:
                # Jacobian of (r, alpha) w.r.t. (x, y).
                Hmat = np.array([[diff_pred[0],diff_pred[1]],
                            [-diff_pred[1]/r_pred, diff_pred[0]/r_pred]])/r_pred
            elif self.dim == 4:
                # Jacobian of (r, alpha) w.r.t. (x, y, xdot, ydot); the
                # velocity components do not enter the measurement.
                Hmat = np.array([[diff_pred[0], diff_pred[1], 0.0, 0.0],
                        [-diff_pred[1]/r_pred, diff_pred[0]/r_pred, 0.0, 0.0]])/r_pred
            else:
                raise ValueError('target dimension for KF must be either 2 or 4')
            innov = z_t - np.array([r_pred, alpha_pred])
            # Keep the bearing innovation wrapped into the principal interval.
            innov[1] = util.wrap_around(innov[1])
            # Innovation covariance, Kalman gain, and posterior moments.
            R = np.matmul(np.matmul(Hmat, cov_predicted), Hmat.T) \
                            + self.obs_noise_func((r_pred, alpha_pred))
            K = np.matmul(np.matmul(cov_predicted, Hmat.T), LA.inv(R))
            C = np.eye(self.dim) - np.matmul(K, Hmat)
            cov_new = np.matmul(C, cov_predicted)
            state_new = state_predicted + np.matmul(K, innov)
        else:
            cov_new = cov_predicted
            state_new = state_predicted
        # Commit only non-diverged covariances (determinant gate).
        if LA.det(cov_new) < 1e6:
            self.cov = cov_new
        # Commit the state only if collision-free, clipped to the map extent.
        # NOTE(review): when only one of the two gates fails, the stored state
        # and covariance can come from different update cycles — presumably
        # acceptable for this tracker, but worth confirming.
        if not(self.collision_func(state_new[:2])):
            self.state = np.clip(state_new, self.limit[0], self.limit[1])
class UKFbelief(object):
    """
    Target-belief tracker built on filterpy's Unscented Kalman Filter.
    """
    def __init__(self, dim, limit, dim_z=2, fx=None, W=None, obs_noise_func=None,
                collision_func=None, sampling_period=0.5, kappa=1):
        """
        dim : dimension of state
        ***Assuming dim==3: (x,y,theta), dim==4: (x,y,xdot,ydot), dim==5: (x,y,theta,v,w)
        limit : An array of two vectors. limit[0] = minimum values for the state,
                limit[1] = maximum value for the state
        dim_z : dimension of observation
        fx : x_tp1 = fx(x_t, dt), state dynamic function
        W : state (process) noise matrix
        obs_noise_func : observation noise matrix function of z
        collision_func : collision checking function
        sampling_period : dt handed to the UKF prediction step
        kappa : spread parameter of the Julier sigma points
        """
        self.dim = dim
        self.limit = limit
        self.W = W if W is not None else np.zeros((self.dim, self.dim))
        self.obs_noise_func = obs_noise_func
        self.collision_func = collision_func
        # Measurement model: expected (range, bearing) of target state y
        # seen from the given agent state.
        def hx(y, agent_state, measure_func=util.relative_measure):
            r_pred, alpha_pred, _ = measure_func(y, agent_state)
            return np.array([r_pred, alpha_pred])
        # Weighted mean of the state sigma points. The heading entry
        # (index 2) is averaged on the circle via sin/cos so angles near
        # +/-pi do not cancel out. NOTE(review): for dim == 4 this
        # returns None -- confirm dim 4 is never used with the UKF,
        # since filterpy expects an array from x_mean_fn.
        def x_mean_fn_(sigmas, Wm):
            if dim == 3:
                x = np.zeros(dim)
                sum_sin, sum_cos = 0., 0.
                for i in range(len(sigmas)):
                    s = sigmas[i]
                    x[0] += s[0] * Wm[i]
                    x[1] += s[1] * Wm[i]
                    sum_sin += np.sin(s[2])*Wm[i]
                    sum_cos += np.cos(s[2])*Wm[i]
                x[2] = np.arctan2(sum_sin, sum_cos)
                return x
            elif dim == 5:
                x = np.zeros(dim)
                sum_sin, sum_cos = 0., 0.
                for i in range(len(sigmas)):
                    s = sigmas[i]
                    x[0] += s[0] * Wm[i]
                    x[1] += s[1] * Wm[i]
                    x[3] += s[3] * Wm[i]
                    x[4] += s[4] * Wm[i]
                    sum_sin += np.sin(s[2])*Wm[i]
                    sum_cos += np.cos(s[2])*Wm[i]
                x[2] = np.arctan2(sum_sin, sum_cos)
                return x
            else:
                return None
        # Weighted mean of the observation sigma points; the bearing
        # (index 1) is averaged on the circle as above.
        def z_mean_fn_(sigmas, Wm):
            x = np.zeros(dim_z)
            sum_sin, sum_cos = 0., 0.
            for i in range(len(sigmas)):
                s = sigmas[i]
                x[0] += s[0] * Wm[i]
                sum_sin += np.sin(s[1])*Wm[i]
                sum_cos += np.cos(s[1])*Wm[i]
            x[1] = np.arctan2(sum_sin, sum_cos)
            return x
        # State residual; the heading difference is wrapped to (-pi, pi].
        def residual_x_(x, xp):
            """
            x : state, [x, y, theta]
            xp : predicted state
            """
            if dim == 3 or dim == 5:
                r_x = x - xp
                r_x[2] = util.wrap_around(r_x[2])
                return r_x
            else:
                return None
        # Observation residual; the bearing difference is wrapped.
        def residual_z_(z, zp):
            """
            z : observation, [r, alpha]
            zp : predicted observation
            """
            r_z = z - zp
            r_z[1] = util.wrap_around(r_z[1])
            return r_z
        sigmas = JulierSigmaPoints(n=dim, kappa=kappa)
        self.ukf = UnscentedKalmanFilter(dim, dim_z, sampling_period, fx=fx, hx=hx,
                points=sigmas, x_mean_fn=x_mean_fn_, z_mean_fn=z_mean_fn_,
                residual_x=residual_x_, residual_z=residual_z_)
    def reset(self, init_state, init_cov):
        # Re-initialize both the local copy of the belief and the
        # underlying filterpy filter state.
        self.state = init_state
        self.cov = init_cov*np.eye(self.dim)
        self.ukf.x = self.state
        self.ukf.P = self.cov
        self.ukf.Q = self.W # process noise matrix
    def update(self, observed, z_t, x_t, u_t=None):
        # Kalman Filter Update
        self.ukf.predict(u=u_t)
        if observed:
            r_pred, alpha_pred, _ = util.relative_measure(self.ukf.x, x_t)
            self.ukf.update(z_t, R=self.obs_noise_func((r_pred, alpha_pred)), agent_state=x_t)
        cov_new = self.ukf.P
        state_new = self.ukf.x
        # Accept the covariance only while it has not diverged.
        # NOTE(review): the 1e6 determinant bound looks like a divergence
        # guard -- confirm the intended threshold.
        if LA.det(cov_new) < 1e6:
            self.cov = cov_new
        # Reject means that collide with the map; otherwise clip into
        # the state limits.
        if not(self.collision_func(state_new[:2])):
            self.state = np.clip(state_new, self.limit[0], self.limit[1])
| en | 0.467715 | Belief Trackers KFbelief : Belief Update using Kalman Filter UKFbelief : Belief Update using Unscented Kalman Filter using filterpy library Kalman Filter for the target tracking problem. state : target state x : agent state z : observation (r, alpha) dim : dimension of state limit : An array of two vectors. limit[0] = minimum values for the state, limit[1] = maximum value for the state dim_z : dimension of observation, A : state transition matrix W : state noise matrix obs_noise_func : observation noise matrix function of z collision_func : collision checking function # Kalman Filter Prediction and Update # Prediction # Update Unscented Kalman Filter from filterpy dim : dimension of state ***Assuming dim==3: (x,y,theta), dim==4: (x,y,xdot,ydot), dim==5: (x,y,theta,v,w) limit : An array of two vectors. limit[0] = minimum values for the state, limit[1] = maximum value for the state dim_z : dimension of observation, fx : x_tp1 = fx(x_t, dt), state dynamic function W : state noise matrix obs_noise_func : observation noise matrix function of z collision_func : collision checking function n : the number of sigma points x : state, [x, y, theta] xp : predicted state z : observation, [r, alpha] zp : predicted observation # process noise matrix # Kalman Filter Update | 2.862833 | 3 |
services/traction/acapy_wrapper/models/v20_cred_ex_record_detail.py | Open-Earth-Foundation/traction | 12 | 6621282 | <gh_stars>10-100
# coding: utf-8
from __future__ import annotations
from datetime import date, datetime # noqa: F401
import re # noqa: F401
from typing import Any, Dict, List, Optional # noqa: F401
from pydantic import AnyUrl, BaseModel, EmailStr, validator # noqa: F401
from acapy_wrapper.models.v20_cred_ex_record import V20CredExRecord
from acapy_wrapper.models.v20_cred_ex_record_indy import V20CredExRecordIndy
from acapy_wrapper.models.v20_cred_ex_record_ld_proof import V20CredExRecordLDProof
class V20CredExRecordDetail(BaseModel):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.

    V20CredExRecordDetail - a model defined in OpenAPI

        cred_ex_record: The cred_ex_record of this V20CredExRecordDetail [Optional].
        indy: The indy of this V20CredExRecordDetail [Optional].
        ld_proof: The ld_proof of this V20CredExRecordDetail [Optional].
    """

    # All three sections are optional; any subset may be present in a
    # credential-exchange detail payload.
    cred_ex_record: Optional[V20CredExRecord] = None
    indy: Optional[V20CredExRecordIndy] = None
    ld_proof: Optional[V20CredExRecordLDProof] = None


# Resolve the forward references used in the field annotations above.
V20CredExRecordDetail.update_forward_refs()
| # coding: utf-8
from __future__ import annotations
from datetime import date, datetime # noqa: F401
import re # noqa: F401
from typing import Any, Dict, List, Optional # noqa: F401
from pydantic import AnyUrl, BaseModel, EmailStr, validator # noqa: F401
from acapy_wrapper.models.v20_cred_ex_record import V20CredExRecord
from acapy_wrapper.models.v20_cred_ex_record_indy import V20CredExRecordIndy
from acapy_wrapper.models.v20_cred_ex_record_ld_proof import V20CredExRecordLDProof
class V20CredExRecordDetail(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
V20CredExRecordDetail - a model defined in OpenAPI
cred_ex_record: The cred_ex_record of this V20CredExRecordDetail [Optional].
indy: The indy of this V20CredExRecordDetail [Optional].
ld_proof: The ld_proof of this V20CredExRecordDetail [Optional].
"""
cred_ex_record: Optional[V20CredExRecord] = None
indy: Optional[V20CredExRecordIndy] = None
ld_proof: Optional[V20CredExRecordLDProof] = None
V20CredExRecordDetail.update_forward_refs() | en | 0.650944 | # coding: utf-8 # noqa: F401 # noqa: F401 # noqa: F401 # noqa: F401 NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). Do not edit the class manually. V20CredExRecordDetail - a model defined in OpenAPI cred_ex_record: The cred_ex_record of this V20CredExRecordDetail [Optional]. indy: The indy of this V20CredExRecordDetail [Optional]. ld_proof: The ld_proof of this V20CredExRecordDetail [Optional]. | 1.963244 | 2 |
source/ch08/magic-numbers.py | AngelLiang/programming-in-python3-2nd-edition | 0 | 6621283 | <reponame>AngelLiang/programming-in-python3-2nd-edition<gh_stars>0
#!/usr/bin/env python3
# Copyright (c) 2008-11 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. It is provided for educational
# purposes and is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import os
import sys
if sys.platform.startswith("win"):
import glob
USE_SIMPLE_GET_FUNCTION = True
def main():
    """Classify each file named on the command line by its magic number.

    Collects get_file_type(magic_bytes, extension) functions from every
    loaded plug-in module and asks each in turn to recognize the first
    1000 bytes of the file; prints "Unknown" if none claims it.
    """
    modules = load_modules()
    get_file_type_functions = []
    for module in modules:
        get_file_type = get_function(module, "get_file_type")
        if get_file_type is not None:
            get_file_type_functions.append(get_file_type)
    for file in get_files(sys.argv[1:]):
        fh = None
        try:
            fh = open(file, "rb")
            # The first 1000 bytes are enough for magic-number detection.
            magic = fh.read(1000)
            for get_file_type in get_file_type_functions:
                filetype = get_file_type(magic,
                                         os.path.splitext(file)[1])
                if filetype is not None:
                    print("{0:.<20}{1}".format(filetype, file))
                    break
            else:
                # for-else: no recognizer claimed the file.
                print("{0:.<20}{1}".format("Unknown", file))
        except EnvironmentError as err:
            print(err)
        finally:
            if fh is not None:
                fh.close()
if sys.platform.startswith("win"):
    # On Windows the shell does not expand wildcards, so glob the
    # patterns ourselves and yield only plain files.
    def get_files(names):
        for name in names:
            if os.path.isfile(name):
                yield name
            else:
                for file in glob.iglob(name):
                    if not os.path.isfile(file):
                        continue
                    yield file
else:
    # On Unix the shell has already expanded wildcards; just filter out
    # anything that is not a regular file.
    def get_files(names):
        return (file for file in names if os.path.isfile(file))
if USE_SIMPLE_GET_FUNCTION:
    # Simple variant: memoize successful lookups on the function's own
    # `cache` attribute; failed lookups are retried on every call.
    def get_function(module, function_name):
        """Return module.function_name if it exists and is callable, else None."""
        function = get_function.cache.get((module, function_name), None)
        if function is None:
            try:
                function = getattr(module, function_name)
                if not hasattr(function, "__call__"):
                    raise AttributeError()
                get_function.cache[module, function_name] = function
            except AttributeError:
                function = None
        return function
    get_function.cache = {}
else:
    # Variant with negative caching: failed lookups are remembered in
    # `bad_cache` so a missing attribute is only probed once.
    def get_function(module, function_name):
        """Return module.function_name if it exists and is callable, else None."""
        function = get_function.cache.get((module, function_name), None)
        if (function is None and
            (module, function_name) not in get_function.bad_cache):
            try:
                function = getattr(module, function_name)
                if not hasattr(function, "__call__"):
                    raise AttributeError()
                get_function.cache[module, function_name] = function
            except AttributeError:
                function = None
                get_function.bad_cache.add((module, function_name))
        return function
    get_function.cache = {}
    get_function.bad_cache = set()
if len(sys.argv) == 1 or sys.argv[1] in {"-h", "--help"}:
    print("usage: {0} [-1|-2] file1 [file2 [... fileN]]".format(
            os.path.basename(sys.argv[0])))
    sys.exit(2)
if sys.argv[1] == "-1":
    del sys.argv[1]
    # Version 1
    # Import sibling "magic*" modules via exec("import <name>").
    # SECURITY NOTE(review): exec on a file-system-derived name; the
    # isidentifier() check constrains it, but version 3's __import__ is
    # the safer equivalent.
    def load_modules():
        modules = []
        for name in os.listdir(os.path.dirname(__file__) or "."):
            if name.endswith(".py") and "magic" in name.lower():
                name = os.path.splitext(name)[0]
                if name.isidentifier() and name not in sys.modules:
                    try:
                        exec("import " + name)
                        modules.append(sys.modules[name])
                    except SyntaxError as err:
                        print(err)
        return modules
elif sys.argv[1] == "-2":
    del sys.argv[1]
    # Version 2
    # Read the module source and exec it into a fresh module object.
    # NOTE(review): opens `filename` relative to the CWD while listing
    # the script's own directory -- this fails when run from another
    # directory; confirm and join with the directory if so.
    def load_modules():
        modules = []
        for name in os.listdir(os.path.dirname(__file__) or "."):
            if name.endswith(".py") and "magic" in name.lower():
                filename = name
                name = os.path.splitext(name)[0]
                if name.isidentifier() and name not in sys.modules:
                    fh = None
                    try:
                        fh = open(filename, "r", encoding="utf8")
                        code = fh.read()
                        module = type(sys)(name)
                        sys.modules[name] = module
                        exec(code, module.__dict__)
                        modules.append(module)
                    except (EnvironmentError, SyntaxError) as err:
                        # Roll back the half-registered module on failure.
                        sys.modules.pop(name, None)
                        print(err)
                    finally:
                        if fh is not None:
                            fh.close()
        return modules
else:
    # Version 3
    # Plain __import__ of the sibling module.
    def load_modules():
        modules = []
        for name in os.listdir(os.path.dirname(__file__) or "."):
            if name.endswith(".py") and "magic" in name.lower():
                name = os.path.splitext(name)[0]
                if name.isidentifier() and name not in sys.modules:
                    try:
                        module = __import__(name)
                        modules.append(module)
                    except (ImportError, SyntaxError) as err:
                        print(err)
        return modules


main()
| #!/usr/bin/env python3
# Copyright (c) 2008-11 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. It is provided for educational
# purposes and is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import os
import sys
if sys.platform.startswith("win"):
import glob
USE_SIMPLE_GET_FUNCTION = True
def main():
modules = load_modules()
get_file_type_functions = []
for module in modules:
get_file_type = get_function(module, "get_file_type")
if get_file_type is not None:
get_file_type_functions.append(get_file_type)
for file in get_files(sys.argv[1:]):
fh = None
try:
fh = open(file, "rb")
magic = fh.read(1000)
for get_file_type in get_file_type_functions:
filetype = get_file_type(magic,
os.path.splitext(file)[1])
if filetype is not None:
print("{0:.<20}{1}".format(filetype, file))
break
else:
print("{0:.<20}{1}".format("Unknown", file))
except EnvironmentError as err:
print(err)
finally:
if fh is not None:
fh.close()
if sys.platform.startswith("win"):
def get_files(names):
for name in names:
if os.path.isfile(name):
yield name
else:
for file in glob.iglob(name):
if not os.path.isfile(file):
continue
yield file
else:
def get_files(names):
return (file for file in names if os.path.isfile(file))
if USE_SIMPLE_GET_FUNCTION:
def get_function(module, function_name):
function = get_function.cache.get((module, function_name), None)
if function is None:
try:
function = getattr(module, function_name)
if not hasattr(function, "__call__"):
raise AttributeError()
get_function.cache[module, function_name] = function
except AttributeError:
function = None
return function
get_function.cache = {}
else:
def get_function(module, function_name):
function = get_function.cache.get((module, function_name), None)
if (function is None and
(module, function_name) not in get_function.bad_cache):
try:
function = getattr(module, function_name)
if not hasattr(function, "__call__"):
raise AttributeError()
get_function.cache[module, function_name] = function
except AttributeError:
function = None
get_function.bad_cache.add((module, function_name))
return function
get_function.cache = {}
get_function.bad_cache = set()
if len(sys.argv) == 1 or sys.argv[1] in {"-h", "--help"}:
print("usage: {0} [-1|-2] file1 [file2 [... fileN]]".format(
os.path.basename(sys.argv[0])))
sys.exit(2)
if sys.argv[1] == "-1":
del sys.argv[1]
# Version 1
def load_modules():
modules = []
for name in os.listdir(os.path.dirname(__file__) or "."):
if name.endswith(".py") and "magic" in name.lower():
name = os.path.splitext(name)[0]
if name.isidentifier() and name not in sys.modules:
try:
exec("import " + name)
modules.append(sys.modules[name])
except SyntaxError as err:
print(err)
return modules
elif sys.argv[1] == "-2":
del sys.argv[1]
# Version 2
def load_modules():
modules = []
for name in os.listdir(os.path.dirname(__file__) or "."):
if name.endswith(".py") and "magic" in name.lower():
filename = name
name = os.path.splitext(name)[0]
if name.isidentifier() and name not in sys.modules:
fh = None
try:
fh = open(filename, "r", encoding="utf8")
code = fh.read()
module = type(sys)(name)
sys.modules[name] = module
exec(code, module.__dict__)
modules.append(module)
except (EnvironmentError, SyntaxError) as err:
sys.modules.pop(name, None)
print(err)
finally:
if fh is not None:
fh.close()
return modules
else:
# Version 3
def load_modules():
modules = []
for name in os.listdir(os.path.dirname(__file__) or "."):
if name.endswith(".py") and "magic" in name.lower():
name = os.path.splitext(name)[0]
if name.isidentifier() and name not in sys.modules:
try:
module = __import__(name)
modules.append(module)
except (ImportError, SyntaxError) as err:
print(err)
return modules
main() | en | 0.835666 | #!/usr/bin/env python3 # Copyright (c) 2008-11 Qtrac Ltd. All rights reserved. # This program or module is free software: you can redistribute it and/or # modify it under the terms of the GNU General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. It is provided for educational # purposes and is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # Version 1 # Version 2 # Version 3 | 2.63039 | 3 |
src/accounts/serializers.py | Bounty1993/rest-crm | 0 | 6621284 | <filename>src/accounts/serializers.py
from django.contrib.auth import get_user_model
from django.contrib.auth.password_validation import validate_password
from rest_framework import serializers
from rest_framework.serializers import (
ModelSerializer, Serializer,
ValidationError, HyperlinkedModelSerializer,
)
from rest_framework.validators import UniqueValidator
from .models import Departament
User = get_user_model()
class UserRegistrationSerializer(ModelSerializer):
    """Create a new user from username, email and two matching passwords."""
    password2 = serializers.CharField(label='<PASSWORD>', max_length=50)

    class Meta:
        model = User
        fields = [
            'username',
            'email',
            'password',
            'password2',
        ]
        extra_kwargs = {
            # Never echo passwords back in responses.
            'password': {'write_only': True},
            'password2': {'write_only': True},
        }

    def validate_password2(self, value):
        """Ensure the confirmation password matches the password."""
        data = self.initial_data
        password = data.get('password')
        password2 = value
        # Bug fix: the original compared `password != password`, i.e. the
        # password with itself, so the mismatch check could never fire.
        if password != password2:
            msg = 'Hasła nie różnią!'
            raise ValidationError(msg)
        return value

    def create(self, validated_data):
        """Create the user, hashing the password via set_password."""
        username = validated_data['username']
        email = validated_data['email']
        password = validated_data['password']
        user = User(
            username=username,
            email=email,
        )
        user.set_password(password)
        user.save()
        return validated_data
class UserLoginSerializer(serializers.ModelSerializer):
    """Validate username/password credentials and attach an auth token."""
    token = serializers.CharField(max_length=50, read_only=True)
    username = serializers.CharField()

    class Meta:
        model = User
        fields = [
            'username',
            'password',
            'token',
        ]

    def validate(self, data):
        """Check the credentials; on success add a token to the payload."""
        username = data['username']
        password = data['password']
        users = User.objects.filter(username=username)
        # The same message is used for an unknown user and a wrong
        # password, so the response does not reveal which usernames exist.
        msg = 'Użytkownik lub hasło są nieprawidłowe'
        if users.count() == 1:
            user_obj = users.first()
        else:
            raise serializers.ValidationError(msg)
        if not user_obj.check_password(password):
            raise ValidationError(msg)
        # NOTE(review): placeholder token -- 'SOME_TOKEN' should be
        # replaced with real token generation (e.g. DRF tokens or JWT).
        data['token'] = 'SOME_TOKEN'
        return data
class UserListSerializer(ModelSerializer):
    """Representation of users for list/detail endpoints; the username is
    read-only so it cannot be changed through this serializer."""
    class Meta:
        model = User
        fields = [
            'id', 'first_name',
            'last_name', 'username',
            'email',
        ]
        extra_kwargs = {
            'username': {'read_only': True}
        }
class ChangePasswordSerializer(Serializer):
    """Payload for a password change: the old and the new password."""
    old_password = serializers.CharField(required=True)
    new_password = serializers.CharField(required=True)

    def validate_new_password(self, value):
        # Delegate strength checks to Django's configured password
        # validators; raises ValidationError on a weak password.
        validate_password(value)
        return value
class DepartamentSerializer(ModelSerializer):
    """Departament with its member users referenced by primary key."""
    workers = serializers.PrimaryKeyRelatedField(
        many=True, queryset=User.objects.all(), required=False
    )

    class Meta:
        model = Departament
        fields = [
            'name', 'workers'
        ]
        extra_kwargs = {
            'name': {
                # Enforce unique departament names at the API level.
                'validators': [
                    UniqueValidator(queryset=Departament.objects.all())
                ]
            }
        }
| <filename>src/accounts/serializers.py
from django.contrib.auth import get_user_model
from django.contrib.auth.password_validation import validate_password
from rest_framework import serializers
from rest_framework.serializers import (
ModelSerializer, Serializer,
ValidationError, HyperlinkedModelSerializer,
)
from rest_framework.validators import UniqueValidator
from .models import Departament
User = get_user_model()
class UserRegistrationSerializer(ModelSerializer):
password2 = serializers.CharField(label='<PASSWORD>', max_length=50)
class Meta:
model = User
fields = [
'username',
'email',
'password',
'password2',
]
extra_kwargs = {
'password': {'write_only': True},
'password2': {'write_only': True},
}
def validate_password2(self, value):
data = self.initial_data
password = data.get('password')
password2 = value
if password != password:
msg = 'Hasła nie różnią!'
raise ValidationError(msg)
return value
def create(self, validated_data):
username = validated_data['username']
email = validated_data['email']
password = validated_data['password']
user = User(
username=username,
email=email,
)
user.set_password(password)
user.save()
return validated_data
class UserLoginSerializer(serializers.ModelSerializer):
token = serializers.CharField(max_length=50, read_only=True)
username = serializers.CharField()
class Meta:
model = User
fields = [
'username',
'password',
'token',
]
def validate(self, data):
username = data['username']
password = data['password']
users = User.objects.filter(username=username)
msg = 'Użytkownik lub hasło są nieprawidłowe'
if users.count() == 1:
user_obj = users.first()
else:
raise serializers.ValidationError(msg)
if not user_obj.check_password(password):
raise ValidationError(msg)
data['token'] = 'SOME_TOKEN'
return data
class UserListSerializer(ModelSerializer):
class Meta:
model = User
fields = [
'id', 'first_name',
'last_name', 'username',
'email',
]
extra_kwargs = {
'username': {'read_only': True}
}
class ChangePasswordSerializer(Serializer):
old_password = serializers.CharField(required=True)
new_password = serializers.CharField(required=True)
def validate_new_password(self, value):
validate_password(value)
return value
class DepartamentSerializer(ModelSerializer):
workers = serializers.PrimaryKeyRelatedField(
many=True, queryset=User.objects.all(), required=False
)
class Meta:
model = Departament
fields = [
'name', 'workers'
]
extra_kwargs = {
'name': {
'validators': [
UniqueValidator(queryset=Departament.objects.all())
]
}
}
| none | 1 | 2.358344 | 2 | |
processing/preprocessing.py | LiamWahahaha/how-creative-you-are | 0 | 6621285 | <reponame>LiamWahahaha/how-creative-you-are
import time
from modules.spark_processor import SparkProcessor
from modules.utils import Print
def main():
    """Run the preprocessing pipeline.

    Loads the challenge metadata CSV from S3, extracts imported packages
    and their hashes with Spark, writes the result back to S3 as parquet,
    and logs the record count and total runtime.
    """
    tic = time.perf_counter()
    Print.info('Start preprocessing')
    parallel_processor = SparkProcessor()
    spark = parallel_processor.spark
    Print.info('Load metadata file from s3')
    metadata_s3_path = 's3a://code-database-s3/real-challenges-meta'
    metadata_df = spark.read.option('header', 'true').csv(metadata_s3_path)
    metadata_df.show()
    packages_info_df = parallel_processor.extract_imported_packages_to_df(metadata_df)
    packages_info_df.show()
    packages_hash_df = parallel_processor.add_package_hash_to_df(packages_info_df)
    packages_hash_df.show()
    Print.info('Upload metadata file to s3 in parquet format')
    packages_hash_df.write.parquet(
        's3a://code-database-s3/real-challenge-final-dataset/final.parquet',
        mode='overwrite'
    )
    toc = time.perf_counter()
    Print.info('===============================================')
    # Bug fix: this message was missing its f-prefix and printed the
    # literal template instead of the record count.
    Print.info(f'Processed {packages_hash_df.count()} records')
    Print.info(f'Total processing time: {toc - tic:0.4f} seconds')
    Print.info('===============================================')


if __name__ == '__main__':
    main()
| import time
from modules.spark_processor import SparkProcessor
from modules.utils import Print
def main():
tic = time.perf_counter()
Print.info('Start preprocessing')
parallel_processor = SparkProcessor()
spark = parallel_processor.spark
Print.info('Load metadata file from s3')
metadata_s3_path = 's3a://code-database-s3/real-challenges-meta'
metadata_df = spark.read.option('header', 'true').csv(metadata_s3_path)
metadata_df.show()
packages_info_df = parallel_processor.extract_imported_packages_to_df(metadata_df)
packages_info_df.show()
packages_hash_df = parallel_processor.add_package_hash_to_df(packages_info_df)
packages_hash_df.show()
Print.info('Upload metadata file to s3 in parquet format')
packages_hash_df.write.parquet(
's3a://code-database-s3/real-challenge-final-dataset/final.parquet',
mode='overwrite'
)
toc = time.perf_counter()
Print.info('===============================================')
Print.info('Processed {packages_hash_df.count()} records')
Print.info(f'Total processing time: {toc - tic:0.4f} seconds')
Print.info('===============================================')
if __name__ == '__main__':
main() | none | 1 | 2.634349 | 3 | |
ttastromech/ttastromech_pyaudio.py | MomsFriendlyRobotCompany/ttr2d2 | 5 | 6621286 | from ttastromech import TTAstromech
try:
    import pyaudio

    # pyaudio is available: play the raw audio through a live stream.
    class TTAstromechPyAudio(TTAstromech):
        def __init__(self, path="/sounds"):
            TTAstromech.__init__(self, path)
        def _play(self, data):
            # 16-bit samples (width 2), mono, 22.05 kHz output stream.
            p = pyaudio.PyAudio()
            stream = p.open(
                format=p.get_format_from_width(2),
                channels=1,
                rate=22050,
                output=True
            )
            stream.write(data)
            # NOTE(review): the stream is never stop_stream()/close()d
            # before terminate(); confirm whether that is intentional.
            p.terminate()
except ImportError:
    # Fallback stub: same interface, but only reports that pyaudio is
    # missing instead of playing any sound.
    class TTAstromechPyAudio(TTAstromech):
        def __init__(self, path="/sounds"):
            TTAstromech.__init__(self, path)
            print('<<< Need to install pyaudio >>>')
        def _play(self, data):
            print('Error: no pyaudio installed')
| from ttastromech import TTAstromech
try:
import pyaudio
class TTAstromechPyAudio(TTAstromech):
def __init__(self, path="/sounds"):
TTAstromech.__init__(self, path)
def _play(self, data):
p = pyaudio.PyAudio()
stream = p.open(
format=p.get_format_from_width(2),
channels=1,
rate=22050,
output=True
)
stream.write(data)
p.terminate()
except ImportError:
class TTAstromechPyAudio(TTAstromech):
def __init__(self, path="/sounds"):
TTAstromech.__init__(self, path)
print('<<< Need to install pyaudio >>>')
def _play(self, data):
print('Error: no pyaudio installed')
| none | 1 | 2.793695 | 3 | |
DropMenu.py | StormInside/DropDownMenu | 1 | 6621287 | from PyQt5.QtCore import Qt, QSettings
from PyQt5.QtWidgets import QApplication, \
QMainWindow, \
QVBoxLayout, \
QSizeGrip
from auto_generated_UI import UI_main
class DropMenu(QMainWindow, UI_main.Ui_MainWindow):
    """Frameless, always-on-top drop-down menu window that hides itself
    whenever it loses application focus."""
    def __init__(self, app: QApplication):
        super().__init__()
        self.app = app
        self.setupUi(self)
        # Restore the last saved geometry from QSettings.
        # NOTE(review): on a first run these keys are absent and
        # settings.value(...) returns None, which resize()/move() cannot
        # accept -- confirm the defaults are seeded elsewhere.
        settings = QSettings()
        settings.beginGroup("Screen")
        size = settings.value("main_frame_geometry")
        pos = settings.value("main_pos")
        self.resize(size)
        self.move(pos)
        settings.endGroup()
        self.settings = settings
        self.setStyleSheet("QMainWindow{background-color: darkgray;border: 1px solid black}")
        # Frameless tool window that stays above other windows.
        self.setWindowFlags(Qt.FramelessWindowHint | Qt.Tool | Qt.WindowStaysOnTopHint)
        self.setFocusPolicy(Qt.NoFocus)
        # Hide the menu whenever application focus moves elsewhere.
        self.app.focusChanged.connect(self.on_focus_change)
        layout = QVBoxLayout()
        sizegrip = QSizeGrip(self)
        layout.addWidget(sizegrip, 0, Qt.AlignBottom | Qt.AlignRight)
        # NOTE(review): setLayout() on a QMainWindow is normally ignored;
        # the size grip likely needs to live on a central widget --
        # confirm the intended behavior.
        self.setLayout(layout)
    def on_focus_change(self):
        # print(self.hasFocus())
        # Hide as soon as this window is no longer the active one.
        if not self.isActiveWindow():
            self.hide()
    def show_hide(self):
        """Toggle visibility; when showing, also take focus."""
        if self.isVisible():
            self.hide()
        else:
            self.show()
            self.setFocus()
            self.activateWindow()
| from PyQt5.QtCore import Qt, QSettings
from PyQt5.QtWidgets import QApplication, \
QMainWindow, \
QVBoxLayout, \
QSizeGrip
from auto_generated_UI import UI_main
class DropMenu(QMainWindow, UI_main.Ui_MainWindow):
def __init__(self, app: QApplication):
super().__init__()
self.app = app
self.setupUi(self)
settings = QSettings()
settings.beginGroup("Screen")
size = settings.value("main_frame_geometry")
pos = settings.value("main_pos")
self.resize(size)
self.move(pos)
settings.endGroup()
self.settings = settings
self.setStyleSheet("QMainWindow{background-color: darkgray;border: 1px solid black}")
self.setWindowFlags(Qt.FramelessWindowHint | Qt.Tool | Qt.WindowStaysOnTopHint)
self.setFocusPolicy(Qt.NoFocus)
self.app.focusChanged.connect(self.on_focus_change)
layout = QVBoxLayout()
sizegrip = QSizeGrip(self)
layout.addWidget(sizegrip, 0, Qt.AlignBottom | Qt.AlignRight)
self.setLayout(layout)
def on_focus_change(self):
# print(self.hasFocus())
if not self.isActiveWindow():
self.hide()
def show_hide(self):
if self.isVisible():
self.hide()
else:
self.show()
self.setFocus()
self.activateWindow()
| en | 0.139706 | # print(self.hasFocus()) | 2.354929 | 2 |
abides-gym/abides_gym/envs/markets_execution_environment_v0.py | jpmorganchase/ABIDES-jpmc-gym | 1 | 6621288 | <reponame>jpmorganchase/ABIDES-jpmc-gym<filename>abides-gym/abides_gym/envs/markets_execution_environment_v0.py
import importlib
from dataclasses import asdict, dataclass, field
from typing import Any, Dict, List
from abc import ABC
import gym
import numpy as np
import abides_markets.agents.utils as markets_agent_utils
from abides_core import NanosecondTime
from abides_core.utils import str_to_ns
from abides_core.generators import ConstantTimeGenerator
from .markets_environment import AbidesGymMarketsEnv
class SubGymMarketsExecutionEnv_v0(AbidesGymMarketsEnv):
"""
    Execution V0 environment. It defines one of the ABIDES-Gym-markets environments.
    This environment presents an example of the algorithmic order execution problem.
    The agent either starts with an initial inventory of the stock it tries to trade out of, or with no initial inventory and
    tries to acquire a target number of shares. The goal is to realize this task while minimizing transaction cost from spreads
    and market impact. It does so by splitting the parent order into several smaller child orders.
    Arguments:
        - background_config: the handcrafted agents configuration used for the environment
        - mkt_close: time the market day ends
        - timestep_duration: how long between 2 wake-ups of the gym experimental agent
        - starting_cash: cash of the agents at the beginning of the simulation
        - order_fixed_size: size of the order placed by the experimental gym agent
        - state_history_length: length of the raw state buffer
        - market_data_buffer_length: length of the market data buffer
        - first_interval: how long the simulation is run before the first wake-up of the gym experimental agent
        - parent_order_size: total size the agent has to execute (either buy or sell)
        - execution_window: time length the agent is given to proceed with the parent-order execution
        - direction: direction of the parent order (buy or sell)
        - not_enough_reward_update: a constant penalty per non-executed share at the end of the time window
        - just_quantity_reward_update: update reward if all of the order is completed
        - reward_mode: can use a dense or sparse reward formulation
        - done_ratio: ratio (mark2market_t/starting_cash) that defines when an episode is done (if the agent has lost too much mark-to-market value)
        - debug_mode: arguments to change the info dictionary (lighter version if performance is an issue)
        - background_config_extra_kvargs: dictionary of extra key value arguments passed to the background config builder function
Daily Investor V0:
- Action Space:
- MKT order_fixed_size
- LMT order_fixed_size
- Hold
- State Space:
- holdings_pct
- time_pct
- diff_pct
- imbalance_all
- imbalance_5
- price_impact
- spread
- direction
- returns
"""
raw_state_pre_process = markets_agent_utils.ignore_buffers_decorator
raw_state_to_state_pre_process = (
markets_agent_utils.ignore_mkt_data_buffer_decorator
)
    @dataclass
    class CustomMetricsTracker(ABC):
        """
        Data Class used to track custom metrics that are output to rllib
        """

        slippage_reward: float = 0  # reward component attributed to slippage
        late_penalty_reward: float = 0  # at the end of the episode
        executed_quantity: int = 0  # at the end of the episode
        remaining_quantity: int = 0  # at the end of the episode
        action_counter: Dict[str, int] = field(default_factory=dict)  # counts per action name
        # Most recent state features exposed to the agent.
        holdings_pct: float = 0
        time_pct: float = 0
        diff_pct: float = 0
        imbalance_all: float = 0
        imbalance_5: float = 0
        price_impact: int = 0
        spread: int = 0
        direction_feature: float = 0
        num_max_steps_per_episode: float = 0  # upper bound on steps per episode
def __init__(
self,
background_config: Any = "rmsc04",
mkt_close: str = "16:00:00",
timestep_duration: str = "60s",
starting_cash: int = 1_000_000,
order_fixed_size: int = 10,
state_history_length: int = 4,
market_data_buffer_length: int = 5,
first_interval: str = "00:00:30",
parent_order_size: int = 1000,
execution_window: str = "00:10:00",
direction: str = "BUY",
not_enough_reward_update: int = -1000,
too_much_reward_update: int = -100,
just_quantity_reward_update: int = 0,
debug_mode: bool = False,
background_config_extra_kvargs: Dict[str, Any] = {},
) -> None:
self.background_config: Any = importlib.import_module(
"abides_markets.configs.{}".format(background_config), package=None
)
self.mkt_close: NanosecondTime = str_to_ns(mkt_close)
self.timestep_duration: NanosecondTime = str_to_ns(timestep_duration)
self.starting_cash: int = starting_cash
self.order_fixed_size: int = order_fixed_size
self.state_history_length: int = state_history_length
self.market_data_buffer_length: int = market_data_buffer_length
self.first_interval: NanosecondTime = str_to_ns(first_interval)
self.parent_order_size: int = parent_order_size
self.execution_window: str = str_to_ns(execution_window)
self.direction: str = direction
self.debug_mode: bool = debug_mode
self.too_much_reward_update: int = too_much_reward_update
self.not_enough_reward_update: int = not_enough_reward_update
self.just_quantity_reward_update: int = just_quantity_reward_update
self.entry_price: int = 1
self.far_touch: int = 1
self.near_touch: int = 1
self.step_index: int = 0
self.custom_metrics_tracker = (
self.CustomMetricsTracker()
) # init the custom metric tracker
##################
# CHECK PROPERTIES
assert background_config in [
"rmsc03",
"rmsc04",
"smc_01",
], "Select rmsc03 or rmsc04 as config"
assert (self.first_interval <= str_to_ns("16:00:00")) & (
self.first_interval >= str_to_ns("00:00:00")
), "Select authorized FIRST_INTERVAL delay"
assert (self.mkt_close <= str_to_ns("16:00:00")) & (
self.mkt_close >= str_to_ns("09:30:00")
), "Select authorized market hours"
assert (self.timestep_duration <= str_to_ns("06:30:00")) & (
self.timestep_duration >= str_to_ns("00:00:00")
), "Select authorized timestep_duration"
assert (type(self.starting_cash) == int) & (
self.starting_cash >= 0
), "Select positive integer value for starting_cash"
assert (type(self.order_fixed_size) == int) & (
self.order_fixed_size >= 0
), "Select positive integer value for order_fixed_size"
assert (type(self.state_history_length) == int) & (
self.state_history_length >= 0
), "Select positive integer value for order_fixed_size"
assert (type(self.market_data_buffer_length) == int) & (
self.market_data_buffer_length >= 0
), "Select positive integer value for order_fixed_size"
assert self.debug_mode in [
True,
False,
], "debug_mode needs to be True or False"
assert self.direction in [
"BUY",
"SELL",
], "direction needs to be BUY or SELL"
assert (type(self.parent_order_size) == int) & (
self.order_fixed_size >= 0
), "Select positive integer value for parent_order_size"
assert (self.execution_window <= str_to_ns("06:30:00")) & (
self.execution_window >= str_to_ns("00:00:00")
), "Select authorized execution_window"
assert (
type(self.too_much_reward_update) == int
), "Select integer value for too_much_reward_update"
assert (
type(self.not_enough_reward_update) == int
), "Select integer value for not_enough_reward_update"
assert (
type(self.just_quantity_reward_update) == int
), "Select integer value for just_quantity_reward_update"
background_config_args = {"end_time": self.mkt_close}
background_config_args.update(background_config_extra_kvargs)
super().__init__(
background_config_pair=(
self.background_config.build_config,
background_config_args,
),
wakeup_interval_generator=ConstantTimeGenerator(
step_duration=self.timestep_duration
),
starting_cash=self.starting_cash,
state_buffer_length=self.state_history_length,
market_data_buffer_length=self.market_data_buffer_length,
first_interval=self.first_interval,
)
# Action Space
# MKT order_fixed_size | LMT order_fixed_size | Hold
self.num_actions: int = 3
self.action_space: gym.Space = gym.spaces.Discrete(self.num_actions)
# instantiate the action counter
for i in range(self.num_actions):
self.custom_metrics_tracker.action_counter[f"action_{i}"] = 0
num_ns_episode = self.first_interval + self.execution_window
step_length = self.timestep_duration
num_max_steps_per_episode = num_ns_episode / step_length
self.custom_metrics_tracker.num_max_steps_per_episode = (
num_max_steps_per_episode
)
# State Space
# [holdings, imbalance,spread, direction_feature] + padded_returns
self.num_state_features: int = 8 + self.state_history_length - 1
# construct state space "box"
# holdings_pct, time_pct, diff_pct, imbalance_all, imbalance_5, price_impact, spread, direction, returns
self.state_highs: np.ndarray = np.array(
[
2, # holdings_pct
2, # time_pct
4, # diff_pct
1, # imbalance_all
1, # imbalance_5
np.finfo(np.float32).max, # price_impact
np.finfo(np.float32).max, # spread
np.finfo(np.float32).max,
]
+ (self.state_history_length - 1) # directiom
* [np.finfo(np.float32).max], # returns
dtype=np.float32,
).reshape(self.num_state_features, 1)
self.state_lows: np.ndarray = np.array(
[
-2, # holdings_pct
-2, # time_pct
-4, # diff_pct
0, # imbalance_all
0, # imbalance_5
np.finfo(np.float32).min, # price_impact
np.finfo(np.float32).min, # spread
np.finfo(np.float32).min,
]
+ (self.state_history_length - 1) # direction
* [np.finfo(np.float32).min], # returns
dtype=np.float32,
).reshape(self.num_state_features, 1)
self.observation_space: gym.Space = gym.spaces.Box(
self.state_lows,
self.state_highs,
shape=(self.num_state_features, 1),
dtype=np.float32,
)
# initialize previous_marked_to_market to starting_cash (No holding at the beginning of the episode)
self.previous_marked_to_market: int = self.starting_cash
def _map_action_space_to_ABIDES_SIMULATOR_SPACE(
self, action: int
) -> List[Dict[str, Any]]:
"""
utility function that maps open ai action definition (integers) to environnement API action definition (list of dictionaries)
The action space ranges [0, 1, 2] where:
- `0` MKT direction order_fixed_size
- '1' LMT direction order_fixed_size
- '2' DO NOTHING
Arguments:
- action: integer representation of the different actions
Returns:
- action_list: list of the corresponding series of action mapped into abides env apis
"""
self.custom_metrics_tracker.action_counter[
f"action_{action}"
] += 1 # increase counter
if action == 0:
return [
{"type": "CCL_ALL"},
{
"type": "MKT",
"direction": self.direction,
"size": self.order_fixed_size,
},
]
elif action == 1:
return [
{"type": "CCL_ALL"},
{
"type": "LMT",
"direction": self.direction,
"size": self.order_fixed_size,
"limit_price": self.near_touch,
},
]
elif action == 2:
return []
else:
raise ValueError(
f"Action {action} is not part of the actions supported by the function."
)
@raw_state_to_state_pre_process
def raw_state_to_state(self, raw_state: Dict[str, Any]) -> np.ndarray:
"""
method that transforms a raw state into a state representation
Arguments:
- raw_state: dictionnary that contains raw simulation information obtained from the gym experimental agent
Returns:
- state: state representation defining the MDP for the execution v0 environnement
"""
# 0) Preliminary
bids = raw_state["parsed_mkt_data"]["bids"]
asks = raw_state["parsed_mkt_data"]["asks"]
last_transactions = raw_state["parsed_mkt_data"]["last_transaction"]
# 1) Holdings
holdings = raw_state["internal_data"]["holdings"]
holdings_pct = holdings[-1] / self.parent_order_size
# 2) Timing
# 2)a) mkt_open
mkt_open = raw_state["internal_data"]["mkt_open"][-1]
# 2)b) time from beginning of execution (parent arrival)
current_time = raw_state["internal_data"]["current_time"][-1]
time_from_parent_arrival = current_time - mkt_open - self.first_interval
assert (
current_time >= mkt_open + self.first_interval
), "Agent has woken up earlier than its first interval"
# 2)c) time limit
time_limit = self.execution_window
# 2)d) compute percentage time advancement
time_pct = time_from_parent_arrival / time_limit
# 3) Advancement Comparison
diff_pct = holdings_pct - time_pct
# 3) Imbalance
imbalances_all = [
markets_agent_utils.get_imbalance(b, a, depth=None)
for (b, a) in zip(bids, asks)
]
imbalance_all = imbalances_all[-1]
imbalances_5 = [
markets_agent_utils.get_imbalance(b, a, depth=5)
for (b, a) in zip(bids, asks)
]
imbalance_5 = imbalances_5[-1]
# 4) price_impact
mid_prices = [
markets_agent_utils.get_mid_price(b, a, lt)
for (b, a, lt) in zip(bids, asks, last_transactions)
]
mid_price = mid_prices[-1]
if self.step_index == 0: # 0 order has been executed yet
self.entry_price = mid_price
entry_price = self.entry_price
book = (
raw_state["parsed_mkt_data"]["bids"][-1]
if self.direction == "BUY"
else raw_state["parsed_mkt_data"]["asks"][-1]
)
self.near_touch = book[0][0] if len(book) > 0 else last_transactions[-1]
# Compute the price impact
price_impact = (
np.log(mid_price / entry_price)
if self.direction == "BUY"
else np.log(entry_price / mid_price)
)
# 5) Spread
best_bids = [
bids[0][0] if len(bids) > 0 else mid
for (bids, mid) in zip(bids, mid_prices)
]
best_asks = [
asks[0][0] if len(asks) > 0 else mid
for (asks, mid) in zip(asks, mid_prices)
]
spreads = np.array(best_asks) - np.array(best_bids)
spread = spreads[-1]
# 6) direction feature
direction_features = np.array(mid_prices) - np.array(last_transactions)
direction_feature = direction_features[-1]
# 7) mid_price
mid_prices = [
markets_agent_utils.get_mid_price(b, a, lt)
for (b, a, lt) in zip(bids, asks, last_transactions)
]
returns = np.diff(mid_prices)
padded_returns = np.zeros(self.state_history_length - 1)
padded_returns[-len(returns) :] = (
returns if len(returns) > 0 else padded_returns
)
# log custom metrics to tracker
self.custom_metrics_tracker.holdings_pct = holdings_pct
self.custom_metrics_tracker.time_pct = time_pct
self.custom_metrics_tracker.diff_pct = diff_pct
self.custom_metrics_tracker.imbalance_all = imbalance_all
self.custom_metrics_tracker.imbalance_5 = imbalance_5
self.custom_metrics_tracker.price_impact = price_impact
self.custom_metrics_tracker.spread = spread
self.custom_metrics_tracker.direction_feature = direction_feature
# 8) Computed State
computed_state = np.array(
[
holdings_pct,
time_pct,
diff_pct,
imbalance_all,
imbalance_5,
price_impact,
spread,
direction_feature,
]
+ padded_returns.tolist(),
dtype=np.float32,
)
#
self.step_index += 1
return computed_state.reshape(self.num_state_features, 1)
@raw_state_pre_process
def raw_state_to_reward(self, raw_state: Dict[str, Any]) -> float:
"""
method that transforms a raw state into the reward obtained during the step
Arguments:
- raw_state: dictionnary that contains raw simulation information obtained from the gym experimental agent
Returns:
- reward: immediate reward computed at each step for the execution v0 environnement
"""
# here we define the reward as cash + position marked to market normalized by parent_order_size
# 1) entry_price
entry_price = self.entry_price
# 2) inter_wakeup_executed_orders
inter_wakeup_executed_orders = raw_state["internal_data"][
"inter_wakeup_executed_orders"
]
# 3) Compute PNL of the orders
if len(inter_wakeup_executed_orders) == 0:
pnl = 0
else:
pnl = (
sum(
(entry_price - order.fill_price) * order.quantity
for order in inter_wakeup_executed_orders
)
if self.direction == "BUY"
else sum(
(order.fill_price - entry_price) * order.quantity
for order in inter_wakeup_executed_orders
)
)
self.pnl = pnl
# 4) normalization
reward = pnl / self.parent_order_size
# log custom metrics to tracker
self.custom_metrics_tracker.slippage_reward = reward
return reward
@raw_state_pre_process
def raw_state_to_update_reward(self, raw_state: Dict[str, Any]) -> float:
"""
method that transforms a raw state into the final step reward update (if needed)
Arguments:
- raw_state: dictionnary that contains raw simulation information obtained from the gym experimental agent
Returns:
- reward: update reward computed at the end of the episode for the execution v0 environnement
"""
# can update with additional reward at end of episode depending on scenario normalized by parent_order_size
# 1) Holdings
holdings = raw_state["internal_data"]["holdings"]
# 2) parent_order_size
parent_order_size = self.parent_order_size
# 3) Compute update_reward
if (self.direction == "BUY") and (holdings >= parent_order_size):
update_reward = (
abs(holdings - parent_order_size) * self.too_much_reward_update
) # executed buy too much
elif (self.direction == "BUY") and (holdings < parent_order_size):
update_reward = (
abs(holdings - parent_order_size) * self.not_enough_reward_update
) # executed buy not enough
elif (self.direction == "SELL") and (holdings <= -parent_order_size):
update_reward = (
abs(holdings - parent_order_size) * self.too_much_reward_update
) # executed sell too much
elif (self.direction == "SELL") and (holdings > -parent_order_size):
update_reward = (
abs(holdings - parent_order_size) * self.not_enough_reward_update
) # executed sell not enough
else:
update_reward = self.just_quantity_reward_update
# 4) Normalization
update_reward = update_reward / self.parent_order_size
self.custom_metrics_tracker.late_penalty_reward = update_reward
return update_reward
@raw_state_pre_process
def raw_state_to_done(self, raw_state: Dict[str, Any]) -> bool:
"""
method that transforms a raw state into the flag if an episode is done
Arguments:
- raw_state: dictionnary that contains raw simulation information obtained from the gym experimental agent
Returns:
- done: flag that describes if the episode is terminated or not for the execution v0 environnement
"""
# episode can stop because market closes or because some condition is met
# here the condition is parent order fully executed
# 1) Holdings
holdings = raw_state["internal_data"]["holdings"]
# 2) parent_order_size
parent_order_size = self.parent_order_size
# 3) current time
current_time = raw_state["internal_data"]["current_time"]
# 4) time_limit
# 4)a) mkt_open
mkt_open = raw_state["internal_data"]["mkt_open"]
# 4)b time_limit
time_limit = mkt_open + self.first_interval + self.execution_window
# 5) conditions
if (self.direction == "BUY") and (holdings >= parent_order_size):
done = True # Buy parent order executed
elif (self.direction == "SELL") and (holdings <= -parent_order_size):
done = True # Sell parent order executed
elif current_time >= time_limit:
done = True # Mkt Close
else:
done = False
self.custom_metrics_tracker.executed_quantity = (
holdings if self.direction == "BUY" else -holdings
)
self.custom_metrics_tracker.remaining_quantity = (
parent_order_size - self.custom_metrics_tracker.executed_quantity
)
return done
@raw_state_pre_process
def raw_state_to_info(self, raw_state: Dict[str, Any]) -> Dict[str, Any]:
"""
method that transforms a raw state into an info dictionnary
Arguments:
- raw_state: dictionnary that contains raw simulation information obtained from the gym experimental agent
Returns:
- reward: info dictionnary computed at each step for the execution v0 environnement
"""
# Agent cannot use this info for taking decision
# only for debugging
# 1) Last Known Market Transaction Price
last_transaction = raw_state["parsed_mkt_data"]["last_transaction"]
# 2) Last Known best bid
bids = raw_state["parsed_mkt_data"]["bids"]
best_bid = bids[0][0] if len(bids) > 0 else last_transaction
# 3) Last Known best ask
asks = raw_state["parsed_mkt_data"]["asks"]
best_ask = asks[0][0] if len(asks) > 0 else last_transaction
# 4) Current Time
current_time = raw_state["internal_data"]["current_time"]
# 5) Holdings
holdings = raw_state["internal_data"]["holdings"]
if self.debug_mode == True:
return {
"last_transaction": last_transaction,
"best_bid": best_bid,
"best_ask": best_ask,
"current_time": current_time,
"holdings": holdings,
"parent_size": self.parent_order_size,
"pnl": self.pnl,
"reward": self.pnl / self.parent_order_size,
}
else:
return asdict(self.custom_metrics_tracker)
| import importlib
from dataclasses import asdict, dataclass, field
from typing import Any, Dict, List
from abc import ABC
import gym
import numpy as np
import abides_markets.agents.utils as markets_agent_utils
from abides_core import NanosecondTime
from abides_core.utils import str_to_ns
from abides_core.generators import ConstantTimeGenerator
from .markets_environment import AbidesGymMarketsEnv
class SubGymMarketsExecutionEnv_v0(AbidesGymMarketsEnv):
"""
    Execution V0 environment. It defines one of the ABIDES-Gym-markets environments.
    This environment presents an example of the algorithmic order execution problem.
    The agent has either an initial inventory of the stocks it tries to trade out of or no initial inventory and
    tries to acquire a target number of shares. The goal is to realize this task while minimizing transaction cost from spreads
    and market impact. It does so by splitting the parent order into several smaller child orders.
Arguments:
- background_config: the handcrafted agents configuration used for the environnement
- mkt_close: time the market day ends
- timestep_duration: how long between 2 wakes up of the gym experimental agent
- starting_cash: cash of the agents at the beginning of the simulation
- order_fixed_size: size of the order placed by the experimental gym agent
- state_history_length: length of the raw state buffer
- market_data_buffer_length: length of the market data buffer
- first_interval: how long the simulation is run before the first wake up of the gym experimental agent
        - parent_order_size: Total size the agent has to execute (either buy or sell).
        - execution_window: Time length the agent is given to proceed with the parent_order_size execution.
        - direction: direction of the parent_order (buy or sell)
        - not_enough_reward_update: it is a constant penalty per non-executed share at the end of the time_window
        - just_quantity_reward_update: update reward if all order is completed
        - reward_mode: can use a dense or sparse reward formulation
        - done_ratio: ratio (mark2market_t/starting_cash) that defines when an episode is done (if agent has lost too much mark to market value)
        - debug_mode: arguments to change the info dictionary (lighter version if performance is an issue)
- background_config_extra_kvargs: dictionary of extra key value arguments passed to the background config builder function
Daily Investor V0:
- Action Space:
- MKT order_fixed_size
- LMT order_fixed_size
- Hold
- State Space:
- holdings_pct
- time_pct
- diff_pct
- imbalance_all
- imbalance_5
- price_impact
- spread
- direction
- returns
"""
raw_state_pre_process = markets_agent_utils.ignore_buffers_decorator
raw_state_to_state_pre_process = (
markets_agent_utils.ignore_mkt_data_buffer_decorator
)
    @dataclass
    class CustomMetricsTracker(ABC):
        """
        Data Class used to track custom metrics that are output to rllib
        """

        # per-step slippage reward (normalized execution PnL of the step)
        slippage_reward: float = 0
        late_penalty_reward: float = 0  # at the end of the episode
        executed_quantity: int = 0  # at the end of the episode
        remaining_quantity: int = 0  # at the end of the episode
        # number of times each action was taken, keyed "action_{i}"
        action_counter: Dict[str, int] = field(default_factory=dict)
        # latest observation features (mirror of the state vector components)
        holdings_pct: float = 0
        time_pct: float = 0
        diff_pct: float = 0
        imbalance_all: float = 0
        imbalance_5: float = 0
        price_impact: int = 0
        spread: int = 0
        direction_feature: float = 0
        # upper bound on agent wake-ups per episode (set in __init__)
        num_max_steps_per_episode: float = 0
def __init__(
self,
background_config: Any = "rmsc04",
mkt_close: str = "16:00:00",
timestep_duration: str = "60s",
starting_cash: int = 1_000_000,
order_fixed_size: int = 10,
state_history_length: int = 4,
market_data_buffer_length: int = 5,
first_interval: str = "00:00:30",
parent_order_size: int = 1000,
execution_window: str = "00:10:00",
direction: str = "BUY",
not_enough_reward_update: int = -1000,
too_much_reward_update: int = -100,
just_quantity_reward_update: int = 0,
debug_mode: bool = False,
background_config_extra_kvargs: Dict[str, Any] = {},
) -> None:
self.background_config: Any = importlib.import_module(
"abides_markets.configs.{}".format(background_config), package=None
)
self.mkt_close: NanosecondTime = str_to_ns(mkt_close)
self.timestep_duration: NanosecondTime = str_to_ns(timestep_duration)
self.starting_cash: int = starting_cash
self.order_fixed_size: int = order_fixed_size
self.state_history_length: int = state_history_length
self.market_data_buffer_length: int = market_data_buffer_length
self.first_interval: NanosecondTime = str_to_ns(first_interval)
self.parent_order_size: int = parent_order_size
self.execution_window: str = str_to_ns(execution_window)
self.direction: str = direction
self.debug_mode: bool = debug_mode
self.too_much_reward_update: int = too_much_reward_update
self.not_enough_reward_update: int = not_enough_reward_update
self.just_quantity_reward_update: int = just_quantity_reward_update
self.entry_price: int = 1
self.far_touch: int = 1
self.near_touch: int = 1
self.step_index: int = 0
self.custom_metrics_tracker = (
self.CustomMetricsTracker()
) # init the custom metric tracker
##################
# CHECK PROPERTIES
assert background_config in [
"rmsc03",
"rmsc04",
"smc_01",
], "Select rmsc03 or rmsc04 as config"
assert (self.first_interval <= str_to_ns("16:00:00")) & (
self.first_interval >= str_to_ns("00:00:00")
), "Select authorized FIRST_INTERVAL delay"
assert (self.mkt_close <= str_to_ns("16:00:00")) & (
self.mkt_close >= str_to_ns("09:30:00")
), "Select authorized market hours"
assert (self.timestep_duration <= str_to_ns("06:30:00")) & (
self.timestep_duration >= str_to_ns("00:00:00")
), "Select authorized timestep_duration"
assert (type(self.starting_cash) == int) & (
self.starting_cash >= 0
), "Select positive integer value for starting_cash"
assert (type(self.order_fixed_size) == int) & (
self.order_fixed_size >= 0
), "Select positive integer value for order_fixed_size"
assert (type(self.state_history_length) == int) & (
self.state_history_length >= 0
), "Select positive integer value for order_fixed_size"
assert (type(self.market_data_buffer_length) == int) & (
self.market_data_buffer_length >= 0
), "Select positive integer value for order_fixed_size"
assert self.debug_mode in [
True,
False,
], "debug_mode needs to be True or False"
assert self.direction in [
"BUY",
"SELL",
], "direction needs to be BUY or SELL"
assert (type(self.parent_order_size) == int) & (
self.order_fixed_size >= 0
), "Select positive integer value for parent_order_size"
assert (self.execution_window <= str_to_ns("06:30:00")) & (
self.execution_window >= str_to_ns("00:00:00")
), "Select authorized execution_window"
assert (
type(self.too_much_reward_update) == int
), "Select integer value for too_much_reward_update"
assert (
type(self.not_enough_reward_update) == int
), "Select integer value for not_enough_reward_update"
assert (
type(self.just_quantity_reward_update) == int
), "Select integer value for just_quantity_reward_update"
background_config_args = {"end_time": self.mkt_close}
background_config_args.update(background_config_extra_kvargs)
super().__init__(
background_config_pair=(
self.background_config.build_config,
background_config_args,
),
wakeup_interval_generator=ConstantTimeGenerator(
step_duration=self.timestep_duration
),
starting_cash=self.starting_cash,
state_buffer_length=self.state_history_length,
market_data_buffer_length=self.market_data_buffer_length,
first_interval=self.first_interval,
)
# Action Space
# MKT order_fixed_size | LMT order_fixed_size | Hold
self.num_actions: int = 3
self.action_space: gym.Space = gym.spaces.Discrete(self.num_actions)
# instantiate the action counter
for i in range(self.num_actions):
self.custom_metrics_tracker.action_counter[f"action_{i}"] = 0
num_ns_episode = self.first_interval + self.execution_window
step_length = self.timestep_duration
num_max_steps_per_episode = num_ns_episode / step_length
self.custom_metrics_tracker.num_max_steps_per_episode = (
num_max_steps_per_episode
)
# State Space
# [holdings, imbalance,spread, direction_feature] + padded_returns
self.num_state_features: int = 8 + self.state_history_length - 1
# construct state space "box"
# holdings_pct, time_pct, diff_pct, imbalance_all, imbalance_5, price_impact, spread, direction, returns
self.state_highs: np.ndarray = np.array(
[
2, # holdings_pct
2, # time_pct
4, # diff_pct
1, # imbalance_all
1, # imbalance_5
np.finfo(np.float32).max, # price_impact
np.finfo(np.float32).max, # spread
np.finfo(np.float32).max,
]
+ (self.state_history_length - 1) # directiom
* [np.finfo(np.float32).max], # returns
dtype=np.float32,
).reshape(self.num_state_features, 1)
self.state_lows: np.ndarray = np.array(
[
-2, # holdings_pct
-2, # time_pct
-4, # diff_pct
0, # imbalance_all
0, # imbalance_5
np.finfo(np.float32).min, # price_impact
np.finfo(np.float32).min, # spread
np.finfo(np.float32).min,
]
+ (self.state_history_length - 1) # direction
* [np.finfo(np.float32).min], # returns
dtype=np.float32,
).reshape(self.num_state_features, 1)
self.observation_space: gym.Space = gym.spaces.Box(
self.state_lows,
self.state_highs,
shape=(self.num_state_features, 1),
dtype=np.float32,
)
# initialize previous_marked_to_market to starting_cash (No holding at the beginning of the episode)
self.previous_marked_to_market: int = self.starting_cash
def _map_action_space_to_ABIDES_SIMULATOR_SPACE(
self, action: int
) -> List[Dict[str, Any]]:
"""
utility function that maps open ai action definition (integers) to environnement API action definition (list of dictionaries)
The action space ranges [0, 1, 2] where:
- `0` MKT direction order_fixed_size
- '1' LMT direction order_fixed_size
- '2' DO NOTHING
Arguments:
- action: integer representation of the different actions
Returns:
- action_list: list of the corresponding series of action mapped into abides env apis
"""
self.custom_metrics_tracker.action_counter[
f"action_{action}"
] += 1 # increase counter
if action == 0:
return [
{"type": "CCL_ALL"},
{
"type": "MKT",
"direction": self.direction,
"size": self.order_fixed_size,
},
]
elif action == 1:
return [
{"type": "CCL_ALL"},
{
"type": "LMT",
"direction": self.direction,
"size": self.order_fixed_size,
"limit_price": self.near_touch,
},
]
elif action == 2:
return []
else:
raise ValueError(
f"Action {action} is not part of the actions supported by the function."
)
@raw_state_to_state_pre_process
def raw_state_to_state(self, raw_state: Dict[str, Any]) -> np.ndarray:
"""
method that transforms a raw state into a state representation
Arguments:
- raw_state: dictionnary that contains raw simulation information obtained from the gym experimental agent
Returns:
- state: state representation defining the MDP for the execution v0 environnement
"""
# 0) Preliminary
bids = raw_state["parsed_mkt_data"]["bids"]
asks = raw_state["parsed_mkt_data"]["asks"]
last_transactions = raw_state["parsed_mkt_data"]["last_transaction"]
# 1) Holdings
holdings = raw_state["internal_data"]["holdings"]
holdings_pct = holdings[-1] / self.parent_order_size
# 2) Timing
# 2)a) mkt_open
mkt_open = raw_state["internal_data"]["mkt_open"][-1]
# 2)b) time from beginning of execution (parent arrival)
current_time = raw_state["internal_data"]["current_time"][-1]
time_from_parent_arrival = current_time - mkt_open - self.first_interval
assert (
current_time >= mkt_open + self.first_interval
), "Agent has woken up earlier than its first interval"
# 2)c) time limit
time_limit = self.execution_window
# 2)d) compute percentage time advancement
time_pct = time_from_parent_arrival / time_limit
# 3) Advancement Comparison
diff_pct = holdings_pct - time_pct
# 3) Imbalance
imbalances_all = [
markets_agent_utils.get_imbalance(b, a, depth=None)
for (b, a) in zip(bids, asks)
]
imbalance_all = imbalances_all[-1]
imbalances_5 = [
markets_agent_utils.get_imbalance(b, a, depth=5)
for (b, a) in zip(bids, asks)
]
imbalance_5 = imbalances_5[-1]
# 4) price_impact
mid_prices = [
markets_agent_utils.get_mid_price(b, a, lt)
for (b, a, lt) in zip(bids, asks, last_transactions)
]
mid_price = mid_prices[-1]
if self.step_index == 0: # 0 order has been executed yet
self.entry_price = mid_price
entry_price = self.entry_price
book = (
raw_state["parsed_mkt_data"]["bids"][-1]
if self.direction == "BUY"
else raw_state["parsed_mkt_data"]["asks"][-1]
)
self.near_touch = book[0][0] if len(book) > 0 else last_transactions[-1]
# Compute the price impact
price_impact = (
np.log(mid_price / entry_price)
if self.direction == "BUY"
else np.log(entry_price / mid_price)
)
# 5) Spread
best_bids = [
bids[0][0] if len(bids) > 0 else mid
for (bids, mid) in zip(bids, mid_prices)
]
best_asks = [
asks[0][0] if len(asks) > 0 else mid
for (asks, mid) in zip(asks, mid_prices)
]
spreads = np.array(best_asks) - np.array(best_bids)
spread = spreads[-1]
# 6) direction feature
direction_features = np.array(mid_prices) - np.array(last_transactions)
direction_feature = direction_features[-1]
# 7) mid_price
mid_prices = [
markets_agent_utils.get_mid_price(b, a, lt)
for (b, a, lt) in zip(bids, asks, last_transactions)
]
returns = np.diff(mid_prices)
padded_returns = np.zeros(self.state_history_length - 1)
padded_returns[-len(returns) :] = (
returns if len(returns) > 0 else padded_returns
)
# log custom metrics to tracker
self.custom_metrics_tracker.holdings_pct = holdings_pct
self.custom_metrics_tracker.time_pct = time_pct
self.custom_metrics_tracker.diff_pct = diff_pct
self.custom_metrics_tracker.imbalance_all = imbalance_all
self.custom_metrics_tracker.imbalance_5 = imbalance_5
self.custom_metrics_tracker.price_impact = price_impact
self.custom_metrics_tracker.spread = spread
self.custom_metrics_tracker.direction_feature = direction_feature
# 8) Computed State
computed_state = np.array(
[
holdings_pct,
time_pct,
diff_pct,
imbalance_all,
imbalance_5,
price_impact,
spread,
direction_feature,
]
+ padded_returns.tolist(),
dtype=np.float32,
)
#
self.step_index += 1
return computed_state.reshape(self.num_state_features, 1)
@raw_state_pre_process
def raw_state_to_reward(self, raw_state: Dict[str, Any]) -> float:
"""
method that transforms a raw state into the reward obtained during the step
Arguments:
- raw_state: dictionnary that contains raw simulation information obtained from the gym experimental agent
Returns:
- reward: immediate reward computed at each step for the execution v0 environnement
"""
# here we define the reward as cash + position marked to market normalized by parent_order_size
# 1) entry_price
entry_price = self.entry_price
# 2) inter_wakeup_executed_orders
inter_wakeup_executed_orders = raw_state["internal_data"][
"inter_wakeup_executed_orders"
]
# 3) Compute PNL of the orders
if len(inter_wakeup_executed_orders) == 0:
pnl = 0
else:
pnl = (
sum(
(entry_price - order.fill_price) * order.quantity
for order in inter_wakeup_executed_orders
)
if self.direction == "BUY"
else sum(
(order.fill_price - entry_price) * order.quantity
for order in inter_wakeup_executed_orders
)
)
self.pnl = pnl
# 4) normalization
reward = pnl / self.parent_order_size
# log custom metrics to tracker
self.custom_metrics_tracker.slippage_reward = reward
return reward
@raw_state_pre_process
def raw_state_to_update_reward(self, raw_state: Dict[str, Any]) -> float:
"""
method that transforms a raw state into the final step reward update (if needed)
Arguments:
- raw_state: dictionnary that contains raw simulation information obtained from the gym experimental agent
Returns:
- reward: update reward computed at the end of the episode for the execution v0 environnement
"""
# can update with additional reward at end of episode depending on scenario normalized by parent_order_size
# 1) Holdings
holdings = raw_state["internal_data"]["holdings"]
# 2) parent_order_size
parent_order_size = self.parent_order_size
# 3) Compute update_reward
if (self.direction == "BUY") and (holdings >= parent_order_size):
update_reward = (
abs(holdings - parent_order_size) * self.too_much_reward_update
) # executed buy too much
elif (self.direction == "BUY") and (holdings < parent_order_size):
update_reward = (
abs(holdings - parent_order_size) * self.not_enough_reward_update
) # executed buy not enough
elif (self.direction == "SELL") and (holdings <= -parent_order_size):
update_reward = (
abs(holdings - parent_order_size) * self.too_much_reward_update
) # executed sell too much
elif (self.direction == "SELL") and (holdings > -parent_order_size):
update_reward = (
abs(holdings - parent_order_size) * self.not_enough_reward_update
) # executed sell not enough
else:
update_reward = self.just_quantity_reward_update
# 4) Normalization
update_reward = update_reward / self.parent_order_size
self.custom_metrics_tracker.late_penalty_reward = update_reward
return update_reward
@raw_state_pre_process
def raw_state_to_done(self, raw_state: Dict[str, Any]) -> bool:
"""
method that transforms a raw state into the flag if an episode is done
Arguments:
- raw_state: dictionnary that contains raw simulation information obtained from the gym experimental agent
Returns:
- done: flag that describes if the episode is terminated or not for the execution v0 environnement
"""
# episode can stop because market closes or because some condition is met
# here the condition is parent order fully executed
# 1) Holdings
holdings = raw_state["internal_data"]["holdings"]
# 2) parent_order_size
parent_order_size = self.parent_order_size
# 3) current time
current_time = raw_state["internal_data"]["current_time"]
# 4) time_limit
# 4)a) mkt_open
mkt_open = raw_state["internal_data"]["mkt_open"]
# 4)b time_limit
time_limit = mkt_open + self.first_interval + self.execution_window
# 5) conditions
if (self.direction == "BUY") and (holdings >= parent_order_size):
done = True # Buy parent order executed
elif (self.direction == "SELL") and (holdings <= -parent_order_size):
done = True # Sell parent order executed
elif current_time >= time_limit:
done = True # Mkt Close
else:
done = False
self.custom_metrics_tracker.executed_quantity = (
holdings if self.direction == "BUY" else -holdings
)
self.custom_metrics_tracker.remaining_quantity = (
parent_order_size - self.custom_metrics_tracker.executed_quantity
)
return done
@raw_state_pre_process
def raw_state_to_info(self, raw_state: Dict[str, Any]) -> Dict[str, Any]:
"""
method that transforms a raw state into an info dictionnary
Arguments:
- raw_state: dictionnary that contains raw simulation information obtained from the gym experimental agent
Returns:
- reward: info dictionnary computed at each step for the execution v0 environnement
"""
# Agent cannot use this info for taking decision
# only for debugging
# 1) Last Known Market Transaction Price
last_transaction = raw_state["parsed_mkt_data"]["last_transaction"]
# 2) Last Known best bid
bids = raw_state["parsed_mkt_data"]["bids"]
best_bid = bids[0][0] if len(bids) > 0 else last_transaction
# 3) Last Known best ask
asks = raw_state["parsed_mkt_data"]["asks"]
best_ask = asks[0][0] if len(asks) > 0 else last_transaction
# 4) Current Time
current_time = raw_state["internal_data"]["current_time"]
# 5) Holdings
holdings = raw_state["internal_data"]["holdings"]
if self.debug_mode == True:
return {
"last_transaction": last_transaction,
"best_bid": best_bid,
"best_ask": best_ask,
"current_time": current_time,
"holdings": holdings,
"parent_size": self.parent_order_size,
"pnl": self.pnl,
"reward": self.pnl / self.parent_order_size,
}
else:
return asdict(self.custom_metrics_tracker) | en | 0.783212 | Execution V0 environnement. It defines one of the ABIDES-Gym-markets environnement. This environment presents an example of the algorithmic orderexecution problem. The agent has either an initial inventory of the stocks it tries to trade out of or no initial inventory and tries to acquire a target number of shares. The goal is to realize thistask while minimizing transaction cost from spreads and marketimpact. It does so by splitting the parent order into several smallerchild orders. Arguments: - background_config: the handcrafted agents configuration used for the environnement - mkt_close: time the market day ends - timestep_duration: how long between 2 wakes up of the gym experimental agent - starting_cash: cash of the agents at the beginning of the simulation - order_fixed_size: size of the order placed by the experimental gym agent - state_history_length: length of the raw state buffer - market_data_buffer_length: length of the market data buffer - first_interval: how long the simulation is run before the first wake up of the gym experimental agent - parent_order_size: Total size the agent has to execute (eitherbuy or sell). - execution_window: Time length the agent is given to proceed with 𝑝𝑎𝑟𝑒𝑛𝑡𝑂𝑟𝑑𝑒𝑟𝑆𝑖𝑧𝑒execution. 
- direction: direction of the 𝑝𝑎𝑟𝑒𝑛𝑡𝑂𝑟𝑑𝑒𝑟 (buy or sell) - not_enough_reward_update: it is a constant penalty per non-executed share atthe end of the𝑡𝑖𝑚𝑒𝑊𝑖𝑛𝑑𝑜𝑤 - just_quantity_reward_update: update reward if all order is completed - reward_mode: can use a dense of sparse reward formulation - done_ratio: ratio (mark2market_t/starting_cash) that defines when an episode is done (if agent has lost too much mark to market value) - debug_mode: arguments to change the info dictionnary (lighter version if performance is an issue) - background_config_extra_kvargs: dictionary of extra key value arguments passed to the background config builder function Daily Investor V0: - Action Space: - MKT order_fixed_size - LMT order_fixed_size - Hold - State Space: - holdings_pct - time_pct - diff_pct - imbalance_all - imbalance_5 - price_impact - spread - direction - returns Data Class used to track custom metrics that are output to rllib # at the end of the episode # at the end of the episode # at the end of the episode # init the custom metric tracker ################## # CHECK PROPERTIES # Action Space # MKT order_fixed_size | LMT order_fixed_size | Hold # instantiate the action counter # State Space # [holdings, imbalance,spread, direction_feature] + padded_returns # construct state space "box" # holdings_pct, time_pct, diff_pct, imbalance_all, imbalance_5, price_impact, spread, direction, returns # holdings_pct # time_pct # diff_pct # imbalance_all # imbalance_5 # price_impact # spread # directiom # returns # holdings_pct # time_pct # diff_pct # imbalance_all # imbalance_5 # price_impact # spread # direction # returns # initialize previous_marked_to_market to starting_cash (No holding at the beginning of the episode) utility function that maps open ai action definition (integers) to environnement API action definition (list of dictionaries) The action space ranges [0, 1, 2] where: - `0` MKT direction order_fixed_size - '1' LMT direction order_fixed_size - '2' DO NOTHING Arguments: 
- action: integer representation of the different actions Returns: - action_list: list of the corresponding series of action mapped into abides env apis # increase counter method that transforms a raw state into a state representation Arguments: - raw_state: dictionnary that contains raw simulation information obtained from the gym experimental agent Returns: - state: state representation defining the MDP for the execution v0 environnement # 0) Preliminary # 1) Holdings # 2) Timing # 2)a) mkt_open # 2)b) time from beginning of execution (parent arrival) # 2)c) time limit # 2)d) compute percentage time advancement # 3) Advancement Comparison # 3) Imbalance # 4) price_impact # 0 order has been executed yet # Compute the price impact # 5) Spread # 6) direction feature # 7) mid_price # log custom metrics to tracker # 8) Computed State # method that transforms a raw state into the reward obtained during the step Arguments: - raw_state: dictionnary that contains raw simulation information obtained from the gym experimental agent Returns: - reward: immediate reward computed at each step for the execution v0 environnement # here we define the reward as cash + position marked to market normalized by parent_order_size # 1) entry_price # 2) inter_wakeup_executed_orders # 3) Compute PNL of the orders # 4) normalization # log custom metrics to tracker method that transforms a raw state into the final step reward update (if needed) Arguments: - raw_state: dictionnary that contains raw simulation information obtained from the gym experimental agent Returns: - reward: update reward computed at the end of the episode for the execution v0 environnement # can update with additional reward at end of episode depending on scenario normalized by parent_order_size # 1) Holdings # 2) parent_order_size # 3) Compute update_reward # executed buy too much # executed buy not enough # executed sell too much # executed sell not enough # 4) Normalization method that transforms a raw state into the 
flag if an episode is done Arguments: - raw_state: dictionnary that contains raw simulation information obtained from the gym experimental agent Returns: - done: flag that describes if the episode is terminated or not for the execution v0 environnement # episode can stop because market closes or because some condition is met # here the condition is parent order fully executed # 1) Holdings # 2) parent_order_size # 3) current time # 4) time_limit # 4)a) mkt_open # 4)b time_limit # 5) conditions # Buy parent order executed # Sell parent order executed # Mkt Close method that transforms a raw state into an info dictionnary Arguments: - raw_state: dictionnary that contains raw simulation information obtained from the gym experimental agent Returns: - reward: info dictionnary computed at each step for the execution v0 environnement # Agent cannot use this info for taking decision # only for debugging # 1) Last Known Market Transaction Price # 2) Last Known best bid # 3) Last Known best ask # 4) Current Time # 5) Holdings | 2.742692 | 3 |
Raspberry-Pi/src/solarduino.py | PHPirates/SolArduino | 3 | 6621289 | import socket
import subprocess
import sys
import cherrypy
import psutil
from cherrypy.process.plugins import Daemonizer, PIDFile
from src.webserver.webserver import Webserver
pid_path = '/tmp/solarduino.pid'
def get_ip() -> str:
    """Return the local IP address used for outbound traffic."""
    # Connecting a UDP socket sends no packets; it only makes the OS pick the
    # outbound interface, whose address getsockname() then reports.
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    probe.connect(("8.8.8.8", 80))
    local_ip = probe.getsockname()[0]
    probe.close()
    return local_ip
def kill_if_exists():
    """Kill a previously started instance, if any, using the PID file.

    Falls back to ``sudo kill`` when we lack permission to terminate the
    process directly. A missing or stale PID file is ignored.
    """
    try:
        with open(pid_path, 'r') as f:
            pid = int(f.read())
        process = psutil.Process(pid)
        process.terminate()
    except psutil.AccessDenied:
        # Not allowed to signal the process directly (e.g. it runs as root).
        subprocess.check_call(['sudo', 'kill', str(pid)])
    except psutil.NoSuchProcess:
        # Stale PID file: the recorded process already exited.
        pass
    except FileNotFoundError:
        # No PID file: no previous instance is running.
        pass
if __name__ == '__main__':
    """ Start SolArduino. """
    # Restart semantics: stop any engine left in this interpreter, then kill
    # the previously daemonized instance recorded in the PID file.
    cherrypy.engine.exit()
    kill_if_exists()
    PIDFile(cherrypy.engine, pid_path).subscribe()
    # Don't daemonize when Pycharm is debugging
    gettrace = getattr(sys, 'gettrace', None)
    if gettrace is None or not gettrace():
        Daemonizer(cherrypy.engine,
                   stdout='logs/solarduino_access.log',
                   stderr='logs/solarduino_error.log').subscribe()
    # Bind to the machine's outbound IP so the server is reachable on the LAN.
    cherrypy.config.update({'server.socket_host': get_ip(),
                            'server.socket_port': 8080,
                            })
    cherrypy.quickstart(Webserver(), '')
| import socket
import subprocess
import sys
import cherrypy
import psutil
from cherrypy.process.plugins import Daemonizer, PIDFile
from src.webserver.webserver import Webserver
pid_path = '/tmp/solarduino.pid'
def get_ip() -> str:
    """Return the local IP address used for outbound traffic."""
    # Connecting a UDP socket sends no packets; it only selects the outbound
    # interface, whose address getsockname() reports.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(("8.8.8.8", 80))
    ip = s.getsockname()[0]
    s.close()
    return ip
def kill_if_exists():
    """ Kill current process if it is running. """
    # NOTE(review): a stale PID file makes psutil.Process raise
    # psutil.NoSuchProcess, which is not handled here -- confirm.
    try:
        with open(pid_path, 'r') as f:
            pid = int(f.read())
        process = psutil.Process(pid)
        process.terminate()
    except psutil.AccessDenied:
        # Fall back to sudo when we may not signal the process directly.
        subprocess.check_call(['sudo', 'kill', str(pid)])
    except FileNotFoundError:
        # No PID file -> no previous instance; nothing to do.
        pass
if __name__ == '__main__':
    """ Start SolArduino. """
    # Restart semantics: stop any engine left in this interpreter, then kill
    # the previously daemonized instance recorded in the PID file.
    cherrypy.engine.exit()
    kill_if_exists()
    PIDFile(cherrypy.engine, pid_path).subscribe()
    # Don't daemonize when Pycharm is debugging
    gettrace = getattr(sys, 'gettrace', None)
    if gettrace is None or not gettrace():
        Daemonizer(cherrypy.engine,
                   stdout='logs/solarduino_access.log',
                   stderr='logs/solarduino_error.log').subscribe()
    # Bind to the machine's outbound IP so the server is reachable on the LAN.
    cherrypy.config.update({'server.socket_host': get_ip(),
                            'server.socket_port': 8080,
                            })
    cherrypy.quickstart(Webserver(), '')
| en | 0.699726 | Kill current process if it is running. Start SolArduino. # Don't daemonize when Pycharm is debugging | 2.075211 | 2 |
corehq/apps/ota/migrations/0011_remove_devicelogrequest_deviceid.py | dimagilg/commcare-hq | 471 | 6621290 | <filename>corehq/apps/ota/migrations/0011_remove_devicelogrequest_deviceid.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-03-11 19:26
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: tightens DeviceLogRequest uniqueness
    # to (domain, username) and drops the now-unused device_id column.

    dependencies = [
        ('ota', '0010_alter_devicelogrequest'),
    ]

    operations = [
        migrations.AlterUniqueTogether(
            name='devicelogrequest',
            unique_together=set([('domain', 'username')]),
        ),
        migrations.RemoveField(
            model_name='devicelogrequest',
            name='device_id',
        ),
    ]
| <filename>corehq/apps/ota/migrations/0011_remove_devicelogrequest_deviceid.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-03-11 19:26
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: tightens DeviceLogRequest uniqueness
    # to (domain, username) and drops the now-unused device_id column.

    dependencies = [
        ('ota', '0010_alter_devicelogrequest'),
    ]

    operations = [
        migrations.AlterUniqueTogether(
            name='devicelogrequest',
            unique_together=set([('domain', 'username')]),
        ),
        migrations.RemoveField(
            model_name='devicelogrequest',
            name='device_id',
        ),
    ]
| en | 0.689485 | # -*- coding: utf-8 -*- # Generated by Django 1.11.27 on 2020-03-11 19:26 | 1.547446 | 2 |
api/views.py | YSP-SINERGY/xserver-sinergy2021-event-website | 0 | 6621291 | from db import Db
from flask import request
from flask_restful import Resource
from sqlalchemy import text as sql_text
class YouthVote(Resource):  # controls the logic for API requests on the Youth page
    """Vote endpoints for the Youth presentation page."""
    def __init__(self):
        self.db = Db()
    def get(self):
        """Return the terminals (ip address / user agent) that already voted."""
        query = "SELECT ip_address, user_agent FROM youth_connection;"
        result = self.db.connection.execute(query)
        cleaned = self.db.clean_select_results(result.fetchall(), result.keys())
        return {
            'user_terminals': cleaned
        }
    def patch(self):
        """Increment a presenter's vote count and record the voter's terminal.

        Expects a JSON payload carrying the presenter ``id`` plus the
        caller's ``ip`` and ``user_agent``, bound into the parameterized
        queries below.
        """
        payload = request.get_json()
        vote_query = "UPDATE youth_vote SET vote_counts = vote_counts + 1 WHERE id = :id"
        connection_query = "INSERT INTO youth_connection (id, presenter_id, ip_address, user_agent) VALUES (DEFAULT, :id, :ip, :user_agent)"
        try:
            self.db.connection.execute(sql_text(vote_query), payload)
            self.db.connection.execute(sql_text(connection_query), payload)
            return True
        except Exception as err:
            return err
class TeensVote(Resource):  # controls the logic for API requests on the Teens page
    """ The votes View """
    def __init__(self):
        self.db = Db()
    def get(self):
        """ Returns the terminals (ip address / user agent) that already voted. """
        query = "SELECT ip_address, user_agent FROM teens_connection;"
        res = self.db.connection.execute(query)
        rows = res.fetchall()
        keys = res.keys()
        user_terminals = self.db.clean_select_results(rows, keys)
        return {
            'user_terminals': user_terminals
        }
    def patch(self):
        """
        Increment a presenter's vote count and record the voter's terminal.
        Expects a JSON payload with the presenter "id" and the caller's
        "ip" and "user_agent" (bound into the parameterized queries below).
        """
        data = request.get_json()
        # Renamed to vote_query for consistency with YouthVote.patch.
        vote_query = "UPDATE teens_vote SET vote_counts = vote_counts + 1 WHERE id = :id"
        connection_query = "INSERT INTO teens_connection (id, presenter_id, ip_address, user_agent) VALUES (DEFAULT, :id, :ip, :user_agent)"
        try:
            self.db.connection.execute(sql_text(vote_query), data)
            self.db.connection.execute(sql_text(connection_query), data)
            return True
        except Exception as err:
            # NOTE(review): returning the exception object may not be JSON
            # serializable by Flask-RESTful; consider an error payload instead.
            return err
from flask import request
from flask_restful import Resource
from sqlalchemy import text as sql_text
class YouthVote(Resource):  # controls the logic for API requests on the Youth page
    """ The votes View """
    def __init__(self):
        # One Db wrapper per resource instance.
        self.db = Db()
    def get(self):
        """ Returns the terminals (ip address / user agent) that already voted. """
        # query = "SELECT * FROM youth_vote ORDER BY id ASC"
        query = "SELECT ip_address, user_agent FROM youth_connection;"
        res = self.db.connection.execute(query)
        rows = res.fetchall()
        keys = res.keys()
        user_terminals = self.db.clean_select_results(rows, keys)
        return {
            'user_terminals': user_terminals
        }
    def patch(self):
        """
        Increment a presenter's vote count and record the voter's terminal.
        Expects a JSON payload with the presenter "id" and the caller's
        "ip" and "user_agent" (bound into the parameterized queries below).
        """
        data = request.get_json()
        vote_query = "UPDATE youth_vote SET vote_counts = vote_counts + 1 WHERE id = :id"
        connection_query = "INSERT INTO youth_connection (id, presenter_id, ip_address, user_agent) VALUES (DEFAULT, :id, :ip, :user_agent)"
        try:
            self.db.connection.execute(sql_text(vote_query), data)
            self.db.connection.execute(sql_text(connection_query), data)
            return True
        except Exception as err:
            # NOTE(review): returning the exception object may not be JSON
            # serializable by Flask-RESTful; verify the error path.
            return err
class TeensVote(Resource):  # controls the logic for API requests on the Teens page
    """ The votes View """
    def __init__(self):
        # One Db wrapper per resource instance.
        self.db = Db()
    def get(self):
        """ Returns the terminals (ip address / user agent) that already voted. """
        # query = "SELECT * FROM teens_vote ORDER BY id ASC"
        query = "SELECT ip_address, user_agent FROM teens_connection;"
        res = self.db.connection.execute(query)
        rows = res.fetchall()
        keys = res.keys()
        user_terminals = self.db.clean_select_results(rows, keys)
        return {
            'user_terminals': user_terminals
        }
    def patch(self):
        """
        Increment a presenter's vote count and record the voter's terminal.
        Expects a JSON payload with the presenter "id" and the caller's
        "ip" and "user_agent" (bound into the parameterized queries below).
        """
        data = request.get_json()
        # NOTE(review): named `query` here but `vote_query` in YouthVote --
        # consider aligning the two classes.
        query = "UPDATE teens_vote SET vote_counts = vote_counts + 1 WHERE id = :id"
        connection_query = "INSERT INTO teens_connection (id, presenter_id, ip_address, user_agent) VALUES (DEFAULT, :id, :ip, :user_agent)"
        try:
            self.db.connection.execute(sql_text(query), data)
            self.db.connection.execute(sql_text(connection_query), data)
            return True
        except Exception as err:
            # NOTE(review): returning the exception object may not be JSON
            # serializable by Flask-RESTful; verify the error path.
            return err
mundo 3/091.py | thiagofreitascarneiro/Curso-de-Python---Curso-em-Video | 1 | 6621292 | from random import randint
from time import sleep
from operator import itemgetter
jogador = {}
cont = 0  # NOTE(review): unused counter -- presumably leftover; confirm before removing
print('Valores Sorteados:')
# Roll one six-sided die for each of the four players.
for i in range(1, 5):
    jogador[i] = randint(1, 6)
for j, d in jogador.items():
    sleep(1)
    print(f'O Jogador{j} tirou {d}')
print('Ranking dos jogadores:')
print(jogador)
# Sorts the dictionary items from highest to lowest roll.
ranking = list()
ranking = sorted(jogador.items(), key=itemgetter(1), reverse=True)
print(ranking)
# Announce each player's final position (1-based).
for i, v in enumerate(ranking):
    print(f'O jogador{v[0]} ficou na posição {i + 1}º com {v[1]}')
    sleep(1)
| from random import randint
from time import sleep
from operator import itemgetter
jogador = {}
cont = 0  # NOTE(review): unused counter -- presumably leftover; confirm before removing
print('Valores Sorteados:')
# Roll one six-sided die for each of the four players.
for i in range(1, 5):
    jogador[i] = randint(1, 6)
for j, d in jogador.items():
    sleep(1)
    print(f'O Jogador{j} tirou {d}')
print('Ranking dos jogadores:')
print(jogador)
# Sorts the dictionary items from highest to lowest roll.
ranking = list()
ranking = sorted(jogador.items(), key=itemgetter(1), reverse=True)
print(ranking)
# Announce each player's final position (1-based).
for i, v in enumerate(ranking):
    print(f'O jogador{v[0]} ficou na posição {i + 1}º com {v[1]}')
    sleep(1)
| pt | 0.739125 | #Serve para ordenar um dicionario do maior para o menor | 3.623769 | 4 |
examples/traductor/tests/test_translators.py | connectthefuture/docker-hacks | 5 | 6621293 | <filename>examples/traductor/tests/test_translators.py
import six
import unittest
from traductor.translators import (cap_add, cap_drop, container_name, cpu_shares, cpuset, devices,
dns, dns_search, entrypoint, env_file, environment, expose, hostname, labels, links,
log_driver, mac_address, mem_limit, memswap_limit, net, pid, ports, privileged, read_only,
restart, stdin_open, tty, user, volume_driver, volumes, volumes_from, working_dir)
class TestCapAdd(unittest.TestCase):
    """Tests for the cap_add translator."""
    def test_coversion(self):
        # A list value translates into a --cap-add flag per capability.
        self.assertEqual(cap_add.CapAdd().translate(["ALL"]), "--cap-add=ALL")
    def test_coversion_fail(self):
        # A bare string is rejected and yields an empty translation.
        self.assertEqual(cap_add.CapAdd().translate("NOTALL"), "")
class TestCapDrop(unittest.TestCase):
    """Tests for the cap_drop translator."""
    def test_coversion(self):
        # Each list entry becomes its own --cap-drop flag.
        self.assertEqual(
            cap_drop.CapDrop().translate(["NET_ADMIN", "SYS_ADMIN"]),
            "--cap-drop=NET_ADMIN --cap-drop=SYS_ADMIN",
        )
    def test_coversion_fail(self):
        # Tuples are not accepted; translation yields an empty string.
        self.assertEqual(
            cap_drop.CapDrop().translate(("NET_ADMIN", "SYS_ADMIN")), ""
        )
class TestContainerName(unittest.TestCase):
    """Tests for the container_name translator."""
    def test_coversion(self):
        # A non-empty name maps to the --name flag.
        self.assertEqual(
            container_name.ContainerName().translate("my-web-container"),
            "--name=my-web-container",
        )
    def test_coversion_fail(self):
        # An empty name produces no flag at all.
        self.assertEqual(container_name.ContainerName().translate(""), "")
class TestCpuShares(unittest.TestCase):
def test_coversion(self):
input="4"
expected_output="--cpu-shares=4"
output=cpu_shares.CpuShares().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=cpu_shares.CpuShares().translate(input)
self.assertEqual(output, expected_output)
class TestCpuset(unittest.TestCase):
def test_coversion(self):
input="0,1"
expected_output="--cpuset-cpus=0,1"
output=cpuset.Cpuset().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=cpuset.Cpuset().translate(input)
self.assertEqual(output, expected_output)
class TestDevices(unittest.TestCase):
def test_coversion(self):
input=["/dev/ttyUSB0:/dev/ttyUSB0", "/dev/ttyUSB1:/dev/ttyUSB1"]
expected_output="--device=/dev/ttyUSB0:/dev/ttyUSB0 --device=/dev/ttyUSB1:/dev/ttyUSB1"
output=devices.Devices().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=devices.Devices().translate(input)
self.assertEqual(output, expected_output)
class TestDns(unittest.TestCase):
def test_coversion_with_string(self):
input="8.8.8.8"
expected_output="--dns=8.8.8.8"
output=dns.Dns().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_with_list(self):
input=["8.8.8.8", "8.8.4.4"]
expected_output="--dns=8.8.8.8 --dns=8.8.4.4"
output=dns.Dns().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=dns.Dns().translate(input)
self.assertEqual(output, expected_output)
class TestDnsSearch(unittest.TestCase):
def test_coversion_with_string(self):
input="8.8.8.8"
expected_output="--dns-search=8.8.8.8"
output=dns_search.DnsSearch().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_with_list(self):
input=["8.8.8.8", "8.8.4.4"]
expected_output="--dns-search=8.8.8.8 --dns-search=8.8.4.4"
output=dns_search.DnsSearch().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=dns_search.DnsSearch().translate(input)
self.assertEqual(output, expected_output)
class TestEntrypoint(unittest.TestCase):
def test_coversion(self):
input="/code/entrypoint.sh"
expected_output="--entrypoint=/code/entrypoint.sh"
output=entrypoint.Entrypoint().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=entrypoint.Entrypoint().translate(input)
self.assertEqual(output, expected_output)
class TestEnvFile(unittest.TestCase):
def test_coversion_with_string(self):
input=".env"
expected_output="--env-file=.env"
output=env_file.EnvFile().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_with_list(self):
input=["./common.env", "./apps/web.env"]
expected_output="--env-file=./common.env --env-file=./apps/web.env"
output=env_file.EnvFile().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=env_file.EnvFile().translate(input)
self.assertEqual(output, expected_output)
class TestEnvironment(unittest.TestCase):
    def test_coversion_with_dict(self):
        input={
            "RACK_ENV": "development",
            "SESSION_SECRET": "",
        }
        expected_output="--env=RACK_ENV:development --env=SESSION_SECRET:"
        output=environment.Environment().translate(input)
        # NOTE(review): assertTrue(output, expected_output) treats the second
        # argument as the failure *message*, so this passes for any truthy
        # output. assertEqual was presumably intended, but dict iteration
        # order would then matter -- confirm before changing.
        self.assertTrue(output, expected_output)
    def test_coversion_with_list(self):
        input=["RACK_ENV=development", "SESSION_SECRET"]
        expected_output="--env=RACK_ENV:development --env=SESSION_SECRET:"
        output=environment.Environment().translate(input)
        self.assertEqual(output, expected_output)
    def test_coversion_fail(self):
        # Empty input produces an empty translation.
        input=""
        expected_output=""
        output=environment.Environment().translate(input)
        self.assertEqual(output, expected_output)
class TestExpose(unittest.TestCase):
def test_coversion(self):
input=["3000", "8000"]
expected_output="--expose=3000 --expose=8000"
output=expose.Expose().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=expose.Expose().translate(input)
self.assertEqual(output, expected_output)
class TestHostname(unittest.TestCase):
def test_coversion(self):
input="foo"
expected_output="--hostname=foo"
output=hostname.Hostname().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=hostname.Hostname().translate(input)
self.assertEqual(output, expected_output)
class TestLabels(unittest.TestCase):
    def test_coversion_with_dict(self):
        input={
            "com.example.description": "Accounting webapp",
            "com.example.department": "Finance",
            "com.example.label-with-empty-value": "",
        }
        expected_output="--label=com.example.description:Accounting webapp " \
            "--label=com.example.department:Finance --label=com.example.label-with-empty-value:"
        output=labels.Labels().translate(input)
        # NOTE(review): assertTrue(output, expected_output) uses the second
        # argument as the failure *message*, so any truthy output passes.
        # assertEqual was presumably intended, but dict iteration order would
        # then matter -- confirm before changing.
        self.assertTrue(output, expected_output)
    def test_coversion_with_list(self):
        input=[
            "com.example.description=Accounting webapp",
            "com.example.department=Finance",
            "com.example.label-with-empty-value",
        ]
        expected_output="--label=com.example.description:Accounting webapp " \
            "--label=com.example.department:Finance --label=com.example.label-with-empty-value:"
        output=labels.Labels().translate(input)
        self.assertEqual(output, expected_output)
    def test_coversion_fail(self):
        # Empty input produces an empty translation.
        input=""
        expected_output=""
        output=labels.Labels().translate(input)
        self.assertEqual(output, expected_output)
class TestLinks(unittest.TestCase):
def test_coversion(self):
input=["db", "db:database", "redis"]
expected_output="--link=db --link=db:database --link=redis"
output=links.Links().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=links.Links().translate(input)
self.assertEqual(output, expected_output)
class TestLogDriver(unittest.TestCase):
def test_coversion(self):
input="json-file"
expected_output="--log-driver=json-file"
output=log_driver.LogDriver().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=log_driver.LogDriver().translate(input)
self.assertEqual(output, expected_output)
class TestMacAddress(unittest.TestCase):
def test_coversion(self):
input="02:42:ac:11:65:43"
expected_output="--mac-address=02:42:ac:11:65:43"
output=mac_address.MacAddress().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=mac_address.MacAddress().translate(input)
self.assertEqual(output, expected_output)
class TestMemLimit(unittest.TestCase):
def test_coversion(self):
input="1000000000"
expected_output="--memory=1000000000"
output=mem_limit.MemLimit().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=mem_limit.MemLimit().translate(input)
self.assertEqual(output, expected_output)
class TestMemswapLimit(unittest.TestCase):
def test_coversion(self):
input="2000000000"
expected_output="--memory-swap=2000000000"
output=memswap_limit.MemswapLimit().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=memswap_limit.MemswapLimit().translate(input)
self.assertEqual(output, expected_output)
class TestNet(unittest.TestCase):
def test_coversion(self):
input="host"
expected_output="--net=host"
output=net.Net().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=net.Net().translate(input)
self.assertEqual(output, expected_output)
class TestPid(unittest.TestCase):
def test_coversion(self):
input="host"
expected_output="--pid=host"
output=pid.Pid().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=pid.Pid().translate(input)
self.assertEqual(output, expected_output)
class TestPorts(unittest.TestCase):
def test_coversion(self):
input=[
"3000",
"8000:8000",
"49100:22",
"127.0.0.1:8001:8001",
]
expected_output="--publish=3000 --publish=8000:8000 --publish=49100:22 " \
"--publish=127.0.0.1:8001:8001"
output=ports.Ports().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=ports.Ports().translate(input)
self.assertEqual(output, expected_output)
class TestPrivileged(unittest.TestCase):
def test_coversion(self):
input="true"
expected_output="--privileged=true"
output=privileged.Privileged().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=privileged.Privileged().translate(input)
self.assertEqual(output, expected_output)
class TestReadOnly(unittest.TestCase):
def test_coversion(self):
input="true"
expected_output="--read-only=true"
output=read_only.ReadOnly().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=read_only.ReadOnly().translate(input)
self.assertEqual(output, expected_output)
class TestRestart(unittest.TestCase):
def test_coversion(self):
input="always"
expected_output="--restart=always"
output=restart.Restart().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=restart.Restart().translate(input)
self.assertEqual(output, expected_output)
class TestStdinOpen(unittest.TestCase):
def test_coversion(self):
input="true"
expected_output="--interactive=true"
output=stdin_open.StdinOpen().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=stdin_open.StdinOpen().translate(input)
self.assertEqual(output, expected_output)
class TestTty(unittest.TestCase):
def test_coversion(self):
input="true"
expected_output="--tty=true"
output=tty.Tty().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=tty.Tty().translate(input)
self.assertEqual(output, expected_output)
class TestUser(unittest.TestCase):
def test_coversion(self):
input="postgresql:datastore"
expected_output="--user=postgresql:datastore"
output=user.User().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=user.User().translate(input)
self.assertEqual(output, expected_output)
class TestVolumeDriver(unittest.TestCase):
def test_coversion(self):
input="mydriver"
expected_output="--volume-driver=mydriver"
output=volume_driver.VolumeDriver().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=volume_driver.VolumeDriver().translate(input)
self.assertEqual(output, expected_output)
class TestVolumes(unittest.TestCase):
def test_coversion(self):
input=[
"/var/lib/mysql",
"./cache:/tmp/cache",
"~/configs:/etc/configs/:ro",
]
expected_output="--volume=/var/lib/mysql --volume=./cache:/tmp/cache " \
"--volume=~/configs:/etc/configs/:ro"
output=volumes.Volumes().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=volumes.Volumes().translate(input)
self.assertEqual(output, expected_output)
class TestVolumesFrom(unittest.TestCase):
def test_coversion(self):
input=["service_name", "container_name"]
expected_output="--volumes-from=service_name --volumes-from=container_name"
output=volumes_from.VolumesFrom().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=volumes_from.VolumesFrom().translate(input)
self.assertEqual(output, expected_output)
class TestWorkingDir(unittest.TestCase):
    """Translation of the compose `working_dir` option into --workdir."""

    def test_coversion(self):
        result = working_dir.WorkingDir().translate("/code")
        self.assertEqual(result, "--workdir=/code")

    def test_coversion_fail(self):
        # An empty value must produce an empty flag string.
        result = working_dir.WorkingDir().translate("")
        self.assertEqual(result, "")
| <filename>examples/traductor/tests/test_translators.py
import six
import unittest
from traductor.translators import (cap_add, cap_drop, container_name, cpu_shares, cpuset, devices,
dns, dns_search, entrypoint, env_file, environment, expose, hostname, labels, links,
log_driver, mac_address, mem_limit, memswap_limit, net, pid, ports, privileged, read_only,
restart, stdin_open, tty, user, volume_driver, volumes, volumes_from, working_dir)
class TestCapAdd(unittest.TestCase):
def test_coversion(self):
input=["ALL"]
expected_output="--cap-add=ALL"
output=cap_add.CapAdd().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input="NOTALL"
expected_output=""
output=cap_add.CapAdd().translate(input)
self.assertEqual(output, expected_output)
class TestCapDrop(unittest.TestCase):
def test_coversion(self):
input=["NET_ADMIN", "SYS_ADMIN"]
expected_output="--cap-drop=NET_ADMIN --cap-drop=SYS_ADMIN"
output=cap_drop.CapDrop().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=("NET_ADMIN", "SYS_ADMIN")
expected_output=""
output=cap_drop.CapDrop().translate(input)
self.assertEqual(output, expected_output)
class TestContainerName(unittest.TestCase):
def test_coversion(self):
input="my-web-container"
expected_output="--name=my-web-container"
output=container_name.ContainerName().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=container_name.ContainerName().translate(input)
self.assertEqual(output, expected_output)
class TestCpuShares(unittest.TestCase):
def test_coversion(self):
input="4"
expected_output="--cpu-shares=4"
output=cpu_shares.CpuShares().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=cpu_shares.CpuShares().translate(input)
self.assertEqual(output, expected_output)
class TestCpuset(unittest.TestCase):
def test_coversion(self):
input="0,1"
expected_output="--cpuset-cpus=0,1"
output=cpuset.Cpuset().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=cpuset.Cpuset().translate(input)
self.assertEqual(output, expected_output)
class TestDevices(unittest.TestCase):
def test_coversion(self):
input=["/dev/ttyUSB0:/dev/ttyUSB0", "/dev/ttyUSB1:/dev/ttyUSB1"]
expected_output="--device=/dev/ttyUSB0:/dev/ttyUSB0 --device=/dev/ttyUSB1:/dev/ttyUSB1"
output=devices.Devices().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=devices.Devices().translate(input)
self.assertEqual(output, expected_output)
class TestDns(unittest.TestCase):
def test_coversion_with_string(self):
input="8.8.8.8"
expected_output="--dns=8.8.8.8"
output=dns.Dns().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_with_list(self):
input=["8.8.8.8", "8.8.4.4"]
expected_output="--dns=8.8.8.8 --dns=8.8.4.4"
output=dns.Dns().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=dns.Dns().translate(input)
self.assertEqual(output, expected_output)
class TestDnsSearch(unittest.TestCase):
def test_coversion_with_string(self):
input="8.8.8.8"
expected_output="--dns-search=8.8.8.8"
output=dns_search.DnsSearch().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_with_list(self):
input=["8.8.8.8", "8.8.4.4"]
expected_output="--dns-search=8.8.8.8 --dns-search=8.8.4.4"
output=dns_search.DnsSearch().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=dns_search.DnsSearch().translate(input)
self.assertEqual(output, expected_output)
class TestEntrypoint(unittest.TestCase):
def test_coversion(self):
input="/code/entrypoint.sh"
expected_output="--entrypoint=/code/entrypoint.sh"
output=entrypoint.Entrypoint().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=entrypoint.Entrypoint().translate(input)
self.assertEqual(output, expected_output)
class TestEnvFile(unittest.TestCase):
def test_coversion_with_string(self):
input=".env"
expected_output="--env-file=.env"
output=env_file.EnvFile().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_with_list(self):
input=["./common.env", "./apps/web.env"]
expected_output="--env-file=./common.env --env-file=./apps/web.env"
output=env_file.EnvFile().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=env_file.EnvFile().translate(input)
self.assertEqual(output, expected_output)
class TestEnvironment(unittest.TestCase):
def test_coversion_with_dict(self):
input={
"RACK_ENV": "development",
"SESSION_SECRET": "",
}
expected_output="--env=RACK_ENV:development --env=SESSION_SECRET:"
output=environment.Environment().translate(input)
self.assertTrue(output, expected_output)
def test_coversion_with_list(self):
input=["RACK_ENV=development", "SESSION_SECRET"]
expected_output="--env=RACK_ENV:development --env=SESSION_SECRET:"
output=environment.Environment().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=environment.Environment().translate(input)
self.assertEqual(output, expected_output)
class TestExpose(unittest.TestCase):
def test_coversion(self):
input=["3000", "8000"]
expected_output="--expose=3000 --expose=8000"
output=expose.Expose().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=expose.Expose().translate(input)
self.assertEqual(output, expected_output)
class TestHostname(unittest.TestCase):
def test_coversion(self):
input="foo"
expected_output="--hostname=foo"
output=hostname.Hostname().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=hostname.Hostname().translate(input)
self.assertEqual(output, expected_output)
class TestLabels(unittest.TestCase):
def test_coversion_with_dict(self):
input={
"com.example.description": "Accounting webapp",
"com.example.department": "Finance",
"com.example.label-with-empty-value": "",
}
expected_output="--label=com.example.description:Accounting webapp " \
"--label=com.example.department:Finance --label=com.example.label-with-empty-value:"
output=labels.Labels().translate(input)
self.assertTrue(output, expected_output)
def test_coversion_with_list(self):
input=[
"com.example.description=Accounting webapp",
"com.example.department=Finance",
"com.example.label-with-empty-value",
]
expected_output="--label=com.example.description:Accounting webapp " \
"--label=com.example.department:Finance --label=com.example.label-with-empty-value:"
output=labels.Labels().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=labels.Labels().translate(input)
self.assertEqual(output, expected_output)
class TestLinks(unittest.TestCase):
def test_coversion(self):
input=["db", "db:database", "redis"]
expected_output="--link=db --link=db:database --link=redis"
output=links.Links().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=links.Links().translate(input)
self.assertEqual(output, expected_output)
class TestLogDriver(unittest.TestCase):
def test_coversion(self):
input="json-file"
expected_output="--log-driver=json-file"
output=log_driver.LogDriver().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=log_driver.LogDriver().translate(input)
self.assertEqual(output, expected_output)
class TestMacAddress(unittest.TestCase):
def test_coversion(self):
input="02:42:ac:11:65:43"
expected_output="--mac-address=02:42:ac:11:65:43"
output=mac_address.MacAddress().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=mac_address.MacAddress().translate(input)
self.assertEqual(output, expected_output)
class TestMemLimit(unittest.TestCase):
def test_coversion(self):
input="1000000000"
expected_output="--memory=1000000000"
output=mem_limit.MemLimit().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=mem_limit.MemLimit().translate(input)
self.assertEqual(output, expected_output)
class TestMemswapLimit(unittest.TestCase):
def test_coversion(self):
input="2000000000"
expected_output="--memory-swap=2000000000"
output=memswap_limit.MemswapLimit().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=memswap_limit.MemswapLimit().translate(input)
self.assertEqual(output, expected_output)
class TestNet(unittest.TestCase):
def test_coversion(self):
input="host"
expected_output="--net=host"
output=net.Net().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=net.Net().translate(input)
self.assertEqual(output, expected_output)
class TestPid(unittest.TestCase):
def test_coversion(self):
input="host"
expected_output="--pid=host"
output=pid.Pid().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=pid.Pid().translate(input)
self.assertEqual(output, expected_output)
class TestPorts(unittest.TestCase):
def test_coversion(self):
input=[
"3000",
"8000:8000",
"49100:22",
"127.0.0.1:8001:8001",
]
expected_output="--publish=3000 --publish=8000:8000 --publish=49100:22 " \
"--publish=127.0.0.1:8001:8001"
output=ports.Ports().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=ports.Ports().translate(input)
self.assertEqual(output, expected_output)
class TestPrivileged(unittest.TestCase):
def test_coversion(self):
input="true"
expected_output="--privileged=true"
output=privileged.Privileged().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=privileged.Privileged().translate(input)
self.assertEqual(output, expected_output)
class TestReadOnly(unittest.TestCase):
def test_coversion(self):
input="true"
expected_output="--read-only=true"
output=read_only.ReadOnly().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=read_only.ReadOnly().translate(input)
self.assertEqual(output, expected_output)
class TestRestart(unittest.TestCase):
def test_coversion(self):
input="always"
expected_output="--restart=always"
output=restart.Restart().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=restart.Restart().translate(input)
self.assertEqual(output, expected_output)
class TestStdinOpen(unittest.TestCase):
def test_coversion(self):
input="true"
expected_output="--interactive=true"
output=stdin_open.StdinOpen().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=stdin_open.StdinOpen().translate(input)
self.assertEqual(output, expected_output)
class TestTty(unittest.TestCase):
def test_coversion(self):
input="true"
expected_output="--tty=true"
output=tty.Tty().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=tty.Tty().translate(input)
self.assertEqual(output, expected_output)
class TestUser(unittest.TestCase):
def test_coversion(self):
input="postgresql:datastore"
expected_output="--user=postgresql:datastore"
output=user.User().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=user.User().translate(input)
self.assertEqual(output, expected_output)
class TestVolumeDriver(unittest.TestCase):
def test_coversion(self):
input="mydriver"
expected_output="--volume-driver=mydriver"
output=volume_driver.VolumeDriver().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=volume_driver.VolumeDriver().translate(input)
self.assertEqual(output, expected_output)
class TestVolumes(unittest.TestCase):
def test_coversion(self):
input=[
"/var/lib/mysql",
"./cache:/tmp/cache",
"~/configs:/etc/configs/:ro",
]
expected_output="--volume=/var/lib/mysql --volume=./cache:/tmp/cache " \
"--volume=~/configs:/etc/configs/:ro"
output=volumes.Volumes().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=volumes.Volumes().translate(input)
self.assertEqual(output, expected_output)
class TestVolumesFrom(unittest.TestCase):
def test_coversion(self):
input=["service_name", "container_name"]
expected_output="--volumes-from=service_name --volumes-from=container_name"
output=volumes_from.VolumesFrom().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=volumes_from.VolumesFrom().translate(input)
self.assertEqual(output, expected_output)
class TestWorkingDir(unittest.TestCase):
def test_coversion(self):
input="/code"
expected_output="--workdir=/code"
output=working_dir.WorkingDir().translate(input)
self.assertEqual(output, expected_output)
def test_coversion_fail(self):
input=""
expected_output=""
output=working_dir.WorkingDir().translate(input)
self.assertEqual(output, expected_output)
| none | 1 | 2.799461 | 3 | |
main.py | Stylix58/flame | 3 | 6621294 | <reponame>Stylix58/flame<gh_stars>1-10
#!/usr/bin/env python3
import fire
import subprocess
import ini
import os.path
from os import remove as removefile
__composer_not_installed__ = "Composer can't start because he is not installed! Install it at https://getcomposer.org/doc/00-intro.md#globally!"
__install_loc_not_set__ = "Flarum installation location is not set! Please set it first using flame locate LOCATION!"
__config_file_name__ = "flameconf.ini"
if not os.path.exists(__config_file_name__):
open(__config_file_name__, "a").close()
conf_e = ini.parse(open(__config_file_name__, "r").read())
def confsave():
    """Persist the in-memory configuration dict back to the INI file.

    Opening with mode "w" already truncates the file, so the previous
    removefile() call was redundant (and racy); the with-block also closes
    the handle, so the explicit f.close() was dropped as well.
    """
    with open(__config_file_name__, "w") as f:
        f.write(ini.stringify(conf_e))
def err(e):
    """Prefix *e* with the standard CLI error marker and return it."""
    prefix = "ERROR: "
    return prefix + e
def check_env():
    """Abort the process unless the Flarum install location is configured.

    Narrowed the bare ``except:`` to ``KeyError`` (the only exception a
    missing config key raises) and removed the unused local binding.
    """
    try:
        conf_e["install_loc"]
    except KeyError:
        print(err(__install_loc_not_set__))
        os._exit(1)
def install(ext):
    """Run ``composer require`` for *ext* inside the Flarum installation.

    Raises subprocess.CalledProcessError when composer fails (returncode
    127 when composer itself is missing).
    """
    check_env()
    # BUG FIX: the original chained with "|" (a shell pipe), which runs both
    # commands concurrently so composer executed in the *current* directory,
    # not the install location. "&&" actually sequences cd with composer.
    subprocess.run("cd " + conf_e["install_loc"] + " && composer require -q -n " + ext,
                   shell=True, check=True)
def uninstall(ext):
    """Run ``composer remove`` for *ext* inside the Flarum installation.

    Raises subprocess.CalledProcessError when composer fails (returncode
    127 when composer itself is missing).
    """
    check_env()
    # BUG FIX: "|" piped the commands, so composer ran in the current
    # directory instead of the install location; "&&" sequences them.
    subprocess.run("cd " + conf_e["install_loc"] + " && composer remove -q -n " + ext,
                   shell=True, check=True)
class Flame(object):
    """
    Flame V1\n
    A extension installer for Flarum.
    """

    def install(self, extension):
        """Installs a extension."""
        print("Installing the extension " + extension + "...")
        try:
            install(extension)
        except subprocess.CalledProcessError as e:
            if e.returncode == 127:
                return err(__composer_not_installed__)
            # Any other composer failure aborts the CLI with a non-zero status.
            os._exit(1)
        else:
            return "Extension " + extension + " have been correctly installed!"

    def uninstall(self, extension):
        """Uninstalls a extension."""
        print("Uninstalling the extension " + extension + "...")
        try:
            uninstall(extension)
        except subprocess.CalledProcessError as e:
            if e.returncode == 127:
                return err(__composer_not_installed__)
            os._exit(1)
        else:
            return "Extension " + extension + " have been correctly uninstalled!"

    def locate(self, location):
        """Changes the configuration for the location of Flarum installation."""
        try:
            conf_e["install_loc"] = location
            confsave()
        except Exception:
            # Narrowed from a bare `except:`; the os._exit(1) that followed
            # the return statement was unreachable and has been removed.
            return err(__install_loc_not_set__)
        else:
            return "I have correctly changed the install location!"
if __name__ == '__main__':
fire.Fire(Flame)
| #!/usr/bin/env python3
import fire
import subprocess
import ini
import os.path
from os import remove as removefile
__composer_not_installed__ = "Composer can't start because he is not installed! Install it at https://getcomposer.org/doc/00-intro.md#globally!"
__install_loc_not_set__ = "Flarum installation location is not set! Please set it first using flame locate LOCATION!"
__config_file_name__ = "flameconf.ini"
if not os.path.exists(__config_file_name__):
open(__config_file_name__, "a").close()
conf_e = ini.parse(open(__config_file_name__, "r").read())
def confsave():
removefile(__config_file_name__)
with open(__config_file_name__, "w") as f:
f.write(ini.stringify(conf_e))
f.close()
def err(e):
return "ERROR: " + e
def check_env():
try:
t = conf_e["install_loc"]
except:
print(err(__install_loc_not_set__))
os._exit(1)
def install(ext):
check_env()
subprocess.run("cd " + conf_e["install_loc"] + " | composer require -q -n " + ext, shell=True,
check=True)
def uninstall(ext):
check_env()
subprocess.run("cd " + conf_e["install_loc"] + " | composer remove -q -n " + ext, shell=True,
check=True)
class Flame(object):
"""
Flame V1\n
A extension installer for Flarum.
"""
def install(self, extension):
"""Installs a extension."""
print("Installing the extension " + extension + "...")
try:
install(extension)
except subprocess.CalledProcessError as e:
if e.returncode == 127:
return err(__composer_not_installed__)
os._exit(1)
else:
return "Extension " + extension + " have been correctly installed!"
def uninstall(self, extension):
"""Uninstalls a extension."""
print("Uninstalling the extension " + extension + "...")
try:
uninstall(extension)
except subprocess.CalledProcessError as e:
if e.returncode == 127:
return err(__composer_not_installed__)
os._exit(1)
else:
return "Extension " + extension + " have been correctly uninstalled!"
def locate(self, location):
"""Changes the configuration for the location of Flarum installation."""
try:
conf_e["install_loc"] = location
confsave()
except:
return err(__install_loc_not_set__)
os._exit(1)
else:
return "I have correctly changed the install location!"
if __name__ == '__main__':
fire.Fire(Flame) | en | 0.59554 | #!/usr/bin/env python3 #globally!" Flame V1\n A extension installer for Flarum. Installs a extension. Uninstalls a extension. Changes the configuration for the location of Flarum installation. | 2.384089 | 2 |
arxiv/canonical/register/exceptions.py | arXiv/arxiv-canonical | 5 | 6621295 | class ConsistencyError(Exception):
"""Operation was attempted that would violate consistency of the record."""
class NoSuchResource(Exception):
    """Operation was attempted on a non-existent resource."""
"""Operation was attempted that would violate consistency of the record."""
class NoSuchResource(Exception):
"""Operation was attempted on a non-existant resource.""" | en | 0.9937 | Operation was attempted that would violate consistency of the record. Operation was attempted on a non-existant resource. | 2.244683 | 2 |
theory/13th_sprint/F.py | abi83/YaPractice | 3 | 6621296 | <reponame>abi83/YaPractice
"""
Нужно реализовать класс StackMax, который поддерживает операцию определения
максимума среди всех элементов в стеке. Класс должен поддерживать операции
push, pop и get_max.
"""
class StackMax:
    """Integer stack supporting push, pop and O(1) get_max.

    A parallel stack of running maxima is maintained so get_max no longer
    scans the whole stack (the original called max(self.data), O(n) per
    query). Output behaviour is unchanged: pop prints 'error' on an empty
    stack, get_max prints the maximum or 'None'.
    """

    def __init__(self):
        self.data = []    # the stack itself
        self._maxes = []  # _maxes[i] == max(self.data[:i + 1])

    def __str__(self):
        return str(self.data)

    def push(self, x):
        value = int(x)
        self.data.append(value)
        # Running maximum: the larger of the new value and the previous max.
        self._maxes.append(value if not self._maxes else max(value, self._maxes[-1]))

    def pop(self):
        try:
            self.data.pop()
            self._maxes.pop()
        except IndexError:
            print('error')

    def get_max(self):
        if self._maxes:
            print(self._maxes[-1])
        else:
            print('None')
if __name__ == '__main__':
    s = StackMax()
    # Input format: first line is the number of commands, each following
    # line is "<method> [<argument>]" (e.g. "push 5", "pop", "get_max").
    with open('input.txt') as file:
        n = int(file.readline())
        for i in range(n):
            line = file.readline().strip()
            try:
                command, parameter = line.split()
            except ValueError:
                # No argument on this line (e.g. "pop" or "get_max").
                command = line
                parameter = None
            # Dispatch to the matching StackMax method by name.
            if parameter:
                getattr(s, command)(parameter)
            else:
                getattr(s, command)()
| """
Нужно реализовать класс StackMax, который поддерживает операцию определения
максимума среди всех элементов в стеке. Класс должен поддерживать операции
push, pop и get_max.
"""
class StackMax:
    """A stack of integers exposing push, pop and get_max operations.

    pop prints 'error' when the stack is empty; get_max prints the current
    maximum, or 'None' when the stack is empty.
    """

    def __init__(self):
        self.data = []

    def __str__(self):
        return str(self.data)

    def push(self, x):
        self.data.append(int(x))

    def pop(self):
        if self.data:
            del self.data[-1]
        else:
            print('error')

    def get_max(self):
        print(max(self.data) if self.data else 'None')
if __name__ == '__main__':
    s = StackMax()
    # Input format: first line is the number of commands, each following
    # line is "<method> [<argument>]" (e.g. "push 5", "pop", "get_max").
    with open('input.txt') as file:
        n = int(file.readline())
        for i in range(n):
            line = file.readline().strip()
            try:
                command, parameter = line.split()
            except ValueError:
                # No argument on this line (e.g. "pop" or "get_max").
                command = line
                parameter = None
            # Dispatch to the matching StackMax method by name.
            if parameter:
                getattr(s, command)(parameter)
            else:
                getattr(s, command)()
improver_tests/wxcode/wxcode/__init__.py | bmatilla/improver | 0 | 6621297 | <filename>improver_tests/wxcode/wxcode/__init__.py
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Utilities for Unit tests for Weather Symbols"""
from typing import Any, Dict
def prob_above_name(diagnostic: str) -> str:
    """Build the above-threshold probability cube name for *diagnostic*."""
    return "probability_of_" + diagnostic + "_above_threshold"
LIGHTNING_VICINITY_PROB = prob_above_name(
"number_of_lightning_flashes_per_unit_area_in_vicinity"
)
CLOUD_NAME = "low_and_medium_type_cloud_area_fraction"
CLOUD_PROB_ABOVE = prob_above_name(CLOUD_NAME)
LOW_CLOUD_PROB_ABOVE = prob_above_name("low_type_cloud_area_fraction")
TEXTURE_PROB_ABOVE = prob_above_name(f"texture_of_{CLOUD_NAME}")
CONVECTION_PROB_ABOVE = prob_above_name("convective_ratio")
PRECIP_PROB_ABOVE = prob_above_name("lwe_precipitation_rate")
PRECIP_VICINITY_PROB_ABOVE = prob_above_name("lwe_precipitation_rate_in_vicinity")
RAIN_PROB_ABOVE = prob_above_name("rainfall_rate")
SLEET_PROB_ABOVE = prob_above_name("lwe_sleetfall_rate")
SNOW_PROB_ABOVE = prob_above_name("lwe_snowfall_rate")
VIS_PROB_BELOW = "probability_of_visibility_in_air_below_threshold"
def wxcode_decision_tree_uk() -> Dict[str, Dict[str, Any]]:
"""
Define an example UK decision tree to test the weather symbols code.
Returns:
A dictionary containing the queries that comprise the decision
tree.
"""
queries = {
"lightning": {
"succeed": "lightning_cloud",
"fail": "heavy_precipitation",
"diagnostic_missing_action": "fail",
"probability_thresholds": [0.3],
"threshold_condition": ">=",
"condition_combination": "",
"diagnostic_fields": [LIGHTNING_VICINITY_PROB],
"diagnostic_thresholds": [[0.0, "m-2"]],
"diagnostic_conditions": ["above"],
},
"lightning_cloud": {
"succeed": 29,
"fail": 30,
"probability_thresholds": [0.5],
"threshold_condition": ">=",
"condition_combination": "",
"diagnostic_fields": [TEXTURE_PROB_ABOVE],
"diagnostic_thresholds": [[0.05, 1]],
"diagnostic_conditions": ["above"],
},
"heavy_precipitation": {
"succeed": "heavy_precipitation_cloud",
"fail": "precipitation_in_vicinity",
"probability_thresholds": [0.5],
"threshold_condition": ">=",
"condition_combination": "",
"diagnostic_fields": [PRECIP_PROB_ABOVE],
"diagnostic_thresholds": [[1.0, "mm hr-1"]],
"diagnostic_conditions": ["above"],
},
"heavy_precipitation_cloud": {
"succeed": "heavy_snow_shower",
"fail": "heavy_snow_continuous",
"probability_thresholds": [0.5],
"threshold_condition": ">=",
"condition_combination": "",
"diagnostic_fields": [TEXTURE_PROB_ABOVE],
"diagnostic_thresholds": [[0.05, 1]],
"diagnostic_conditions": ["above"],
},
"heavy_snow_shower": {
"succeed": 26,
"fail": "heavy_rain_or_sleet_shower",
"probability_thresholds": [0.0],
"threshold_condition": "<",
"condition_combination": "",
"diagnostic_fields": [
[SLEET_PROB_ABOVE, "+", RAIN_PROB_ABOVE, "-", SNOW_PROB_ABOVE]
],
"diagnostic_thresholds": [
[[1.0, "mm hr-1"], [1.0, "mm hr-1"], [1.0, "mm hr-1"]]
],
"diagnostic_conditions": [["above", "above", "above"]],
},
"heavy_rain_or_sleet_shower": {
"succeed": 14,
"fail": 17,
"probability_thresholds": [0.0],
"threshold_condition": "<",
"condition_combination": "",
"diagnostic_fields": [
[SLEET_PROB_ABOVE, "+", SNOW_PROB_ABOVE, "-", RAIN_PROB_ABOVE]
],
"diagnostic_thresholds": [
[[1.0, "mm hr-1"], [1.0, "mm hr-1"], [1.0, "mm hr-1"]]
],
"diagnostic_conditions": [["above", "above", "above"]],
},
"heavy_snow_continuous": {
"succeed": 27,
"fail": "heavy_rain_or_sleet_continuous",
"probability_thresholds": [0.0],
"threshold_condition": "<",
"condition_combination": "",
"diagnostic_fields": [
[SLEET_PROB_ABOVE, "+", RAIN_PROB_ABOVE, "-", SNOW_PROB_ABOVE]
],
"diagnostic_thresholds": [
[[1.0, "mm hr-1"], [1.0, "mm hr-1"], [1.0, "mm hr-1"]]
],
"diagnostic_conditions": [["above", "above", "above"]],
},
"heavy_rain_or_sleet_continuous": {
"succeed": 15,
"fail": 18,
"probability_thresholds": [0.0],
"threshold_condition": "<",
"condition_combination": "",
"diagnostic_fields": [
[SLEET_PROB_ABOVE, "+", SNOW_PROB_ABOVE, "-", RAIN_PROB_ABOVE]
],
"diagnostic_thresholds": [
[[1.0, "mm hr-1"], [1.0, "mm hr-1"], [1.0, "mm hr-1"]]
],
"diagnostic_conditions": [["above", "above", "above"]],
},
"precipitation_in_vicinity": {
"succeed": "snow_in_vicinity",
"fail": "drizzle_mist",
"probability_thresholds": [0.5],
"threshold_condition": ">=",
"condition_combination": "",
"diagnostic_fields": [PRECIP_VICINITY_PROB_ABOVE],
"diagnostic_thresholds": [[0.1, "mm hr-1"]],
"diagnostic_conditions": ["above"],
},
"snow_in_vicinity": {
"succeed": "snow_in_vicinity_cloud",
"fail": "rain_or_sleet_in_vicinity",
"probability_thresholds": [0.0],
"threshold_condition": "<",
"condition_combination": "",
"diagnostic_fields": [
[SLEET_PROB_ABOVE, "+", RAIN_PROB_ABOVE, "-", SNOW_PROB_ABOVE]
],
"diagnostic_thresholds": [
[[0.03, "mm hr-1"], [0.03, "mm hr-1"], [0.03, "mm hr-1"]]
],
"diagnostic_conditions": [["above", "above", "above"]],
},
"snow_in_vicinity_cloud": {
"succeed": "heavy_snow_shower_in_vicinity",
"fail": "heavy_snow_continuous_in_vicinity",
"probability_thresholds": [0.5],
"threshold_condition": ">=",
"condition_combination": "",
"diagnostic_fields": [TEXTURE_PROB_ABOVE],
"diagnostic_thresholds": [[0.05, 1]],
"diagnostic_conditions": ["above"],
},
"heavy_snow_shower_in_vicinity": {
"succeed": 26,
"fail": 23,
"probability_thresholds": [0.5],
"threshold_condition": ">=",
"condition_combination": "",
"diagnostic_fields": [PRECIP_VICINITY_PROB_ABOVE],
"diagnostic_thresholds": [[1.0, "mm hr-1"]],
"diagnostic_conditions": ["above"],
},
"heavy_snow_continuous_in_vicinity": {
"succeed": 27,
"fail": 24,
"probability_thresholds": [0.5],
"threshold_condition": ">=",
"condition_combination": "",
"diagnostic_fields": [PRECIP_VICINITY_PROB_ABOVE],
"diagnostic_thresholds": [[1.0, "mm hr-1"]],
"diagnostic_conditions": ["above"],
},
"rain_or_sleet_in_vicinity": {
"succeed": "rain_in_vicinity_cloud",
"fail": "sleet_in_vicinity_cloud",
"probability_thresholds": [0.0],
"threshold_condition": "<",
"condition_combination": "",
"diagnostic_fields": [
[SLEET_PROB_ABOVE, "+", SNOW_PROB_ABOVE, "-", RAIN_PROB_ABOVE]
],
"diagnostic_thresholds": [
[[0.03, "mm hr-1"], [0.03, "mm hr-1"], [0.03, "mm hr-1"]]
],
"diagnostic_conditions": [["above", "above", "above"]],
},
"rain_in_vicinity_cloud": {
"succeed": "heavy_rain_shower_in_vicinity",
"fail": "heavy_rain_continuous_in_vicinity",
"probability_thresholds": [0.5],
"threshold_condition": ">=",
"condition_combination": "",
"diagnostic_fields": [TEXTURE_PROB_ABOVE],
"diagnostic_thresholds": [[0.05, 1]],
"diagnostic_conditions": ["above"],
},
"heavy_rain_shower_in_vicinity": {
"succeed": 14,
"fail": 10,
"probability_thresholds": [0.5],
"threshold_condition": ">=",
"condition_combination": "",
"diagnostic_fields": [PRECIP_VICINITY_PROB_ABOVE],
"diagnostic_thresholds": [[1.0, "mm hr-1"]],
"diagnostic_conditions": ["above"],
},
"heavy_rain_continuous_in_vicinity": {
"succeed": 15,
"fail": 12,
"probability_thresholds": [0.5],
"threshold_condition": ">=",
"condition_combination": "",
"diagnostic_fields": [PRECIP_VICINITY_PROB_ABOVE],
"diagnostic_thresholds": [[1.0, "mm hr-1"]],
"diagnostic_conditions": ["above"],
},
"sleet_in_vicinity_cloud": {
"succeed": 17,
"fail": 18,
"probability_thresholds": [0.5],
"threshold_condition": ">=",
"condition_combination": "",
"diagnostic_fields": [TEXTURE_PROB_ABOVE],
"diagnostic_thresholds": [[0.05, 1]],
"diagnostic_conditions": ["above"],
},
"drizzle_mist": {
"succeed": "drizzle_is_rain",
"fail": "drizzle_cloud",
"probability_thresholds": [0.5, 0.5],
"threshold_condition": ">=",
"condition_combination": "AND",
"diagnostic_fields": [PRECIP_PROB_ABOVE, VIS_PROB_BELOW],
"diagnostic_thresholds": [[0.03, "mm hr-1"], [5000.0, "m"]],
"diagnostic_conditions": ["above", "below"],
},
"drizzle_cloud": {
"succeed": "drizzle_is_rain",
"fail": "mist_conditions",
"probability_thresholds": [0.5, 0.5],
"threshold_condition": ">=",
"condition_combination": "AND",
"diagnostic_fields": [PRECIP_PROB_ABOVE, LOW_CLOUD_PROB_ABOVE],
"diagnostic_thresholds": [[0.03, "mm hr-1"], [0.85, 1]],
"diagnostic_conditions": ["above", "above"],
},
"drizzle_is_rain": {
"succeed": 11,
"fail": "mist_conditions",
"probability_thresholds": [0.0],
"threshold_condition": "<",
"condition_combination": "",
"diagnostic_fields": [
[SLEET_PROB_ABOVE, "+", SNOW_PROB_ABOVE, "-", RAIN_PROB_ABOVE]
],
"diagnostic_thresholds": [
[[0.03, "mm hr-1"], [0.03, "mm hr-1"], [0.03, "mm hr-1"]]
],
"diagnostic_conditions": [["above", "above", "above"]],
},
"mist_conditions": {
"succeed": "fog_conditions",
"fail": "no_precipitation_cloud",
"probability_thresholds": [0.5],
"threshold_condition": ">=",
"condition_combination": "",
"diagnostic_fields": [VIS_PROB_BELOW],
"diagnostic_thresholds": [[5000.0, "m"]],
"diagnostic_conditions": ["below"],
},
"fog_conditions": {
"succeed": 6,
"fail": 5,
"probability_thresholds": [0.5],
"threshold_condition": ">=",
"condition_combination": "",
"diagnostic_fields": [VIS_PROB_BELOW],
"diagnostic_thresholds": [[1000.0, "m"]],
"diagnostic_conditions": ["below"],
},
"no_precipitation_cloud": {
"succeed": "overcast_cloud",
"fail": "partly_cloudy",
"probability_thresholds": [0.5],
"threshold_condition": ">=",
"condition_combination": "",
"diagnostic_fields": [CLOUD_PROB_ABOVE],
"diagnostic_thresholds": [[0.8125, 1]],
"diagnostic_conditions": ["above"],
},
"overcast_cloud": {
"succeed": 8,
"fail": 7,
"probability_thresholds": [0.5],
"threshold_condition": ">=",
"condition_combination": "",
"diagnostic_fields": [LOW_CLOUD_PROB_ABOVE],
"diagnostic_thresholds": [[0.85, 1]],
"diagnostic_conditions": ["above"],
},
"partly_cloudy": {
"succeed": 3,
"fail": 1,
"probability_thresholds": [0.5],
"threshold_condition": ">=",
"condition_combination": "",
"diagnostic_fields": [CLOUD_PROB_ABOVE],
"diagnostic_thresholds": [[0.1875, 1]],
"diagnostic_conditions": ["above"],
},
}
return queries
def wxcode_decision_tree_global() -> Dict[str, Dict[str, Any]]:
    """
    Define an example global decision tree to test the weather symbols code.

    Every query is one of three repeated shapes, so the boilerplate is built
    by small private helpers; the resulting dictionary is value-identical to
    the fully written-out literal form.

    Returns:
        A dictionary containing the queries that comprise the decision
        tree.
    """

    def single(succeed, fail, field, threshold, condition="above"):
        """Query testing one diagnostic probability against p >= 0.5."""
        return {
            "succeed": succeed,
            "fail": fail,
            "probability_thresholds": [0.5],
            "threshold_condition": ">=",
            "condition_combination": "",
            "diagnostic_fields": [field],
            "diagnostic_thresholds": [threshold],
            "diagnostic_conditions": [condition],
        }

    def phase(succeed, fail, plus_field, minus_field, rate):
        """Precipitation-phase query: P(sleet + plus - minus) < 0 at rate."""
        return {
            "succeed": succeed,
            "fail": fail,
            "probability_thresholds": [0.0],
            "threshold_condition": "<",
            "condition_combination": "",
            "diagnostic_fields": [
                [SLEET_PROB_ABOVE, "+", plus_field, "-", minus_field]
            ],
            # One threshold per component of the field expression.
            "diagnostic_thresholds": [[[rate, "mm hr-1"] for _ in range(3)]],
            "diagnostic_conditions": [["above", "above", "above"]],
        }

    def drizzle(fail, second_field, second_threshold, second_condition):
        """Two-diagnostic AND query used by the drizzle branch."""
        return {
            "succeed": "drizzle_is_rain",
            "fail": fail,
            "probability_thresholds": [0.5, 0.5],
            "threshold_condition": ">=",
            "condition_combination": "AND",
            "diagnostic_fields": [PRECIP_PROB_ABOVE, second_field],
            "diagnostic_thresholds": [[0.03, "mm hr-1"], second_threshold],
            "diagnostic_conditions": ["above", second_condition],
        }

    queries = {
        "heavy_precipitation": single(
            "heavy_precipitation_cloud", "light_precipitation",
            PRECIP_PROB_ABOVE, [1.0, "mm hr-1"],
        ),
        "heavy_precipitation_cloud": single(
            "heavy_precipitation_convective_ratio", "heavy_snow_shower",
            CLOUD_PROB_ABOVE, [0.8125, 1],
        ),
        "heavy_precipitation_convective_ratio": single(
            "heavy_snow_shower", "heavy_snow_continuous",
            CONVECTION_PROB_ABOVE, [0.8, 1],
        ),
        "heavy_snow_shower": phase(
            26, "heavy_rain_or_sleet_shower", RAIN_PROB_ABOVE, SNOW_PROB_ABOVE, 1.0
        ),
        "heavy_rain_or_sleet_shower": phase(
            14, 17, SNOW_PROB_ABOVE, RAIN_PROB_ABOVE, 1.0
        ),
        "heavy_snow_continuous": phase(
            27, "heavy_rain_or_sleet_continuous", RAIN_PROB_ABOVE, SNOW_PROB_ABOVE, 1.0
        ),
        "heavy_rain_or_sleet_continuous": phase(
            15, 18, SNOW_PROB_ABOVE, RAIN_PROB_ABOVE, 1.0
        ),
        "light_precipitation": single(
            "light_precipitation_cloud", "drizzle_mist",
            PRECIP_PROB_ABOVE, [0.1, "mm hr-1"],
        ),
        "light_precipitation_cloud": single(
            "light_precipitation_convective_ratio", "light_snow_shower",
            CLOUD_PROB_ABOVE, [0.8125, 1],
        ),
        "light_precipitation_convective_ratio": single(
            "light_snow_shower", "light_snow_continuous",
            CONVECTION_PROB_ABOVE, [0.8, 1],
        ),
        "light_snow_shower": phase(
            23, "light_rain_or_sleet_shower", RAIN_PROB_ABOVE, SNOW_PROB_ABOVE, 0.1
        ),
        "light_rain_or_sleet_shower": phase(
            10, 17, SNOW_PROB_ABOVE, RAIN_PROB_ABOVE, 0.1
        ),
        "light_snow_continuous": phase(
            24, "light_rain_or_sleet_continuous", RAIN_PROB_ABOVE, SNOW_PROB_ABOVE, 0.1
        ),
        "light_rain_or_sleet_continuous": phase(
            12, 18, SNOW_PROB_ABOVE, RAIN_PROB_ABOVE, 0.1
        ),
        "drizzle_mist": drizzle(
            "drizzle_cloud", VIS_PROB_BELOW, [5000.0, "m"], "below"
        ),
        "drizzle_cloud": drizzle(
            "mist_conditions", LOW_CLOUD_PROB_ABOVE, [0.85, 1], "above"
        ),
        "drizzle_is_rain": phase(
            11, "mist_conditions", SNOW_PROB_ABOVE, RAIN_PROB_ABOVE, 0.03
        ),
        "mist_conditions": single(
            "fog_conditions", "no_precipitation_cloud",
            VIS_PROB_BELOW, [5000.0, "m"], condition="below",
        ),
        "fog_conditions": single(
            6, 5, VIS_PROB_BELOW, [1000.0, "m"], condition="below"
        ),
        "no_precipitation_cloud": single(
            "overcast_cloud", "partly_cloudy", CLOUD_PROB_ABOVE, [0.8125, 1]
        ),
        "overcast_cloud": single(8, 7, LOW_CLOUD_PROB_ABOVE, [0.85, 1]),
        "partly_cloudy": single(3, 1, CLOUD_PROB_ABOVE, [0.1875, 1]),
    }
    return queries
| <filename>improver_tests/wxcode/wxcode/__init__.py
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Utilities for Unit tests for Weather Symbols"""
from typing import Any, Dict
def prob_above_name(diagnostic: str) -> str:
    """Return the above-threshold probability cube name for *diagnostic*."""
    return "probability_of_{}_above_threshold".format(diagnostic)
# Probability-cube names shared by the decision-tree fixtures below.
LIGHTNING_VICINITY_PROB = prob_above_name(
    "number_of_lightning_flashes_per_unit_area_in_vicinity"
)
# Cloud-related diagnostics.
CLOUD_NAME = "low_and_medium_type_cloud_area_fraction"
CLOUD_PROB_ABOVE = prob_above_name(CLOUD_NAME)
LOW_CLOUD_PROB_ABOVE = prob_above_name("low_type_cloud_area_fraction")
TEXTURE_PROB_ABOVE = prob_above_name(f"texture_of_{CLOUD_NAME}")
CONVECTION_PROB_ABOVE = prob_above_name("convective_ratio")
# Precipitation-rate diagnostics (total, in-vicinity, and per phase).
PRECIP_PROB_ABOVE = prob_above_name("lwe_precipitation_rate")
PRECIP_VICINITY_PROB_ABOVE = prob_above_name("lwe_precipitation_rate_in_vicinity")
RAIN_PROB_ABOVE = prob_above_name("rainfall_rate")
SLEET_PROB_ABOVE = prob_above_name("lwe_sleetfall_rate")
SNOW_PROB_ABOVE = prob_above_name("lwe_snowfall_rate")
# Visibility uses a below-threshold probability, so it is spelled out directly.
VIS_PROB_BELOW = "probability_of_visibility_in_air_below_threshold"
def wxcode_decision_tree_uk() -> Dict[str, Dict[str, Any]]:
    """
    Define an example UK decision tree to test the weather symbols code.

    Every query is one of three repeated shapes, so the boilerplate is built
    by small private helpers; the resulting dictionary is value-identical to
    the fully written-out literal form.

    Returns:
        A dictionary containing the queries that comprise the decision
        tree.
    """

    def single(succeed, fail, field, threshold, condition="above", probability=0.5):
        """Query testing one diagnostic probability against a threshold."""
        return {
            "succeed": succeed,
            "fail": fail,
            "probability_thresholds": [probability],
            "threshold_condition": ">=",
            "condition_combination": "",
            "diagnostic_fields": [field],
            "diagnostic_thresholds": [threshold],
            "diagnostic_conditions": [condition],
        }

    def phase(succeed, fail, plus_field, minus_field, rate):
        """Precipitation-phase query: P(sleet + plus - minus) < 0 at rate."""
        return {
            "succeed": succeed,
            "fail": fail,
            "probability_thresholds": [0.0],
            "threshold_condition": "<",
            "condition_combination": "",
            "diagnostic_fields": [
                [SLEET_PROB_ABOVE, "+", plus_field, "-", minus_field]
            ],
            # One threshold per component of the field expression.
            "diagnostic_thresholds": [[[rate, "mm hr-1"] for _ in range(3)]],
            "diagnostic_conditions": [["above", "above", "above"]],
        }

    def drizzle(fail, second_field, second_threshold, second_condition):
        """Two-diagnostic AND query used by the drizzle branch."""
        return {
            "succeed": "drizzle_is_rain",
            "fail": fail,
            "probability_thresholds": [0.5, 0.5],
            "threshold_condition": ">=",
            "condition_combination": "AND",
            "diagnostic_fields": [PRECIP_PROB_ABOVE, second_field],
            "diagnostic_thresholds": [[0.03, "mm hr-1"], second_threshold],
            "diagnostic_conditions": ["above", second_condition],
        }

    lightning = single(
        "lightning_cloud", "heavy_precipitation",
        LIGHTNING_VICINITY_PROB, [0.0, "m-2"], probability=0.3,
    )
    # Lightning is the only query with a fallback when data are missing.
    lightning["diagnostic_missing_action"] = "fail"

    queries = {
        "lightning": lightning,
        "lightning_cloud": single(29, 30, TEXTURE_PROB_ABOVE, [0.05, 1]),
        "heavy_precipitation": single(
            "heavy_precipitation_cloud", "precipitation_in_vicinity",
            PRECIP_PROB_ABOVE, [1.0, "mm hr-1"],
        ),
        "heavy_precipitation_cloud": single(
            "heavy_snow_shower", "heavy_snow_continuous",
            TEXTURE_PROB_ABOVE, [0.05, 1],
        ),
        "heavy_snow_shower": phase(
            26, "heavy_rain_or_sleet_shower", RAIN_PROB_ABOVE, SNOW_PROB_ABOVE, 1.0
        ),
        "heavy_rain_or_sleet_shower": phase(
            14, 17, SNOW_PROB_ABOVE, RAIN_PROB_ABOVE, 1.0
        ),
        "heavy_snow_continuous": phase(
            27, "heavy_rain_or_sleet_continuous", RAIN_PROB_ABOVE, SNOW_PROB_ABOVE, 1.0
        ),
        "heavy_rain_or_sleet_continuous": phase(
            15, 18, SNOW_PROB_ABOVE, RAIN_PROB_ABOVE, 1.0
        ),
        "precipitation_in_vicinity": single(
            "snow_in_vicinity", "drizzle_mist",
            PRECIP_VICINITY_PROB_ABOVE, [0.1, "mm hr-1"],
        ),
        "snow_in_vicinity": phase(
            "snow_in_vicinity_cloud", "rain_or_sleet_in_vicinity",
            RAIN_PROB_ABOVE, SNOW_PROB_ABOVE, 0.03,
        ),
        "snow_in_vicinity_cloud": single(
            "heavy_snow_shower_in_vicinity", "heavy_snow_continuous_in_vicinity",
            TEXTURE_PROB_ABOVE, [0.05, 1],
        ),
        "heavy_snow_shower_in_vicinity": single(
            26, 23, PRECIP_VICINITY_PROB_ABOVE, [1.0, "mm hr-1"]
        ),
        "heavy_snow_continuous_in_vicinity": single(
            27, 24, PRECIP_VICINITY_PROB_ABOVE, [1.0, "mm hr-1"]
        ),
        "rain_or_sleet_in_vicinity": phase(
            "rain_in_vicinity_cloud", "sleet_in_vicinity_cloud",
            SNOW_PROB_ABOVE, RAIN_PROB_ABOVE, 0.03,
        ),
        "rain_in_vicinity_cloud": single(
            "heavy_rain_shower_in_vicinity", "heavy_rain_continuous_in_vicinity",
            TEXTURE_PROB_ABOVE, [0.05, 1],
        ),
        "heavy_rain_shower_in_vicinity": single(
            14, 10, PRECIP_VICINITY_PROB_ABOVE, [1.0, "mm hr-1"]
        ),
        "heavy_rain_continuous_in_vicinity": single(
            15, 12, PRECIP_VICINITY_PROB_ABOVE, [1.0, "mm hr-1"]
        ),
        "sleet_in_vicinity_cloud": single(17, 18, TEXTURE_PROB_ABOVE, [0.05, 1]),
        "drizzle_mist": drizzle(
            "drizzle_cloud", VIS_PROB_BELOW, [5000.0, "m"], "below"
        ),
        "drizzle_cloud": drizzle(
            "mist_conditions", LOW_CLOUD_PROB_ABOVE, [0.85, 1], "above"
        ),
        "drizzle_is_rain": phase(
            11, "mist_conditions", SNOW_PROB_ABOVE, RAIN_PROB_ABOVE, 0.03
        ),
        "mist_conditions": single(
            "fog_conditions", "no_precipitation_cloud",
            VIS_PROB_BELOW, [5000.0, "m"], condition="below",
        ),
        "fog_conditions": single(
            6, 5, VIS_PROB_BELOW, [1000.0, "m"], condition="below"
        ),
        "no_precipitation_cloud": single(
            "overcast_cloud", "partly_cloudy", CLOUD_PROB_ABOVE, [0.8125, 1]
        ),
        "overcast_cloud": single(8, 7, LOW_CLOUD_PROB_ABOVE, [0.85, 1]),
        "partly_cloudy": single(3, 1, CLOUD_PROB_ABOVE, [0.1875, 1]),
    }
    return queries
def wxcode_decision_tree_global() -> Dict[str, Dict[str, Any]]:
    """
    Define an example global decision tree to test the weather symbols code.

    Every query is one of three repeated shapes, so the boilerplate is built
    by small private helpers; the resulting dictionary is value-identical to
    the fully written-out literal form.

    Returns:
        A dictionary containing the queries that comprise the decision
        tree.
    """

    def single(succeed, fail, field, threshold, condition="above"):
        """Query testing one diagnostic probability against p >= 0.5."""
        return {
            "succeed": succeed,
            "fail": fail,
            "probability_thresholds": [0.5],
            "threshold_condition": ">=",
            "condition_combination": "",
            "diagnostic_fields": [field],
            "diagnostic_thresholds": [threshold],
            "diagnostic_conditions": [condition],
        }

    def phase(succeed, fail, plus_field, minus_field, rate):
        """Precipitation-phase query: P(sleet + plus - minus) < 0 at rate."""
        return {
            "succeed": succeed,
            "fail": fail,
            "probability_thresholds": [0.0],
            "threshold_condition": "<",
            "condition_combination": "",
            "diagnostic_fields": [
                [SLEET_PROB_ABOVE, "+", plus_field, "-", minus_field]
            ],
            # One threshold per component of the field expression.
            "diagnostic_thresholds": [[[rate, "mm hr-1"] for _ in range(3)]],
            "diagnostic_conditions": [["above", "above", "above"]],
        }

    def drizzle(fail, second_field, second_threshold, second_condition):
        """Two-diagnostic AND query used by the drizzle branch."""
        return {
            "succeed": "drizzle_is_rain",
            "fail": fail,
            "probability_thresholds": [0.5, 0.5],
            "threshold_condition": ">=",
            "condition_combination": "AND",
            "diagnostic_fields": [PRECIP_PROB_ABOVE, second_field],
            "diagnostic_thresholds": [[0.03, "mm hr-1"], second_threshold],
            "diagnostic_conditions": ["above", second_condition],
        }

    queries = {
        "heavy_precipitation": single(
            "heavy_precipitation_cloud", "light_precipitation",
            PRECIP_PROB_ABOVE, [1.0, "mm hr-1"],
        ),
        "heavy_precipitation_cloud": single(
            "heavy_precipitation_convective_ratio", "heavy_snow_shower",
            CLOUD_PROB_ABOVE, [0.8125, 1],
        ),
        "heavy_precipitation_convective_ratio": single(
            "heavy_snow_shower", "heavy_snow_continuous",
            CONVECTION_PROB_ABOVE, [0.8, 1],
        ),
        "heavy_snow_shower": phase(
            26, "heavy_rain_or_sleet_shower", RAIN_PROB_ABOVE, SNOW_PROB_ABOVE, 1.0
        ),
        "heavy_rain_or_sleet_shower": phase(
            14, 17, SNOW_PROB_ABOVE, RAIN_PROB_ABOVE, 1.0
        ),
        "heavy_snow_continuous": phase(
            27, "heavy_rain_or_sleet_continuous", RAIN_PROB_ABOVE, SNOW_PROB_ABOVE, 1.0
        ),
        "heavy_rain_or_sleet_continuous": phase(
            15, 18, SNOW_PROB_ABOVE, RAIN_PROB_ABOVE, 1.0
        ),
        "light_precipitation": single(
            "light_precipitation_cloud", "drizzle_mist",
            PRECIP_PROB_ABOVE, [0.1, "mm hr-1"],
        ),
        "light_precipitation_cloud": single(
            "light_precipitation_convective_ratio", "light_snow_shower",
            CLOUD_PROB_ABOVE, [0.8125, 1],
        ),
        "light_precipitation_convective_ratio": single(
            "light_snow_shower", "light_snow_continuous",
            CONVECTION_PROB_ABOVE, [0.8, 1],
        ),
        "light_snow_shower": phase(
            23, "light_rain_or_sleet_shower", RAIN_PROB_ABOVE, SNOW_PROB_ABOVE, 0.1
        ),
        "light_rain_or_sleet_shower": phase(
            10, 17, SNOW_PROB_ABOVE, RAIN_PROB_ABOVE, 0.1
        ),
        "light_snow_continuous": phase(
            24, "light_rain_or_sleet_continuous", RAIN_PROB_ABOVE, SNOW_PROB_ABOVE, 0.1
        ),
        "light_rain_or_sleet_continuous": phase(
            12, 18, SNOW_PROB_ABOVE, RAIN_PROB_ABOVE, 0.1
        ),
        "drizzle_mist": drizzle(
            "drizzle_cloud", VIS_PROB_BELOW, [5000.0, "m"], "below"
        ),
        "drizzle_cloud": drizzle(
            "mist_conditions", LOW_CLOUD_PROB_ABOVE, [0.85, 1], "above"
        ),
        "drizzle_is_rain": phase(
            11, "mist_conditions", SNOW_PROB_ABOVE, RAIN_PROB_ABOVE, 0.03
        ),
        "mist_conditions": single(
            "fog_conditions", "no_precipitation_cloud",
            VIS_PROB_BELOW, [5000.0, "m"], condition="below",
        ),
        "fog_conditions": single(
            6, 5, VIS_PROB_BELOW, [1000.0, "m"], condition="below"
        ),
        "no_precipitation_cloud": single(
            "overcast_cloud", "partly_cloudy", CLOUD_PROB_ABOVE, [0.8125, 1]
        ),
        "overcast_cloud": single(8, 7, LOW_CLOUD_PROB_ABOVE, [0.85, 1]),
        "partly_cloudy": single(3, 1, CLOUD_PROB_ABOVE, [0.1875, 1]),
    }
    return queries
| en | 0.71536 | # -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # (C) British Crown Copyright 2017-2021 Met Office. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. Utilities for Unit tests for Weather Symbols Inline function to construct probability cube name Define an example UK decision tree to test the weather symbols code. Returns: A dictionary containing the queries that comprise the decision tree. Define an example global decision tree to test the weather symbols code. 
Returns: A dictionary containing the queries that comprise the decision tree. | 1.337289 | 1 |
tests/test_gaussian.py | JZK00/MONAI | 3 | 6621298 | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from monai.networks.layers.convutils import gaussian_1d
class TestGaussian1d(unittest.TestCase):
    """Unit tests for the 1-D Gaussian kernel helper ``gaussian_1d``."""

    def test_gaussian(self):
        """Kernel values match precomputed references within rtol 1e-4."""
        wide_kernel = [
            0.0000e00,
            2.9802e-07,
            1.3496e-03,
            1.5731e-01,
            6.8269e-01,
            1.5731e-01,
            1.3496e-03,
            2.9802e-07,
            0.0000e00,
        ]
        np.testing.assert_allclose(
            gaussian_1d(0.5, 8), torch.tensor(wide_kernel), rtol=1e-4
        )
        narrow_kernel = [0.24173, 0.382925, 0.24173]
        np.testing.assert_allclose(
            gaussian_1d(1, 1), torch.tensor(narrow_kernel), rtol=1e-4
        )

    def test_wrong_sigma(self):
        """A negative sigma or truncation width must raise ValueError."""
        for sigma, truncated in ((-1, 10), (1, -10)):
            with self.assertRaises(ValueError):
                gaussian_1d(sigma, truncated)
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    unittest.main()
| # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from monai.networks.layers.convutils import gaussian_1d
class TestGaussian1d(unittest.TestCase):
    """Unit tests for the 1-D Gaussian kernel helper ``gaussian_1d``."""

    def test_gaussian(self):
        """Kernel values match precomputed references within rtol 1e-4."""
        wide_kernel = [
            0.0000e00,
            2.9802e-07,
            1.3496e-03,
            1.5731e-01,
            6.8269e-01,
            1.5731e-01,
            1.3496e-03,
            2.9802e-07,
            0.0000e00,
        ]
        np.testing.assert_allclose(
            gaussian_1d(0.5, 8), torch.tensor(wide_kernel), rtol=1e-4
        )
        narrow_kernel = [0.24173, 0.382925, 0.24173]
        np.testing.assert_allclose(
            gaussian_1d(1, 1), torch.tensor(narrow_kernel), rtol=1e-4
        )

    def test_wrong_sigma(self):
        """A negative sigma or truncation width must raise ValueError."""
        for sigma, truncated in ((-1, 10), (1, -10)):
            with self.assertRaises(ValueError):
                gaussian_1d(sigma, truncated)
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    unittest.main()
| en | 0.846037 | # Copyright 2020 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. | 1.923691 | 2 |
anaadementia/assin/assin_sts.py | lbsantos/ANAA-Dementia | 0 | 6621299 | <filename>anaadementia/assin/assin_sts.py
# -*- coding: utf-8 -*-
from anaadementia.preprocessing.text_preprocessing import PreprocessingSTS, ExpadingSynonyms
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.utils.validation import check_is_fitted
from sklearn.base import TransformerMixin
import numpy as np
class AssinSTS(TransformerMixin):
  def __init__(self, embeddings, synonyms, stemmer, delaf, tokenizer, stop_words=None):
    """Build the sentence-similarity (STS) feature extractor.

    Args:
      embeddings: word-embedding model supporting ``token in embeddings`` and
        ``n_similarity`` (gensim KeyedVectors-style API -- TODO confirm).
      synonyms: synonym resource forwarded to ExpadingSynonyms.
      stemmer: stemmer used during synonym expansion.
      delaf: lexicon used during synonym expansion.
      tokenizer: tokenizer used by the sentence preprocessor.
      stop_words: optional stop-word collection removed while preprocessing.
    """
    self.preprocessing = PreprocessingSTS(tokenizer, stop_words)
    self.add_syns = ExpadingSynonyms(synonyms, stemmer, delaf)
    self.embeddings = embeddings
    # TF-IDF vectorizer fitted over synonym-expanded sentences (see fit()).
    self._tfidf = TfidfVectorizer()
def _process(self, src_setences, trg_senteces):
src_preprocessed = self.preprocessing.transform(src_setences)
trg_preprocessed = self.preprocessing.transform(trg_senteces)
src_syns = [' '.join(src) for src in self.add_syns.transform(src_preprocessed)]
trg_syns = [' '.join(src) for src in self.add_syns.transform(trg_preprocessed)]
return src_preprocessed, trg_preprocessed, src_syns, trg_syns
  def _cosine(self, src_preprocessed, trg_preprocessed, src_vec, trg_vec):
    """Build the per-pair feature vectors.

    For each (source, target) pair, produces
    ``[tfidf_cosine, embedding_similarity]``: the cosine similarity of the
    two TF-IDF row vectors plus the embedding-set similarity of the two
    token lists.
    """
    cos_distances = []
    for _src_vec, _trg_vec, src_tokens, trg_tokens in zip(src_vec, trg_vec, src_preprocessed, trg_preprocessed):
      # Map out-of-vocabulary tokens to the 'unk' placeholder so the
      # embedding model can always score the pair (assumes 'unk' exists in
      # the embedding vocabulary -- TODO confirm).
      e1 = [i if i in self.embeddings else 'unk' for i in src_tokens]
      e2 = [i if i in self.embeddings else 'unk' for i in trg_tokens]
      # float() collapses the 1x1 similarity matrix to a scalar.
      cos_distances.append(
          [float(cosine_similarity(_src_vec, _trg_vec)),
           self.embeddings.n_similarity(e1, e2)])
    return cos_distances
  def transform(self, src_trg_setences, y=None,**fit_params):
    """Turn an (n, 2) array of [source, target] sentence pairs into
    similarity features, using the already-fitted TF-IDF vocabulary.

    Args:
      src_trg_setences: array-like of shape (n, 2); column 0 holds the
        source sentences, column 1 the target sentences.
      y: ignored; present for scikit-learn pipeline compatibility.

    Returns:
      List of ``[tfidf_cosine, embedding_similarity]`` per pair.
    """
    src_setences = src_trg_setences[:,0]
    trg_senteces = src_trg_setences[:,1]
    # NOTE(review): the 3-argument form of check_is_fitted is deprecated in
    # newer scikit-learn releases -- confirm the pinned sklearn version.
    check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
    src_preprocessed, trg_preprocessed, src_syns, trg_syns = self._process(src_setences, trg_senteces)
    # Sources and targets are vectorized in one batch: the first half of the
    # resulting rows belongs to the sources, the second half to the targets.
    vecs = self._tfidf.transform(src_syns + trg_syns)
    src_vecs = vecs[0:vecs.shape[0]//2,:]
    trg_vecs = vecs[vecs.shape[0]//2:,:]
    return self._cosine(
        src_preprocessed,
        trg_preprocessed,
        src_vecs,
        trg_vecs)
def fit_transform(self, src_trg_setences, y=None, **fit_params):
src_setences = src_trg_setences[:,0]
trg_senteces = src_trg_setences[:,1]
src_preprocessed, trg_preprocessed, src_syns, trg_syns = self._process(src_setences, trg_senteces)
vecs = self._tfidf.fit_transform(src_syns + trg_syns)
src_vecs = vecs[0:vecs.shape[0]//2,:]
trg_vecs = vecs[vecs.shape[0]//2:,:]
return self._cosine(
src_preprocessed,
trg_preprocessed,
src_vecs,
trg_vecs)
  def fit(self, src_trg_setences, y=None, **fit_params):
    """Fit the TF-IDF vocabulary on the synonym-expanded sentence pairs."""
    src_setences = src_trg_setences[:,0]
    trg_senteces = src_trg_setences[:,1]
    # Only the expanded strings are needed here; the token lists are unused.
    _, _, src_syns, trg_syns = self._process(src_setences, trg_senteces)
    self._tfidf.fit(src_syns + trg_syns)
    return self | <filename>anaadementia/assin/assin_sts.py
# -*- coding: utf-8 -*-
from anaadementia.preprocessing.text_preprocessing import PreprocessingSTS, ExpadingSynonyms
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.utils.validation import check_is_fitted
from sklearn.base import TransformerMixin
import numpy as np
class AssinSTS(TransformerMixin):
def __init__(self, embeddings, synonyms, stemmer, delaf, tokenizer, stop_words=None):
self.preprocessing = PreprocessingSTS(tokenizer, stop_words)
self.add_syns = ExpadingSynonyms(synonyms, stemmer, delaf)
self.embeddings = embeddings
self._tfidf = TfidfVectorizer()
def _process(self, src_setences, trg_senteces):
src_preprocessed = self.preprocessing.transform(src_setences)
trg_preprocessed = self.preprocessing.transform(trg_senteces)
src_syns = [' '.join(src) for src in self.add_syns.transform(src_preprocessed)]
trg_syns = [' '.join(src) for src in self.add_syns.transform(trg_preprocessed)]
return src_preprocessed, trg_preprocessed, src_syns, trg_syns
def _cosine(self, src_preprocessed, trg_preprocessed, src_vec, trg_vec):
cos_distances = []
for _src_vec, _trg_vec, src_tokens, trg_tokens in zip(src_vec, trg_vec, src_preprocessed, trg_preprocessed):
e1 = [i if i in self.embeddings else 'unk' for i in src_tokens]
e2 = [i if i in self.embeddings else 'unk' for i in trg_tokens]
cos_distances.append(
[float(cosine_similarity(_src_vec, _trg_vec)),
self.embeddings.n_similarity(e1, e2)])
return cos_distances
def transform(self, src_trg_setences, y=None,**fit_params):
src_setences = src_trg_setences[:,0]
trg_senteces = src_trg_setences[:,1]
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
src_preprocessed, trg_preprocessed, src_syns, trg_syns = self._process(src_setences, trg_senteces)
vecs = self._tfidf.transform(src_syns + trg_syns)
src_vecs = vecs[0:vecs.shape[0]//2,:]
trg_vecs = vecs[vecs.shape[0]//2:,:]
return self._cosine(
src_preprocessed,
trg_preprocessed,
src_vecs,
trg_vecs)
def fit_transform(self, src_trg_setences, y=None, **fit_params):
src_setences = src_trg_setences[:,0]
trg_senteces = src_trg_setences[:,1]
src_preprocessed, trg_preprocessed, src_syns, trg_syns = self._process(src_setences, trg_senteces)
vecs = self._tfidf.fit_transform(src_syns + trg_syns)
src_vecs = vecs[0:vecs.shape[0]//2,:]
trg_vecs = vecs[vecs.shape[0]//2:,:]
return self._cosine(
src_preprocessed,
trg_preprocessed,
src_vecs,
trg_vecs)
def fit(self, src_trg_setences, y=None, **fit_params):
src_setences = src_trg_setences[:,0]
trg_senteces = src_trg_setences[:,1]
_, _, src_syns, trg_syns = self._process(src_setences, trg_senteces)
self._tfidf.fit(src_syns + trg_syns)
return self | en | 0.769321 | # -*- coding: utf-8 -*- | 2.093765 | 2 |
decodesamsungbin.py | nlitsme/CelbEprDecode | 3 | 6621300 | """
Tool for decoding the Cellebrite UFED bootloaders from ufedsamsungpack_v21.epr
It will write the decoded binary in a filename with suffix '.decoded'
Author: <NAME> <<EMAIL>>
"""
from __future__ import division, print_function
import struct
def processcelbdata(data):
    """Parse the CELB header and return (encrypted_size, xor_key).

    Checks the vector table, validates the two 'CELB' magic tags, then
    reports which known decryptor stub (if any) is embedded in the image.
    """
    hdr = struct.unpack("<3L4s4LL4sLL", data[:48])
    # Consecutive vector entries descending by exactly 1 indicate a
    # well-formed table; this check is informational only.
    if all(hdr[i] - hdr[i + 1] == 1 for i in (1, 4, 5, 6)):
        print("vectors OK")
    if hdr[3] != b'CELB':
        raise Exception("not CELB arm code")
    if hdr[9] != b'CELB':
        raise Exception("not CELB arm code")
    encsize, enckey = hdr[8], hdr[10]
    # The decryptor stub is fingerprinted by these constants at one of two
    # known offsets, identifying the unpacker variant.
    stub_magic = (0xffffffff, 0x10000000, 0x00000000, 0x20000000, 0x08088405)
    if struct.unpack("<5L", data[0x110:0x124]) == stub_magic:
        print("unpacker type1 ok")
    elif struct.unpack("<5L", data[0x80:0x94]) == stub_magic:
        print("unpacker type2 ok")
    else:
        print("unknown unpacker")
    return encsize, enckey
def decode(enc, key, useR4):
    """Decrypt a sequence of 32-bit words.

    Each word is XORed with the previous ciphertext word, with a Borland
    LCG keystream (multiplier 0x8088405), and optionally with an
    xorshift32 keystream. (Fixes a harmless duplicate ``dec = []``
    initialization in the original.)

    Args:
        enc: iterable of 32-bit ciphertext words.
        key: 32-bit key seeding all three streams.
        useR4: if True, additionally XOR with the xorshift32 stream.

    Returns:
        List of decrypted 32-bit words.
    """
    LCG_MUL = 0x8088405  # Borland C++ rand() multiplier.
    prev = key           # previous ciphertext word (was R2)
    lcg = key            # LCG state (was R3)
    xs = key             # xorshift32 state (was R4)
    dec = []
    for word in enc:
        out = word ^ prev
        lcg = (lcg * LCG_MUL + 1) & 0xFFFFFFFF
        out ^= lcg
        if useR4:
            xs ^= (xs << 13) & 0xFFFFFFFF
            xs ^= xs >> 17
            xs ^= (xs << 5) & 0xFFFFFFFF
            out ^= xs
        prev = word
        dec.append(out)
    return dec
def processfile(fn):
    """Decode one CELB bootloader file and write '<fn>.decoded' next to it.

    Reads the file, extracts the encrypted payload size and key from the
    header, selects the cipher variant from the header size, decodes the
    payload, and writes the plaintext.

    Raises:
        Exception: if the header size matches no known variant. (The
        original only printed a message and then crashed with a NameError
        on the undefined ``dec``; main() catches this and prints ERROR.)
    """
    with open(fn, "rb") as fh:
        data = fh.read()
    encsize, enckey = processcelbdata(data)
    nwords = encsize // 4  # payload is a whole number of 32-bit words
    enc = struct.unpack("<%dL" % nwords, data[-encsize:])
    hdrsize = len(data) - encsize
    # Header size distinguishes the cipher variant: the larger stub uses
    # the extra xorshift stream (useR4=True), the smaller one does not.
    if hdrsize == 0x2A8:
        dec = decode(enc, enckey, True)
    elif hdrsize == 0x1BC:
        dec = decode(enc, enckey, False)
    else:
        raise Exception("has unexpected encsize: %04x -> ofs = +%04x" % (encsize, hdrsize))
    decdata = struct.pack("<%dL" % nwords, *dec)
    with open(fn + ".decoded", "wb") as fh:
        fh.write(decdata)
def main():
    """Command-line entry point: decode every file named on the command line."""
    import argparse

    parser = argparse.ArgumentParser(description='decodebin')
    parser.add_argument('--verbose', '-v', action='count')
    parser.add_argument('FILES', type=str, nargs='+')
    args = parser.parse_args()

    for filename in args.FILES:
        print("==>", filename, "<==")
        # Keep going on errors so one bad file does not stop the batch.
        try:
            processfile(filename)
        except Exception as err:
            print("ERROR", err)


if __name__ == '__main__':
    main()
| """
Tool for decoding the Cellebrite UFED bootloaders from ufedsamsungpack_v21.epr
It will write the decoded binary in a filename with suffix '.decoded'
Author: <NAME> <<EMAIL>>
"""
from __future__ import division, print_function
import struct
def processcelbdata(data):
vectors = struct.unpack("<3L4s4LL4sLL", data[:48])
if all( vectors[i]-vectors[i+1] == 1 for i in (1,4,5,6) ):
print("vectors OK")
if vectors[3] != b'CELB':
raise Exception("not CELB arm code")
if vectors[9] != b'CELB':
raise Exception("not CELB arm code")
encsize = vectors[8]
enckey = vectors[10]
if struct.unpack("<5L", data[0x110:0x124]) == (0xffffffff, 0x10000000, 0x00000000, 0x20000000, 0x08088405):
print("unpacker type1 ok")
elif struct.unpack("<5L", data[0x80:0x94]) == (0xffffffff, 0x10000000, 0x00000000, 0x20000000, 0x08088405):
print("unpacker type2 ok")
else:
print("unknown unpacker")
return encsize, enckey
def decode(enc, key, useR4):
dec = []
R7 = 0x8088405
R3 = R4 = R2 = key
dec = []
for R1 in enc:
R0 = R1 ^ R2
R3 = (R3 * R7 + 1)&0xFFFFFFFF
R0 ^= R3
if useR4:
R4 ^= (R4<<13)&0xFFFFFFFF
R4 ^= R4>>17
R4 ^= (R4<<5)&0xFFFFFFFF
R0 ^= R4
R2 = R1
dec.append(R0)
return dec
def processfile(fn):
with open(fn, "rb") as fh:
data = fh.read()
encsize, enckey = processcelbdata(data)
enc = struct.unpack("<%dL" % (encsize/4), data[-encsize:])
if len(data)-encsize == 0x2A8:
dec = decode(enc, enckey, True)
elif len(data)-encsize == 0x1BC:
dec = decode(enc, enckey, False)
else:
print("has unexpected encsize: %04x -> ofs = +%04x" %( encsize, len(data)-encsize))
decdata = struct.pack("<%dL" % (encsize/4), *dec)
with open(fn+".decoded", "wb") as fh:
fh.write(decdata)
def main():
import argparse
parser = argparse.ArgumentParser(description='decodebin')
parser.add_argument('--verbose', '-v', action='count')
parser.add_argument('FILES', type=str, nargs='+')
args = parser.parse_args()
for fn in args.FILES:
print("==>", fn, "<==")
try:
processfile(fn)
except Exception as e:
print("ERROR", e)
if __name__ == '__main__':
main()
| en | 0.658287 | Tool for decoding the Cellebrite UFED bootloaders from ufedsamsungpack_v21.epr It will write the decoded binary in a filename with suffix '.decoded' Author: <NAME> <<EMAIL>> | 2.841823 | 3 |
src/mk_inc.py | stanzabird/gil | 0 | 6621301 | #!/usr/bin/python3
# Generate 'monster_types.inc' from the type table in 'mk_inc.txt'.
# Input lines look like "<name> : <index>"; blank lines and '#' comments are
# skipped, and indices must count up from 0 in file order.

# read the input data file
with open('mk_inc.txt') as f:
    content = [x.strip() for x in f.readlines()]

# check the input file and build the (name, index) table
data = []
typeno = 0
for line in content:
    # skip blanks and comment lines
    if not line or line[0] == '#':
        continue
    fields = line.split(':')
    # cast the fields to the right data types
    fields[0] = fields[0].strip()
    fields[1] = int(fields[1].strip())
    # simple consistency test: indices must be sequential from 0
    # (message previously lacked its closing parenthesis, and exit() quit
    # with status 0 even on error)
    if fields[1] != typeno:
        print(f'error in input at type #{fields[1]} (should be {typeno})')
        raise SystemExit(1)
    data.append(fields)
    typeno += 1
del content
del typeno

# generate the boost::gil image-type list included by monster.h
# (context manager guarantees the file is closed even on error)
with open('monster_types.inc', 'w') as f:
    for t in data:
        f.write(f' boost::gil::{t[0]}_image_t')
        if t[1] < len(data) - 1:
            f.write(',')
        f.write(f' // {t[1]}\n')

# demo of a multiline f-string (kept from the original, unused)
i = 10
s = f'''This is
a long multiline f-string where I can
put souce code blocks into. the number i is {i}'''
#print(s)
| #!/usr/bin/python3
# read input data file
with open('mk_inc.txt') as f:
content = f.readlines()
content = [x.strip() for x in content]
# check input file, build input data structure
data = []
typeno = 0
for line in content:
if not line or line[0] == '#':
continue
fields = line.split(':')
# cast the fields to the right data type
fields[0] = fields[0].strip()
fields[1] = int(fields[1].strip())
# simple tesing of data consistency
if fields[1] != typeno:
print(f'error in input at type #{fields[1]} (should be {typeno}')
exit()
data.append(fields)
typeno += 1
del content
del typeno
# generate monster type for monster.h
f = open('monster_types.inc','w')
for t in data:
f.write(f' boost::gil::{t[0]}_image_t')
if t[1] < len(data)-1:
f.write(',')
f.write(f' // {t[1]}\n')
f.close()
i = 10
s = f'''This is
a long multiline f-string where I can
put souce code blocks into. the number i is {i}'''
#print(s)
| en | 0.557753 | #!/usr/bin/python3 # read input data file # check input file, build input data structure # cast the fields to the right data type # simple tesing of data consistency #{fields[1]} (should be {typeno}') # generate monster type for monster.h This is a long multiline f-string where I can put souce code blocks into. the number i is {i} #print(s) | 2.959438 | 3 |
fullprocess.py | jmcabreira/Dynamic-risk-assessment-system | 1 | 6621302 |
import training
import scoring
import deployment
import diagnostics
import reporting
import ast
import json
import os
import glob
import sys
import subprocess
from scoring import score_model

################## Load config file and paths
with open('config.json', 'r') as f:
    config = json.load(f)

input_folder_path = config['input_folder_path']
ingestedfiles_path = os.path.join(config['prod_deployment_path'], 'ingestedfiles.txt')
ingesteddata_path = os.path.join(config['output_folder_path'], 'finaldata.csv')
lastestscore_path = os.path.join(config['prod_deployment_path'], 'latestscore.txt')
model_path = os.path.join(config['prod_deployment_path'], 'trainedmodel.pkl')

################## Check and read new data
# first, read ingestedfiles.txt (read-only: 'r+' requested needless write access)
with open(ingestedfiles_path, 'r') as f:
    ingested_files = ast.literal_eval(f.read())

# second, determine whether the source data folder has files that aren't
# listed in ingestedfiles.txt
filenames = glob.glob(input_folder_path + "/*.csv")
new_files = []
print(filenames)
for file in filenames:
    print(f"{file} in {input_folder_path}")
    if os.path.basename(file) not in ingested_files:
        new_files.append(file)

################## Deciding whether to proceed, part 1
# if we found new data, re-ingest and continue; otherwise end the process here
if len(new_files) > 0:
    subprocess.run(['python3', 'ingestion.py'])
else:
    sys.exit()

################## Checking for model drift
# compare the deployed model's recorded F1 score against the score obtained
# on the newly ingested data; a lower score on new data indicates drift
with open(lastestscore_path, 'r') as f:
    latest_score = float(f.read())

score = score_model(ingesteddata_path)
check_model_drift = score < latest_score

################## Deciding whether to proceed, part 2
# if there is no drift, finish; otherwise retrain and redeploy
if not check_model_drift:
    print(f'NO Model drift. Previous model F1 score was {latest_score}. New model score is {score}.')
    sys.exit()
else:
    ################## Re-deployment
    # evidence of model drift: retrain, redeploy, and re-run diagnostics
    print(f'Model drift has been detected.\n')
    print(f"Previous model F1 score was {latest_score}. New model score is {score}.\n")
    print("Training new model.")
    # Retrain model with latest data
    subprocess.run(['python3', 'training.py'])
    # Score model on test data
    subprocess.run(['python3', 'scoring.py'])
    # Redeploy model
    subprocess.run(['python3', 'deployment.py'])
    # Generate report
    subprocess.run(['python3', 'reporting.py'])
    # Run diagnostics via the API
    subprocess.run(['python3', 'apicalls.py'])
|
import training
import scoring
import deployment
import diagnostics
import reporting
import ast
import json
import os
import glob
import sys
import subprocess
from scoring import score_model
##################Load config file and paths
with open('config.json','r') as f:
config = json.load(f)
input_folder_path = config['input_folder_path']
ingestedfiles_path = os.path.join(config['prod_deployment_path'], 'ingestedfiles.txt')
ingesteddata_path = os.path.join(config['output_folder_path'], 'finaldata.csv')
lastestscore_path = os.path.join(config['prod_deployment_path'], 'latestscore.txt')
model_path = os.path.join(config['prod_deployment_path'], 'trainedmodel.pkl')
##################Check and read new data
#first, read ingestedfiles.txt
with open(ingestedfiles_path,'r+') as f:
ingested_files = ast.literal_eval(f.read())
# print(ingested_files)
#second, determine whether the source data folder has files that aren't listed in ingestedfiles.txt
filenames = glob.glob(input_folder_path + "/*.csv")
new_files = []
print(filenames)
for file in filenames:
print(f"{file} in {input_folder_path}")
if os.path.basename(file) not in ingested_files:
new_files.append(file)
else:
pass
##################Deciding whether to proceed, part 1
#if you found new data, you should proceed. otherwise, do end the process here
if len(new_files) > 0:
subprocess.run(['python3', 'ingestion.py'])
else:
sys.exit()
##################Checking for model drift
#check whether the score from the deployed model is different from the score from the model that uses the newest ingested data
with open(lastestscore_path, 'r') as f:
latest_score = float(f.read())
score = score_model(ingesteddata_path)
check_model_drift = score < latest_score
##################Deciding whether to proceed, part 2
#if model drift, proceed. otherwise, finish the process
if check_model_drift == False:
print(f'NO Model drift. Previous model F1 score was {latest_score}. New model score is {score}.')
sys.exit()
else:
##################Re-deployment
#if evidence for model drift, re-run the deployment.py script
# Retrain and redeploy model
print(f'Model drift has been detected.\n')
print(f"Previous model F1 score was {latest_score}. New model score is {score}.\n")
print("Training new model.")
# Retrain model with latest data
subprocess.run(['python3', 'training.py'])
# Score model on test data
subprocess.run(['python3', 'scoring.py'])
# Redeploy model
subprocess.run(['python3', 'deployment.py'])
# Generate report
subprocess.run(['python3', 'reporting.py'])
# Run diagnostics
subprocess.run(['python3', 'apicalls.py'])
| en | 0.673291 | ##################Load config file and paths ##################Check and read new data #first, read ingestedfiles.txt # print(ingested_files) #second, determine whether the source data folder has files that aren't listed in ingestedfiles.txt ##################Deciding whether to proceed, part 1 #if you found new data, you should proceed. otherwise, do end the process here ##################Checking for model drift #check whether the score from the deployed model is different from the score from the model that uses the newest ingested data ##################Deciding whether to proceed, part 2 #if model drift, proceed. otherwise, finish the process ##################Re-deployment #if evidence for model drift, re-run the deployment.py script # Retrain and redeploy model # Retrain model with latest data # Score model on test data # Redeploy model # Generate report # Run diagnostics | 2.171342 | 2 |
src/mpass/mpass/migrations/0002_auto_20180329_1246.py | haltu/velmu-mpass-demo | 0 | 6621303 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-29 09:46
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import parler.models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 1.11).

    Adds the translatable ``Service`` model and its django-parler
    translation table ``ServiceTranslation``, with a uniqueness constraint
    on (language_code, master). Do not hand-edit the operations.
    """
    # Must be applied on top of the initial mpass migration.
    dependencies = [
        ('mpass', '0001_initial'),
    ]
    operations = [
        # Base (shared, non-translated) table for Service; the translated
        # fields live in ServiceTranslation below.
        migrations.CreateModel(
            name='Service',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
                ('service_id', models.CharField(max_length=128)),
                ('icon_url', models.CharField(blank=True, max_length=2048, null=True)),
                ('service_url', models.CharField(blank=True, max_length=2048, null=True)),
                ('sso_url', models.CharField(blank=True, max_length=2048, null=True)),
            ],
            options={
                'abstract': False,
            },
            bases=(parler.models.TranslatableModelMixin, models.Model),
        ),
        # Per-language row holding the translated title/description; 'master'
        # points back at the shared Service row.
        migrations.CreateModel(
            name='ServiceTranslation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
                ('description', models.CharField(max_length=2048)),
                ('title', models.CharField(max_length=2048)),
                ('master', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='mpass.Service')),
            ],
            options={
                'managed': True,
                'db_table': 'mpass_service_translation',
                'db_tablespace': '',
                'default_permissions': (),
                'verbose_name': 'service Translation',
            },
        ),
        # At most one translation row per (language, service) pair.
        migrations.AlterUniqueTogether(
            name='servicetranslation',
            unique_together=set([('language_code', 'master')]),
        ),
    ]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-29 09:46
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import parler.models
class Migration(migrations.Migration):
dependencies = [
('mpass', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Service',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('service_id', models.CharField(max_length=128)),
('icon_url', models.CharField(blank=True, max_length=2048, null=True)),
('service_url', models.CharField(blank=True, max_length=2048, null=True)),
('sso_url', models.CharField(blank=True, max_length=2048, null=True)),
],
options={
'abstract': False,
},
bases=(parler.models.TranslatableModelMixin, models.Model),
),
migrations.CreateModel(
name='ServiceTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
('description', models.CharField(max_length=2048)),
('title', models.CharField(max_length=2048)),
('master', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='mpass.Service')),
],
options={
'managed': True,
'db_table': 'mpass_service_translation',
'db_tablespace': '',
'default_permissions': (),
'verbose_name': 'service Translation',
},
),
migrations.AlterUniqueTogether(
name='servicetranslation',
unique_together=set([('language_code', 'master')]),
),
]
| en | 0.677603 | # -*- coding: utf-8 -*- # Generated by Django 1.11.10 on 2018-03-29 09:46 | 1.656102 | 2 |
ansible/utils/gcp/gcp.py | hyperledger-labs-archives/fabric-vms-provision | 1 | 6621304 | <reponame>hyperledger-labs-archives/fabric-vms-provision
'''generate gce role'''
import argparse
GCE_TEMPLATE_INTRO = '''---
- name: create multiple instances
gce:
instance_names: "{{ item.name }}"
tags: "{{ item.tag }}"
zone: "{{ zone }}"
machine_type: "{{ machine_type }}"
image: "{{ image }}"
state: present
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
with_items:'''
GCE_TEMPLATE_FINISH = ''' register: gce
- name: Wait for SSH for instances
wait_for:
delay: 1
host: "{{ item.instance_data[0].public_ip }}"
port: 22
state: started
timeout: 30
with_items: "{{ gce.results }}"
'''
def gce(args):
    """Print the complete GCE Ansible role: header, host list, footer."""
    print(GCE_TEMPLATE_INTRO)
    names(args)
    print(GCE_TEMPLATE_FINISH)
def names(args):
    """Print one with_items entry per VM: a build host, then for each org
    its peers plus orderer/zookeeper/kafka/fabric-ca/cli hosts."""
    template = ''' - {{ name: {name}, tag: '{tag}-{{{{ domain }}}}' }}'''

    def emit(name, tag):
        print(template.format(name=name, tag=tag))

    emit('build', 'build')
    for oid, peers in enumerate(args.peer_count):
        for pid in range(peers):
            emit('peer{}org{}'.format(pid, oid),
                 'peer{}-org{}'.format(pid, oid))
        # per-org service hosts; for these the tag equals the host name
        for prefix in ('orderer', 'z', 'k', 'fabricca', 'cli'):
            emit('{}{}'.format(prefix, oid), '{}{}'.format(prefix, oid))
def main():
    """Parse command-line arguments and print the generated role."""
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-p', '--peer_count',
        nargs='+',
        type=int,
        help='number of peers per org',
    )
    gce(parser.parse_args())


if __name__ == '__main__':
    main()
| '''generate gce role'''
import argparse
GCE_TEMPLATE_INTRO = '''---
- name: create multiple instances
gce:
instance_names: "{{ item.name }}"
tags: "{{ item.tag }}"
zone: "{{ zone }}"
machine_type: "{{ machine_type }}"
image: "{{ image }}"
state: present
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
with_items:'''
GCE_TEMPLATE_FINISH = ''' register: gce
- name: Wait for SSH for instances
wait_for:
delay: 1
host: "{{ item.instance_data[0].public_ip }}"
port: 22
state: started
timeout: 30
with_items: "{{ gce.results }}"
'''
def gce(args):
print(GCE_TEMPLATE_INTRO)
names(args)
print(GCE_TEMPLATE_FINISH)
def names(args):
template = ''' - {{ name: {name}, tag: '{tag}-{{{{ domain }}}}' }}'''
print(template.format(name='build', tag='build'))
org_count = len(args.peer_count)
for oid in range(0, org_count):
for pid in range(0, args.peer_count[oid]):
n = 'peer{}org{}'.format(pid, oid)
t = 'peer{}-org{}'.format(pid, oid)
print(template.format(name=n, tag=t))
o = 'orderer{}'.format(oid)
print(template.format(name=o, tag=o))
z = 'z{}'.format(oid)
print(template.format(name=z, tag=z))
k = 'k{}'.format(oid)
print(template.format(name=k, tag=k))
f = 'fabricca{}'.format(oid)
print(template.format(name=f, tag=f))
c = 'cli{}'.format(oid)
print(template.format(name=c, tag=c))
def main():
'''parse cmdline args and print role'''
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--peer_count',
nargs='+',
type=int,
help='number of peers per org')
args = parser.parse_args()
gce(args)
if __name__ == '__main__':
main() | en | 0.598413 | generate gce role --- - name: create multiple instances gce: instance_names: "{{ item.name }}" tags: "{{ item.tag }}" zone: "{{ zone }}" machine_type: "{{ machine_type }}" image: "{{ image }}" state: present service_account_email: "{{ service_account_email }}" credentials_file: "{{ credentials_file }}" project_id: "{{ project_id }}" with_items: register: gce - name: Wait for SSH for instances wait_for: delay: 1 host: "{{ item.instance_data[0].public_ip }}" port: 22 state: started timeout: 30 with_items: "{{ gce.results }}" - {{ name: {name}, tag: '{tag}-{{{{ domain }}}}' }} parse cmdline args and print role | 2.352201 | 2 |
onnx_pytorch/op_code_generators/Split.py | BernardJiang/onnx-pytorch | 66 | 6621305 | <gh_stars>10-100
import onnx
import torch
from onnx.numpy_helper import to_array
from onnx_pytorch.op_code_generators import OpCodeGenerator
class SplitOpCodeGenerator(OpCodeGenerator):
  """Generates PyTorch code for the ONNX ``Split`` operator.

  Maps ONNX Split (an axis plus optional per-output sizes) onto a single
  ``torch.split`` call in the emitted forward() body.
  """
  def __init__(self,
               onnx_ver=onnx.defs.onnx_opset_version(),
               torch_ver=torch.__version__):
    super(SplitOpCodeGenerator, self).__init__(onnx_ver, torch_ver)
  def gen(self, node, value_infos, initializers):
    """Return the init/forward code line lists for one Split node."""
    attr_value_dict = self.get_attr_value_dict(node)
    inputs_str, outputs_str = self.gen_input_output_string(
        node, initializers, self.rename_helper)
    init_str, forward_str = [], []
    # Opset >= 13 passes the split sizes as a second input (an initializer);
    # older opsets carry them in the 'split' attribute. NOTE(review): when
    # neither is present, split stays None, which torch.split rejects --
    # confirm upstream always supplies sizes for such models.
    if self.onnx_ver > 11 and len(node.input) > 1:
      split = to_array(initializers[node.input[1]]).tolist()
    else:
      split = attr_value_dict.get("split", None)
    # NOTE(review): 'axis' is read unconditionally; verify the attribute
    # dict fills in the ONNX default (0) when the node omits it.
    axis = attr_value_dict["axis"]
    params_str = self.gen_params_str(split_size_or_sections=split, dim=axis)
    forward_str.append(
        f"{', '.join(outputs_str)} = torch.split({inputs_str[0]}, **{{{params_str}}})"
    )
    return {"init": init_str, "forward": forward_str}
| import onnx
import torch
from onnx.numpy_helper import to_array
from onnx_pytorch.op_code_generators import OpCodeGenerator
class SplitOpCodeGenerator(OpCodeGenerator):
def __init__(self,
onnx_ver=onnx.defs.onnx_opset_version(),
torch_ver=torch.__version__):
super(SplitOpCodeGenerator, self).__init__(onnx_ver, torch_ver)
def gen(self, node, value_infos, initializers):
attr_value_dict = self.get_attr_value_dict(node)
inputs_str, outputs_str = self.gen_input_output_string(
node, initializers, self.rename_helper)
init_str, forward_str = [], []
if self.onnx_ver > 11 and len(node.input) > 1:
split = to_array(initializers[node.input[1]]).tolist()
else:
split = attr_value_dict.get("split", None)
axis = attr_value_dict["axis"]
params_str = self.gen_params_str(split_size_or_sections=split, dim=axis)
forward_str.append(
f"{', '.join(outputs_str)} = torch.split({inputs_str[0]}, **{{{params_str}}})"
)
return {"init": init_str, "forward": forward_str} | none | 1 | 2.415128 | 2 | |
VMEncryption/test/test_ResourceDiskUtil.py | jamvar/azure-linux-extensions | 1 | 6621306 | #!/usr/bin/env python
#
# *********************************************************
# Copyright (c) Microsoft. All rights reserved.
#
# Apache 2.0 License
#
# You may obtain a copy of the License at
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# *********************************************************
""" Unit tests for the ResourceDiskUtil module """
import unittest
import os
import console_logger
import patch
import ResourceDiskUtil
class TestResourceDiskUtilMethods(unittest.TestCase):
    """Integration tests for ResourceDiskUtil.

    WARNING: these tests run against the live system. test_automount
    encrypts and then re-partitions the Azure resource disk and shells out
    with os.system, so this module must only run on a disposable test VM.
    """
    def setUp(self):
        # Fresh util per test case; the console logger is passed twice
        # because it also serves as the hutil/backup logger argument.
        self.logger = console_logger.ConsoleLogger()
        self.distro_patcher = patch.GetDistroPatcher(self.logger)
        self.resource_disk = ResourceDiskUtil.ResourceDiskUtil(self.logger, self.logger, self.distro_patcher)
    def test_is_luks_device(self):
        # Before automount the resource disk is not a LUKS volume.
        self.assertEqual(self.resource_disk.is_luks_device(), False)
    def test_is_luks_device_opened(self):
        # No mapping exists before the device is ever luksOpen-ed.
        self.assertEqual(self.resource_disk.is_luks_device_opened(), False)
    def test_is_valid_key(self):
        # No passphrase has been set yet, so validation must fail.
        self.assertEqual(self.resource_disk.is_valid_key(), False)
    def test_configure_waagent(self):
        # Updating the waagent config should succeed on a stock image.
        self.assertEqual(self.resource_disk.configure_waagent(), True)
    def test_is_crypt_mounted(self):
        # Nothing encrypted is mounted before automount runs.
        self.assertEqual(self.resource_disk.is_crypt_mounted(), False)
    def test_try_remount(self):
        # Remount cannot succeed while no encrypted resource disk exists.
        self.assertEqual(self.resource_disk.try_remount(), False)
    def test_automount(self):
        # validate preconditions: disk starts unencrypted and unopened
        self.assertEqual(self.resource_disk.is_luks_device(), False)
        self.assertEqual(self.resource_disk.is_luks_device_opened(), False)
        # run the function under test (encrypts + mounts the resource disk)
        self.assertEqual(self.resource_disk.automount(), True)
        # validate postconditions: device is LUKS, opened, key valid,
        # and the encrypted mount can be re-established
        self.assertEqual(self.resource_disk.is_luks_device(), True)
        self.assertEqual(self.resource_disk.is_luks_device_opened(), True)
        self.assertEqual(self.resource_disk.is_luks_device_opened(), True)
        self.assertEqual(self.resource_disk.is_valid_key(), True)
        self.assertEqual(self.resource_disk.try_remount(), True)
        # cleanup and restore original system state: unmount, drop the
        # dm mapping, scrub the partition start, and delete the partition
        os.system("umount /mnt/resource")
        os.system('dmsetup remove /dev/mapper/' + self.resource_disk.mapper_name)
        os.system('dd if=/dev/urandom of=/dev/disk/azure/resource-part1 bs=512 count=20480')
        os.system('parted /dev/disk/azure/resource rm 1')
| #!/usr/bin/env python
#
# *********************************************************
# Copyright (c) Microsoft. All rights reserved.
#
# Apache 2.0 License
#
# You may obtain a copy of the License at
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# *********************************************************
""" Unit tests for the ResourceDiskUtil module """
import unittest
import os
import console_logger
import patch
import ResourceDiskUtil
class TestResourceDiskUtilMethods(unittest.TestCase):
    """Unit tests for the ResourceDiskUtil module.

    NOTE(review): test_automount is destructive -- it encrypts and then
    wipes the Azure resource disk via os.system calls, so this suite must
    only run on a disposable Azure VM whose resource disk holds no data.
    The tests are also state-dependent: the "is ... False" tests assume a
    pristine (unencrypted) disk.
    """

    def setUp(self):
        # Build a fresh ResourceDiskUtil per test; the console logger is
        # passed for both logger arguments.
        self.logger = console_logger.ConsoleLogger()
        self.distro_patcher = patch.GetDistroPatcher(self.logger)
        self.resource_disk = ResourceDiskUtil.ResourceDiskUtil(self.logger, self.logger, self.distro_patcher)

    def test_is_luks_device(self):
        # Pristine disk: must not be detected as a LUKS device.
        self.assertEqual(self.resource_disk.is_luks_device(), False)

    def test_is_luks_device_opened(self):
        self.assertEqual(self.resource_disk.is_luks_device_opened(), False)

    def test_is_valid_key(self):
        self.assertEqual(self.resource_disk.is_valid_key(), False)

    def test_configure_waagent(self):
        self.assertEqual(self.resource_disk.configure_waagent(), True)

    def test_is_crypt_mounted(self):
        self.assertEqual(self.resource_disk.is_crypt_mounted(), False)

    def test_try_remount(self):
        self.assertEqual(self.resource_disk.try_remount(), False)

    def test_automount(self):
        # validate preconditions
        self.assertEqual(self.resource_disk.is_luks_device(), False)
        self.assertEqual(self.resource_disk.is_luks_device_opened(), False)
        # run the function under test
        self.assertEqual(self.resource_disk.automount(), True)
        # validate postconditions
        self.assertEqual(self.resource_disk.is_luks_device(), True)
        self.assertEqual(self.resource_disk.is_luks_device_opened(), True)
        self.assertEqual(self.resource_disk.is_luks_device_opened(), True)
        self.assertEqual(self.resource_disk.is_valid_key(), True)
        self.assertEqual(self.resource_disk.try_remount(), True)
        # cleanup and restore original system state
        os.system("umount /mnt/resource")
        os.system('dmsetup remove /dev/mapper/' + self.resource_disk.mapper_name)
        os.system('dd if=/dev/urandom of=/dev/disk/azure/resource-part1 bs=512 count=20480')
        os.system('parted /dev/disk/azure/resource rm 1')
| en | 0.694595 | #!/usr/bin/env python # # ********************************************************* # Copyright (c) Microsoft. All rights reserved. # # Apache 2.0 License # # You may obtain a copy of the License at # http:#www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing # permissions and limitations under the License. # # ********************************************************* Unit tests for the ResourceDiskUtil module # validate preconditions # run the function under test # validate postconditions # cleanup and restore original system state | 2.241122 | 2 |
pokemon/models.py | juanmarcoscabezas/poke-api | 0 | 6621307 | from django.db import models
class Pokemon(models.Model):
    """Django model for a Pokemon: basic attributes, battle stats, and an
    optional self-referential link to its pre-evolution form."""
    name = models.CharField(max_length=200)
    height = models.CharField(max_length=50)  # stored as text, not numeric
    weight = models.CharField(max_length=50)  # stored as text, not numeric
    image = models.CharField(max_length=1024, default='')  # image URL/path
    # Stats
    hp = models.IntegerField()
    attack = models.IntegerField()
    defense = models.IntegerField()
    special_attack = models.IntegerField()
    special_defense = models.IntegerField()
    speed = models.IntegerField()
    # Parent
    # Self-referential FK to the Pokemon this one evolves from.
    # DO_NOTHING: deleting the parent leaves a dangling reference
    # (no cascade is performed at the ORM level).
    evolves_from = models.ForeignKey("self",
                                     blank=True,
                                     null=True, on_delete=models.DO_NOTHING
                                     )
| from django.db import models
class Pokemon(models.Model):
    """Django model for a Pokemon: basic attributes, battle stats, and an
    optional self-referential link to its pre-evolution form."""
    name = models.CharField(max_length=200)
    height = models.CharField(max_length=50)  # stored as text, not numeric
    weight = models.CharField(max_length=50)  # stored as text, not numeric
    image = models.CharField(max_length=1024, default='')  # image URL/path
    # Stats
    hp = models.IntegerField()
    attack = models.IntegerField()
    defense = models.IntegerField()
    special_attack = models.IntegerField()
    special_defense = models.IntegerField()
    speed = models.IntegerField()
    # Parent
    # Self-referential FK to the Pokemon this one evolves from.
    # DO_NOTHING: deleting the parent leaves a dangling reference
    # (no cascade is performed at the ORM level).
    evolves_from = models.ForeignKey("self",
                                     blank=True,
                                     null=True, on_delete=models.DO_NOTHING
                                     )
| ca | 0.362216 | # Stats # Parent | 2.08826 | 2 |
part-B.py | FAWC-bupt/Covid19Spider | 11 | 6621308 | <gh_stars>10-100
"""
累计确诊数排名前 20 的国家名称及其数量(利用12月15日数据)
"""
import matplotlib.pyplot as plt
import pandas as pd
plt.rcParams['font.sans-serif'] = ['Microsoft YaHei'] # 用来正常显示中文标签
plt.rcParams['savefig.dpi'] = 300 # 图片像素
plt.rcParams['figure.dpi'] = 300 # 分辨率
plt.style.use('Solarize_Light2')
df = pd.read_csv('csvFile/Covid19Data2020-12-15.csv', encoding='utf-8', skiprows=[1], thousands=',')
# print(df.describe())
# print(df.info())
df.sort_values(by='Confirmed', inplace=True, ascending=False) # ascending=True为升序,反之为降序
print(df)
df_res = df[0:20]
df_res.drop(df_res.columns[2:15], axis=1, inplace=True)
print(df_res)
plt.bar(list(range(0, 100, 5)), df_res['Confirmed'].to_list(), width=3, alpha=0.5, color='b')
plt.xticks(list(range(0, 100, 5)), labels=df_res['Name'].to_list(), rotation=35)
plt.tick_params(labelsize=6)
for a, b in zip(list(range(0, 100, 5)), df_res['Confirmed'].to_list()): # 在直方图上显示数字
plt.text(a, b + 1e5, '%.2e' % b, ha='center', va='bottom', fontsize=4, color='black')
plt.title('累计确诊数排名前20的国家')
plt.xlabel("国家")
plt.ylabel("人数")
plt.tight_layout()
plt.savefig('imgResult/累计确诊数排名前20的国家.png')
plt.show()
df_res.to_csv('csvResult/累计确诊数排名前20的国家.csv', index=False)
| """
Plot the names and counts of the 20 countries with the highest cumulative
confirmed COVID-19 cases, using the 2020-12-15 data snapshot.
(Original title: 累计确诊数排名前 20 的国家名称及其数量, 利用12月15日数据)
"""
import matplotlib.pyplot as plt
import pandas as pd
plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']  # CJK-capable font so Chinese labels render
plt.rcParams['savefig.dpi'] = 300  # saved-image resolution
plt.rcParams['figure.dpi'] = 300  # display resolution
plt.style.use('Solarize_Light2')
# Row 1 is skipped (non-data row); thousands separators are parsed via ','.
df = pd.read_csv('csvFile/Covid19Data2020-12-15.csv', encoding='utf-8', skiprows=[1], thousands=',')
# print(df.describe())
# print(df.info())
df.sort_values(by='Confirmed', inplace=True, ascending=False)  # ascending=False => descending order
print(df)
df_res = df[0:20]
# Keep only the leading columns (country name + confirmed count).
df_res.drop(df_res.columns[2:15], axis=1, inplace=True)
print(df_res)
plt.bar(list(range(0, 100, 5)), df_res['Confirmed'].to_list(), width=3, alpha=0.5, color='b')
plt.xticks(list(range(0, 100, 5)), labels=df_res['Name'].to_list(), rotation=35)
plt.tick_params(labelsize=6)
for a, b in zip(list(range(0, 100, 5)), df_res['Confirmed'].to_list()):  # annotate each bar with its value
    plt.text(a, b + 1e5, '%.2e' % b, ha='center', va='bottom', fontsize=4, color='black')
plt.title('累计确诊数排名前20的国家')
plt.xlabel("国家")
plt.ylabel("人数")
plt.tight_layout()
plt.savefig('imgResult/累计确诊数排名前20的国家.png')
plt.show()
df_res.to_csv('csvResult/累计确诊数排名前20的国家.csv', index=False) | zh | 0.906502 | 累计确诊数排名前 20 的国家名称及其数量(利用12月15日数据) # 用来正常显示中文标签 # 图片像素 # 分辨率 # print(df.describe()) # print(df.info()) # ascending=True为升序,反之为降序 # 在直方图上显示数字 | 3.005809 | 3 |
picatrix/lib/manager.py | google/picatrix | 27 | 6621309 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class that defines the manager for all magics."""
import functools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Text, Tuple, Union
import pandas
from IPython import get_ipython
from picatrix.lib import state, utils
@dataclass
class Helper:
    """Registration record for a picatrix helper function."""
    function: Callable[..., Any]  # the helper callable itself
    help: Text                    # first line of the helper's docstring
    types: Dict[Text, Any]        # argument name -> type, for display only


class MagicManager:
    """Manager class for Picatrix magics and helper functions."""

    MAGICS_DF_COLUMNS = ['name', 'description', 'line', 'cell', 'function']

    _magics: Dict[Text, Callable[[Text, Text], Text]] = {}
    _helpers: Dict[Text, Helper] = {}

    @classmethod
    def clear_helpers(cls):
        """Clear all helper registration."""
        for helper_name in cls._helpers:
            try:
                utils.ipython_remove_global(helper_name)
            except KeyError:
                pass
        cls._helpers = {}

    @classmethod
    def clear_magics(cls):
        """Clear all magic registration."""
        # Copy the keys first because deregister_magic mutates the dict.
        magics = list(cls._magics.keys())
        for magic_name in magics:
            cls.deregister_magic(magic_name)

    @classmethod
    def deregister_helper(cls, helper_name: Text):
        """Remove a helper from the registration.

        Args:
          helper_name (str): the name of the helper to remove.

        Raises:
          KeyError: if the helper is not registered.
        """
        if helper_name not in cls._helpers:
            raise KeyError(f'Helper [{helper_name}] is not registered.')
        _ = cls._helpers.pop(helper_name)
        try:
            utils.ipython_remove_global(helper_name)
        except KeyError:
            pass

    @classmethod
    def deregister_magic(cls, magic_name: Text):
        """Removes a magic from the registration.

        Args:
          magic_name (str): the name of the magic to remove.

        Raises:
          KeyError: if the magic is not registered.
        """
        if magic_name not in cls._magics:
            raise KeyError(f'Magic [{magic_name}] is not registered.')
        _ = cls._magics.pop(magic_name)
        try:
            utils.ipython_remove_global(f'{magic_name}_func')
        except KeyError:
            pass
        # Attempt to remove the magic definition from IPython itself.
        ip = get_ipython()
        magics_manager = ip.magics_manager
        if not hasattr(magics_manager, 'magics'):
            return
        line_magics = magics_manager.magics.get('line', {})
        if magic_name in line_magics:
            _ = magics_manager.magics.get('line').pop(magic_name)
        cell_magics = magics_manager.magics.get('cell', {})
        if magic_name in cell_magics:
            _ = magics_manager.magics.get('cell').pop(magic_name)

    @classmethod
    def get_helper(cls, helper_name: Text) -> Optional[Helper]:
        """Return a registered Helper record, or None if not registered."""
        # BUG FIX: this previously looked the name up in cls._magics, so
        # registered helpers could never be retrieved.
        return cls._helpers.get(helper_name)

    @classmethod
    def get_magic(cls, magic_name: Text) -> Callable[[Text, Text], Text]:
        """Return a magic function from the registration (None if absent)."""
        return cls._magics.get(magic_name)

    @classmethod
    def get_helper_info(
        cls,
        as_pandas: Optional[bool] = True
    ) -> Union[pandas.DataFrame, List[Tuple[Text, Text]]]:
        """Get a list of all the registered helpers.

        Args:
          as_pandas (bool): boolean to determine whether to receive the results
              as a list of tuples or a pandas DataFrame. Defaults to True.

        Returns:
          Either a pandas DataFrame or a list of tuples, depending on the
          as_pandas boolean.
        """
        if not as_pandas:
            return [(name, helper.help) for name, helper in cls._helpers.items()]
        lines = []
        for name, helper in cls._helpers.items():
            hints = helper.types
            hint_strings = []
            for key, value in hints.items():
                # typing objects may lack __name__; fall back to str().
                value_string = getattr(value, '__name__', str(value))
                hint_strings.append(f'{key} [{value_string}]')
            helper_string = ', '.join(hint_strings)
            lines.append(
                {
                    'name': name,
                    'help': helper.help,
                    'arguments': helper_string,
                })
        return pandas.DataFrame(lines)

    @classmethod
    def get_magic_info(
        cls,
        as_pandas: Optional[bool] = True
    ) -> Union[pandas.DataFrame, List[Tuple[Text, Text]]]:
        """Get a list of all magics.

        Args:
          as_pandas (bool): boolean to determine whether to receive the results
              as a list of tuples or a pandas DataFrame. Defaults to True.

        Returns:
          Either a pandas DataFrame or a list of tuples, depending on the
          as_pandas boolean.
        """
        if not as_pandas:
            # First docstring line serves as the short description.
            return [
                (x.magic_name, x.__doc__.split('\n')[0])
                for x in iter(cls._magics.values())
            ]
        entries = []
        for magic_name, magic_class in iter(cls._magics.items()):
            description = magic_class.__doc__.split('\n')[0]
            magic_dict = {
                'name': magic_name,
                'cell': f'%%{magic_name}',
                'line': f'%{magic_name}',
                'function': f'{magic_name}_func',
                'description': description
            }
            entries.append(magic_dict)
        df = pandas.DataFrame(entries)
        return df[cls.MAGICS_DF_COLUMNS].sort_values('name')

    @classmethod
    def register_helper(
            cls, name: Text, helper: Any, typing_help: Dict[Text, Any]):
        """Register a picatrix helper function.

        Args:
          name (str): the name of the helper function.
          helper (function): the helper function to register.
          typing_help (dict): dict with the arguments and their types.

        Raises:
          KeyError: if the helper is already registered.
        """
        if name in cls._helpers:
            raise KeyError(f'The helper [{name}] is already registered.')
        doc_string = helper.__doc__
        if doc_string:
            help_string = doc_string.split('\n')[0]
        else:
            help_string = 'No help string supplied.'
        cls._helpers[name] = Helper(
            function=helper, help=help_string, types=typing_help)

    @classmethod
    def register_magic(
            cls,
            function: Callable[[Text, Text], Text],
            conditional: Callable[[], bool] = None):
        """Register magic function as a magic in picatrix.

        Args:
          function (function): the function to register as a line and a
              cell magic. Assumed to carry a `magic_name` attribute (set by
              the picatrix magic decorator) -- TODO confirm against framework.
          conditional (function): a function that should return a bool, used to
              determine whether to register magic or not. This can be used by
              magics to determine whether a magic should be registered or not,
              for instance basing that on whether the notebook is able to reach
              the required service, or whether a connection to a client can be
              achieved, etc. This is optional and if not provided a magic will
              be registered.

        Raises:
          KeyError: if the magic is already registered.
        """
        if conditional and not conditional():
            return
        magic_name = function.magic_name
        if magic_name in cls._magics:
            raise KeyError(f'The magic [{magic_name}] is already registered.')
        ip = get_ipython()
        if ip:
            ip.register_magic_function(
                function, magic_kind='line_cell', magic_name=magic_name)
        cls._magics[magic_name] = function
        function_name = f'{magic_name}_func'

        def capture_output(function, name):
            """A function that wraps around magic functions to capture output."""
            @functools.wraps(function)
            def wrapper(*args, **kwargs):
                function_output = function(*args, **kwargs)
                state_obj = state.state()
                return state_obj.set_output(function_output, magic_name=name)
            return wrapper

        _ = utils.ipython_bind_global(
            function_name, capture_output(function.fn, function_name))
| # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class that defines the manager for all magics."""
import functools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Text, Tuple, Union
import pandas
from IPython import get_ipython
from picatrix.lib import state, utils
@dataclass
class Helper:
    """Registration record for a picatrix helper function."""
    function: Callable[..., Any]  # the helper callable itself
    help: Text                    # first line of the helper's docstring
    types: Dict[Text, Any]        # argument name -> type, for display only


class MagicManager:
    """Manager class for Picatrix magics and helper functions."""

    MAGICS_DF_COLUMNS = ['name', 'description', 'line', 'cell', 'function']

    _magics: Dict[Text, Callable[[Text, Text], Text]] = {}
    _helpers: Dict[Text, Helper] = {}

    @classmethod
    def clear_helpers(cls):
        """Clear all helper registration."""
        for helper_name in cls._helpers:
            try:
                utils.ipython_remove_global(helper_name)
            except KeyError:
                pass
        cls._helpers = {}

    @classmethod
    def clear_magics(cls):
        """Clear all magic registration."""
        # Copy the keys first because deregister_magic mutates the dict.
        magics = list(cls._magics.keys())
        for magic_name in magics:
            cls.deregister_magic(magic_name)

    @classmethod
    def deregister_helper(cls, helper_name: Text):
        """Remove a helper from the registration.

        Args:
          helper_name (str): the name of the helper to remove.

        Raises:
          KeyError: if the helper is not registered.
        """
        if helper_name not in cls._helpers:
            raise KeyError(f'Helper [{helper_name}] is not registered.')
        _ = cls._helpers.pop(helper_name)
        try:
            utils.ipython_remove_global(helper_name)
        except KeyError:
            pass

    @classmethod
    def deregister_magic(cls, magic_name: Text):
        """Removes a magic from the registration.

        Args:
          magic_name (str): the name of the magic to remove.

        Raises:
          KeyError: if the magic is not registered.
        """
        if magic_name not in cls._magics:
            raise KeyError(f'Magic [{magic_name}] is not registered.')
        _ = cls._magics.pop(magic_name)
        try:
            utils.ipython_remove_global(f'{magic_name}_func')
        except KeyError:
            pass
        # Attempt to remove the magic definition from IPython itself.
        ip = get_ipython()
        magics_manager = ip.magics_manager
        if not hasattr(magics_manager, 'magics'):
            return
        line_magics = magics_manager.magics.get('line', {})
        if magic_name in line_magics:
            _ = magics_manager.magics.get('line').pop(magic_name)
        cell_magics = magics_manager.magics.get('cell', {})
        if magic_name in cell_magics:
            _ = magics_manager.magics.get('cell').pop(magic_name)

    @classmethod
    def get_helper(cls, helper_name: Text) -> Optional[Helper]:
        """Return a registered Helper record, or None if not registered."""
        # BUG FIX: this previously looked the name up in cls._magics, so
        # registered helpers could never be retrieved.
        return cls._helpers.get(helper_name)

    @classmethod
    def get_magic(cls, magic_name: Text) -> Callable[[Text, Text], Text]:
        """Return a magic function from the registration (None if absent)."""
        return cls._magics.get(magic_name)

    @classmethod
    def get_helper_info(
        cls,
        as_pandas: Optional[bool] = True
    ) -> Union[pandas.DataFrame, List[Tuple[Text, Text]]]:
        """Get a list of all the registered helpers.

        Args:
          as_pandas (bool): boolean to determine whether to receive the results
              as a list of tuples or a pandas DataFrame. Defaults to True.

        Returns:
          Either a pandas DataFrame or a list of tuples, depending on the
          as_pandas boolean.
        """
        if not as_pandas:
            return [(name, helper.help) for name, helper in cls._helpers.items()]
        lines = []
        for name, helper in cls._helpers.items():
            hints = helper.types
            hint_strings = []
            for key, value in hints.items():
                # typing objects may lack __name__; fall back to str().
                value_string = getattr(value, '__name__', str(value))
                hint_strings.append(f'{key} [{value_string}]')
            helper_string = ', '.join(hint_strings)
            lines.append(
                {
                    'name': name,
                    'help': helper.help,
                    'arguments': helper_string,
                })
        return pandas.DataFrame(lines)

    @classmethod
    def get_magic_info(
        cls,
        as_pandas: Optional[bool] = True
    ) -> Union[pandas.DataFrame, List[Tuple[Text, Text]]]:
        """Get a list of all magics.

        Args:
          as_pandas (bool): boolean to determine whether to receive the results
              as a list of tuples or a pandas DataFrame. Defaults to True.

        Returns:
          Either a pandas DataFrame or a list of tuples, depending on the
          as_pandas boolean.
        """
        if not as_pandas:
            # First docstring line serves as the short description.
            return [
                (x.magic_name, x.__doc__.split('\n')[0])
                for x in iter(cls._magics.values())
            ]
        entries = []
        for magic_name, magic_class in iter(cls._magics.items()):
            description = magic_class.__doc__.split('\n')[0]
            magic_dict = {
                'name': magic_name,
                'cell': f'%%{magic_name}',
                'line': f'%{magic_name}',
                'function': f'{magic_name}_func',
                'description': description
            }
            entries.append(magic_dict)
        df = pandas.DataFrame(entries)
        return df[cls.MAGICS_DF_COLUMNS].sort_values('name')

    @classmethod
    def register_helper(
            cls, name: Text, helper: Any, typing_help: Dict[Text, Any]):
        """Register a picatrix helper function.

        Args:
          name (str): the name of the helper function.
          helper (function): the helper function to register.
          typing_help (dict): dict with the arguments and their types.

        Raises:
          KeyError: if the helper is already registered.
        """
        if name in cls._helpers:
            raise KeyError(f'The helper [{name}] is already registered.')
        doc_string = helper.__doc__
        if doc_string:
            help_string = doc_string.split('\n')[0]
        else:
            help_string = 'No help string supplied.'
        cls._helpers[name] = Helper(
            function=helper, help=help_string, types=typing_help)

    @classmethod
    def register_magic(
            cls,
            function: Callable[[Text, Text], Text],
            conditional: Callable[[], bool] = None):
        """Register magic function as a magic in picatrix.

        Args:
          function (function): the function to register as a line and a
              cell magic. Assumed to carry a `magic_name` attribute (set by
              the picatrix magic decorator) -- TODO confirm against framework.
          conditional (function): a function that should return a bool, used to
              determine whether to register magic or not. This can be used by
              magics to determine whether a magic should be registered or not,
              for instance basing that on whether the notebook is able to reach
              the required service, or whether a connection to a client can be
              achieved, etc. This is optional and if not provided a magic will
              be registered.

        Raises:
          KeyError: if the magic is already registered.
        """
        if conditional and not conditional():
            return
        magic_name = function.magic_name
        if magic_name in cls._magics:
            raise KeyError(f'The magic [{magic_name}] is already registered.')
        ip = get_ipython()
        if ip:
            ip.register_magic_function(
                function, magic_kind='line_cell', magic_name=magic_name)
        cls._magics[magic_name] = function
        function_name = f'{magic_name}_func'

        def capture_output(function, name):
            """A function that wraps around magic functions to capture output."""
            @functools.wraps(function)
            def wrapper(*args, **kwargs):
                function_output = function(*args, **kwargs)
                state_obj = state.state()
                return state_obj.set_output(function_output, magic_name=name)
            return wrapper

        _ = utils.ipython_bind_global(
            function_name, capture_output(function.fn, function_name))
| en | 0.757334 | # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Class that defines the manager for all magics. Small structure for a helper. Manager class for Picatrix magics. Clear all helper registration. Clear all magic registration. Remove a helper from the registration. Args: helper_name (str): the name of the helper to remove. Raises: KeyError: if the helper is not registered. Removes a magic from the registration. Args: magic_name (str): the name of the magic to remove. Raises: KeyError: if the magic is not registered. # Attempt to remove the magic definition. Return a helper function from the registration. Return a magic function from the registration. Get a list of all the registered helpers. Args: as_pandas (bool): boolean to determine whether to receive the results as a list of tuple or a pandas DataFrame. Defaults to True. Returns: Either a pandas DataFrame or a list of tuples, depending on the as_pandas boolean. Get a list of all magics. Args: as_pandas (bool): boolean to determine whether to receive the results as a list of tuples or a pandas DataFrame. Defaults to True. Returns: Either a pandas DataFrame or a list of tuples, depending on the as_pandas boolean. Register a picatrix helper function. Args: name (str): the name of the helper function. helper (function): the helper function to register. typing_help (dict): dict with the arguments and their types. Raises: KeyError: if the helper is already registered. 
Register magic function as a magic in picatrix. Args: function (function): the function to register as a line and a cell magic. conditional (function): a function that should return a bool, used to determine whether to register magic or not. This can be used by magics to determine whether a magic should be registered or not, for instance basing that on whether the notebook is able to reach the required service, or whether a connection to a client can be achieved, etc. This is optional and if not provided a magic will be registered. Raises: KeyError: if the magic is already registered. A function that wraps around magic functions to capture output. | 2.338015 | 2 |
src/runtastic_strava_migration_tool.py | hudcondr/Digital-preservation---sports-trackers-for-Strava | 0 | 6621310 | <reponame>hudcondr/Digital-preservation---sports-trackers-for-Strava
#!/usr/bin/env python
import os
import json
from stravalib import Client, exc
from requests.exceptions import ConnectionError
import csv
import shutil
import time
from datetime import datetime, timedelta
import sys
import pandas as pd
import random
access_token = sys.argv[1]
def convert_json_to_csv(filepath):
    """Convert every JSON file in *filepath* to a one-row CSV.

    Each file is parsed as a single JSON object and written to
    ../json_to_csv/<basename>.csv. NOTE(review): assumes the output
    directory exists and every directory entry is a JSON file.
    """
    for file in os.listdir(filepath):
        work_file = os.path.join(filepath + '/', file)
        with open(work_file) as json_file:
            dct = json.load(json_file)
        # Wrap the dict in a list so the object becomes one DataFrame row.
        df = pd.DataFrame([dct])
        df.to_csv('../json_to_csv/' + str(file.split('.')[0]) + '.csv', index=False)
def get_strava_access_token():
    """Return the Strava access token.

    Prefers the token passed on the command line (module-level
    access_token from sys.argv[1]); falls back to the
    STRAVA_UPLOADER_TOKEN environment variable. Exits the process with
    status 1 when neither is available.
    """
    global access_token
    if access_token is not None:
        print('Found access token')
        return access_token
    access_token = os.environ.get('STRAVA_UPLOADER_TOKEN')
    if access_token is not None:
        print('Found access token')
        return access_token
    print('Access token not found. Please set the env variable STRAVA_UPLOADER_TOKEN')
    exit(1)
def get_strava_client():
    """Return a stravalib Client authenticated with the configured access token."""
    access = get_strava_access_token()
    strava_client = Client()
    strava_client.access_token = access
    return strava_client
def increment_activity_counter(counter):
    """Bump the upload counter; once 599 uploads are reached, sleep 15
    minutes (Strava's rate-limit window) and reset the counter to 0."""
    if counter < 599:
        return counter + 1
    print("Upload count at 599 - pausing uploads for 15 minutes to avoid rate-limit")
    time.sleep(900)
    return 0
# designates part of day for name assignment, matching Strava convention for GPS activities
def strava_day_converstion(hour_of_day):
    """Return the Strava-style part-of-day name for a 24h *hour_of_day*.

    Morning 03-11, Afternoon 12-16, Evening 17-19, Night otherwise.
    """
    if 3 <= hour_of_day <= 11:
        return "Morning"
    # BUG FIX: the original tested `12 <= hour <= 4` and `5 <= hour <= 7`,
    # mixing 24h and 12h clocks. With datetime.hour (always 0-23) the first
    # range is impossible and the second matches early morning, so every
    # afternoon/evening activity was labelled "Night".
    elif 12 <= hour_of_day <= 16:
        return "Afternoon"
    elif 17 <= hour_of_day <= 19:
        return "Evening"
    return "Night"
def activity_translator(activity_id):
    """Translate a Runtastic sport_type_id into a Strava activity type.

    Looks *activity_id* up in activity_translator_data.csv (columns: id,
    activity). Returns the Strava activity name, or None when the id is
    not in the table.
    """
    # Use a context manager so the CSV handle is closed; the original
    # passed a bare open() to DictReader and leaked the file handle.
    with open("activity_translator_data.csv") as csv_file:
        for row in csv.DictReader(csv_file):
            if int(row['id']) == int(activity_id):
                return row['activity']
    return None
# Get a small range of time. Note runkeeper does not maintain timezone
# in the CSV, so we must get about 12 hours earlier and later to account
# for potential miss due to UTC
def get_date_range(time, hourBuffer=12):
    """Return {'from': time - hourBuffer hours, 'to': time + hourBuffer hours}.

    Raises TypeError when *time* is not exactly a datetime instance.
    """
    if type(time) is not datetime:
        raise TypeError('time arg must be a datetime, not a %s' % type(time))
    window = timedelta(hours=hourBuffer)
    return {'from': time - window, 'to': time + window}
def activity_exists(client, activity_name, start_time):
    """Return True if an activity named *activity_name* already exists on
    Strava within +/-12 hours of *start_time* (see get_date_range).

    NOTE(review): matching is by generated name only, so two different
    activities with the same name in the window look like duplicates.
    """
    date_range = get_date_range(start_time)
    print("Getting existing activities from [" + date_range['from'].isoformat() + "] to [" + date_range[
        'to'].isoformat() + "]")
    activities = client.get_activities(
        before=date_range['to'],
        after=date_range['from']
    )
    for activity in activities:
        if activity.name == activity_name:
            return True
    return False
def create_activity(client, activity_id, duration, distance, start_time, strava_activity_type):
    """Manually create a GPS-less Strava activity.

    Returns True when created; returns None (falsy to callers) when an
    activity with the same generated name already exists; exits the
    process with status 1 on connection errors.
    """
    # convert to total time in seconds
    day_part = strava_day_converstion(start_time.hour)
    activity_name = day_part + " " + strava_activity_type + " (Manual)"
    if activity_exists(client, activity_name, start_time):
        print('Activity [' + activity_name + '] already created, skipping')
        return
    print("Manually uploading [" + activity_id + "]:[" + activity_name + "]")
    try:
        upload = client.create_activity(
            name=activity_name,
            start_date_local=start_time,
            elapsed_time=duration,
            distance=distance,
            activity_type=strava_activity_type
        )
        print("Manually created " + activity_id)
        return True
    except ConnectionError as err:
        print("No Internet connection: {}".format(err))
        exit(1)
def upload_gpx(client, gpxfile):
    """Upload a GPX file to Strava.

    Returns True on success (or when the wait step reports a duplicate),
    False when the file is missing or the initial upload reports a
    duplicate; exits the process with status 1 on any other failure.
    """
    if not os.path.isfile(gpxfile):
        print("No file found for " + gpxfile + "!")
        return False
    print("------------------------------------------------------------------")
    print("Uploading " + gpxfile)
    try:
        upload = client.upload_activity(
            activity_file=open(gpxfile, 'r'),
            data_type='gpx',
            private=False
        )
    except exc.ActivityUploadFailed as err:
        errStr = str(err)
        # BUG FIX: str.find() returns -1 (truthy) when the substring is
        # absent, so `if errStr.find(...)` treated nearly every error as a
        # duplicate. Use substring membership instead.
        if 'duplicate of activity' in errStr:
            print("Duplicate File " + gpxfile + " is already uploaded.")
            return False
        else:
            print("Another ActivityUploadFailed error: {}".format(err))
            exit(1)
    except ConnectionError as err:
        print("No Internet connection: {}".format(err))
        exit(1)
    try:
        upResult = upload.wait()
    except exc.ActivityUploadFailed as err:
        errStr = str(err)
        # Same str.find() fix as above; a late duplicate counts as uploaded.
        if 'duplicate of activity' in errStr:
            print("Duplicate File " + gpxfile + " is already uploaded.")
            return True
        else:
            print("Another ActivityUploadFailed error: {}".format(err))
            exit(1)
    print("Uploaded " + gpxfile + " - Activity id: " + str(upResult.id))
    return True
def main():
    """Entry point.

    CLI: <access_token> <mode: json|csv|gpx> <data path(s)>.
    json/csv rows are created as manual Strava activities; gpx files are
    uploaded directly.
    """
    files_path = sys.argv[3].split(',')
    client = get_strava_client()
    print('Connecting to Strava')
    athlete = client.get_athlete()
    print("Now authenticated for " + athlete.firstname + " " + athlete.lastname)
    activity_counter = 0
    completed_activities = []
    if sys.argv[2] == 'json' or sys.argv[2] == 'csv':
        if sys.argv[2] == 'json':
            # JSON input is first converted to per-activity CSVs.
            convert_json_to_csv(sys.argv[3])
            data_path = '../json_to_csv'
        if sys.argv[2] == 'csv':
            # NOTE(review): files_path is a list (from split(',')), but
            # os.listdir below expects a single path string -- this branch
            # looks broken for comma-separated input; verify with callers.
            data_path = files_path
        for file in os.listdir(data_path):
            csv_file = os.path.join(data_path + '/', file)
            activities = csv.DictReader(open(csv_file))
            for row in activities:
                strava_activity_type = activity_translator(int(row['sport_type_id']))
                # Runtastic timestamps are in milliseconds; strip the last
                # 3 digits to get epoch seconds (UTC).
                start_time = datetime.strptime(str(datetime.utcfromtimestamp(int(row['start_time'][:-3])).strftime('%Y-%m-%d %H:%M:%S')),"%Y-%m-%d %H:%M:%S")
                print(start_time)
                duration = int(row['end_time'][:-3])-int(row['start_time'][:-3])
                distance = int(row['distance'])
                activity_id = str(row['id'])
                if strava_activity_type is not None:
                    if create_activity(client, activity_id, duration, distance, start_time, strava_activity_type):
                        completed_activities.append(activity_id)
                        activity_counter = increment_activity_counter(activity_counter)
                else:
                    print('Invalid activity type ' + str(row['Type']) + ', skipping')
    elif sys.argv[2] == 'gpx':
        for file in os.listdir(sys.argv[3] + '/'):
            gpxfile = os.path.join(sys.argv[3] + '/', file)
            if upload_gpx(client, gpxfile):
                activity_counter = increment_activity_counter(activity_counter)
    else:
        print("Wrong data path. Make sure you are using the correct path to file.")
    print("Complete! Created [" + str(activity_counter) + "] activities.")


if __name__ == '__main__':
    main()
| #!/usr/bin/env python
import os
import json
from stravalib import Client, exc
from requests.exceptions import ConnectionError
import csv
import shutil
import time
from datetime import datetime, timedelta
import sys
import pandas as pd
import random
access_token = sys.argv[1]
def convert_json_to_csv(filepath):
    """Convert every JSON file in *filepath* to a one-row CSV.

    Each file is parsed as a single JSON object and written to
    ../json_to_csv/<basename>.csv. NOTE(review): assumes the output
    directory exists and every directory entry is a JSON file.
    """
    for file in os.listdir(filepath):
        work_file = os.path.join(filepath + '/', file)
        with open(work_file) as json_file:
            dct = json.load(json_file)
        # Wrap the dict in a list so the object becomes one DataFrame row.
        df = pd.DataFrame([dct])
        df.to_csv('../json_to_csv/' + str(file.split('.')[0]) + '.csv', index=False)
def get_strava_access_token():
    """Return the Strava access token.

    Prefers the token passed on the command line (module-level
    access_token from sys.argv[1]); falls back to the
    STRAVA_UPLOADER_TOKEN environment variable. Exits the process with
    status 1 when neither is available.
    """
    global access_token
    if access_token is not None:
        print('Found access token')
        return access_token
    access_token = os.environ.get('STRAVA_UPLOADER_TOKEN')
    if access_token is not None:
        print('Found access token')
        return access_token
    print('Access token not found. Please set the env variable STRAVA_UPLOADER_TOKEN')
    exit(1)
def get_strava_client():
    """Return a stravalib Client authenticated with the configured access token."""
    access = get_strava_access_token()
    strava_client = Client()
    strava_client.access_token = access
    return strava_client
def increment_activity_counter(counter):
    """Bump the upload counter; once 599 uploads are reached, sleep 15
    minutes (Strava's rate-limit window) and reset the counter to 0."""
    if counter < 599:
        return counter + 1
    print("Upload count at 599 - pausing uploads for 15 minutes to avoid rate-limit")
    time.sleep(900)
    return 0
# designates part of day for name assignment, matching Strava convention for GPS activities
def strava_day_converstion(hour_of_day):
    """Return the Strava-style part-of-day name for a 24h *hour_of_day*.

    Morning 03-11, Afternoon 12-16, Evening 17-19, Night otherwise.
    """
    if 3 <= hour_of_day <= 11:
        return "Morning"
    # BUG FIX: the original tested `12 <= hour <= 4` and `5 <= hour <= 7`,
    # mixing 24h and 12h clocks. With datetime.hour (always 0-23) the first
    # range is impossible and the second matches early morning, so every
    # afternoon/evening activity was labelled "Night".
    elif 12 <= hour_of_day <= 16:
        return "Afternoon"
    elif 17 <= hour_of_day <= 19:
        return "Evening"
    return "Night"
def activity_translator(activity_id):
    """Translate a Runtastic sport_type_id into a Strava activity type.

    Looks *activity_id* up in activity_translator_data.csv (columns: id,
    activity). Returns the Strava activity name, or None when the id is
    not in the table.
    """
    # Use a context manager so the CSV handle is closed; the original
    # passed a bare open() to DictReader and leaked the file handle.
    with open("activity_translator_data.csv") as csv_file:
        for row in csv.DictReader(csv_file):
            if int(row['id']) == int(activity_id):
                return row['activity']
    return None
# Get a small range of time. Note runkeeper does not maintain timezone
# in the CSV, so we must get about 12 hours earlier and later to account
# for potential miss due to UTC
def get_date_range(time, hourBuffer=12):
    """Return {'from': time - hourBuffer hours, 'to': time + hourBuffer hours}.

    Raises TypeError when *time* is not exactly a datetime instance.
    """
    if type(time) is not datetime:
        raise TypeError('time arg must be a datetime, not a %s' % type(time))
    window = timedelta(hours=hourBuffer)
    return {'from': time - window, 'to': time + window}
def activity_exists(client, activity_name, start_time):
    """Return True if an activity named *activity_name* already exists on
    Strava within +/-12 hours of *start_time* (see get_date_range).

    NOTE(review): matching is by generated name only, so two different
    activities with the same name in the window look like duplicates.
    """
    date_range = get_date_range(start_time)
    print("Getting existing activities from [" + date_range['from'].isoformat() + "] to [" + date_range[
        'to'].isoformat() + "]")
    activities = client.get_activities(
        before=date_range['to'],
        after=date_range['from']
    )
    for activity in activities:
        if activity.name == activity_name:
            return True
    return False
def create_activity(client, activity_id, duration, distance, start_time, strava_activity_type):
    """Manually create a GPS-less Strava activity.

    Returns True when created; returns None (falsy to callers) when an
    activity with the same generated name already exists; exits the
    process with status 1 on connection errors.
    """
    # convert to total time in seconds
    day_part = strava_day_converstion(start_time.hour)
    activity_name = day_part + " " + strava_activity_type + " (Manual)"
    if activity_exists(client, activity_name, start_time):
        print('Activity [' + activity_name + '] already created, skipping')
        return
    print("Manually uploading [" + activity_id + "]:[" + activity_name + "]")
    try:
        upload = client.create_activity(
            name=activity_name,
            start_date_local=start_time,
            elapsed_time=duration,
            distance=distance,
            activity_type=strava_activity_type
        )
        print("Manually created " + activity_id)
        return True
    except ConnectionError as err:
        print("No Internet connection: {}".format(err))
        exit(1)
def upload_gpx(client, gpxfile):
    """Upload a GPX file to Strava via *client*.

    Returns:
        True when the activity ends up on Strava (fresh upload, or a
        duplicate detected while waiting for processing); False when the
        file is missing or rejected as a duplicate at upload time.
        Exits the process on connection errors or unexpected failures.
    """
    if not os.path.isfile(gpxfile):
        print("No file found for " + gpxfile + "!")
        return False
    print("------------------------------------------------------------------")
    print("Uploading " + gpxfile)
    try:
        # NOTE(review): the handle is passed to stravalib and never closed
        # here; confirm whether upload_activity consumes/closes it.
        upload = client.upload_activity(
            activity_file=open(gpxfile, 'r'),
            data_type='gpx',
            private=False
        )
    except exc.ActivityUploadFailed as err:
        errStr = str(err)
        # deal with duplicate errors: continue with the next file, else stop.
        # BUG FIX: str.find() returns -1 (truthy) on a miss, so the original
        # treated EVERY upload failure as a duplicate; use membership instead.
        if 'duplicate of activity' in errStr:
            print("Duplicate File " + gpxfile + " is already uploaded.")
            return False
        else:
            print("Another ActivityUploadFailed error: {}".format(err))
            exit(1)
    except ConnectionError as err:
        print("No Internet connection: {}".format(err))
        exit(1)
    try:
        upResult = upload.wait()
    except exc.ActivityUploadFailed as err:
        errStr = str(err)
        # same membership fix as above for the processing/wait stage
        if 'duplicate of activity' in errStr:
            print("Duplicate File " + gpxfile + " is already uploaded.")
            return True
        else:
            print("Another ActivityUploadFailed error: {}".format(err))
            exit(1)
    print("Uploaded " + gpxfile + " - Activity id: " + str(upResult.id))
    return True
def main():
    """Entry point: push runkeeper export data to Strava.

    sys.argv[2] selects the input format ('json', 'csv' or 'gpx');
    sys.argv[3] is the path to the data directory.
    """
    client = get_strava_client()
    print('Connecting to Strava')
    athlete = client.get_athlete()
    print("Now authenticated for " + athlete.firstname + " " + athlete.lastname)
    activity_counter = 0
    completed_activities = []
    if sys.argv[2] == 'json' or sys.argv[2] == 'csv':
        if sys.argv[2] == 'json':
            convert_json_to_csv(sys.argv[3])
            data_path = '../json_to_csv'
        if sys.argv[2] == 'csv':
            # BUG FIX: os.listdir needs a directory path string; the
            # original passed sys.argv[3].split(','), a list, which
            # raises TypeError on the very first use.
            data_path = sys.argv[3]
        for file in os.listdir(data_path):
            csv_file = os.path.join(data_path + '/', file)
            activities = csv.DictReader(open(csv_file))
            for row in activities:
                strava_activity_type = activity_translator(int(row['sport_type_id']))
                # runkeeper timestamps are in milliseconds; stripping the
                # last three digits yields seconds (no timezone in the CSV)
                start_time = datetime.strptime(str(datetime.utcfromtimestamp(int(row['start_time'][:-3])).strftime('%Y-%m-%d %H:%M:%S')), "%Y-%m-%d %H:%M:%S")
                print(start_time)
                duration = int(row['end_time'][:-3]) - int(row['start_time'][:-3])
                distance = int(row['distance'])
                activity_id = str(row['id'])
                if strava_activity_type is not None:
                    if create_activity(client, activity_id, duration, distance, start_time, strava_activity_type):
                        completed_activities.append(activity_id)
                        activity_counter = increment_activity_counter(activity_counter)
                else:
                    # BUG FIX: the CSV column is 'sport_type_id'; the original
                    # read row['Type'], which raised KeyError on this branch
                    print('Invalid activity type ' + str(row['sport_type_id']) + ', skipping')
    elif sys.argv[2] == 'gpx':
        for file in os.listdir(sys.argv[3] + '/'):
            gpxfile = os.path.join(sys.argv[3] + '/', file)
            if upload_gpx(client, gpxfile):
                activity_counter = increment_activity_counter(activity_counter)
    else:
        print("Wrong data path. Make sure you are using the correct path to file.")
    print("Complete! Created [" + str(activity_counter) + "] activities.")


if __name__ == '__main__':
    main()
Python/project_euler_6.py | PushpneetSingh/Hello-world | 1,428 | 6621311 | <reponame>PushpneetSingh/Hello-world<filename>Python/project_euler_6.py<gh_stars>1000+
# https://projecteuler.net/
# Problem 4
# Largest palindrome product
def check(s):
    """Return True when string *s* reads the same forwards and backwards."""
    for i in range(0, int(len(s)/2)):
        if s[i] != s[(len(s)-1)-i]:
            return False
    return True


def palindrome():
    """Largest palindrome that is a product of two 3-digit numbers (Project Euler 4)."""
    large = 9009
    # BUG FIX: the original contained a bare `smallest =` statement,
    # which is a SyntaxError; the variable was unused and is removed.
    for i in range(999, 99, -1):
        for j in range(999, 99, -1):
            num = i * j
            # products only shrink as j decreases, so stop this row
            # once they can no longer beat the current best
            if num <= large:
                break
            st = str(num)
            if check(st) and num > large:
                large = num
    return large


print(palindrome())
| # https://projecteuler.net/
# Problem 4
# Largest palindrome product
def check(s):
    """Return True when string *s* reads the same forwards and backwards."""
    for i in range(0, int(len(s)/2)):
        if s[i] != s[(len(s)-1)-i]:
            return False
    return True


def palindrome():
    """Largest palindrome that is a product of two 3-digit numbers (Project Euler 4)."""
    large = 9009
    # BUG FIX: the original contained a bare `smallest =` statement,
    # which is a SyntaxError; the variable was unused and is removed.
    for i in range(999, 99, -1):
        for j in range(999, 99, -1):
            num = i * j
            # products only shrink as j decreases, so stop this row
            # once they can no longer beat the current best
            if num <= large:
                break
            st = str(num)
            if check(st) and num > large:
                large = num
    return large


print(palindrome())
print(palindrome()) | en | 0.205661 | # https://projecteuler.net/ # Problem 6 # Sum square difference | 3.305184 | 3 |
beerializer/__init__.py | iamale/beerializer | 2 | 6621312 | <reponame>iamale/beerializer
from . import fields
from . import base
from . import validators
from .base import ValidationError, InvalidTypeValidationError, Serializer
# package version string
__version__ = "1.0.0"
# names re-exported as the package's public API (`from beerializer import *`)
__all__ = [
    "fields", "base", "validators", "ValidationError",
    "InvalidTypeValidationError", "Serializer"
]
| from . import fields
from . import base
from . import validators
from .base import ValidationError, InvalidTypeValidationError, Serializer
# package version string
__version__ = "1.0.0"
# names re-exported as the package's public API (`from beerializer import *`)
__all__ = [
    "fields", "base", "validators", "ValidationError",
    "InvalidTypeValidationError", "Serializer"
]
read_arduino/receive_serial_data_from_arduino.py | samuelchiang/auto_tank | 5 | 6621313 | #!/usr/bin/env python3
import serial
import datetime
import time
import logging
if __name__ == '__main__':
    # open the Arduino's USB serial port at 115200 baud; the 1s timeout
    # keeps readline() from blocking forever
    ser = serial.Serial('/dev/ttyUSB0', 115200, timeout=1)
    ser.flush()
    logging.basicConfig(filename='/var/log/arduino.log',
                        filemode='a',
                        format='%(asctime)s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S',
                        level=logging.DEBUG)
    logging.info("Start read serial")
    # poll forever: append every complete serial line to the log file,
    # dropping undecodable bytes (errors='ignore')
    while True:
        if ser.in_waiting > 0:
            line = ser.readline().decode('utf-8', errors='ignore').rstrip()
            logging.info(line)
        time.sleep( 0.1 )
| #!/usr/bin/env python3
import serial
import datetime
import time
import logging
if __name__ == '__main__':
    # open the Arduino's USB serial port at 115200 baud; the 1s timeout
    # keeps readline() from blocking forever
    ser = serial.Serial('/dev/ttyUSB0', 115200, timeout=1)
    ser.flush()
    logging.basicConfig(filename='/var/log/arduino.log',
                        filemode='a',
                        format='%(asctime)s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S',
                        level=logging.DEBUG)
    logging.info("Start read serial")
    # poll forever: append every complete serial line to the log file,
    # dropping undecodable bytes (errors='ignore')
    while True:
        if ser.in_waiting > 0:
            line = ser.readline().decode('utf-8', errors='ignore').rstrip()
            logging.info(line)
        time.sleep( 0.1 )
| fr | 0.221828 | #!/usr/bin/env python3 | 2.541533 | 3 |
matrix/transpose.py | shivam3009/fun-with-algorithms | 11 | 6621314 | <reponame>shivam3009/fun-with-algorithms
# coding: utf-8
def transpose(A):
    """Return the transpose of matrix A (list of rows) as a list of lists."""
    return list(map(list, zip(*A)))


# BUG FIX: the guard used `if __name__ in "__main__":`, which only worked
# by accident via substring matching; use a real equality test.
if __name__ == "__main__":
    a = [[1, 2], [3, 4], [5, 6]]
    print('A:')
    print(a)
    print('AT:')
    print(transpose(a))
| # coding: utf-8
def transpose(A):
    """Return the transpose of matrix A (list of rows) as a list of lists."""
    return list(map(list, zip(*A)))


# BUG FIX: the guard used `if __name__ in "__main__":`, which only worked
# by accident via substring matching; use a real equality test.
if __name__ == "__main__":
    a = [[1, 2], [3, 4], [5, 6]]
    print('A:')
    print(a)
    print('AT:')
    print(transpose(a))
tabcmd/commands/datasources_and_workbooks/runschedule_command.py | tableau/tabcmd | 3 | 6621315 | from tabcmd.commands.auth.session import Session
from tabcmd.commands.constants import Errors
from tabcmd.execution.localize import _
from tabcmd.execution.logger_config import log
from .datasources_and_workbooks_command import DatasourcesAndWorkbooks
class RunSchedule(DatasourcesAndWorkbooks):
    """
    This command runs the specified schedule as it is on the server.
    """

    name: str = "runschedule"
    description: str = _("runschedule.short_description")

    @staticmethod
    def define_args(runschedule_parser):
        """Register the positional schedule-name argument."""
        runschedule_parser.add_argument("schedule", help=_("tabcmd.run_schedule.options.schedule"))

    @staticmethod
    def run_command(args):
        """Look up the named schedule on the server and (eventually) run it."""
        logger = log(__class__.__name__, args.logging_level)
        logger.debug(_("tabcmd.launching"))
        session = Session()
        server = session.create_session(args)
        logger.info(_("export.status").format(args.schedule))
        # BUG FIX: the original indexed [0] immediately, which raises
        # IndexError on an empty result and made the not-found branch
        # unreachable; check for matches before indexing.
        matches = DatasourcesAndWorkbooks.get_items_by_name(logger, server.schedules, args.schedule)
        if not matches:
            Errors.exit_with_error(logger, _("publish.errors.server_resource_not_found"))
        schedule = matches[0]
        logger.info(_("runschedule.status"))
        Errors.exit_with_error(logger, "Not yet implemented")
        # TODO implement in REST/tsc
| from tabcmd.commands.auth.session import Session
from tabcmd.commands.constants import Errors
from tabcmd.execution.localize import _
from tabcmd.execution.logger_config import log
from .datasources_and_workbooks_command import DatasourcesAndWorkbooks
class RunSchedule(DatasourcesAndWorkbooks):
    """
    This command runs the specified schedule as it is on the server.
    """

    name: str = "runschedule"
    description: str = _("runschedule.short_description")

    @staticmethod
    def define_args(runschedule_parser):
        """Register the positional schedule-name argument."""
        runschedule_parser.add_argument("schedule", help=_("tabcmd.run_schedule.options.schedule"))

    @staticmethod
    def run_command(args):
        """Look up the named schedule on the server and (eventually) run it."""
        logger = log(__class__.__name__, args.logging_level)
        logger.debug(_("tabcmd.launching"))
        session = Session()
        server = session.create_session(args)
        logger.info(_("export.status").format(args.schedule))
        # BUG FIX: the original indexed [0] immediately, which raises
        # IndexError on an empty result and made the not-found branch
        # unreachable; check for matches before indexing.
        matches = DatasourcesAndWorkbooks.get_items_by_name(logger, server.schedules, args.schedule)
        if not matches:
            Errors.exit_with_error(logger, _("publish.errors.server_resource_not_found"))
        schedule = matches[0]
        logger.info(_("runschedule.status"))
        Errors.exit_with_error(logger, "Not yet implemented")
        # TODO implement in REST/tsc
| en | 0.844334 | This command runs the specified schedule as it is on the server. # TODO implement in REST/tsc | 2.136123 | 2 |
ports/stm32/boards/AEMICS_PYGGI/manifest.py | H-Grobben/micropython | 0 | 6621316 | <filename>ports/stm32/boards/AEMICS_PYGGI/manifest.py
# MicroPython board manifest: lists the Python modules frozen into the
# firmware image, starting from the port's default board manifest.
include("$(PORT_DIR)/boards/manifest.py")
# board-specific helper module
freeze("$(BOARD_DIR)", ("pyg.py"))
# rfcore firmware-update helper reused from the NUCLEO_WB55 board directory
freeze("$(PORT_DIR)/boards/NUCLEO_WB55", "rfcore_firmware.py")
freeze("$(BOARD_DIR)", ("ble_repl.py"))
freeze("$(MPY_DIR)/drivers/neopixel", ("neopixel.py"))
| <filename>ports/stm32/boards/AEMICS_PYGGI/manifest.py
# MicroPython board manifest: lists the Python modules frozen into the
# firmware image, starting from the port's default board manifest.
include("$(PORT_DIR)/boards/manifest.py")
# board-specific helper module
freeze("$(BOARD_DIR)", ("pyg.py"))
# rfcore firmware-update helper reused from the NUCLEO_WB55 board directory
freeze("$(PORT_DIR)/boards/NUCLEO_WB55", "rfcore_firmware.py")
freeze("$(BOARD_DIR)", ("ble_repl.py"))
freeze("$(MPY_DIR)/drivers/neopixel", ("neopixel.py"))
| none | 1 | 1.195981 | 1 | |
lab_05/src/main.py | Untouchabl3Pineapple/labs_py_02_sem | 1 | 6621317 | import sys, pygame
# Pygame animation: a car steered with the left/right arrow keys tries to
# intercept a ball drifting down-right; once caught the ball rides on the
# car, and when the ball's y reaches 550 an exit screen is shown and the
# program quits.
# ___________________________InitPyGame_____________________________
pygame.init()
size = width, height = 1250, 720
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Animation by <NAME> IU7-23B")
clock = pygame.time.Clock()
# ___________________________ImportImages_____________________________
ball = pygame.image.load("ball.png")
bg_game = pygame.image.load("bg_game.jpg")
bg_exit = pygame.image.load("bg_exit.jpg")
car = pygame.image.load("car_left1.png")
walk_left = [pygame.image.load("car_left1.png"), pygame.image.load("car_left2.png")]
walk_right = [pygame.image.load("car_right1.png"), pygame.image.load("car_right2.png")]
# ___________________________GlobalVars_____________________________
FPS = 100
counter_pos = 0  # sprite-animation frame counter (reset below 15)
x_car = width // 2
y_car = height // 2 + 80
x_ball = 100
y_ball = 100
min_x = -4  # leftmost allowed car position
max_x = width - 285  # rightmost allowed car position
flag = False  # True once the ball has been caught by the car
left = False
right = False
# ___________________________StartPositions_________________________
screen.blit(bg_game, (0, 0))
screen.blit(ball, (x_ball, y_ball))
screen.blit(walk_right[0], (x_car, y_car))
pygame.display.update()
# ___________________________EventProcess___________________________
run = True
while run:
    clock.tick(FPS)
    # ___________________________Exit________________________________
    for event in pygame.event.get():
        if event.type == pygame.QUIT: sys.exit()
    if counter_pos + 1 >= 15:
        counter_pos = 0
    # ___________________________Ball+Miss__________________________________________________
    if y_ball != 550:
        # catch test: the ball overlaps the car at the car's height
        if right == True and x_car - 70 < x_ball < x_car + 70 and y_ball == y_car: flag = True
        if left == True and x_car + 100 < x_ball < x_car + 200 and y_ball == y_car: flag = True
        if flag == False:
            # free fall: drift right and down
            x_ball += 1
            y_ball += 2
        else:
            # caught: the ball rides on the car, offset by facing direction
            if left == True:
                x_ball = x_car + 205
                y_ball = y_car + 7
            else:
                x_ball = x_car + 35
                y_ball = y_car + 7
        screen.blit(ball, (x_ball, y_ball))
    else:
        # ball reached the bottom threshold: show the exit screen and quit
        screen.blit(bg_exit, (0, 0))
        pygame.display.update()
        pygame.time.wait(1000)
        exit()
    # ___________________________Car________________________________
    keys = pygame.key.get_pressed()
    if keys[pygame.K_LEFT]:
        if x_car <= min_x: x_car = min_x
        x_car -= 7
        screen.blit(walk_left[counter_pos // 8], (x_car, y_car))
        counter_pos += 1
        left = True
        right = False
    elif keys[pygame.K_RIGHT]:
        if x_car >= max_x: x_car = max_x
        x_car += 7
        screen.blit(walk_right[counter_pos // 8], (x_car, y_car))
        counter_pos += 1
        left = False
        right = True
    else:
        # idle: redraw the car facing its last movement direction
        if left == True: screen.blit(walk_left[counter_pos // 8], (x_car, y_car))
        else: screen.blit(walk_right[counter_pos // 8], (x_car, y_car))
    pygame.display.update()
    screen.blit(bg_game, (0, 0))
    # _____________________________________________________________
import sys, pygame
# Pygame animation: a car steered with the left/right arrow keys tries to
# intercept a ball drifting down-right; once caught the ball rides on the
# car, and when the ball's y reaches 550 an exit screen is shown and the
# program quits.
# ___________________________InitPyGame_____________________________
pygame.init()
size = width, height = 1250, 720
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Animation by <NAME> IU7-23B")
clock = pygame.time.Clock()
# ___________________________ImportImages_____________________________
ball = pygame.image.load("ball.png")
bg_game = pygame.image.load("bg_game.jpg")
bg_exit = pygame.image.load("bg_exit.jpg")
car = pygame.image.load("car_left1.png")
walk_left = [pygame.image.load("car_left1.png"), pygame.image.load("car_left2.png")]
walk_right = [pygame.image.load("car_right1.png"), pygame.image.load("car_right2.png")]
# ___________________________GlobalVars_____________________________
FPS = 100
counter_pos = 0  # sprite-animation frame counter (reset below 15)
x_car = width // 2
y_car = height // 2 + 80
x_ball = 100
y_ball = 100
min_x = -4  # leftmost allowed car position
max_x = width - 285  # rightmost allowed car position
flag = False  # True once the ball has been caught by the car
left = False
right = False
# ___________________________StartPositions_________________________
screen.blit(bg_game, (0, 0))
screen.blit(ball, (x_ball, y_ball))
screen.blit(walk_right[0], (x_car, y_car))
pygame.display.update()
# ___________________________EventProcess___________________________
run = True
while run:
    clock.tick(FPS)
    # ___________________________Exit________________________________
    for event in pygame.event.get():
        if event.type == pygame.QUIT: sys.exit()
    if counter_pos + 1 >= 15:
        counter_pos = 0
    # ___________________________Ball+Miss__________________________________________________
    if y_ball != 550:
        # catch test: the ball overlaps the car at the car's height
        if right == True and x_car - 70 < x_ball < x_car + 70 and y_ball == y_car: flag = True
        if left == True and x_car + 100 < x_ball < x_car + 200 and y_ball == y_car: flag = True
        if flag == False:
            # free fall: drift right and down
            x_ball += 1
            y_ball += 2
        else:
            # caught: the ball rides on the car, offset by facing direction
            if left == True:
                x_ball = x_car + 205
                y_ball = y_car + 7
            else:
                x_ball = x_car + 35
                y_ball = y_car + 7
        screen.blit(ball, (x_ball, y_ball))
    else:
        # ball reached the bottom threshold: show the exit screen and quit
        screen.blit(bg_exit, (0, 0))
        pygame.display.update()
        pygame.time.wait(1000)
        exit()
    # ___________________________Car________________________________
    keys = pygame.key.get_pressed()
    if keys[pygame.K_LEFT]:
        if x_car <= min_x: x_car = min_x
        x_car -= 7
        screen.blit(walk_left[counter_pos // 8], (x_car, y_car))
        counter_pos += 1
        left = True
        right = False
    elif keys[pygame.K_RIGHT]:
        if x_car >= max_x: x_car = max_x
        x_car += 7
        screen.blit(walk_right[counter_pos // 8], (x_car, y_car))
        counter_pos += 1
        left = False
        right = True
    else:
        # idle: redraw the car facing its last movement direction
        if left == True: screen.blit(walk_left[counter_pos // 8], (x_car, y_car))
        else: screen.blit(walk_right[counter_pos // 8], (x_car, y_car))
    pygame.display.update()
    screen.blit(bg_game, (0, 0))
    # _____________________________________________________________
| en | 0.298308 | # ___________________________InitPyGame_____________________________ # ___________________________ImportImages_____________________________ # ___________________________GlobalVars_____________________________ # ___________________________StartPositions_________________________ # ___________________________EventProcess___________________________ # ___________________________Exit________________________________ # ___________________________Ball+Miss__________________________________________________ # ___________________________Car________________________________ # _____________________________________________________________ | 2.810297 | 3 |
auto_repair_saas/apps/vehicles/views.py | wangonya/auto-repair-saas | 6 | 6621318 | <filename>auto_repair_saas/apps/vehicles/views.py
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib.postgres.search import SearchVector
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse_lazy, reverse
from django.views import View
from django.views.generic import UpdateView, DeleteView
from auto_repair_saas.apps.utils.search import SearchForm
from auto_repair_saas.apps.vehicles.forms import NewVehicleForm
from auto_repair_saas.apps.vehicles.models import Vehicle
class VehiclesView(LoginRequiredMixin, View):
    """List all vehicles and handle creation of new ones."""

    form_class = NewVehicleForm
    search_form_class = SearchForm
    template_name = 'vehicles/index.html'

    def get(self, request, *args, **kwargs):
        # Render the vehicle list together with blank create/search forms.
        context = {
            'form': self.form_class(),
            'vehicles': Vehicle.objects.all(),
            'search_form': self.search_form_class(),
        }
        return render(request, self.template_name, context)

    def post(self, request, *args, **kwargs):
        # Validate the submitted form, try to create the vehicle, and
        # always redirect back to the list view with a flash message.
        submitted = self.form_class(request.POST)
        if not submitted.is_valid():
            messages.error(request, 'Form is invalid.')
            return HttpResponseRedirect(reverse('vehicles'))
        try:
            Vehicle.objects.create(**submitted.cleaned_data)
            messages.success(request, 'Vehicle created.')
        except Exception as e:
            messages.error(request, str(e))
        return HttpResponseRedirect(reverse('vehicles'))
class UpdateVehicleView(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
    """Edit an existing vehicle and flash a confirmation message."""
    model = Vehicle
    # instantiate NewVehicleForm once so the update view exposes the same
    # editable fields as the create form
    form = NewVehicleForm()
    fields = [*form.fields]
    success_url = reverse_lazy('vehicles')
    success_message = 'Vehicle updated.'
class DeleteVehicleView(LoginRequiredMixin, SuccessMessageMixin, DeleteView):
    """Delete a vehicle and flash a confirmation message."""
    model = Vehicle
    success_url = reverse_lazy('vehicles')
    success_message = 'Vehicle deleted.'
    def delete(self, request, *args, **kwargs):
        # SuccessMessageMixin only hooks form_valid(), which DeleteView's
        # delete() bypasses, so the message is queued manually here.
        messages.success(self.request, self.success_message)
        return super(DeleteVehicleView, self).delete(request, *args, **kwargs)
def load_client_vehicles(request):
    """Render the option list of vehicles owned by the selected client (AJAX helper)."""
    client_pk = request.GET.get('client')
    try:
        queryset = Vehicle.objects.filter(owner_id=client_pk)
    except ValueError:
        # malformed/non-numeric client id: fall back to an empty queryset
        queryset = Vehicle.objects.none()
    return render(request, 'vehicles/vehicle_list_options.html', {'vehicles': queryset})
class VehiclesSearchView(LoginRequiredMixin, View):
    """Full-text search over vehicles by number plate or owner name."""

    search_form_class = SearchForm
    vehicle_form_class = NewVehicleForm
    template_name = 'vehicles/index.html'

    def get(self, request, *args, **kwargs):
        search_form = self.search_form_class(request.GET)
        vehicle_form = self.vehicle_form_class()
        if not search_form.is_valid():
            # BUG FIX: the redirect was constructed but never returned, so
            # invalid input fell through to cleaned_data and raised.
            return HttpResponseRedirect(reverse('vehicles'))
        query = search_form.cleaned_data.get('q')
        if query == '':
            # empty query: show everything
            vehicles = Vehicle.objects.all()
        else:
            vehicles = Vehicle.objects.annotate(
                search=SearchVector('number_plate', 'owner__name', ),
            ).filter(search=query)
        context = {
            'form': vehicle_form,
            'vehicles': vehicles,
            'search_form': search_form
        }
        return render(
            request, self.template_name, context
        )
| <filename>auto_repair_saas/apps/vehicles/views.py
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib.postgres.search import SearchVector
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse_lazy, reverse
from django.views import View
from django.views.generic import UpdateView, DeleteView
from auto_repair_saas.apps.utils.search import SearchForm
from auto_repair_saas.apps.vehicles.forms import NewVehicleForm
from auto_repair_saas.apps.vehicles.models import Vehicle
class VehiclesView(LoginRequiredMixin, View):
    """List all vehicles and handle creation of new ones."""

    form_class = NewVehicleForm
    search_form_class = SearchForm
    template_name = 'vehicles/index.html'

    def get(self, request, *args, **kwargs):
        # Render the vehicle list together with blank create/search forms.
        context = {
            'form': self.form_class(),
            'vehicles': Vehicle.objects.all(),
            'search_form': self.search_form_class(),
        }
        return render(request, self.template_name, context)

    def post(self, request, *args, **kwargs):
        # Validate the submitted form, try to create the vehicle, and
        # always redirect back to the list view with a flash message.
        submitted = self.form_class(request.POST)
        if not submitted.is_valid():
            messages.error(request, 'Form is invalid.')
            return HttpResponseRedirect(reverse('vehicles'))
        try:
            Vehicle.objects.create(**submitted.cleaned_data)
            messages.success(request, 'Vehicle created.')
        except Exception as e:
            messages.error(request, str(e))
        return HttpResponseRedirect(reverse('vehicles'))
class UpdateVehicleView(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
    """Edit an existing vehicle and flash a confirmation message."""
    model = Vehicle
    # instantiate NewVehicleForm once so the update view exposes the same
    # editable fields as the create form
    form = NewVehicleForm()
    fields = [*form.fields]
    success_url = reverse_lazy('vehicles')
    success_message = 'Vehicle updated.'
class DeleteVehicleView(LoginRequiredMixin, SuccessMessageMixin, DeleteView):
    """Delete a vehicle and flash a confirmation message."""
    model = Vehicle
    success_url = reverse_lazy('vehicles')
    success_message = 'Vehicle deleted.'
    def delete(self, request, *args, **kwargs):
        # SuccessMessageMixin only hooks form_valid(), which DeleteView's
        # delete() bypasses, so the message is queued manually here.
        messages.success(self.request, self.success_message)
        return super(DeleteVehicleView, self).delete(request, *args, **kwargs)
def load_client_vehicles(request):
    """Render the option list of vehicles owned by the selected client (AJAX helper)."""
    client_pk = request.GET.get('client')
    try:
        queryset = Vehicle.objects.filter(owner_id=client_pk)
    except ValueError:
        # malformed/non-numeric client id: fall back to an empty queryset
        queryset = Vehicle.objects.none()
    return render(request, 'vehicles/vehicle_list_options.html', {'vehicles': queryset})
class VehiclesSearchView(LoginRequiredMixin, View):
    """Full-text search over vehicles by number plate or owner name."""

    search_form_class = SearchForm
    vehicle_form_class = NewVehicleForm
    template_name = 'vehicles/index.html'

    def get(self, request, *args, **kwargs):
        search_form = self.search_form_class(request.GET)
        vehicle_form = self.vehicle_form_class()
        if not search_form.is_valid():
            # BUG FIX: the redirect was constructed but never returned, so
            # invalid input fell through to cleaned_data and raised.
            return HttpResponseRedirect(reverse('vehicles'))
        query = search_form.cleaned_data.get('q')
        if query == '':
            # empty query: show everything
            vehicles = Vehicle.objects.all()
        else:
            vehicles = Vehicle.objects.annotate(
                search=SearchVector('number_plate', 'owner__name', ),
            ).filter(search=query)
        context = {
            'form': vehicle_form,
            'vehicles': vehicles,
            'search_form': search_form
        }
        return render(
            request, self.template_name, context
        )
| none | 1 | 1.985949 | 2 | |
dqn_implementation/atari_dqn.py | a-nesse/acromuse_atari | 0 | 6621319 | <gh_stars>0
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import json
import os
import pickle
import sys
import numpy as np
import tensorflow as tf
from tf_agents.agents.dqn import dqn_agent
from tf_agents.environments import tf_py_environment
from tf_agents.networks import q_network
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.drivers import dynamic_step_driver
from tf_agents.utils import common
from tf_agents.policies import epsilon_greedy_policy
from preprocessing import suite_atari_mod as suite_atari
class AtariDQN:
"""
Class for training Deep-Q agent to play Atari games.
Inspired by the TF-Agents tutorials which can be found here:
https://www.tensorflow.org/agents/tutorials/2_environments_tutorial
Implemented for the purposes of the thesis.
"""
    def __init__(self, net_conf_path='', dqn_conf_path=''):
        """
        Initializes an AtariDQN object using configuration files.

        Builds the train/eval Atari environments, the Q-network, the
        RMSProp optimizer and the TF-Agents DqnAgent, and prepares the
        elite-model bookkeeping used during evaluation.

        Parameters:
            net_conf_path : str
                Path to the network configuration file (JSON).
            dqn_conf_path : str
                Path to the DQN hyperparameter configuration file (JSON).

        Returns:
            AtariDQN object.
        """
        def _load_config(conf_path):
            # helper: read a JSON config file, failing loudly if it is missing
            assert os.path.exists(
                conf_path), 'The config file specified does not exist.'
            with open(conf_path, 'r') as f:
                conf = json.load(f)
            return conf
        self.net_conf = _load_config(net_conf_path)
        self.dqn_conf = _load_config(dqn_conf_path)
        # hyperparameters pulled out of the DQN config for convenience
        self.env_name = self.dqn_conf['env_name']
        self.num_iterations = self.dqn_conf['num_iterations']
        self.collect_steps_per_iteration = self.dqn_conf['collect_steps_per_iteration']
        self.parallell_calls = self.dqn_conf['parallell_calls']
        self.batch_size = self.dqn_conf['batch_size']
        self.target_update = self.dqn_conf['target_update']
        self.learning_rate = self.dqn_conf['learning_rate']
        self.log_interval = self.dqn_conf['log_interval']
        self.n_eval_steps = self.dqn_conf['n_eval_steps']
        self.eval_interval = self.dqn_conf['eval_interval']
        # separate train/eval environments (eval uses its own preprocessing)
        self.train_py_env = suite_atari.load(
            environment_name=self.env_name, eval_env=False)
        self.eval_py_env = suite_atari.load(
            environment_name=self.env_name, eval_env=True)
        self.train_env = tf_py_environment.TFPyEnvironment(self.train_py_env)
        self.eval_env = tf_py_environment.TFPyEnvironment(self.eval_py_env)
        self.obs_spec = self.train_env.observation_spec()
        self.action_spec = self.train_env.action_spec()
        self.step_spec = self.train_env.time_step_spec()
        # convolutional Q-network configured from the network config file
        self.q_net = q_network.QNetwork(
            self.obs_spec,
            self.action_spec,
            conv_layer_params=[tuple(c) for c in self.net_conf['conv_layer_params']],
            fc_layer_params=tuple(self.net_conf['fc_layer_params']),
            kernel_initializer=tf.keras.initializers.VarianceScaling(scale=1.0, mode='fan_in', distribution='truncated_normal'))
        self.optimizer = tf.compat.v1.train.RMSPropOptimizer(
            learning_rate=self.dqn_conf['learning_rate'],
            momentum=self.dqn_conf['momentum'],
            decay=self.dqn_conf['decay'],
            epsilon=self.dqn_conf['mom_epsilon'])
        # Replay buffer size & initial collect -3 due to stacking 4 frames
        self.replay_buffer_max_length = self.dqn_conf['replay_buffer_max_length']-3
        self.initial_collect = int(np.ceil(((self.dqn_conf['initial_collect_frames']-3)/self.collect_steps_per_iteration)))
        # epsilon-greedy annealing parameters for the collect policy
        self.initial_epsilon = self.dqn_conf['initial_epsilon']
        self.final_epsilon = self.dqn_conf['final_epsilon']
        self.final_exploration = self.dqn_conf['final_exploration']
        self.agent = dqn_agent.DqnAgent(
            self.step_spec,
            self.action_spec,
            q_network=self.q_net,
            optimizer=self.optimizer,
            emit_log_probability=True,
            td_errors_loss_fn=common.element_wise_huber_loss,
            epsilon_greedy=1.0,
            target_update_period=self.target_update,
            gamma=self.dqn_conf['discount'])
        self.agent.initialize()
        self.save_name = self.dqn_conf['save_name']
        self.keep_n_models = self.dqn_conf['keep_n_models']
        self.log = {}
        self.elite_avg = (0, 0)  # (step, score) of the best model by average score
        self.elite_max = (0, 0)  # (step, score) of the best model by max score
        # epsilon-greedy eval policy as described by Mnih et.al (2015)
        self.eval_policy = epsilon_greedy_policy.EpsilonGreedyPolicy(
            policy=self.agent.policy,
            epsilon=self.dqn_conf['eval_epsilon'])
        # declared here, initialized in train()
        self.replay_buffer = None
        self.replay_ckp = None
        self.driver = None
def act(self, obs):
'''
Method for predicting action.
Uses epsilon-greedy policy to avoid evaluation overfitting.
Parameters:
obs : tf_agents.trajectories.TimeStep
Observation from environment.
Returns:
action : tf_agents.trajectories.PolicyStep
Action agent chooses to take based on the observation.
'''
return self.eval_policy.action(obs)
def _run_episode(self,steps):
"""
Function for running an episode in the environment.
Returns the score if the episode is finished without
exceeding the number of evaluation steps.
"""
episode_score = 0.0
time_step = self.eval_env.reset()
while not time_step.is_last():
action_step = self.act(time_step)
time_step = self.eval_env.step(action_step.action)
episode_score += time_step.reward.numpy()[0]
steps += 1
if steps >= self.n_eval_steps:
return True, None, None
return False, steps, episode_score
def evaluate_agent(self):
"""
Function for evaluating/scoring agent.
Returns:
avg_score : float
Average episode score for agent.
max_score : float
Maximum episode score for agent.
"""
steps = 0
scores = []
# run once outside loop in unlikely case first episode lasts
# for all the evaluation frames
done, steps, ep_score = self._run_episode(steps)
scores.append(ep_score)
while True and not done:
done, steps, ep_score = self._run_episode(steps)
if done:
return np.average(scores), np.max(scores)
scores.append(ep_score)
    def _save_model(self, step):
        """
        Pickle the q-network and target-network weights for *step*, then
        drop the checkpoint that fell out of the keep_n_models window
        (unless it is an elite model by average or max score).

        Parameters:
            step : int
                Training step the checkpoint belongs to.
        """
        filepath_q = os.path.join(
            os.getcwd(), 'saved_models_dqn', self.save_name + '-' + str(step) + '-eval')
        with open(filepath_q, 'wb') as f:
            pickle.dump(self.q_net.get_weights(), f)
        filepath_target = os.path.join(
            os.getcwd(), 'saved_models_dqn', self.save_name + '-' + str(step) + '-target')
        with open(filepath_target, 'wb') as f:
            pickle.dump(self.agent._target_q_network.get_weights(), f)
        # deleting old agents: the checkpoint keep_n_models eval-intervals
        # back is removed, unless it is one of the two elites
        delete = step-(self.eval_interval*self.keep_n_models)
        if delete > 0 and self.elite_avg[0] != delete and self.elite_max[0] != delete:
            self._delete_model(delete)
def _load_model(self, step):
"""
Method for loading q & target network.
"""
filepath_q = os.path.join(
os.getcwd(), 'saved_models_dqn', self.save_name + '-' + str(step) + '-eval')
with open(filepath_q, 'rb') as f:
new_weights = pickle.load(f)
filepath_target = os.path.join(
os.getcwd(), 'saved_models_dqn', self.save_name + '-' + str(step) + '-target')
with open(filepath_target, 'rb') as f:
new_target = pickle.load(f)
frames = int(step*self.collect_steps_per_iteration)
scaled_epsilon = self.initial_epsilon - \
(0.9*frames/self.final_exploration)
self.agent.collect_policy._epsilon = max(
self.final_epsilon, scaled_epsilon)
self.q_net.set_weights(new_weights)
self.agent._target_q_network.set_weights(new_target)
def _delete_model(self, step):
"""
Function for deleting agent.
"""
os.remove(os.path.join(os.getcwd(), 'saved_models_dqn',
self.save_name + '-' + str(step) + '-eval'))
os.remove(os.path.join(os.getcwd(), 'saved_models_dqn',
self.save_name + '-' + str(step) + '-target'))
    def log_data(self, starttime, passed_time, step, loss, avg_score, max_score):
        """
        Function for logging training performance and maintaining the
        elite-model bookkeeping (best checkpoint by average and by max
        evaluation score).

        Parameters:
            starttime : float
                Time when training was started or restarted.
            passed_time : float
                Time that was trained before restarting.
                Set to 0 if training has not been restarted.
            step : int
                Number of training steps that have been performed so far.
            loss : float
                The loss at this step.
            avg_score : float
                The average agent score from the last evaluation.
            max_score : float
                The maximum episode score from the last evaluation.

        Returns:
            None
        """
        cur_time = time.time()
        train_time = cur_time - starttime + passed_time
        step = int(step)
        loss = float(loss)
        # frames seen so far; the x4 factor mirrors the 4-frame stacking
        # noted in __init__ — TODO confirm against the preprocessing
        trained_frames = step * self.batch_size * 4
        if step % self.eval_interval == 0:
            # if elite, replace and potentially delete old elite
            # `keep` is the oldest step still protected by the sliding
            # keep_n_models window of _save_model
            keep = step-(self.eval_interval*(self.keep_n_models-1))
            if avg_score > self.elite_avg[1] and step >= self.eval_interval:
                delete = self.elite_avg[0]
                self.elite_avg = (step, avg_score)
                # delete the dethroned elite only if it is outside the keep
                # window, is a real checkpoint, and is not the max-elite
                if delete < keep and delete != 0 and delete != self.elite_max[0]:
                    self._delete_model(delete)
            if max_score > self.elite_max[1] and step >= self.eval_interval:
                delete = self.elite_max[0]
                self.elite_max = (step, max_score)
                # same guard as above, protecting the avg-elite
                if delete < keep and delete != 0 and delete != self.elite_avg[0]:
                    self._delete_model(delete)
        self.log[step] = [train_time, loss, avg_score, max_score, trained_frames, self.elite_avg, self.elite_max]
def _write_log(self):
"""
Function for writing log.
"""
filepath = os.path.join(
os.getcwd(), 'saved_models_dqn', self.save_name + 'log')
with open(filepath, 'w') as f:
json.dump(self.log, f)
def _load_log(self, step):
"""
Function for loading log.
"""
filepath = os.path.join(
os.getcwd(), 'saved_models_dqn', self.save_name + 'log')
with open(filepath, 'r') as f:
log = json.load(f)
self.log = log
self.elite_avg = (log[str(step)][5][0], log[str(step)][5][1])
self.elite_max = (log[str(step)][6][0], log[str(step)][6][1])
    def restart_training(self, step):
        """
        Function for restarting training from step.

        Restores the pickled network weights and the training log / elite
        trackers so training can resume from a saved checkpoint.

        Parameters:
            step : int
                Which step to restart training from.
        Returns:
            None
        """
        # restore network weights, then the matching log/elite state
        self._load_model(step)
        self._load_log(step)
    def train(self, restart_step=0):
        """
        Method for running training of DQN model.

        Sets up the replay buffer, its checkpointer and the collection
        driver, optionally restores a previous run, then alternates
        environment collection with gradient steps, periodically
        evaluating, logging and checkpointing the agent.

        Parameters:
            restart_step : int
                Step to restart training from.
                Defaults to 0 for fresh start.
        Returns:
            None
        """
        tf.compat.v1.enable_v2_behavior()
        time_step = self.train_env.reset()
        start_time = time.time()
        self.replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
            data_spec=self.agent.collect_data_spec,
            batch_size=self.train_env.batch_size,
            max_length=self.replay_buffer_max_length)
        # checkpointer so the replay buffer survives restarts
        self.replay_ckp = common.Checkpointer(
            ckpt_dir=os.path.join(
                os.getcwd(), 'saved_models_dqn', self.save_name + 'replay'),
            max_to_keep=1,
            replay_buffer=self.replay_buffer)
        # initializing dynamic step driver
        self.driver = dynamic_step_driver.DynamicStepDriver(
            self.train_env,
            self.agent.collect_policy,
            observers=[self.replay_buffer.add_batch],
            num_steps=self.collect_steps_per_iteration)
        # wrap in a tf.function for graph-mode speed
        self.driver.run = common.function(self.driver.run)
        if restart_step:
            self.restart_training(restart_step)
            step = restart_step
            # resume the wall-clock counter from the previous run's log
            passed_time = self.log[str(restart_step)][0]
            policy_state = self.agent.collect_policy.get_initial_state(
                self.train_env.batch_size)
        else:
            # setting epsilon to 1.0 for initial collection (random policy)
            self.agent.collect_policy._epsilon = self.initial_epsilon
            policy_state = self.agent.collect_policy.get_initial_state(
                self.train_env.batch_size)
            # seed the replay buffer before any learning takes place
            for _ in range(self.initial_collect):
                time_step, policy_state = self.driver.run(
                    time_step=time_step,
                    policy_state=policy_state)
            step = 0
            passed_time = 0
        self.replay_ckp.initialize_or_restore()
        # saving initial buffer to make sure that memory is sufficient
        self.replay_ckp.save(global_step=restart_step)
        # sample adjacent transition pairs (num_steps=2) for 1-step TD targets
        dataset = self.replay_buffer.as_dataset(
            num_parallel_calls=self.parallell_calls,
            sample_batch_size=self.batch_size,
            num_steps=2).prefetch(self.parallell_calls)
        iterator = iter(dataset)
        self.agent.train = common.function(self.agent.train)
        # eval before training
        if restart_step:
            avg_score = self.log[str(restart_step)][2]
            max_score = self.log[str(restart_step)][3]
        else:
            avg_score, max_score = self.evaluate_agent()
        exploration_finished = False
        for _ in range(self.num_iterations-restart_step):
            # performing action according to epsilon-greedy protocol & collecting data
            time_step, policy_state = self.driver.run(
                time_step=time_step,
                policy_state=policy_state)
            # sampling from data
            experience, unused_info = next(iterator)
            # training
            train_loss = self.agent.train(experience).loss
            step += 1
            frames = int(step*self.collect_steps_per_iteration)
            # changing epsilon linearly from frames 0 to 1 mill, down to 0.1
            if frames <= self.final_exploration:
                scaled_epsilon = self.initial_epsilon - \
                    (0.9*frames/self.final_exploration)
                self.agent.collect_policy._epsilon = max(
                    self.final_epsilon, scaled_epsilon)
            elif not exploration_finished:
                # clamp exactly once after the annealing window ends
                self.agent.collect_policy._epsilon = self.final_epsilon
                exploration_finished = True
            if step % self.eval_interval == 0 and step != restart_step:
                self._save_model(step)
                self.replay_ckp.save(global_step=step)
                avg_score, max_score = self.evaluate_agent()
                print('step = {}: Average Score = {} Max Score = {}'.format(
                    step, avg_score, max_score))
            if step % self.log_interval == 0:
                print(time.time()-start_time)
                self.log_data(start_time, passed_time, step,
                              train_loss, avg_score, max_score)
                if step % self.eval_interval == 0:
                    self._write_log()
                print('step = {}: loss = {}'.format(step, train_loss))
def main(restart_step):
    """
    Creates AtariDQN object and runs training according to configs.

    Parameters:
        restart_step : int
            Step to restart training from.
            Restart_step = 0 gives fresh start.
    Returns:
        None
    """
    net_conf = os.path.abspath(os.path.join('.', 'configs', 'net.config'))
    dqn_conf = os.path.abspath(os.path.join('.', 'configs', 'dqn.config'))
    # ensure the output directory exists before anything tries to write to
    # it; exist_ok avoids the check-then-create race of the original
    # isdir/makedirs pair.
    os.makedirs(os.path.join(os.getcwd(), 'saved_models_dqn'), exist_ok=True)
    dqn = AtariDQN(net_conf, dqn_conf)
    dqn.train(restart_step)
if __name__ == "__main__":
    # optional single CLI argument: the step to restart training from
    cli_args = sys.argv[1:]
    restart = int(cli_args[0]) if len(cli_args) == 1 else 0
    main(restart)
| # Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import json
import os
import pickle
import sys
import numpy as np
import tensorflow as tf
from tf_agents.agents.dqn import dqn_agent
from tf_agents.environments import tf_py_environment
from tf_agents.networks import q_network
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.drivers import dynamic_step_driver
from tf_agents.utils import common
from tf_agents.policies import epsilon_greedy_policy
from preprocessing import suite_atari_mod as suite_atari
class AtariDQN:
"""
Class for training Deep-Q agent to play Atari games.
Inspired by the TF-Agents tutorials which can be found here:
https://www.tensorflow.org/agents/tutorials/2_environments_tutorial
Implemented for the purposes of the thesis.
"""
    def __init__(self, net_conf_path='', dqn_conf_path=''):
        """
        Initializes an AtariDQN object using configuration files.

        Builds the train/eval environments, Q network, optimizer and DQN
        agent entirely from the two JSON config files.

        Parameters:
            net_conf_path : str
                Path to network configuration file.
            dqn_conf_path : str
                Path to DQN hyperparameter configuration file.
        Returns:
            AtariDQN object.
        """
        # helper: read a JSON config file, failing fast if it is missing
        def _load_config(conf_path):
            assert os.path.exists(
                conf_path), 'The config file specified does not exist.'
            with open(conf_path, 'r') as f:
                conf = json.load(f)
            return conf
        self.net_conf = _load_config(net_conf_path)
        self.dqn_conf = _load_config(dqn_conf_path)
        # hyperparameters taken straight from the DQN config
        self.env_name = self.dqn_conf['env_name']
        self.num_iterations = self.dqn_conf['num_iterations']
        self.collect_steps_per_iteration = self.dqn_conf['collect_steps_per_iteration']
        self.parallell_calls = self.dqn_conf['parallell_calls']
        self.batch_size = self.dqn_conf['batch_size']
        self.target_update = self.dqn_conf['target_update']
        self.learning_rate = self.dqn_conf['learning_rate']
        self.log_interval = self.dqn_conf['log_interval']
        self.n_eval_steps = self.dqn_conf['n_eval_steps']
        self.eval_interval = self.dqn_conf['eval_interval']
        # separate environments for collection and evaluation
        self.train_py_env = suite_atari.load(
            environment_name=self.env_name, eval_env=False)
        self.eval_py_env = suite_atari.load(
            environment_name=self.env_name, eval_env=True)
        self.train_env = tf_py_environment.TFPyEnvironment(self.train_py_env)
        self.eval_env = tf_py_environment.TFPyEnvironment(self.eval_py_env)
        self.obs_spec = self.train_env.observation_spec()
        self.action_spec = self.train_env.action_spec()
        self.step_spec = self.train_env.time_step_spec()
        # convolutional Q network shaped by the net config
        self.q_net = q_network.QNetwork(
            self.obs_spec,
            self.action_spec,
            conv_layer_params=[tuple(c) for c in self.net_conf['conv_layer_params']],
            fc_layer_params=tuple(self.net_conf['fc_layer_params']),
            kernel_initializer=tf.keras.initializers.VarianceScaling(scale=1.0, mode='fan_in', distribution='truncated_normal'))
        self.optimizer = tf.compat.v1.train.RMSPropOptimizer(
            learning_rate=self.dqn_conf['learning_rate'],
            momentum=self.dqn_conf['momentum'],
            decay=self.dqn_conf['decay'],
            epsilon=self.dqn_conf['mom_epsilon'])
        # Replay buffer size & initial collect -3 due to stacking 4 frames
        self.replay_buffer_max_length = self.dqn_conf['replay_buffer_max_length']-3
        self.initial_collect = int(np.ceil(((self.dqn_conf['initial_collect_frames']-3)/self.collect_steps_per_iteration)))
        # epsilon-greedy exploration schedule parameters
        self.initial_epsilon = self.dqn_conf['initial_epsilon']
        self.final_epsilon = self.dqn_conf['final_epsilon']
        self.final_exploration = self.dqn_conf['final_exploration']
        self.agent = dqn_agent.DqnAgent(
            self.step_spec,
            self.action_spec,
            q_network=self.q_net,
            optimizer=self.optimizer,
            emit_log_probability=True,
            td_errors_loss_fn=common.element_wise_huber_loss,
            epsilon_greedy=1.0,
            target_update_period=self.target_update,
            gamma=self.dqn_conf['discount'])
        self.agent.initialize()
        self.save_name = self.dqn_conf['save_name']
        self.keep_n_models = self.dqn_conf['keep_n_models']
        self.log = {}
        self.elite_avg = (0, 0) # elite model, score for average score
        self.elite_max = (0, 0) # elite model, score for max score
        # epsilon-greedy eval policy as described by Mnih et.al (2015)
        self.eval_policy = epsilon_greedy_policy.EpsilonGreedyPolicy(
            policy=self.agent.policy,
            epsilon=self.dqn_conf['eval_epsilon'])
        # declaring collaborators that train() creates lazily
        self.replay_buffer = None
        self.replay_ckp = None
        self.driver = None
def act(self, obs):
'''
Method for predicting action.
Uses epsilon-greedy policy to avoid evaluation overfitting.
Parameters:
obs : tf_agents.trajectories.TimeStep
Observation from environment.
Returns:
action : tf_agents.trajectories.PolicyStep
Action agent chooses to take based on the observation.
'''
return self.eval_policy.action(obs)
def _run_episode(self,steps):
"""
Function for running an episode in the environment.
Returns the score if the episode is finished without
exceeding the number of evaluation steps.
"""
episode_score = 0.0
time_step = self.eval_env.reset()
while not time_step.is_last():
action_step = self.act(time_step)
time_step = self.eval_env.step(action_step.action)
episode_score += time_step.reward.numpy()[0]
steps += 1
if steps >= self.n_eval_steps:
return True, None, None
return False, steps, episode_score
def evaluate_agent(self):
"""
Function for evaluating/scoring agent.
Returns:
avg_score : float
Average episode score for agent.
max_score : float
Maximum episode score for agent.
"""
steps = 0
scores = []
# run once outside loop in unlikely case first episode lasts
# for all the evaluation frames
done, steps, ep_score = self._run_episode(steps)
scores.append(ep_score)
while True and not done:
done, steps, ep_score = self._run_episode(steps)
if done:
return np.average(scores), np.max(scores)
scores.append(ep_score)
    def _save_model(self, step):
        """
        Method for saving agent and deleting old agents.

        Pickles the weights of both the online Q network and the target
        network under ``saved_models_dqn/<save_name>-<step>-{eval,target}``,
        then removes the checkpoint that just fell out of the rolling keep
        window (unless it is one of the elite models).
        """
        filepath_q = os.path.join(
            os.getcwd(), 'saved_models_dqn', self.save_name + '-' + str(step) + '-eval')
        with open(filepath_q, 'wb') as f:
            pickle.dump(self.q_net.get_weights(), f)
        filepath_target = os.path.join(
            os.getcwd(), 'saved_models_dqn', self.save_name + '-' + str(step) + '-target')
        with open(filepath_target, 'wb') as f:
            pickle.dump(self.agent._target_q_network.get_weights(), f)
        # deleting old agents
        # oldest checkpoint now outside the keep window; spare the elites
        delete = step-(self.eval_interval*self.keep_n_models)
        if delete > 0 and self.elite_avg[0] != delete and self.elite_max[0] != delete:
            self._delete_model(delete)
    def _load_model(self, step):
        """
        Method for loading q & target network.

        Restores pickled weights for both networks and re-derives the
        exploration epsilon the collect policy would have had at ``step``.
        """
        filepath_q = os.path.join(
            os.getcwd(), 'saved_models_dqn', self.save_name + '-' + str(step) + '-eval')
        with open(filepath_q, 'rb') as f:
            new_weights = pickle.load(f)
        filepath_target = os.path.join(
            os.getcwd(), 'saved_models_dqn', self.save_name + '-' + str(step) + '-target')
        with open(filepath_target, 'rb') as f:
            new_target = pickle.load(f)
        # recompute the linearly annealed epsilon for this training step
        frames = int(step*self.collect_steps_per_iteration)
        scaled_epsilon = self.initial_epsilon - \
            (0.9*frames/self.final_exploration)
        self.agent.collect_policy._epsilon = max(
            self.final_epsilon, scaled_epsilon)
        self.q_net.set_weights(new_weights)
        self.agent._target_q_network.set_weights(new_target)
def _delete_model(self, step):
"""
Function for deleting agent.
"""
os.remove(os.path.join(os.getcwd(), 'saved_models_dqn',
self.save_name + '-' + str(step) + '-eval'))
os.remove(os.path.join(os.getcwd(), 'saved_models_dqn',
self.save_name + '-' + str(step) + '-target'))
def log_data(self, starttime, passed_time, step, loss, avg_score, max_score):
"""
Function for logging training performance.
Parameters:
starttime : float
Time when training was started or restarted.
passed_time : float
Time that was trained before restarting.
Set to 0 if training has not been restarted.
step : int
Number of training steps that have been performed so far.
loss : float
The loss at this step.
avg_score : float
The average agent score from the last evaluation.
max_score : float
The maximum episode score from the last evaluation.
Returns:
None
"""
cur_time = time.time()
train_time = cur_time - starttime + passed_time
step = int(step)
loss = float(loss)
trained_frames = step * self.batch_size * 4
if step % self.eval_interval == 0:
# if elite, replace and potentially delete old elite
keep = step-(self.eval_interval*(self.keep_n_models-1))
if avg_score > self.elite_avg[1] and step >= self.eval_interval:
delete = self.elite_avg[0]
self.elite_avg = (step, avg_score)
# delete if not within keep interval
if delete < keep and delete != 0 and delete != self.elite_max[0]:
self._delete_model(delete)
if max_score > self.elite_max[1] and step >= self.eval_interval:
delete = self.elite_max[0]
self.elite_max = (step, max_score)
# delete if not within keep interval
if delete < keep and delete != 0 and delete != self.elite_avg[0]:
self._delete_model(delete)
self.log[step] = [train_time, loss, avg_score, max_score, trained_frames, self.elite_avg, self.elite_max]
def _write_log(self):
"""
Function for writing log.
"""
filepath = os.path.join(
os.getcwd(), 'saved_models_dqn', self.save_name + 'log')
with open(filepath, 'w') as f:
json.dump(self.log, f)
def _load_log(self, step):
"""
Function for loading log.
"""
filepath = os.path.join(
os.getcwd(), 'saved_models_dqn', self.save_name + 'log')
with open(filepath, 'r') as f:
log = json.load(f)
self.log = log
self.elite_avg = (log[str(step)][5][0], log[str(step)][5][1])
self.elite_max = (log[str(step)][6][0], log[str(step)][6][1])
def restart_training(self, step):
"""
Function for restarting training from step.
Parameters:
step : int
Which step to restart training from.
Returns:
None
"""
self._load_model(step)
self._load_log(step)
def train(self, restart_step=0):
"""
Method for running training of DQN model.
Parameters:
restart_step : int
Step to restart training from.
Defaults to 0 for fresh start.
Returns:
None
"""
tf.compat.v1.enable_v2_behavior()
time_step = self.train_env.reset()
start_time = time.time()
self.replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
data_spec=self.agent.collect_data_spec,
batch_size=self.train_env.batch_size,
max_length=self.replay_buffer_max_length)
self.replay_ckp = common.Checkpointer(
ckpt_dir=os.path.join(
os.getcwd(), 'saved_models_dqn', self.save_name + 'replay'),
max_to_keep=1,
replay_buffer=self.replay_buffer)
# initializing dynamic step driver
self.driver = dynamic_step_driver.DynamicStepDriver(
self.train_env,
self.agent.collect_policy,
observers=[self.replay_buffer.add_batch],
num_steps=self.collect_steps_per_iteration)
self.driver.run = common.function(self.driver.run)
if restart_step:
self.restart_training(restart_step)
step = restart_step
passed_time = self.log[str(restart_step)][0]
policy_state = self.agent.collect_policy.get_initial_state(
self.train_env.batch_size)
else:
# setting epsilon to 1.0 for initial collection (random policy)
self.agent.collect_policy._epsilon = self.initial_epsilon
policy_state = self.agent.collect_policy.get_initial_state(
self.train_env.batch_size)
for _ in range(self.initial_collect):
time_step, policy_state = self.driver.run(
time_step=time_step,
policy_state=policy_state)
step = 0
passed_time = 0
self.replay_ckp.initialize_or_restore()
# saving initial buffer to make sure that memory is sufficient
self.replay_ckp.save(global_step=restart_step)
dataset = self.replay_buffer.as_dataset(
num_parallel_calls=self.parallell_calls,
sample_batch_size=self.batch_size,
num_steps=2).prefetch(self.parallell_calls)
iterator = iter(dataset)
self.agent.train = common.function(self.agent.train)
# eval before training
if restart_step:
avg_score = self.log[str(restart_step)][2]
max_score = self.log[str(restart_step)][3]
else:
avg_score, max_score = self.evaluate_agent()
exploration_finished = False
for _ in range(self.num_iterations-restart_step):
# performing action according to epsilon-greedy protocol & collecting data
time_step, policy_state = self.driver.run(
time_step=time_step,
policy_state=policy_state)
# sampling from data
experience, unused_info = next(iterator)
# training
train_loss = self.agent.train(experience).loss
step += 1
frames = int(step*self.collect_steps_per_iteration)
# changing epsilon linearly from frames 0 to 1 mill, down to 0.1
if frames <= self.final_exploration:
scaled_epsilon = self.initial_epsilon - \
(0.9*frames/self.final_exploration)
self.agent.collect_policy._epsilon = max(
self.final_epsilon, scaled_epsilon)
elif not exploration_finished:
self.agent.collect_policy._epsilon = self.final_epsilon
exploration_finished = True
if step % self.eval_interval == 0 and step != restart_step:
self._save_model(step)
self.replay_ckp.save(global_step=step)
avg_score, max_score = self.evaluate_agent()
print('step = {}: Average Score = {} Max Score = {}'.format(
step, avg_score, max_score))
if step % self.log_interval == 0:
print(time.time()-start_time)
self.log_data(start_time, passed_time, step,
train_loss, avg_score, max_score)
if step % self.eval_interval == 0:
self._write_log()
print('step = {}: loss = {}'.format(step, train_loss))
def main(restart_step):
"""
Creates AtariDQN object and runs training according to configs.
Parameters:
restart_step : int
Step to restart training from.
Restart_step = 0 gives fresh start.
Returns:
None
"""
net_conf = os.path.abspath(os.path.join('.', 'configs', 'net.config'))
dqn_conf = os.path.abspath(os.path.join('.', 'configs', 'dqn.config'))
dqn = AtariDQN(net_conf, dqn_conf)
if not os.path.isdir(os.path.join(os.getcwd(), 'saved_models_dqn')):
os.makedirs(os.path.join(os.getcwd(), 'saved_models_dqn'))
dqn.train(restart_step)
if __name__ == "__main__":
args = sys.argv[1:]
if len(args) == 1:
main(int(args[0]))
else:
main(0) | en | 0.837343 | # Copyright 2021 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Class for training Deep-Q agent to play Atari games. Inspired by the TF-Agents tutorials which can be found here: https://www.tensorflow.org/agents/tutorials/2_environments_tutorial Implemented for the purposes of the thesis. Initializes an AtariDQN object using configuration files. Parameters: net_conf_path : str Path to network configuration file. dqn_conf_path : str Path to DQN hyperparameter configuration file. Returns: AtariDQN object. # Replay buffer size & initial collect -3 due to stacking 4 frames # elite model, score for average score # elite model, score for max score # epsilon-greedy eval policy as described by Mnih et.al (2015) # declaring Method for predicting action. Uses epsilon-greedy policy to avoid evaluation overfitting. Parameters: obs : tf_agents.trajectories.TimeStep Observation from environment. Returns: action : tf_agents.trajectories.PolicyStep Action agent chooses to take based on the observation. Function for running an episode in the environment. Returns the score if the episode is finished without exceeding the number of evaluation steps. Function for evaluating/scoring agent. Returns: avg_score : float Average episode score for agent. max_score : float Maximum episode score for agent. # run once outside loop in unlikely case first episode lasts # for all the evaluation frames Method for saving agent and deleting old agents. 
Saves both q network and target network. # deleting old agents Method for loading q & target network. Function for deleting agent. Function for logging training performance. Parameters: starttime : float Time when training was started or restarted. passed_time : float Time that was trained before restarting. Set to 0 if training has not been restarted. step : int Number of training steps that have been performed so far. loss : float The loss at this step. avg_score : float The average agent score from the last evaluation. max_score : float The maximum episode score from the last evaluation. Returns: None # if elite, replace and potentially delete old elite # delete if not within keep interval # delete if not within keep interval Function for writing log. Function for loading log. Function for restarting training from step. Parameters: step : int Which step to restart training from. Returns: None Method for running training of DQN model. Parameters: restart_step : int Step to restart training from. Defaults to 0 for fresh start. Returns: None # initializing dynamic step driver # setting epsilon to 1.0 for initial collection (random policy) # saving initial buffer to make sure that memory is sufficient # eval before training # performing action according to epsilon-greedy protocol & collecting data # sampling from data # training # changing epsilon linearly from frames 0 to 1 mill, down to 0.1 Creates AtariDQN object and runs training according to configs. Parameters: restart_step : int Step to restart training from. Restart_step = 0 gives fresh start. Returns: None | 2.007613 | 2 |
src/models/utils.py | saeedranjbar12/mtlcfci | 6 | 6621320 | import torch
import torch.nn as nn
from torch.autograd import Variable
#=========================================================
# Reconstruction
#=========================================================
class Conv2_recon(nn.Module):
    """3x3 same-padding conv block used by the reconstruction branch.

    ``is_batchnorm`` is accepted for interface compatibility but is
    currently unused; only ``conv1`` is applied in ``forward`` while
    ``conv2`` stays registered but bypassed (matching the original).
    """

    def __init__(self, in_size, out_size, is_batchnorm):
        super(Conv2_recon, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_size, out_size, 3, 1, 1),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(out_size, out_size, 3, 1, 1),
        )

    def forward(self, inputs):
        # NOTE: conv2 is intentionally not applied (kept from original).
        return self.conv1(inputs)
class Up_recon(nn.Module):
    # Reconstruction upsampling stage: 2x upsample (deconv or bilinear)
    # followed by a Conv2_recon block.
    def __init__(self, in_size, out_size, is_deconv):
        """``is_deconv`` selects a learned ConvTranspose2d; otherwise
        fixed bilinear upsampling is used.

        NOTE(review): bilinear upsampling keeps ``in_size`` channels while
        the conv block expects ``out_size`` inputs -- this assumes
        in_size == out_size in that mode; confirm with callers.
        """
        super(Up_recon, self).__init__()
        self.conv = Conv2_recon(out_size, out_size, True)
        if is_deconv:
            self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2)
        else:
            self.up = nn.UpsamplingBilinear2d(scale_factor=2)
    def forward(self, inputs2):
        # upsample first, then refine with the conv block
        outputs2 = self.up(inputs2)
        return self.conv((outputs2))
#=========================================================
# SEGMENTATION
#=========================================================
class Conv2_discon(nn.Module):
    # 3x3 same-padding conv block for the segmentation branch.
    def __init__(self, in_size, out_size, is_batchnorm):
        super(Conv2_discon, self).__init__()
        #saeed added padding ======================> ADD LEAKY RELU LeakyReLU
        # NOTE(review): both branches currently build identical layers --
        # the BatchNorm/ReLU variants are commented out, so ``is_batchnorm``
        # has no effect; confirm whether normalization should be restored.
        if is_batchnorm:
            self.conv1 = nn.Sequential(nn.Conv2d(in_size, out_size, 3, 1, 1),
                                       )#nn.ReLU() #nn.BatchNorm2d(out_size),
            self.conv2 = nn.Sequential(nn.Conv2d(out_size, out_size, 3, 1, 1),
                                       ) # nn.ReLU(), #nn.BatchNorm2d(out_size),
        else:
            self.conv1 = nn.Sequential(nn.Conv2d(in_size, out_size, 3, 1, 1),)
            self.conv2 = nn.Sequential(nn.Conv2d(out_size, out_size, 3, 1, 1),)
    def forward(self, inputs):
        # only conv1 is applied; conv2 is registered but currently unused
        outputs = self.conv1(inputs)
        #outputs = self.conv1(outputs)
        return outputs
#======================================================================
class Up_disconnected(nn.Module):
    # Segmentation upsampling stage: 2x upsample (deconv or bilinear)
    # followed by a Conv2_discon block.
    def __init__(self, in_size, out_size, is_deconv):
        """``is_deconv`` selects a learned ConvTranspose2d; otherwise
        fixed bilinear upsampling is used.

        NOTE(review): bilinear upsampling keeps ``in_size`` channels while
        the conv block expects ``out_size`` inputs -- assumes
        in_size == out_size in that mode; confirm with callers.
        """
        super(Up_disconnected, self).__init__()
        self.conv = Conv2_discon(out_size, out_size, True)
        if is_deconv:
            self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2)
        else:
            self.up = nn.UpsamplingBilinear2d(scale_factor=2)
    def forward(self, inputs2):
        # upsample first, then refine with the conv block
        outputs2 = self.up(inputs2)
        return self.conv((outputs2))
| import torch
import torch.nn as nn
from torch.autograd import Variable
#=========================================================
# Reconstruction
#=========================================================
class Conv2_recon(nn.Module):
def __init__(self, in_size, out_size, is_batchnorm):
super(Conv2_recon, self).__init__()
# saeed added padding ======================> ADD LEAKY RELU LeakyReLU
self.conv1 = nn.Sequential(nn.Conv2d(in_size, out_size, 3, 1, 1),
) #nn.BatchNorm2d(out_size) #,
self.conv2 = nn.Sequential(nn.Conv2d(out_size, out_size, 3, 1, 1),
) #nn.BatchNorm2d(out_size), #,nn.ReLU()
def forward(self, inputs):
outputs = self.conv1(inputs)
#outputs = self.conv2(outputs)
return outputs
class Up_recon(nn.Module):
def __init__(self, in_size, out_size, is_deconv):
super(Up_recon, self).__init__()
self.conv = Conv2_recon(out_size, out_size, True)
if is_deconv:
self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2)
else:
self.up = nn.UpsamplingBilinear2d(scale_factor=2)
def forward(self, inputs2):
outputs2 = self.up(inputs2)
return self.conv((outputs2))
#=========================================================
# SEGMENTATION
#=========================================================
class Conv2_discon(nn.Module):
def __init__(self, in_size, out_size, is_batchnorm):
super(Conv2_discon, self).__init__()
#saeed added padding ======================> ADD LEAKY RELU LeakyReLU
if is_batchnorm:
self.conv1 = nn.Sequential(nn.Conv2d(in_size, out_size, 3, 1, 1),
)#nn.ReLU() #nn.BatchNorm2d(out_size),
self.conv2 = nn.Sequential(nn.Conv2d(out_size, out_size, 3, 1, 1),
) # nn.ReLU(), #nn.BatchNorm2d(out_size),
else:
self.conv1 = nn.Sequential(nn.Conv2d(in_size, out_size, 3, 1, 1),)
self.conv2 = nn.Sequential(nn.Conv2d(out_size, out_size, 3, 1, 1),)
def forward(self, inputs):
outputs = self.conv1(inputs)
#outputs = self.conv1(outputs)
return outputs
#======================================================================
class Up_disconnected(nn.Module):
def __init__(self, in_size, out_size, is_deconv):
super(Up_disconnected, self).__init__()
self.conv = Conv2_discon(out_size, out_size, True)
if is_deconv:
self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2)
else:
self.up = nn.UpsamplingBilinear2d(scale_factor=2)
def forward(self, inputs2):
outputs2 = self.up(inputs2)
return self.conv((outputs2))
| fr | 0.334673 | #========================================================= # Reconstruction #========================================================= # saeed added padding ======================> ADD LEAKY RELU LeakyReLU #nn.BatchNorm2d(out_size) #, #nn.BatchNorm2d(out_size), #,nn.ReLU() #outputs = self.conv2(outputs) #========================================================= # SEGMENTATION #========================================================= #saeed added padding ======================> ADD LEAKY RELU LeakyReLU #nn.ReLU() #nn.BatchNorm2d(out_size), # nn.ReLU(), #nn.BatchNorm2d(out_size), #outputs = self.conv1(outputs) #====================================================================== | 2.564348 | 3 |
ml_dronebase_data_utils/__init__.py | DroneBase/ml-dronebase-utils | 2 | 6621321 | <filename>ml_dronebase_data_utils/__init__.py
from . import pascal_voc, s3 # noqa: F401
__author__ = "<NAME>"
__version__ = "0.0.1"
| <filename>ml_dronebase_data_utils/__init__.py
from . import pascal_voc, s3 # noqa: F401
__author__ = "<NAME>"
__version__ = "0.0.1"
| uz | 0.465103 | # noqa: F401 | 1.040447 | 1 |
SmartDoorAuthenticationSystem/lambda functions/LF2.py | DivyaPabba08/SmartDoorAuthenticationSystem | 0 | 6621322 | import json
import boto3
import time
from datetime import datetime, timezone
from random import randint
TABLE_DB1_NAME = 'DB1'
TABLE_DB2_NAME = 'DB2'
S3_BUCKET_NAME = 'assignment2-fall2020-faces'
def store_visitor(visitor):
    """Persist a visitor record (face id, contact info, photo ref) to DB2.

    Parameters:
        visitor : dict
            Must contain 'faceId', 'name', 'phoneNumber' and 'objectKey'
            (the S3 key of the captured photo).
    Returns:
        The DynamoDB put_item response.
    """
    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
    table = dynamodb.Table(TABLE_DB2_NAME)
    item = {}
    item['faceId'] = visitor['faceId']
    item['name'] = visitor['name']
    item['phoneNumber'] = visitor['phoneNumber']
    # photo reference plus a UTC creation timestamp
    item['photos'] = {
        "objectKey": visitor['objectKey'],
        "bucket": S3_BUCKET_NAME,
        "createdTimestamp": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%S")
    }
    print(item)
    response = table.put_item(Item = item)
    return response
def put_passcode_dynamoDB(passcode):
    """Store a one-time passcode in DB1, expiring via DynamoDB TTL.

    Parameters:
        passcode : int or str
            The passcode to persist (stored as a string).
    """
    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
    table = dynamodb.Table(TABLE_DB1_NAME)
    current_time = int(time.time())
    expireTime = current_time + 300  # TTL: 5 minutes from now
    item = {
        'passcodes': str(passcode),
        'ttl': expireTime
    }
    table.put_item(Item = item)
    print(item)
def query_danymoDB_DB2(dynamodb, faceId):
    """Fetch a visitor record from DB2 by its face id.

    Parameters:
        dynamodb : boto3 DynamoDB client
        faceId : str
    Returns:
        The raw get_item response dict.
    """
    key = {
        'faceId': {
            'S': faceId
        },
    }
    return dynamodb.get_item(TableName=TABLE_DB2_NAME, Key=key)
def random_with_N_digits(n):
    """Return a uniformly random integer with exactly ``n`` digits."""
    lower = 10 ** (n - 1)
    upper = 10 ** n - 1
    return randint(lower, upper)
def make_and_store_opt(n):
    """Generate an ``n``-digit passcode, persist it with a TTL, return it."""
    otp = random_with_N_digits(n)
    put_passcode_dynamoDB(otp)
    return otp
def send_opt_sns(sns, passcode, visitor):
    """Text the visitor their one-time passcode and an access link via SNS.

    Parameters:
        sns : boto3 SNS client
        passcode : int or str
        visitor : dict
            A DynamoDB get_item response containing 'Item' with 'name',
            'faceId' and 'phoneNumber' attributes.
    Returns:
        The SNS publish response.
    """
    # send sns message to visitor
    name = visitor['Item']['name']['S']
    faceId = visitor['Item']['faceId']['S']
    phone_number = visitor['Item']['phoneNumber']['S']
    sns_message = 'Hello ' + name + ', here is your OPT: '
    sns_message += str(passcode) + '\n'+ 'Please click the following link to access:\n'
    sns_message += f'http://assignment2-fall2020.s3-website-us-east-1.amazonaws.com/?faceId={faceId}'
    response = (sns_message, phone_number)
    print(response)
    response = sns.publish(
        PhoneNumber = phone_number,
        Message=sns_message,
    )
    return response
def lambda_handler(event, context):
    """Register a visitor, issue them a one-time passcode, and SMS it.

    Expects an API Gateway proxy event whose body holds a 'message' dict
    with the visitor's details. Stores the visitor in DB2, generates a
    4-digit passcode stored in DB1 with a TTL, and texts it via SNS.
    """
    # CORS headers returned on every response
    ACCESS_HEADERS = {
        "Access-Control-Allow-Headers" : "Content-Type",
        "Access-Control-Allow-Origin" : "*",
        "Access-Control-Allow-Methods": "OPTIONS,POST,GET"}
    received = event['body']
    if received == None:
        return {
            'statusCode': 200,
            'headers': ACCESS_HEADERS,
            'body': json.dumps(f'Hello from lambda0 None')
        }
    # NOTE(review): naive quote swap assumes a single-quoted payload and
    # breaks bodies containing apostrophes -- confirm upstream format
    received = received.replace("\'","\"")
    body = json.loads(received)
    print(type(body))
    print(body)
    message = body['message']
    print(event)
    dynamodb = boto3.client('dynamodb')
    sns = boto3.client('sns')
    print(message)
    # persist the visitor, then look them up to build the SMS
    response = store_visitor(message)
    print(response)
    visitor = query_danymoDB_DB2(dynamodb, message['faceId'])
    new_OPT = make_and_store_opt(4)
    sns_response = send_opt_sns(sns, new_OPT, visitor)
    print(response)
    return {
        'statusCode': 200,
        'headers': ACCESS_HEADERS,
        'body': json.dumps('Successful submission')
    }
| import json
import boto3
import time
from datetime import datetime, timezone
from random import randint
TABLE_DB1_NAME = 'DB1'
TABLE_DB2_NAME = 'DB2'
S3_BUCKET_NAME = 'assignment2-fall2020-faces'
def store_visitor(visitor):
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
table = dynamodb.Table(TABLE_DB2_NAME)
item = {}
item['faceId'] = visitor['faceId']
item['name'] = visitor['name']
item['phoneNumber'] = visitor['phoneNumber']
item['photos'] = {
"objectKey": visitor['objectKey'],
"bucket": S3_BUCKET_NAME,
"createdTimestamp": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%S")
}
print(item)
response = table.put_item(Item = item)
return response
def put_passcode_dynamoDB(passcode):
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
table = dynamodb.Table(TABLE_DB1_NAME)
current_time = int(time.time())
expireTime = current_time + 300
item = {
'passcodes': str(passcode),
'ttl': expireTime
}
table.put_item(Item = item)
print(item)
def query_danymoDB_DB2(dynamodb, faceId):
response = dynamodb.get_item(
TableName=TABLE_DB2_NAME,
Key={
'faceId': {
'S': faceId
},
}
)
return response
def random_with_N_digits(n):
range_start = 10**(n-1)
range_end = (10**n)-1
return randint(range_start, range_end)
def make_and_store_opt(n):
passcode = random_with_N_digits(n)
put_passcode_dynamoDB(passcode)
return passcode
def send_opt_sns(sns, passcode, visitor):
    """SMS the one-time passcode plus an access link to the visitor via SNS.

    visitor is a raw DynamoDB get_item response, hence the
    ['Item'][...]['S'] attribute-value unwrapping below.
    Returns the SNS publish response.
    """
    # Send the SNS message to the visitor.
    name = visitor['Item']['name']['S']
    faceId = visitor['Item']['faceId']['S']
    phone_number = visitor['Item']['phoneNumber']['S']
    sns_message = 'Hello ' + name + ', here is your OPT: '
    sns_message += str(passcode) + '\n' + 'Please click the following link to access:\n'
    sns_message += f'http://assignment2-fall2020.s3-website-us-east-1.amazonaws.com/?faceId={faceId}'
    response = (sns_message, phone_number)
    print(response)
    response = sns.publish(
        PhoneNumber=phone_number,
        Message=sns_message,
    )
    return response
def lambda_handler(event, context):
    """API Gateway entry point: register a visitor and text them an OTP.

    Expects event['body'] to be a JSON document of the form
    {"message": {"faceId": ..., "name": ..., "phoneNumber": ..., "objectKey": ...}}.
    Returns an API Gateway proxy response with CORS headers.
    """
    ACCESS_HEADERS = {
        "Access-Control-Allow-Headers": "Content-Type",
        "Access-Control-Allow-Origin": "*",
        "Access-Control-Allow-Methods": "OPTIONS,POST,GET"}
    received = event['body']
    if received is None:  # fixed: identity check instead of '== None'
        return {
            'statusCode': 200,
            'headers': ACCESS_HEADERS,
            'body': json.dumps('Hello from lambda0 None')
        }
    # HACK: the client sends single-quoted pseudo-JSON; swap the quotes so
    # json.loads can parse it. This breaks if any value contains a quote.
    received = received.replace("\'", "\"")
    body = json.loads(received)
    message = body['message']
    dynamodb = boto3.client('dynamodb')
    sns = boto3.client('sns')
    # Persist the visitor, then read the record back in DynamoDB wire format.
    response = store_visitor(message)
    print(response)
    visitor = query_danymoDB_DB2(dynamodb, message['faceId'])
    # Create a fresh 4-digit OTP and text it to the visitor.
    new_OPT = make_and_store_opt(4)
    sns_response = send_opt_sns(sns, new_OPT, visitor)
    # fixed: the second log line previously re-printed `response` instead of
    # the SNS publish result; also dropped raw event/body prints (PII in logs).
    print(sns_response)
    return {
        'statusCode': 200,
        'headers': ACCESS_HEADERS,
        'body': json.dumps('Successful submission')
    }
| en | 0.358875 | # send sns messgae to visitor # TODO implement | 2.36989 | 2 |
apps/accounts/migrations/0002_add_more_fields.py | developersociety/commonslibrary | 4 | 6621323 | <filename>apps/accounts/migrations/0002_add_more_fields.py<gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-06 10:56
from __future__ import unicode_literals
import django.contrib.postgres.fields.citext
from django.contrib.postgres.operations import CITextExtension
from django.db import migrations, models
import sorl.thumbnail.fields
import accounts.managers
class Migration(migrations.Migration):
    """Switch the custom User model to email-based auth and add profile fields.

    Installs the PostgreSQL citext extension (case-insensitive email),
    drops the username field, and adds address/phone/photo/confirmation
    fields.
    """

    dependencies = [
        ('accounts', '0001_initial'),
    ]

    operations = [
        # Enable PostgreSQL's citext extension (required by CIEmailField).
        CITextExtension(),
        migrations.AlterField(
            model_name='user',
            name='email',
            field=django.contrib.postgres.fields.citext.CIEmailField(max_length=254, unique=True, verbose_name='email address'),
        ),
        migrations.AlterModelManagers(
            name='user',
            managers=[
                ('objects', accounts.managers.UserManager()),
            ],
        ),
        # Username is removed; email becomes the sole login identifier.
        migrations.RemoveField(
            model_name='user',
            name='username',
        ),
        migrations.AddField(
            model_name='user',
            name='address',
            field=models.TextField(blank=True, verbose_name='Work address'),
        ),
        migrations.AddField(
            model_name='user',
            name='is_email_confirmed',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='user',
            name='phone',
            field=models.CharField(blank=True, max_length=32),
        ),
        migrations.AddField(
            model_name='user',
            name='photo',
            field=sorl.thumbnail.fields.ImageField(blank=True, upload_to='uploads/accounts/images/%Y/%m/%d', verbose_name='Profile picture'),
        ),
        # NOTE(review): this AlterField duplicates the 'email' AlterField
        # above — harmless, but one of the two could be dropped.
        migrations.AlterField(
            model_name='user',
            name='email',
            field=django.contrib.postgres.fields.citext.CIEmailField(max_length=254, unique=True, verbose_name='email address'),
        ),
        migrations.AlterField(
            model_name='user',
            name='first_name',
            field=models.CharField(max_length=30, verbose_name='first name'),
        ),
        migrations.AlterField(
            model_name='user',
            name='last_name',
            field=models.CharField(max_length=30, verbose_name='<NAME>'),
        ),
    ]
| <filename>apps/accounts/migrations/0002_add_more_fields.py<gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-06 10:56
from __future__ import unicode_literals
import django.contrib.postgres.fields.citext
from django.contrib.postgres.operations import CITextExtension
from django.db import migrations, models
import sorl.thumbnail.fields
import accounts.managers
class Migration(migrations.Migration):
    """Switch the custom User model to email-based auth and add profile fields.

    Installs the PostgreSQL citext extension (case-insensitive email),
    drops the username field, and adds address/phone/photo/confirmation
    fields.
    """

    dependencies = [
        ('accounts', '0001_initial'),
    ]

    operations = [
        # Enable PostgreSQL's citext extension (required by CIEmailField).
        CITextExtension(),
        migrations.AlterField(
            model_name='user',
            name='email',
            field=django.contrib.postgres.fields.citext.CIEmailField(max_length=254, unique=True, verbose_name='email address'),
        ),
        migrations.AlterModelManagers(
            name='user',
            managers=[
                ('objects', accounts.managers.UserManager()),
            ],
        ),
        # Username is removed; email becomes the sole login identifier.
        migrations.RemoveField(
            model_name='user',
            name='username',
        ),
        migrations.AddField(
            model_name='user',
            name='address',
            field=models.TextField(blank=True, verbose_name='Work address'),
        ),
        migrations.AddField(
            model_name='user',
            name='is_email_confirmed',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='user',
            name='phone',
            field=models.CharField(blank=True, max_length=32),
        ),
        migrations.AddField(
            model_name='user',
            name='photo',
            field=sorl.thumbnail.fields.ImageField(blank=True, upload_to='uploads/accounts/images/%Y/%m/%d', verbose_name='Profile picture'),
        ),
        # NOTE(review): this AlterField duplicates the 'email' AlterField
        # above — harmless, but one of the two could be dropped.
        migrations.AlterField(
            model_name='user',
            name='email',
            field=django.contrib.postgres.fields.citext.CIEmailField(max_length=254, unique=True, verbose_name='email address'),
        ),
        migrations.AlterField(
            model_name='user',
            name='first_name',
            field=models.CharField(max_length=30, verbose_name='first name'),
        ),
        migrations.AlterField(
            model_name='user',
            name='last_name',
            field=models.CharField(max_length=30, verbose_name='<NAME>'),
        ),
    ]
| en | 0.632678 | # -*- coding: utf-8 -*- # Generated by Django 1.11.10 on 2018-03-06 10:56 | 1.498566 | 1 |
virustotal_python/__init__.py | smk762/virustotal-python | 0 | 6621324 | from virustotal_python.virustotal import Virustotal
from virustotal_python.virustotal import VirustotalError
name = "virustotal-python"
| from virustotal_python.virustotal import Virustotal
from virustotal_python.virustotal import VirustotalError
name = "virustotal-python"
| none | 1 | 1.204957 | 1 | |
part2/prob_1.py | hasanmansur/drishtipat | 0 | 6621325 | <filename>part2/prob_1.py<gh_stars>0
import cv2
import math
import numpy as np
from matplotlib import pyplot as plt
from tkinter import filedialog
from tkinter import *
def on_mouse_over(event, x, y, flags, param):
    """Mouse callback: on movement, outline the cursor pixel and render
    per-pixel / per-window statistics onto the dashboard image.

    Note: img is in OpenCV BGR order, so img[y][x] is (B, G, R).
    """
    global img
    global dashboard
    if event == cv2.EVENT_MOUSEMOVE:  # fixed: cv2.cv2 was a redundant self-alias
        img_reset()
        # Red 13x13 outline around the cursor position.
        cv2.rectangle(img, (x - 6, y - 6), (x + 6, y + 6), (0, 0, 255), 1)
        intensity = sum(img[y][x]) / 3
        window = img[y - 5:y + 5, x - 5:x + 5]
        x0 = 0
        xn = img.shape[1] - 1
        y0 = 0
        yn = img.shape[0] - 1
        if x - 5 < x0 or x + 5 > xn or y - 5 < y0 or y + 5 > yn:
            # Sampling window is clipped by the image border: stats undefined.
            txt = "window out of boundary"
            str_mean = "mean: {}".format(txt)
            str_std = "standard deviation: {}".format(txt)
        else:
            mean, std = cv2.meanStdDev(window)
            str_mean = "window mean: " + "\n" + "R:{}, G:{}, B:{}".format(mean[2][0], mean[1][0], mean[0][0])
            str_std = "window standard deviation: " + "\n" + "R:{}, G:{}, B:{}".format(std[2][0], std[1][0], std[0][0])
        str_coordinates = "x:{}, y:{}".format(x, y)
        str_rgb = "R:{},G:{},B:{}".format(img[y][x][2], img[y][x][1], img[y][x][0])
        str_intesity = "intensity:{}".format(intensity)  # fixed: reuse value computed above
        output_str = str_coordinates + "\n" + str_rgb + "\n" + str_intesity + "\n" + str_mean + "\n" + str_std
        # Draw each stats line on its own 50px row of the dashboard.
        text_y0, dy = 50, 50
        for i, line in enumerate(output_str.split('\n')):
            text_y = text_y0 + i * dy  # fixed: no longer clobbers the cursor's y
            cv2.putText(dashboard, str(line), (20, text_y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1)
def img_reset():
    """Reload the source image and clear the dashboard, discarding overlays.

    Called on every mouse move so the previous rectangle/text are erased.
    """
    global img
    global dashboard
    global filename
    img = cv2.imread(filename)
    # Fresh white 600x1200 canvas for the statistics read-out.
    dashboard = np.full((600, 1200), 255, dtype='uint8')
    cv2.imshow("dashboard", dashboard)
    cv2.imshow('image', img)
def channel_histogram():
    """Plot a 256-bin histogram for each BGR channel of the loaded image."""
    global img
    for channel, col in zip(range(3), ('b', 'g', 'r')):
        hist = cv2.calcHist([img], [channel], None, [256], [0, 256])
        plt.plot(hist, color=col)
    plt.xlim([0, 256])
    plt.title("Color Channels Histogram")
    plt.show(block=False)
def main():
    """Prompt for an image file, show its channel histogram, and run the
    interactive inspection loop (press Esc to quit)."""
    global img
    global dashboard
    global filename
    root = Tk()
    root.filename = filedialog.askopenfilename(initialdir="/", title="Select file", filetypes=(("jpeg files", "*.jpg"), ("png files", "*.png"), ("all files", "*.*")))
    filename = root.filename
    img = cv2.imread(filename)
    dashboard = np.full((600, 1200), 255, dtype='uint8')
    channel_histogram()
    cv2.namedWindow("image")
    cv2.setMouseCallback("image", on_mouse_over)
    while (1):
        cv2.imshow("image", img)
        cv2.imshow("dashboard", dashboard)
        k = cv2.waitKey(1) & 0xFF
        if k == 27:  # Esc key exits the loop
            break
    cv2.destroyAllWindows()
# Launch the viewer immediately on execution (no __main__ guard in original).
main()
| <filename>part2/prob_1.py<gh_stars>0
import cv2
import math
import numpy as np
from matplotlib import pyplot as plt
from tkinter import filedialog
from tkinter import *
def on_mouse_over(event, x, y, flags, param):
    """Mouse callback: on movement, outline the cursor pixel and render
    per-pixel / per-window statistics onto the dashboard image.

    Note: img is in OpenCV BGR order, so img[y][x] is (B, G, R).
    """
    global img
    global dashboard
    if event == cv2.EVENT_MOUSEMOVE:  # fixed: cv2.cv2 was a redundant self-alias
        img_reset()
        # Red 13x13 outline around the cursor position.
        cv2.rectangle(img, (x - 6, y - 6), (x + 6, y + 6), (0, 0, 255), 1)
        intensity = sum(img[y][x]) / 3
        window = img[y - 5:y + 5, x - 5:x + 5]
        x0 = 0
        xn = img.shape[1] - 1
        y0 = 0
        yn = img.shape[0] - 1
        if x - 5 < x0 or x + 5 > xn or y - 5 < y0 or y + 5 > yn:
            # Sampling window is clipped by the image border: stats undefined.
            txt = "window out of boundary"
            str_mean = "mean: {}".format(txt)
            str_std = "standard deviation: {}".format(txt)
        else:
            mean, std = cv2.meanStdDev(window)
            str_mean = "window mean: " + "\n" + "R:{}, G:{}, B:{}".format(mean[2][0], mean[1][0], mean[0][0])
            str_std = "window standard deviation: " + "\n" + "R:{}, G:{}, B:{}".format(std[2][0], std[1][0], std[0][0])
        str_coordinates = "x:{}, y:{}".format(x, y)
        str_rgb = "R:{},G:{},B:{}".format(img[y][x][2], img[y][x][1], img[y][x][0])
        str_intesity = "intensity:{}".format(intensity)  # fixed: reuse value computed above
        output_str = str_coordinates + "\n" + str_rgb + "\n" + str_intesity + "\n" + str_mean + "\n" + str_std
        # Draw each stats line on its own 50px row of the dashboard.
        text_y0, dy = 50, 50
        for i, line in enumerate(output_str.split('\n')):
            text_y = text_y0 + i * dy  # fixed: no longer clobbers the cursor's y
            cv2.putText(dashboard, str(line), (20, text_y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1)
def img_reset():
    """Reload the source image and clear the dashboard, discarding overlays.

    Called on every mouse move so the previous rectangle/text are erased.
    """
    global img
    global dashboard
    global filename
    img = cv2.imread(filename)
    # Fresh white 600x1200 canvas for the statistics read-out.
    dashboard = np.full((600, 1200), 255, dtype='uint8')
    cv2.imshow("dashboard", dashboard)
    cv2.imshow('image', img)
def channel_histogram():
    """Plot a 256-bin histogram for each BGR channel of the loaded image."""
    global img
    for channel, col in zip(range(3), ('b', 'g', 'r')):
        hist = cv2.calcHist([img], [channel], None, [256], [0, 256])
        plt.plot(hist, color=col)
    plt.xlim([0, 256])
    plt.title("Color Channels Histogram")
    plt.show(block=False)
def main():
    """Prompt for an image file, show its channel histogram, and run the
    interactive inspection loop (press Esc to quit)."""
    global img
    global dashboard
    global filename
    root = Tk()
    root.filename = filedialog.askopenfilename(initialdir="/", title="Select file", filetypes=(("jpeg files", "*.jpg"), ("png files", "*.png"), ("all files", "*.*")))
    filename = root.filename
    img = cv2.imread(filename)
    dashboard = np.full((600, 1200), 255, dtype='uint8')
    channel_histogram()
    cv2.namedWindow("image")
    cv2.setMouseCallback("image", on_mouse_over)
    while (1):
        cv2.imshow("image", img)
        cv2.imshow("dashboard", dashboard)
        k = cv2.waitKey(1) & 0xFF
        if k == 27:  # Esc key exits the loop
            break
    cv2.destroyAllWindows()
# Launch the viewer immediately on execution (no __main__ guard in original).
main()
| en | 0.165426 | #print(img.shape) #str_mean = "mean: R:{} G:{} B:{}".format(mean[2],mean[1], mean[0]) #str_std = "standard deviation:" + "\n" + "R:{} G:{} B:{}".format(std[2],std[1], std[0]) #img = cv2.imread("testimage.png") #img = cv2.imread("testimage.png") | 2.777178 | 3 |
bin/db_build_dict.py | donalm/obstructx | 0 | 6621326 | #!/usr/bin/env python
import sys
import json
import argparse
from twisted.python import log
from collections import OrderedDict
appname = "obstructx"
from obstructx import config
config.Config.init()
from obstructx.log import get_logger
logger = get_logger(appname)
logger.error("OBSTRUCTX DATABASE SCHEMA TRAWLER")
from twisted.internet import reactor
from twisted.internet import defer
from obstructx import log
from obstructx import db_build_dict
logger = log.get_logger()
def eb(f):
    """Errback: record a brief traceback for any failed Deferred."""
    brief = f.getBriefTraceback()
    logger.error(brief)
def stop(x, database_name, json_filename):
    """Callback: stop the reactor, then dump the collected schema to JSON.

    x is the (ignored) result passed along the Deferred chain by addBoth.
    """
    reactor.stop()
    data = json.dumps(db_build_dict.Inquisitor.data[database_name], indent=4, sort_keys=True)
    # fixed: 'with' guarantees the handle is closed even if the write fails.
    with open(json_filename, "w") as fh:
        fh.write(data)
    print("JSON file created at " + json_filename)
def main(database_name, json_filename):
    """Kick off schema introspection; write JSON and stop when it finishes."""
    inquisitor = db_build_dict.Inquisitor(database_name)
    df = inquisitor.get_database_metadata()
    df.addErrback(eb)
    # addBoth ensures the reactor is stopped on success *and* on failure.
    df.addBoth(stop, database_name, json_filename)
if __name__ == '__main__':
    # Command-line entry point: parse args, then run main() under the reactor.
    parser = argparse.ArgumentParser(
        description='Parse a PostgreSQL database schema into JSON.',
        usage="db_build_dict [-h] DATABASE"
    )
    parser.add_argument('database_name', metavar="DATABASE", type=str,
                        help='The name of the database')
    parser.add_argument('json_filename', metavar="FILEPATH", type=str,
                        help='A path to write the JSON file')
    args = parser.parse_args()
    reactor.callWhenRunning(main, args.database_name, args.json_filename)
    reactor.run()
| #!/usr/bin/env python
import sys
import json
import argparse
from twisted.python import log
from collections import OrderedDict
appname = "obstructx"
from obstructx import config
config.Config.init()
from obstructx.log import get_logger
logger = get_logger(appname)
logger.error("OBSTRUCTX DATABASE SCHEMA TRAWLER")
from twisted.internet import reactor
from twisted.internet import defer
from obstructx import log
from obstructx import db_build_dict
logger = log.get_logger()
def eb(f):
    """Errback: record a brief traceback for any failed Deferred."""
    brief = f.getBriefTraceback()
    logger.error(brief)
def stop(x, database_name, json_filename):
    """Callback: stop the reactor, then dump the collected schema to JSON.

    x is the (ignored) result passed along the Deferred chain by addBoth.
    """
    reactor.stop()
    data = json.dumps(db_build_dict.Inquisitor.data[database_name], indent=4, sort_keys=True)
    # fixed: 'with' guarantees the handle is closed even if the write fails.
    with open(json_filename, "w") as fh:
        fh.write(data)
    print("JSON file created at " + json_filename)
def main(database_name, json_filename):
    """Kick off schema introspection; write JSON and stop when it finishes."""
    inquisitor = db_build_dict.Inquisitor(database_name)
    df = inquisitor.get_database_metadata()
    df.addErrback(eb)
    # addBoth ensures the reactor is stopped on success *and* on failure.
    df.addBoth(stop, database_name, json_filename)
if __name__ == '__main__':
    # Command-line entry point: parse args, then run main() under the reactor.
    parser = argparse.ArgumentParser(
        description='Parse a PostgreSQL database schema into JSON.',
        usage="db_build_dict [-h] DATABASE"
    )
    parser.add_argument('database_name', metavar="DATABASE", type=str,
                        help='The name of the database')
    parser.add_argument('json_filename', metavar="FILEPATH", type=str,
                        help='A path to write the JSON file')
    args = parser.parse_args()
    reactor.callWhenRunning(main, args.database_name, args.json_filename)
    reactor.run()
| ru | 0.26433 | #!/usr/bin/env python | 2.3456 | 2 |
explore_medium/sorting_and_searching/SortColors.py | niefy/LeetCodeExam | 0 | 6621327 | """
https://leetcode-cn.com/explore/interview/card/top-interview-questions-medium/50/sorting-and-searching/96/
题目:颜色分类
给定一个包含红色、白色和蓝色,一共 n 个元素的数组,原地对它们进行排序,使得相同颜色的元素相邻,并按照红色、白色、蓝色顺序排列。
此题中,我们使用整数 0、 1 和 2 分别表示红色、白色和蓝色。
注意:
不能使用代码库中的排序函数来解决这道题。
示例:
输入: [2,0,2,1,1,0]
输出: [0,0,1,1,2,2]
进阶:
一个直观的解决方案是使用计数排序的两趟扫描算法。
首先,迭代计算出0、1 和 2 元素的个数,然后按照0、1、2的排序,重写当前数组。
你能想出一个仅使用常数空间的一趟扫描算法吗?
@author Niefy
@date 2018-12-12
"""
class Solution:
    def sortColors(self, nums):
        """
        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        # One-pass, constant-space Dutch-national-flag partitioning:
        # everything left of `low` is 0, right of `high` is 2, and the
        # region between `low` and `mid` is 1.
        low, mid, high = 0, 0, len(nums) - 1
        while mid <= high:
            if nums[mid] == 0:
                nums[low], nums[mid] = nums[mid], nums[low]
                low += 1
                mid += 1
            elif nums[mid] == 1:
                mid += 1
            else:
                nums[mid], nums[high] = nums[high], nums[mid]
                high -= 1
# Test code: quick manual checks of the sorter.
t = Solution()
nums1 = [0, 1]
t.sortColors(nums1)
print(nums1)
nums2 = [2, 0, 2, 1, 1, 0]
t.sortColors(nums2)
print(nums2)
| """
https://leetcode-cn.com/explore/interview/card/top-interview-questions-medium/50/sorting-and-searching/96/
题目:颜色分类
给定一个包含红色、白色和蓝色,一共 n 个元素的数组,原地对它们进行排序,使得相同颜色的元素相邻,并按照红色、白色、蓝色顺序排列。
此题中,我们使用整数 0、 1 和 2 分别表示红色、白色和蓝色。
注意:
不能使用代码库中的排序函数来解决这道题。
示例:
输入: [2,0,2,1,1,0]
输出: [0,0,1,1,2,2]
进阶:
一个直观的解决方案是使用计数排序的两趟扫描算法。
首先,迭代计算出0、1 和 2 元素的个数,然后按照0、1、2的排序,重写当前数组。
你能想出一个仅使用常数空间的一趟扫描算法吗?
@author Niefy
@date 2018-12-12
"""
class Solution:
    def sortColors(self, nums):
        """
        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        # One-pass, constant-space Dutch-national-flag partitioning:
        # everything left of `low` is 0, right of `high` is 2, and the
        # region between `low` and `mid` is 1.
        low, mid, high = 0, 0, len(nums) - 1
        while mid <= high:
            if nums[mid] == 0:
                nums[low], nums[mid] = nums[mid], nums[low]
                low += 1
                mid += 1
            elif nums[mid] == 1:
                mid += 1
            else:
                nums[mid], nums[high] = nums[high], nums[mid]
                high -= 1
# Test code: quick manual checks of the sorter.
t = Solution()
nums1 = [0, 1]
t.sortColors(nums1)
print(nums1)
nums2 = [2, 0, 2, 1, 1, 0]
t.sortColors(nums2)
print(nums2)
| zh | 0.961477 | https://leetcode-cn.com/explore/interview/card/top-interview-questions-medium/50/sorting-and-searching/96/ 题目:颜色分类 给定一个包含红色、白色和蓝色,一共 n 个元素的数组,原地对它们进行排序,使得相同颜色的元素相邻,并按照红色、白色、蓝色顺序排列。 此题中,我们使用整数 0、 1 和 2 分别表示红色、白色和蓝色。 注意: 不能使用代码库中的排序函数来解决这道题。 示例: 输入: [2,0,2,1,1,0] 输出: [0,0,1,1,2,2] 进阶: 一个直观的解决方案是使用计数排序的两趟扫描算法。 首先,迭代计算出0、1 和 2 元素的个数,然后按照0、1、2的排序,重写当前数组。 你能想出一个仅使用常数空间的一趟扫描算法吗? @author Niefy @date 2018-12-12 :type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead. #测试代码 | 4.25224 | 4 |
FH_Assignment.py | FHealy90/EGM722_Assignment | 0 | 6621328 | # The print function allows us to print messages and information to the screen
print ( "Hello and welcome to my assignment for EGM722 - Programming for GIS and Remote Sensing"
"Designated Sites such as Special Areas of Conservation (SAC) 'and' Special Protection Areas"
"ensure the adequate conservation of habitats where many of our plants and animals live."
"The following code will review SAC and SPA data" )
# First import geopandas and load the data:
import geopandas as gpd

sac_data = gpd.read_file(
    'C:\EGM_722\egm722\project\data_files/sac_ITM.shp')  # you will need to create your own file path here
print(sac_data.head())
spa_data = gpd.read_file(
    'C:\EGM_722\egm722\project\data_files/spa_ITM.shp')  # you will need to create your own file path here
print(spa_data.head())
# The data is stored in a table (a GeoDataFrame), much like the attribute table in ArcMap.
# Next, you can discover how many rows of each feature there is.
# This will display the numbers of SACs and SPAs in Northern Ireland.
rows, cols = sac_data.shape  # number of rows = count of SAC features in Northern Ireland
print('Number of SAC features: {}'.format(rows))
rows, cols = spa_data.shape  # number of rows = count of SPA features in Northern Ireland
print('Number of SPA features: {}'.format(rows))
# _______________________________________________________________________________________________________________________
# Convert a CSV file to a shapefile. Here Historical Land Use for Northern Ireland
# is loaded and converted into point geometries.
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point
import cartopy.crs as ccrs

df = pd.read_csv('C:\EGM_722\egm722\Project\Data_Files\Historical_Landuse_Dataset.csv')
df.head()  # this will let you look at the loaded DataFrame
# Each land use has only a single x/y coordinate, so build a Point per feature:
# first pair the columns with zip, then turn each pair into a shapely Point.
df['geometry'] = list(zip(df['x'], df['y']))  # zip is an iterator, so use list to create something pandas can use
df['geometry'] = df['geometry'].apply(Point)  # turn the coordinate pairs into Point objects
gdf = gpd.GeoDataFrame(df)
gdf.set_crs("EPSG:2157", inplace=True)  # EPSG:2157 = Irish Transverse Mercator
print(gdf)
gdf.to_file('Historical_Landuse_Dataset.shp')
# Writes the converted data out as a shapefile.
# _____________________________________________________________________________________________________________
# This allows the use of figures interactively
import geopandas as gpd
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from cartopy.feature import ShapelyFeature
plt.ion () # make the plotting interactive
def generate_handles(labels, colors, edge='k', alpha=1):
    """Build one legend patch (Rectangle) per label, cycling through *colors*."""
    n_colors = len(colors)
    return [
        mpatches.Rectangle((0, 0), 1, 1,
                           facecolor=colors[idx % n_colors],
                           edgecolor=edge, alpha=alpha)
        for idx in range(len(labels))
    ]
# create a scale bar of length 20 km in the upper right corner of the map
def scale_bar(ax, location=(0.92, 0.95)):
    """Draw a 20 km scale bar on *ax* at the given axes-fraction location."""
    # Centre the bar's local transverse Mercator projection on the map's
    # geographic midpoint so plotted distances stay true there.
    llx0, llx1, lly0, lly1 = ax.get_extent(ccrs.PlateCarree())
    sbllx = (llx1 + llx0) / 2
    sblly = lly0 + (lly1 - lly0) * location[1]
    tmc = ccrs.TransverseMercator(sbllx, sblly)
    x0, x1, y0, y1 = ax.get_extent(tmc)
    sbx = x0 + (x1 - x0) * location[0]
    sby = y0 + (y1 - y0) * location[1]
    # Alternating black/white 10 km segments plus distance labels.
    plt.plot([sbx, sbx - 20000], [sby, sby], color='k', linewidth=9, transform=tmc)
    plt.plot([sbx, sbx - 10000], [sby, sby], color='k', linewidth=6, transform=tmc)
    plt.plot([sbx - 10000, sbx - 20000], [sby, sby], color='w', linewidth=6, transform=tmc)
    plt.text(sbx, sby - 4500, '20 km', transform=tmc, fontsize=8)
    plt.text(sbx - 12500, sby - 4500, '10 km', transform=tmc, fontsize=8)
    plt.text(sbx - 24500, sby - 4500, '0 km', transform=tmc, fontsize=8)
# Most of the modules are now imported and a few helper functions defined;
# now load the data. To load the shapefile data, use GeoPandas, an open-source
# package designed to make working with geospatial data in python easier.
# load the datasets
outline = gpd.read_file('C:\EGM_722\egm722\Project\data_files/NI_outline.shp')
towns = gpd.read_file('C:\EGM_722\egm722\Project\data_files/Towns.shp')
water = gpd.read_file('C:\EGM_722\egm722\Project\data_files/Water.shp')
rivers = gpd.read_file('C:\EGM_722\egm722\Project\data_files/Rivers.shp')
counties = gpd.read_file('C:\EGM_722\egm722\Project\data_files/Counties.shp')
SACs = gpd.read_file('C:\EGM_722\egm722\Project\data_files/sac_ITM.shp')
SPAs = gpd.read_file('C:\EGM_722\egm722\Project\data_files/spa_ITM.shp')
# Create a figure of size 10x10 (representing the page size in inches)
myFig = plt.figure(figsize=(10, 10))
myCRS = ccrs.UTM(29)  # Create a Universal Transverse Mercator reference system
ax = plt.axes(projection=ccrs.Mercator())  # axes object (Mercator projection) the data is plotted into
# Add the outline of Northern Ireland using cartopy's ShapelyFeature
outline_feature = ShapelyFeature(outline['geometry'], myCRS, edgecolor='k', facecolor='w')
xmin, ymin, xmax, ymax = outline.total_bounds
ax.add_feature(outline_feature)  # add the features we've created to the map.
# using the boundary of the shapefile features, zoom the map to our area of interest
ax.set_extent([xmin, xmax, ymin, ymax], crs=myCRS)  # total_bounds order is xmin, ymin, xmax, ymax
# Lakes: edge colour deliberately matches the face colour.
water_feat = ShapelyFeature(water['geometry'], myCRS,
                            edgecolor='mediumblue',
                            facecolor='mediumblue',
                            linewidth=1)
ax.add_feature(water_feat)
river_feat = ShapelyFeature(rivers['geometry'], myCRS,
                            edgecolor='royalblue',
                            linewidth=0.2)
ax.add_feature(river_feat)
SACs_feat = ShapelyFeature(SACs['geometry'], myCRS,
                           edgecolor='darkorchid',
                           facecolor='darkorchid',
                           linewidth=0.5)
ax.add_feature(SACs_feat)
SPAs_feat = ShapelyFeature(SPAs['geometry'], myCRS,
                           edgecolor='fuchsia',
                           facecolor='fuchsia',
                           linewidth=0.5)
ax.add_feature(SPAs_feat)
# ShapelyFeature creates a polygon, so for point data we can just use ax.plot()
myFig  # to show the updated figure
town_handle = ax.plot(towns.geometry.x, towns.geometry.y, 's', color='0.5', ms=3, transform=myCRS)
# note: if you change the color you use to display lakes, you'll want to change it here, too
water_handle = generate_handles(['Lakes'], ['mediumblue'])
# note: if you change the color you use to display rivers, you'll want to change it here, too
river_handle = [mlines.Line2D([], [], color='royalblue')]  # have to make this a list
# get a list of unique names for the county boundaries
county_names = list(counties.CountyName.unique())
county_names.sort()  # sort the counties alphabetically by name
# update county_names to take it out of uppercase text
nice_names = [name.title() for name in county_names]
# generate a list of handles for the county datasets
county_colors = ['k']
county_handles = generate_handles(counties.CountyName.unique(), county_colors, alpha=0.25)
# generate handles for SPAs and SACs
spa_handle = [mlines.Line2D([], [], color='fuchsia')]
sac_handle = [mlines.Line2D([], [], color='orchid')]
# ax.legend() takes a list of handles and a matching list of labels for the legend
handles = county_handles + water_handle + river_handle + town_handle + sac_handle + spa_handle
labels = nice_names + ['Lakes', 'Rivers', 'Towns', 'Special Areas of Conservation', 'Special Protection Areas']
leg = ax.legend(handles, labels, title='Legend', title_fontsize=4,
                fontsize=2, loc='upper left', frameon=True, framealpha=1)
gridlines = ax.gridlines(draw_labels=True,
                         xlocs=[-8, -7.5, -7, -6.5, -6, -5.5],
                         ylocs=[54, 54.5, 55, 55.5])
gridlines.left_labels = False
gridlines.bottom_labels = False
ax.set_extent([xmin, xmax, ymin, ymax], crs=myCRS)
# add the text labels for the towns
for i, row in towns.iterrows():
    x, y = row.geometry.x, row.geometry.y
    plt.text(x, y, row['TOWN_NAME'].title(), fontsize=4, transform=myCRS)  # place a label at x,y
myFig.savefig('map.png', bbox_inches='tight', dpi=300)
# _____________________________________________________________________________________________________
# Extract the conifer forestry from the land-cover raster and convert it to a
# shapefile (no forestry shapefile is available for Northern Ireland).
import rasterio as rio
import matplotlib.pyplot as plt

plt.rcParams.update({'font.size': 22})  # update the font size for our plots to be size 22
# open the land cover raster and read the data
with rio.open('C:\EGM_722\egm722\Week5\data_files/LCM2015_Aggregate_100m.tif') as dataset:
    xmin, ymin, xmax, ymax = dataset.bounds
    crs = dataset.crs
    landcover = dataset.read(1)
    affine_tfm = dataset.transform
# Polygonize a raster using the Geospatial Data Abstraction Library (GDAL)
from osgeo import gdal, ogr
import sys

# This allows GDAL to throw Python Exceptions
gdal.UseExceptions()
# Get raster datasource
src = 'src_LCM2015_Aggregate_100m.tiff'
src_ds = gdal.Open("C:\EGM_722\egm722\Project\Data_Files\LCM2015_Aggregate_100m.tif")
if src_ds is None:
    print('Unable to open {}'.format('src_filename'))
    sys.exit(1)
try:
    # Band 3 holds the class of interest; presumably conifer forest — confirm.
    srcband = src_ds.GetRasterBand(3)
except RuntimeError as e:
    # for example, try GetRasterBand(2)
    print('Band ( %i ) not found')
    print(e)
    sys.exit(1)
# Create output datasource
dst_layername = "C:\EGM_722\egm722\Project\Data_Files\Conifer_Forest_Polygonized"
drv = ogr.GetDriverByName("ESRI Shapefile")
dst_ds = drv.CreateDataSource(dst_layername + "Conifer_Forest_Polygonized.shp")
dst_layer = dst_ds.CreateLayer(dst_layername, srs=None)
gdal.Polygonize(srcband, None, dst_layer, -1, [], callback=None)
# _____________________________________________________________________________________________________________
# Create a buffer from polygonized features
import ogr, os
def createBuffer(inputfn, outputBufferfn, bufferDist):
    """Buffer every feature of *inputfn* by *bufferDist* (layer units) and
    write the result to the shapefile *outputBufferfn*.
    """
    inputds = ogr.Open(inputfn)
    inputlyr = inputds.GetLayer()
    # FIX: GetDriverByName expects a driver name ('ESRI Shapefile'), not a
    # file path — the original passed a .shp path, which returns None and
    # crashes on the first driver method call.
    shpdriver = ogr.GetDriverByName('ESRI Shapefile')
    if os.path.exists(outputBufferfn):
        shpdriver.DeleteDataSource(outputBufferfn)
    outputBufferds = shpdriver.CreateDataSource(outputBufferfn)
    bufferlyr = outputBufferds.CreateLayer(outputBufferfn, geom_type=ogr.wkbPolygon)
    featureDefn = bufferlyr.GetLayerDefn()
    for feature in inputlyr:
        ingeom = feature.GetGeometryRef()
        geomBuffer = ingeom.Buffer(bufferDist)
        outFeature = ogr.Feature(featureDefn)
        outFeature.SetGeometry(geomBuffer)
        bufferlyr.CreateFeature(outFeature)
        outFeature = None  # release the feature before creating the next one
def main(inputfn, outputBufferfn, bufferDist):
    """Thin wrapper so the buffering step can be driven from __main__."""
    createBuffer(inputfn, outputBufferfn, bufferDist)
if __name__ == "__Conifer Forest__":
inputfn = 'Conifer_Forest_Polygonied.shp'
outputBufferfn = '3km_Conifer_Forest_Polygonied.shp'
bufferDist = 3000.0
main ( inputfn, outputBufferfn, bufferDist )
# _____________________________________________________________________
# Select SACs and SPAs that are located within 3km buffer from coniferous forest
import numpy as np
from matplotlib.widgets import PolygonSelector
from matplotlib.path import Path
class SelectFromCollection:
    """
    Select indices from a matplotlib collection using `PolygonSelector`.

    Selected indices are saved in the `ind` attribute. This tool fades out the
    polygons that are not part of the selection (i.e., reduces their alpha
    values). If your collection has alpha < 1, this tool will permanently
    alter the alpha values.

    Note that this tool selects collection objects based on their *origins*
    (i.e., `offsets`).

    Parameters
    ----------
    ax : `~matplotlib.axes.Axes`
        Axes to interact with.
    collection : `matplotlib.collections.Collection` subclass
        Collection you want to select from.
    alpha_other : 0 <= float <= 1
        To highlight a selection, this tool sets all selected polygons to an
        alpha value of 1 and non-selected points to *alpha_other*.
    """

    def __init__(self, ax, collection, alpha_other=0.3):
        self.canvas = ax.figure.canvas
        self.collection = collection
        self.alpha_other = alpha_other
        self.xys = collection.get_offsets()
        self.Npts = len(self.xys)
        # Ensure that we have separate colors for each object
        self.fc = collection.get_facecolors()
        if len(self.fc) == 0:
            raise ValueError('Collection must have a facecolor')
        elif len(self.fc) == 1:
            # Single shared colour: replicate it so per-point alpha can differ.
            self.fc = np.tile(self.fc, (self.Npts, 1))
        self.poly = PolygonSelector(ax, self.onselect)
        self.ind = []

    def onselect(self, verts):
        """Polygon-complete callback: highlight the points inside the polygon."""
        path = Path(verts)
        self.ind = np.nonzero(path.contains_points(self.xys))[0]
        # Fade everything, then restore full alpha on the selected points.
        self.fc[:, -1] = self.alpha_other
        self.fc[self.ind, -1] = 1
        self.collection.set_facecolors(self.fc)
        self.canvas.draw_idle()

    def disconnect(self):
        """Detach the selector and restore full alpha on every point."""
        self.poly.disconnect_events()
        self.fc[:, -1] = 1
        self.collection.set_facecolors(self.fc)
        self.canvas.draw_idle()
if __name__ == '__main__':
    import matplotlib.pyplot as plt

    # Demo: scatter a 5x5 grid of points and let the user lasso a subset
    # with a polygon.
    fig, ax = plt.subplots()
    grid_size = 5
    grid_x = np.tile(np.arange(grid_size), grid_size)
    grid_y = np.repeat(np.arange(grid_size), grid_size)
    pts = ax.scatter(grid_x, grid_y)
    selector = SelectFromCollection(ax, pts)
    print("Select polygons in the figure by enclosing them within a polygon.")
    print("Press the 'esc' key to start a new polygon.")
    print("Try holding the 'shift' key to move all of the vertices.")
    print("Try holding the 'ctrl' key to move a single vertex.")
    plt.show()
    selector.disconnect()
# Congratulations, you are now finished coding________________________________________________________________________________________
# The print function allows us to print messages and information to the screen.
# fixed: adjacent string literals concatenate directly, so each line needs a
# trailing separator — the original ran the sentences together with no spaces.
print("Hello and welcome to my assignment for EGM722 - Programming for GIS and Remote Sensing. "
      "Designated Sites such as Special Areas of Conservation (SAC) and Special Protection Areas (SPA) "
      "ensure the adequate conservation of habitats where many of our plants and animals live. "
      "The following code will review SAC and SPA data.")
# First import geopandas and load the data:
import geopandas as gpd

sac_data = gpd.read_file(
    'C:\EGM_722\egm722\project\data_files/sac_ITM.shp')  # you will need to create your own file path here
print(sac_data.head())
spa_data = gpd.read_file(
    'C:\EGM_722\egm722\project\data_files/spa_ITM.shp')  # you will need to create your own file path here
print(spa_data.head())
# The data is stored in a table (a GeoDataFrame), much like the attribute table in ArcMap.
# Next, you can discover how many rows of each feature there is.
# This will display the numbers of SACs and SPAs in Northern Ireland.
rows, cols = sac_data.shape  # number of rows = count of SAC features in Northern Ireland
print('Number of SAC features: {}'.format(rows))
rows, cols = spa_data.shape  # number of rows = count of SPA features in Northern Ireland
print('Number of SPA features: {}'.format(rows))
# _______________________________________________________________________________________________________________________
# Convert a CSV file to a shapefile. Here Historical Land Use for Northern Ireland
# is loaded and converted into point geometries.
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point
import cartopy.crs as ccrs

df = pd.read_csv('C:\EGM_722\egm722\Project\Data_Files\Historical_Landuse_Dataset.csv')
df.head()  # this will let you look at the loaded DataFrame
# Each land use has only a single x/y coordinate, so build a Point per feature:
# first pair the columns with zip, then turn each pair into a shapely Point.
df['geometry'] = list(zip(df['x'], df['y']))  # zip is an iterator, so use list to create something pandas can use
df['geometry'] = df['geometry'].apply(Point)  # turn the coordinate pairs into Point objects
gdf = gpd.GeoDataFrame(df)
gdf.set_crs("EPSG:2157", inplace=True)  # EPSG:2157 = Irish Transverse Mercator
print(gdf)
gdf.to_file('Historical_Landuse_Dataset.shp')
# Writes the converted data out as a shapefile.
# _____________________________________________________________________________________________________________
# This allows the use of figures interactively
import geopandas as gpd
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from cartopy.feature import ShapelyFeature
plt.ion () # make the plotting interactive
# generate matplotlib handles to create a legend of the features we put in our map.
def generate_handles (labels, colors, edge='k', alpha=1):
    """Build one legend proxy (a unit Rectangle patch) per label.

    Colors are cycled with modulo, so fewer colors than labels is fine.
    The label values themselves are not used — only their count.
    """
    num_colors = len(colors)
    return [
        mpatches.Rectangle((0, 0), 1, 1,
                           facecolor=colors[idx % num_colors],
                           edgecolor=edge,
                           alpha=alpha)
        for idx in range(len(labels))
    ]
# create a scale bar of length 20 km in the upper right corner of the map
def scale_bar (ax, location=(0.92, 0.95)):
    """Draw a 20 km scale bar on *ax* at the given axes-fraction location.

    The bar is drawn in a local Transverse Mercator projection centred on
    the bar position so that its length is true-to-scale on the map.
    """
    # Map extent in lon/lat, used to find where the bar should sit.
    llx0, llx1, lly0, lly1 = ax.get_extent ( ccrs.PlateCarree () )
    sbllx = (llx1 + llx0) / 2
    sblly = lly0 + (lly1 - lly0) * location[1]
    # Local TM projection centred on the bar, so metre distances are accurate.
    tmc = ccrs.TransverseMercator ( sbllx, sblly )
    x0, x1, y0, y1 = ax.get_extent ( tmc )
    sbx = x0 + (x1 - x0) * location[0]
    sby = y0 + (y1 - y0) * location[1]
    # Black 20 km bar, with the 0-10 km half overdrawn in white.
    plt.plot ( [sbx, sbx - 20000], [sby, sby], color='k', linewidth=9, transform=tmc )
    plt.plot ( [sbx, sbx - 10000], [sby, sby], color='k', linewidth=6, transform=tmc )
    plt.plot ( [sbx - 10000, sbx - 20000], [sby, sby], color='w', linewidth=6, transform=tmc )
    # Tick labels just below the bar.
    plt.text ( sbx, sby - 4500, '20 km', transform=tmc, fontsize=8 )
    plt.text ( sbx - 12500, sby - 4500, '10 km', transform=tmc, fontsize=8 )
    plt.text ( sbx - 24500, sby - 4500, '0 km', transform=tmc, fontsize=8 )
# Most of the modules are now imported and a few helper functions defined,
# Now load the data. To load the shapefile data, use GeoPandas, an open-source package designed
# to make working with geospatial data in python easier
# load the datasets
outline = gpd.read_file ( 'C:\EGM_722\egm722\Project\data_files/NI_outline.shp' )
towns = gpd.read_file ( 'C:\EGM_722\egm722\Project\data_files/Towns.shp' )
water = gpd.read_file ( 'C:\EGM_722\egm722\Project\data_files/Water.shp' )
rivers = gpd.read_file ( 'C:\EGM_722\egm722\Project\data_files/Rivers.shp' )
counties = gpd.read_file ( 'C:\EGM_722\egm722\Project\data_files/Counties.shp' )
SACs = gpd.read_file ( 'C:\EGM_722\egm722\Project\data_files/sac_ITM.shp' )
SPAs = gpd.read_file ( 'C:\EGM_722\egm722\Project\data_files/spa_ITM.shp' )
# Create a figure of size 10x10 (representing the page size in inches)
myFig = plt.figure ( figsize=(10, 10) )
myCRS = ccrs.UTM ( 29 ) # Create a Universal Transverse Mercator reference system
ax = plt.axes ( projection=ccrs.Mercator () ) # Creates an axes object in the figure, using a Mercator
# projection, where that data will be plotted.
# Add the outline of Northern Ireland using cartopy's ShapelyFeature
outline_feature = ShapelyFeature ( outline['geometry'], myCRS, edgecolor='k', facecolor='w' )
xmin, ymin, xmax, ymax = outline.total_bounds
ax.add_feature ( outline_feature ) # add the features we've created to the map.
# using the boundary of the shapefile features, zoom the map to our area of interest
ax.set_extent ( [xmin, xmax, ymin, ymax], crs=myCRS ) # because total_bounds gives output as xmin, ymin, xmax, ymax,
# Here, set the edge color to be the same as the face color.
water_feat = ShapelyFeature ( water['geometry'], myCRS,
edgecolor='mediumblue',
facecolor='mediumblue',
linewidth=1 )
ax.add_feature ( water_feat )
river_feat = ShapelyFeature ( rivers['geometry'], myCRS,
edgecolor='royalblue',
linewidth=0.2 )
ax.add_feature ( river_feat )
SACs_feat = ShapelyFeature ( SACs['geometry'], myCRS,
edgecolor='darkorchid',
facecolor='darkorchid',
linewidth=0.5 )
ax.add_feature ( SACs_feat )
SPAs_feat = ShapelyFeature ( SPAs['geometry'], myCRS,
edgecolor='fuchsia',
facecolor='fuchsia',
linewidth=0.5 )
ax.add_feature ( SPAs_feat )
# ShapelyFeature creates a polygon, so for point data we can just use ax.plot()
myFig # to show the updated figure
town_handle = ax.plot ( towns.geometry.x, towns.geometry.y, 's', color='0.5', ms=3, transform=myCRS )
# note: if you change the color you use to display lakes, you'll want to change it here, too
water_handle = generate_handles ( ['Lakes'], ['mediumblue'] )
# note: if you change the color you use to display rivers, you'll want to change it here, too
river_handle = [mlines.Line2D ( [], [], color='royalblue' )] # have to make this a list
# get a list of unique names for the county boundaries
county_names = list ( counties.CountyName.unique () )
county_names.sort () # sort the counties alphabetically by name
# update county_names to take it out of uppercase text
nice_names = [name.title () for name in county_names]
# generate a list of handles for the county datasets
county_colors = ['k']
county_handles = generate_handles ( counties.CountyName.unique (), county_colors, alpha=0.25 )
# generate handles for SPAs
spa_handle = [mlines.Line2D ( [], [], color='fuchsia' )]
sac_handle = [mlines.Line2D ( [], [], color='orchid' )]
# ax.legend() takes a list of handles and a list of labels corresponding to the objects you want to add to the legend
handles = county_handles + water_handle + river_handle + town_handle + sac_handle + spa_handle
labels = nice_names + ['Lakes', 'Rivers', 'Towns', 'Special Areas of Conservation', 'Special Protection Areas']
leg = ax.legend ( handles, labels, title='Legend', title_fontsize=4,
fontsize=2, loc='upper left', frameon=True, framealpha=1 )
gridlines = ax.gridlines ( draw_labels=True,
xlocs=[-8, -7.5, -7, -6.5, -6, -5.5],
ylocs=[54, 54.5, 55, 55.5] )
gridlines.left_labels = False
gridlines.bottom_labels = False
ax.set_extent ( [xmin, xmax, ymin, ymax], crs=myCRS )
# add the text labels for the towns
for i, row in towns.iterrows ():
x, y = row.geometry.x, row.geometry.y
plt.text ( x, y, row['TOWN_NAME'].title (), fontsize=4, transform=myCRS ) # use plt.text to place a label at x,y
myFig.savefig ( 'map.png', bbox_inches='tight', dpi=300 )
# _____________________________________________________________________________________________________
# You need to get the conifer forestry from the raster layer
# and convert it to a shapefile as there is no shapefile data
# avaialable for forestry in Northern Ireland
import rasterio as rio
import matplotlib.pyplot as plt
plt.rcParams.update ( {'font.size': 22} ) # update the font size for our plots to be size 22
# open the land cover raster and read the data
with rio.open ( 'C:\EGM_722\egm722\Week5\data_files/LCM2015_Aggregate_100m.tif' ) as dataset:
xmin, ymin, xmax, ymax = dataset.bounds
crs = dataset.crs
landcover = dataset.read ( 1 )
affine_tfm = dataset.transform
# Polygonize a raster using Geospatial Data Abstraction Library (GDAL)
from osgeo import gdal, ogr
import sys
# This allows GDAL to throw Python Exceptions
gdal.UseExceptions ()
# Get raster datasource
src = 'src_LCM2015_Aggregate_100m.tiff'
src_ds = gdal.Open ( "C:\EGM_722\egm722\Project\Data_Files\LCM2015_Aggregate_100m.tif" )
if src_ds is None:
print ( 'Unable to open {}'.format ( 'src_filename' ) )
sys.exit ( 1 )
try:
srcband = src_ds.GetRasterBand ( 3 )
except RuntimeError as e:
# for example, try GetRasterBand(2)
print ( 'Band ( %i ) not found' )
print ( e )
sys.exit ( 1 )
# Create output datasource
dst_layername = "C:\EGM_722\egm722\Project\Data_Files\Conifer_Forest_Polygonized"
drv = ogr.GetDriverByName ( "ESRI Shapefile" )
dst_ds = drv.CreateDataSource ( dst_layername + "Conifer_Forest_Polygonized.shp" )
dst_layer = dst_ds.CreateLayer ( dst_layername, srs=None )
gdal.Polygonize ( srcband, None, dst_layer, -1, [], callback=None )
# _____________________________________________________________________________________________________________
# Create a buffer from polygonized features
import ogr, os
def createBuffer (inputfn, outputBufferfn, bufferDist):
    """Buffer every feature of *inputfn* by *bufferDist* (CRS units) and
    write the buffered polygons to the shapefile *outputBufferfn*.
    """
    inputds = ogr.Open ( inputfn )
    inputlyr = inputds.GetLayer ()
    # BUG FIX: GetDriverByName takes an OGR driver name, not a file path.
    # The original passed a .shp path, which returns None and crashes on
    # the CreateDataSource call below.
    shpdriver = ogr.GetDriverByName ( 'ESRI Shapefile' )
    if os.path.exists ( outputBufferfn ):
        shpdriver.DeleteDataSource ( outputBufferfn )
    outputBufferds = shpdriver.CreateDataSource ( outputBufferfn )
    bufferlyr = outputBufferds.CreateLayer ( outputBufferfn, geom_type=ogr.wkbPolygon )
    featureDefn = bufferlyr.GetLayerDefn ()
    for feature in inputlyr:
        ingeom = feature.GetGeometryRef ()
        geomBuffer = ingeom.Buffer ( bufferDist )
        outFeature = ogr.Feature ( featureDefn )
        outFeature.SetGeometry ( geomBuffer )
        bufferlyr.CreateFeature ( outFeature )
        outFeature = None
    # Release the datasources so OGR flushes the new shapefile to disk.
    outputBufferds = None
    inputds = None
def main (inputfn, outputBufferfn, bufferDist):
    # Thin wrapper so the buffering step can be driven from the script guard.
    createBuffer ( inputfn, outputBufferfn, bufferDist )
# BUG FIX: the guard compared against "__Conifer Forest__", which __name__
# can never equal, so the buffering step never ran when the script executed.
if __name__ == "__main__":
    # BUG FIX: filenames were misspelled ('Polygonied' -> 'Polygonized').
    # NOTE(review): confirm this matches the path actually written by the
    # polygonize step above, which concatenates the layer name into the path.
    inputfn = 'Conifer_Forest_Polygonized.shp'
    outputBufferfn = '3km_Conifer_Forest_Polygonized.shp'
    bufferDist = 3000.0  # buffer distance in CRS units (metres for ITM)
    main ( inputfn, outputBufferfn, bufferDist )
# _____________________________________________________________________
# Select SACs and SPAs that are located within 3km buffer from coniferous forest
import numpy as np
from matplotlib.widgets import PolygonSelector
from matplotlib.path import Path
class SelectFromCollection:
    """
    Select indices from a matplotlib collection using `PolygonSelector`.
    Selected indices are saved in the `ind` attribute. This tool fades out the
    polygons that are not part of the selection (i.e., reduces their alpha
    values). If your collection has alpha < 1, this tool will permanently
    alter the alpha values.
    Note that this tool selects collection objects based on their *origins*
    (i.e., `offsets`).
    Parameters
    ----------
    ax : `~matplotlib.axes.Axes`
        Axes to interact with.
    collection : `matplotlib.collections.Collection` subclass
        Collection you want to select from.
    alpha_other : 0 <= float <= 1
        To highlight a selection, this tool sets all selected polygons to an
        alpha value of 1 and non-selected points to *alpha_other*.
    """
    def __init__ (self, ax, collection, alpha_other=0.3):
        self.canvas = ax.figure.canvas
        self.collection = collection
        self.alpha_other = alpha_other
        # Point origins (offsets); polygon containment is tested against these.
        self.xys = collection.get_offsets ()
        self.Npts = len ( self.xys )
        # Ensure that we have separate colors for each object
        self.fc = collection.get_facecolors ()
        if len ( self.fc ) == 0:
            raise ValueError ( 'Collection must have a facecolor' )
        elif len ( self.fc ) == 1:
            # A single shared colour: replicate it per point so each point's
            # alpha channel can be changed independently.
            self.fc = np.tile ( self.fc, (self.Npts, 1) )
        self.poly = PolygonSelector ( ax, self.onselect )
        self.ind = []  # indices of the currently selected points
    def onselect (self, verts):
        # Callback invoked by PolygonSelector with the drawn polygon vertices.
        path = Path ( verts )
        self.ind = np.nonzero ( path.contains_points ( self.xys ) )[0]
        # Fade every point, then restore full alpha on the selected ones.
        self.fc[:, -1] = self.alpha_other
        self.fc[self.ind, -1] = 1
        self.collection.set_facecolors ( self.fc )
        self.canvas.draw_idle ()
    def disconnect (self):
        # Detach the selector's event handlers and restore full alpha.
        self.poly.disconnect_events ()
        self.fc[:, -1] = 1
        self.collection.set_facecolors ( self.fc )
        self.canvas.draw_idle ()
if __name__ == '__main__':
import matplotlib.pyplot as plt
fig, ax = plt.subplots ()
grid_size = 5
grid_x = np.tile ( np.arange ( grid_size ), grid_size )
grid_y = np.repeat ( np.arange ( grid_size ), grid_size )
pts = ax.scatter ( grid_x, grid_y )
selector = SelectFromCollection ( ax, pts )
print ( "Select polygons in the figure by enclosing them within a polygon." )
print ( "Press the 'esc' key to start a new polygon." )
print ( "Try holding the 'shift' key to move all of the vertices." )
print ( "Try holding the 'ctrl' key to move a single vertex." )
plt.show ()
selector.disconnect ()
# Congratulations, you are now finished coding________________________________________________________________________________________
| en | 0.753696 | # The print function allows us to print messages and information to the screen # First import geopandas and load the data: # you will need to create your own file path here # you will need to create your own file path here # The data is stored in a table (a GeoDataFrame), much like the attribute table in ArcMap. # Next, you can discover how many rows of each feature there is. # This will display the numbers of SACs and SPAs in Northern Ireland # get the number of rows in the table, # this gives you the count of the SAC features in Northern Ireland # get the number of rows in the table, # this gives you the count of the SPA features in Northern Ireland # _______________________________________________________________________________________________________________________ # Convert csv file to shapefiles. Here Historical Land Use for Northern Ireland will be investigated and # converted into a shapefile # this will let you look at the loaded DataFrame # You only have point information (a single Lat/Lon coordinate) for each land use, # so it makes sense to create a Point object for each feature using that point. # Do this by first using the python built-in zip, # then the apply method of the DataFrame to create a point object from the list of coordinates. # Zip is an iterator, so use list to create # something that pandas can use. # using the 'apply' method of the dataframe, # turn the coordinates column # into points using the x, y coordinates # This sets the coordinate reference system to epsg:2157, # Irish Transverse Mercator lat/lon # Writes the csv into to a shapefile # _____________________________________________________________________________________________________________ # This allows the use of figures interactively # make the plotting interactive # generate matplotlib handles to create a legend of the features we put in our map. 
# get the length of the color list # create a scale bar of length 20 km in the upper right corner of the map # Most of the modules are now imported and a few helper functions defined, # Now load the data. To load the shapefile data, use GeoPandas, an open-source package designed # to make working with geospatial data in python easier # load the datasets # Create a figure of size 10x10 (representing the page size in inches) # Create a Universal Transverse Mercator reference system # Creates an axes object in the figure, using a Mercator # projection, where that data will be plotted. # Add the outline of Northern Ireland using cartopy's ShapelyFeature # add the features we've created to the map. # using the boundary of the shapefile features, zoom the map to our area of interest # because total_bounds gives output as xmin, ymin, xmax, ymax, # Here, set the edge color to be the same as the face color. # ShapelyFeature creates a polygon, so for point data we can just use ax.plot() # to show the updated figure # note: if you change the color you use to display lakes, you'll want to change it here, too # note: if you change the color you use to display rivers, you'll want to change it here, too # have to make this a list # get a list of unique names for the county boundaries # sort the counties alphabetically by name # update county_names to take it out of uppercase text # generate a list of handles for the county datasets # generate handles for SPAs # ax.legend() takes a list of handles and a list of labels corresponding to the objects you want to add to the legend # add the text labels for the towns # use plt.text to place a label at x,y # _____________________________________________________________________________________________________ # You need to get the conifer forestry from the raster layer # and convert it to a shapefile as there is no shapefile data # avaialable for forestry in Northern Ireland # update the font size for our plots to be size 22 # open the 
land cover raster and read the data # Polygonize a raster using Geospatial Data Abstraction Library (GDAL) # This allows GDAL to throw Python Exceptions # Get raster datasource # for example, try GetRasterBand(2) # Create output datasource # _____________________________________________________________________________________________________________ # Create a buffer from polygonized features # _____________________________________________________________________ # Select SACs and SPAs that are located within 3km buffer from coniferous forest Select indices from a matplotlib collection using `PolygonSelector`. Selected indices are saved in the `ind` attribute. This tool fades out the polygons that are not part of the selection (i.e., reduces their alpha values). If your collection has alpha < 1, this tool will permanently alter the alpha values. Note that this tool selects collection objects based on their *origins* (i.e., `offsets`). Parameters ---------- ax : `~matplotlib.axes.Axes` Axes to interact with. collection : `matplotlib.collections.Collection` subclass Collection you want to select from. alpha_other : 0 <= float <= 1 To highlight a selection, this tool sets all selected polygons to an alpha value of 1 and non-selected points to *alpha_other*. # Ensure that we have separate colors for each object # Congratulations, you are now finished coding________________________________________________________________________________________ | 3.975613 | 4 |
misago/misago/users/online/tracker.py | vascoalramos/misago-deployment | 2 | 6621329 | <gh_stars>1-10
from django.utils import timezone
from rest_framework.request import Request
from ..models import Online
def mute_tracker(request):
    """Disable online tracking for this request by clearing its tracker slot."""
    setattr(request, "_misago_online_tracker", None)
def start_tracking(request, user):
    # Create a fresh Online row for this user and attach it to both the
    # user object and the request, so later middleware can update it.
    online_tracker = Online.objects.create(user=user)
    request.user.online_tracker = online_tracker
    request._misago_online_tracker = online_tracker
def update_tracker(request, tracker):
    # Record activity by bumping the tracker's last-click timestamp only.
    tracker.last_click = timezone.now()
    tracker.save(update_fields=["last_click"])
def stop_tracking(request, tracker):
    # Persist the user's last seen time as last_login, then remove the
    # Online row — the user is no longer considered online.
    user = tracker.user
    user.last_login = tracker.last_click
    user.save(update_fields=["last_login"])
    tracker.delete()
def clear_tracking(request):
    # Clear the tracker slot on the underlying Django request object.
    if isinstance(request, Request):
        request = request._request  # Fugly unwrap restframework's request
    request._misago_online_tracker = None
| from django.utils import timezone
from rest_framework.request import Request
from ..models import Online
def mute_tracker(request):
request._misago_online_tracker = None
def start_tracking(request, user):
online_tracker = Online.objects.create(user=user)
request.user.online_tracker = online_tracker
request._misago_online_tracker = online_tracker
def update_tracker(request, tracker):
tracker.last_click = timezone.now()
tracker.save(update_fields=["last_click"])
def stop_tracking(request, tracker):
user = tracker.user
user.last_login = tracker.last_click
user.save(update_fields=["last_login"])
tracker.delete()
def clear_tracking(request):
if isinstance(request, Request):
request = request._request # Fugly unwrap restframework's request
request._misago_online_tracker = None | en | 0.539981 | # Fugly unwrap restframework's request | 1.922247 | 2 |
bright/apigen/tests/test_cythongen.py | bright-dev/bright | 3 | 6621330 | <reponame>bright-dev/bright<gh_stars>1-10
from bright.apigen import typesystem as ts
from bright.apigen import cythongen as cg
from nose.tools import assert_equal
toaster_desc = {
'name': 'Toaster',
'header_filename': 'toaster.h',
'cpppxd_filename': 'cpp_toaster.pxd',
'namespace': 'bright',
'docstrings': {
'module': "I am the Toaster lib! Hear me sizzle!",
'class': "I am the Toaster! FORKS DO NOT GO IN ME!",
'attrs': {
'toastiness': "white as snow or black as hell?",
'rate': "The rate at which the toaster can process slices.",
},
'methods': {
'make_toast': "I'll make you some toast you can't refuse...",
},
},
'parents': ['FCComp'],
'attrs': {
'nslices': 'uint',
'toastiness': 'str',
'rate': 'float',
},
'methods': {
('Toaster',): None,
('~Toaster',): None,
('make_toast', ('when', 'str'), ('nslices', 'uint', 1)): 'int',
},
}
exp_cpppxd = cg.AUTOGEN_WARNING + \
"""from libcpp.string cimport string as std_string
from pyne cimport extra_types
cdef extern from "toaster.h" namespace "bright":
cdef cppclass Toaster(FCComp):
# constructors
Toaster() except +
~Toaster() except +
# attributes
extra_types.uint nslices
double rate
std_string toastiness
# methods
int make_toast(std_string) except +
int make_toast(std_string, extra_types.uint) except +
"""
def test_gencpppxd():
    # gencpppxd output for the Toaster description must match the expected
    # .pxd text line-for-line (lengths compared first for a clearer failure).
    obs = cg.gencpppxd(toaster_desc).splitlines()
    exp = exp_cpppxd.splitlines()
    assert_equal(len(obs), len(exp))
    for o, e in zip(obs, exp):
        assert_equal(o, e)
exp_pxd = cg.AUTOGEN_WARNING + \
"""cimport cpp_toaster
cimport fccomp
cdef class Toaster(fccomp.FCComp):
cdef cpp_toaster.Toaster * _inst
cdef public bint _free_inst
"""
def test_genpxd():
    """genpxd output for the Toaster description matches the expected .pxd."""
    ts.register_class('FCComp',
                      cython_c_type='cpp_fccomp.FCComp',
                      cython_cimport='cpp_fccomp',
                      cython_cy_type='fccomp.FCComp',
                      cython_cyimport='fccomp')
    try:
        obs = cg.genpxd(toaster_desc).splitlines()
    finally:
        # ROBUSTNESS FIX: deregister even if genpxd raises, so a failure
        # here cannot leak registry state into later tests.
        ts.deregister_class('FCComp')
    exp = exp_pxd.splitlines()
    assert_equal(len(obs), len(exp))
    for o, e in zip(obs, exp):
        assert_equal(o, e)
exp_pyx = cg.AUTOGEN_WARNING + \
'''"""I am the Toaster lib! Hear me sizzle!
"""
from libcpp.string cimport string as std_string
from pyne cimport extra_types
cdef class Toaster(fccomp.FCComp):
"""I am the Toaster! FORKS DO NOT GO IN ME!"""
# constuctors
def __cinit__(self, *args, **kwargs):
self._inst = NULL
self._free_inst = True
def __init__(self, *args, **kwargs):
""""""
self._inst = new cpp_toaster.Toaster()
def __dealloc__(self):
if self._free_inst:
free(self._inst)
# attributes
property nslices:
"""no docstring for nslices, please file a bug report!"""
def __get__(self):
return int(self._inst.nslices)
def __set__(self, value):
self._inst.nslices = <extra_types.uint> long(value)
property rate:
"""The rate at which the toaster can process slices."""
def __get__(self):
return float(self._inst.rate)
def __set__(self, value):
self._inst.rate = <double> value
property toastiness:
"""white as snow or black as hell?"""
def __get__(self):
return str(<char *> self._inst.toastiness.c_str())
def __set__(self, value):
self._inst.toastiness = std_string(<char *> value)
# methods
def make_toast(self, when, nslices=1):
"""I'll make you some toast you can't refuse..."""
cdef int rtnval
rtnval = self._inst.make_toast(std_string(<char *> when), <extra_types.uint> long(nslices))
return int(rtnval)
'''
def test_genpyx():
    """genpyx output for the Toaster description matches the expected .pyx."""
    ts.register_class('FCComp',
                      cython_c_type='cpp_fccomp.FCComp',
                      cython_cimport='cpp_fccomp',
                      cython_cy_type='fccomp.FCComp',
                      cython_cyimport='fccomp')
    try:
        obs = cg.genpyx(toaster_desc).splitlines()
    finally:
        # ROBUSTNESS FIX: deregister even if genpyx raises, so a failure
        # here cannot leak registry state into later tests.
        ts.deregister_class('FCComp')
    exp = exp_pyx.splitlines()
    assert_equal(len(obs), len(exp))
    for o, e in zip(obs, exp):
        assert_equal(o, e)
| from bright.apigen import typesystem as ts
from bright.apigen import cythongen as cg
from nose.tools import assert_equal
toaster_desc = {
'name': 'Toaster',
'header_filename': 'toaster.h',
'cpppxd_filename': 'cpp_toaster.pxd',
'namespace': 'bright',
'docstrings': {
'module': "I am the Toaster lib! Hear me sizzle!",
'class': "I am the Toaster! FORKS DO NOT GO IN ME!",
'attrs': {
'toastiness': "white as snow or black as hell?",
'rate': "The rate at which the toaster can process slices.",
},
'methods': {
'make_toast': "I'll make you some toast you can't refuse...",
},
},
'parents': ['FCComp'],
'attrs': {
'nslices': 'uint',
'toastiness': 'str',
'rate': 'float',
},
'methods': {
('Toaster',): None,
('~Toaster',): None,
('make_toast', ('when', 'str'), ('nslices', 'uint', 1)): 'int',
},
}
exp_cpppxd = cg.AUTOGEN_WARNING + \
"""from libcpp.string cimport string as std_string
from pyne cimport extra_types
cdef extern from "toaster.h" namespace "bright":
cdef cppclass Toaster(FCComp):
# constructors
Toaster() except +
~Toaster() except +
# attributes
extra_types.uint nslices
double rate
std_string toastiness
# methods
int make_toast(std_string) except +
int make_toast(std_string, extra_types.uint) except +
"""
def test_gencpppxd():
obs = cg.gencpppxd(toaster_desc).splitlines()
exp = exp_cpppxd.splitlines()
assert_equal(len(obs), len(exp))
for o, e in zip(obs, exp):
assert_equal(o, e)
exp_pxd = cg.AUTOGEN_WARNING + \
"""cimport cpp_toaster
cimport fccomp
cdef class Toaster(fccomp.FCComp):
cdef cpp_toaster.Toaster * _inst
cdef public bint _free_inst
"""
def test_genpxd():
ts.register_class('FCComp',
cython_c_type='cpp_fccomp.FCComp',
cython_cimport='cpp_fccomp',
cython_cy_type='fccomp.FCComp',
cython_cyimport='fccomp')
obs = cg.genpxd(toaster_desc).splitlines()
ts.deregister_class('FCComp')
exp = exp_pxd.splitlines()
assert_equal(len(obs), len(exp))
for o, e in zip(obs, exp):
assert_equal(o, e)
exp_pyx = cg.AUTOGEN_WARNING + \
'''"""I am the Toaster lib! Hear me sizzle!
"""
from libcpp.string cimport string as std_string
from pyne cimport extra_types
cdef class Toaster(fccomp.FCComp):
"""I am the Toaster! FORKS DO NOT GO IN ME!"""
# constuctors
def __cinit__(self, *args, **kwargs):
self._inst = NULL
self._free_inst = True
def __init__(self, *args, **kwargs):
""""""
self._inst = new cpp_toaster.Toaster()
def __dealloc__(self):
if self._free_inst:
free(self._inst)
# attributes
property nslices:
"""no docstring for nslices, please file a bug report!"""
def __get__(self):
return int(self._inst.nslices)
def __set__(self, value):
self._inst.nslices = <extra_types.uint> long(value)
property rate:
"""The rate at which the toaster can process slices."""
def __get__(self):
return float(self._inst.rate)
def __set__(self, value):
self._inst.rate = <double> value
property toastiness:
"""white as snow or black as hell?"""
def __get__(self):
return str(<char *> self._inst.toastiness.c_str())
def __set__(self, value):
self._inst.toastiness = std_string(<char *> value)
# methods
def make_toast(self, when, nslices=1):
"""I'll make you some toast you can't refuse..."""
cdef int rtnval
rtnval = self._inst.make_toast(std_string(<char *> when), <extra_types.uint> long(nslices))
return int(rtnval)
'''
def test_genpyx():
ts.register_class('FCComp',
cython_c_type='cpp_fccomp.FCComp',
cython_cimport='cpp_fccomp',
cython_cy_type='fccomp.FCComp',
cython_cyimport='fccomp')
obs = cg.genpyx(toaster_desc).splitlines()
ts.deregister_class('FCComp')
#print "\n".join(obs)
exp = exp_pyx.splitlines()
assert_equal(len(obs), len(exp))
for o, e in zip(obs, exp):
assert_equal(o, e) | en | 0.452442 | from libcpp.string cimport string as std_string from pyne cimport extra_types cdef extern from "toaster.h" namespace "bright": cdef cppclass Toaster(FCComp): # constructors Toaster() except + ~Toaster() except + # attributes extra_types.uint nslices double rate std_string toastiness # methods int make_toast(std_string) except + int make_toast(std_string, extra_types.uint) except + cimport cpp_toaster cimport fccomp cdef class Toaster(fccomp.FCComp): cdef cpp_toaster.Toaster * _inst cdef public bint _free_inst """I am the Toaster lib! Hear me sizzle! """ from libcpp.string cimport string as std_string from pyne cimport extra_types cdef class Toaster(fccomp.FCComp): """I am the Toaster! FORKS DO NOT GO IN ME!""" # constuctors def __cinit__(self, *args, **kwargs): self._inst = NULL self._free_inst = True def __init__(self, *args, **kwargs): """""" self._inst = new cpp_toaster.Toaster() def __dealloc__(self): if self._free_inst: free(self._inst) # attributes property nslices: """no docstring for nslices, please file a bug report!""" def __get__(self): return int(self._inst.nslices) def __set__(self, value): self._inst.nslices = <extra_types.uint> long(value) property rate: """The rate at which the toaster can process slices.""" def __get__(self): return float(self._inst.rate) def __set__(self, value): self._inst.rate = <double> value property toastiness: """white as snow or black as hell?""" def __get__(self): return str(<char *> self._inst.toastiness.c_str()) def __set__(self, value): self._inst.toastiness = std_string(<char *> value) # methods def make_toast(self, when, nslices=1): """I'll make you some toast you can't refuse...""" cdef int rtnval rtnval = self._inst.make_toast(std_string(<char *> when), <extra_types.uint> long(nslices)) return int(rtnval) #print "\n".join(obs) | 2.053107 | 2 |
cleanup_garbage.py | nilleb/adb-expertise-locator | 0 | 6621331 | <reponame>nilleb/adb-expertise-locator
import os
from common.folder_processor import DEFAULT_FOLDERS, FolderProcessor
from common.io import read_object
def process_file(filepath):
    """Delete a *.metadata.json file whose companion file is gone and whose
    metadata carries no 'pages' entry."""
    companion = filepath.replace('.metadata.json', '')
    if os.path.exists(companion):
        return
    metadata = read_object(filepath)
    if not metadata.get('pages'):
        os.unlink(filepath)
if __name__ == "__main__":
FolderProcessor(DEFAULT_FOLDERS, "*.metadata.json", process_file).process_folders()
| import os
from common.folder_processor import DEFAULT_FOLDERS, FolderProcessor
from common.io import read_object
def process_file(filepath):
if not os.path.exists(filepath.replace('.metadata.json', '')):
metadata = read_object(filepath)
if not metadata.get('pages'):
os.unlink(filepath)
if __name__ == "__main__":
FolderProcessor(DEFAULT_FOLDERS, "*.metadata.json", process_file).process_folders() | none | 1 | 2.268257 | 2 | |
graphs/models/onedcnn.py | jkrishnan95v/Signal_detector | 2 | 6621332 | <filename>graphs/models/onedcnn.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 4 11:23:27 2021
@author: jay
"""
import config
import torch.nn as nn
class OneDCNN(nn.Module):
    """1-D CNN classifier: five convolutional stages followed by a linear head.

    The forward pass reshapes each sample to (batch, 1, width) before the
    first convolution, so inputs may be passed as flat vectors.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.num_classes = self.config.num_classes

        # Every stage is Conv1d(k=3) -> BatchNorm1d -> ReLU, optionally
        # followed by MaxPool1d(3, stride=3) and Dropout.  The per-stage
        # channel/stride/padding choices reproduce the original layout.
        self.conv1 = self._make_stage(1, 128, stride=3, padding=0, pool=False)
        self.conv2 = self._make_stage(128, 128, stride=1, padding=2, pool=True)
        self.conv3 = self._make_stage(128, 128, stride=1, padding=1, pool=True)
        self.conv4 = self._make_stage(128, 256, stride=1, padding=0, pool=True,
                                      dropout=config.DROPOUT)
        self.conv5 = self._make_stage(256, 128, stride=1, padding=1, pool=True)

        # One logit per signal class being classified.
        self.fc = nn.Linear(128, self.config.num_classes)
        # Kept for parity with the original module; forward() returns raw
        # logits and does not apply it.
        self.activation = nn.Sigmoid()

    @staticmethod
    def _make_stage(in_ch, out_ch, *, stride, padding, pool, dropout=None):
        # Assemble one conv stage; layer order matches the original
        # Sequential blocks so state_dict keys are unchanged.
        layers = [
            nn.Conv1d(in_ch, out_ch, kernel_size=3, stride=stride, padding=padding),
            nn.BatchNorm1d(out_ch),
            nn.ReLU(),
        ]
        if pool:
            layers.append(nn.MaxPool1d(3, stride=3))
        if dropout is not None:
            layers.append(nn.Dropout(dropout))
        return nn.Sequential(*layers)

    def forward(self, x):
        # Conv1d expects (batch, channels, width); treat the input as a
        # single-channel signal.
        out = x.view(x.shape[0], 1, -1)
        for stage in (self.conv1, self.conv2, self.conv3, self.conv4, self.conv5):
            out = stage(out)
        # Collapse (batch, channels, width) to (batch, features) for the head.
        out = out.view(out.shape[0], out.size(1) * out.size(2))
        return self.fc(out)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 4 11:23:27 2021
@author: jay
"""
import config
import torch.nn as nn
class OneDCNN(nn.Module):
def __init__(self,config):
super().__init__()
self.config = config
self.num_classes = self.config.num_classes
# 420 x 1
self.conv1 = nn.Sequential(
nn.Conv1d(1, 128, kernel_size=3, stride=3, padding=0),
nn.BatchNorm1d(128),
nn.ReLU())
# 140 x 128
self.conv2 = nn.Sequential(
nn.Conv1d(128, 128, kernel_size=3, stride=1, padding=2),
nn.BatchNorm1d(128),
nn.ReLU(),
nn.MaxPool1d(3, stride=3))
# 48 x 128
self.conv3 = nn.Sequential(
nn.Conv1d(128, 128, kernel_size=3, stride=1, padding=1),
nn.BatchNorm1d(128),
nn.ReLU(),
nn.MaxPool1d(3,stride=3))
# 16 x 128
self.conv4 = nn.Sequential(
nn.Conv1d(128, 256, kernel_size=3, stride=1, padding=0),
nn.BatchNorm1d(256),
nn.ReLU(),
nn.MaxPool1d(3,stride=3),
nn.Dropout(config.DROPOUT))
# 5 x 256
self.conv5 = nn.Sequential(
nn.Conv1d(256, 128, kernel_size=3, stride=1, padding=1),
nn.BatchNorm1d(128),
nn.ReLU(),
nn.MaxPool1d(3,stride=3))
# 1 x 512
self.fc = nn.Linear(128, self.config.num_classes)
#Keep numnber of output layers to number of signals being classified
self.activation = nn.Sigmoid()
def forward(self, x):
# input x : 23 x 59049 x 1
# expected conv1d input : minibatch_size x num_channel x width
x = x.view(x.shape[0], 1,-1)
# x : 23 x 1 x 59049
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
out = self.conv4(out)
out = self.conv5(out)
out = out.view(x.shape[0], out.size(1) * out.size(2))
logit = self.fc(out)
#logit = self.activation(logit)
return logit | en | 0.486091 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Fri Jun 4 11:23:27 2021 @author: jay # 420 x 1 # 140 x 128 # 48 x 128 # 16 x 128 # 5 x 256 # 1 x 512 #Keep numnber of output layers to number of signals being classified # input x : 23 x 59049 x 1 # expected conv1d input : minibatch_size x num_channel x width # x : 23 x 1 x 59049 #logit = self.activation(logit) | 2.592311 | 3 |
python/src/problem042.py | arturh85/projecteuler | 3 | 6621333 | <reponame>arturh85/projecteuler<gh_stars>1-10
'''
Problem 42
25 April 2003
The nth term of the sequence of triangle numbers is given by, tn = 1/2 n(n+1); so the first ten triangle numbers are:
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
By converting each letter in a word to a number corresponding to its alphabetical position and adding these values
we form a word value. For example, the word value for SKY is 19 + 11 + 25 = 55 = t10. If the word value is a
triangle number then we shall call the word a triangle word.
Using words.txt (right click and 'Save Link/Target As...'), a 16K text file containing nearly two-thousand common
English words, how many are triangle words?
----------------------------------------------------------
Created on 30.01.2015
@author: ahallmann
'''
import unittest
import math
import timeit
from problem022 import char_value
from problem022 import word_value
def triangle_numbers_at(n):
return 1.0 / 2.0 * n * (n + 1)
def generate_numbers(func):
i = 1
while True:
value = func(i)
yield value
i += 1
def generate_triangle_numbers():
return generate_numbers(triangle_numbers_at)
is_number_cache = {}
def is_number(func, cache_name, n):
global is_number_cache
if cache_name not in is_number_cache:
is_number_cache[cache_name] = {
"max": 0,
"list": []
}
if is_number_cache[cache_name]["max"] < n:
while True:
is_number_cache[cache_name]["max"] += 1
value = func(is_number_cache[cache_name]["max"])
is_number_cache[cache_name]["list"].append(value)
if value > n:
break
return n in is_number_cache[cache_name]["list"]
def is_triangle_number(n):
h = (math.sqrt(8*n+1)-1.0)/2.0
return math.floor(h) == h
def read_words(filename):
f = open(filename, 'r')
words = []
for line in f.readlines():
words = line[1:-1].split('","')
f.close()
return words
def solve():
words = read_words("data/problem042.txt")
word_values = map(word_value, words)
triangle_numbers = filter(is_triangle_number, word_values)
return len(triangle_numbers)
class Test(unittest.TestCase):
def test_sample(self):
self.assertEqual(1.0, triangle_numbers_at(1.0))
self.assertEqual(3.0, triangle_numbers_at(2.0))
self.assertEqual(6.0, triangle_numbers_at(3.0))
self.assertEqual(55, word_value('SKY'))
self.assertEqual(19, char_value('S'))
self.assertEqual(11, char_value('K'))
self.assertEqual(25, char_value('Y'))
pass
def test_answer(self):
self.assertEqual(162, solve())
pass
# -----------------------------------------
def run():
return solve()
if __name__ == '__main__':
run()
unittest.main()
# if __name__ == '__main__':
# t = timeit.Timer("run()", "from __main__ import run")
# count = 1
# print(str(t.timeit(count)) + " seconds for " + str(count) + " runs")
| '''
Problem 42
25 April 2003
The nth term of the sequence of triangle numbers is given by, tn = 1/2 n(n+1); so the first ten triangle numbers are:
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
By converting each letter in a word to a number corresponding to its alphabetical position and adding these values
we form a word value. For example, the word value for SKY is 19 + 11 + 25 = 55 = t10. If the word value is a
triangle number then we shall call the word a triangle word.
Using words.txt (right click and 'Save Link/Target As...'), a 16K text file containing nearly two-thousand common
English words, how many are triangle words?
----------------------------------------------------------
Created on 30.01.2015
@author: ahallmann
'''
import unittest
import math
import timeit
from problem022 import char_value
from problem022 import word_value
def triangle_numbers_at(n):
return 1.0 / 2.0 * n * (n + 1)
def generate_numbers(func):
i = 1
while True:
value = func(i)
yield value
i += 1
def generate_triangle_numbers():
return generate_numbers(triangle_numbers_at)
is_number_cache = {}
def is_number(func, cache_name, n):
global is_number_cache
if cache_name not in is_number_cache:
is_number_cache[cache_name] = {
"max": 0,
"list": []
}
if is_number_cache[cache_name]["max"] < n:
while True:
is_number_cache[cache_name]["max"] += 1
value = func(is_number_cache[cache_name]["max"])
is_number_cache[cache_name]["list"].append(value)
if value > n:
break
return n in is_number_cache[cache_name]["list"]
def is_triangle_number(n):
h = (math.sqrt(8*n+1)-1.0)/2.0
return math.floor(h) == h
def read_words(filename):
f = open(filename, 'r')
words = []
for line in f.readlines():
words = line[1:-1].split('","')
f.close()
return words
def solve():
words = read_words("data/problem042.txt")
word_values = map(word_value, words)
triangle_numbers = filter(is_triangle_number, word_values)
return len(triangle_numbers)
class Test(unittest.TestCase):
def test_sample(self):
self.assertEqual(1.0, triangle_numbers_at(1.0))
self.assertEqual(3.0, triangle_numbers_at(2.0))
self.assertEqual(6.0, triangle_numbers_at(3.0))
self.assertEqual(55, word_value('SKY'))
self.assertEqual(19, char_value('S'))
self.assertEqual(11, char_value('K'))
self.assertEqual(25, char_value('Y'))
pass
def test_answer(self):
self.assertEqual(162, solve())
pass
# -----------------------------------------
def run():
return solve()
if __name__ == '__main__':
run()
unittest.main()
# if __name__ == '__main__':
# t = timeit.Timer("run()", "from __main__ import run")
# count = 1
# print(str(t.timeit(count)) + " seconds for " + str(count) + " runs") | en | 0.729489 | Problem 42
25 April 2003
The nth term of the sequence of triangle numbers is given by, tn = 1/2 n(n+1); so the first ten triangle numbers are:
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
By converting each letter in a word to a number corresponding to its alphabetical position and adding these values
we form a word value. For example, the word value for SKY is 19 + 11 + 25 = 55 = t10. If the word value is a
triangle number then we shall call the word a triangle word.
Using words.txt (right click and 'Save Link/Target As...'), a 16K text file containing nearly two-thousand common
English words, how many are triangle words?
----------------------------------------------------------
Created on 30.01.2015
@author: ahallmann # ----------------------------------------- # if __name__ == '__main__': # t = timeit.Timer("run()", "from __main__ import run") # count = 1 # print(str(t.timeit(count)) + " seconds for " + str(count) + " runs") | 3.922983 | 4 |
api_rest/migrations/0005_auto_20200828_0903.py | ccortes1/Event5-Data | 1 | 6621334 | # Generated by Django 3.1 on 2020-08-28 14:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api_rest', '0004_auto_20200828_0749'),
]
operations = [
migrations.RemoveField(
model_name='event',
name='user_id',
),
migrations.AddField(
model_name='event',
name='users',
field=models.ManyToManyField(db_table='user_event', related_name='users', to='api_rest.UserE'),
),
]
| # Generated by Django 3.1 on 2020-08-28 14:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api_rest', '0004_auto_20200828_0749'),
]
operations = [
migrations.RemoveField(
model_name='event',
name='user_id',
),
migrations.AddField(
model_name='event',
name='users',
field=models.ManyToManyField(db_table='user_event', related_name='users', to='api_rest.UserE'),
),
]
| en | 0.752908 | # Generated by Django 3.1 on 2020-08-28 14:03 | 1.468151 | 1 |
tests/test_column.py | tamuto/columnarframe | 0 | 6621335 | import unittest
import columnarframe as colf
class TestColumn(unittest.TestCase):
def setUp(self):
self.cf = colf.ColumnarFrame({
'col1': ['AAA', None, 'CCC', 'CCC', 'DDD'],
'col2': ['1', '5', '8', '3', None],
'col4': ['True', 'False', 'False', None, 'True'],
})
def test_to_list(self):
self.assertEqual(
self.cf['col1'].to_list(),
['AAA', None, 'CCC', 'CCC', 'DDD'])
def test_unique(self):
self.assertEqual(
self.cf['col1'].apply(
lambda x: x if x != 'AAA' else None).unique().to_list(),
[None, 'CCC', 'DDD'])
def test_apply(self):
self.assertEqual(
self.cf['col1'].apply(
lambda x: x if x != 'AAA' else None).to_list(),
[None, None, 'CCC', 'CCC', 'DDD'])
def test_apply2(self):
self.cf['test1'] = self.cf['col1'].apply(
lambda x: x if x != 'AAA' else None
)
self.assertEqual(
self.cf['col1'].to_list(),
['AAA', None, 'CCC', 'CCC', 'DDD']
)
self.assertEqual(
self.cf['test1'].to_list(),
[None, None, 'CCC', 'CCC', 'DDD']
)
def test_apply3(self):
def conv(value, target):
return value if value == target else None
self.cf['col1'] = self.cf['col1'].apply(
(conv, 'CCC'),
lambda x: x if x is not None else ''
)
self.assertEqual(
self.cf['col1'].to_list(),
['', '', 'CCC', 'CCC', '']
)
def test_fillin(self):
self.cf['col1'] = self.cf['col1'].fillin(self.cf['col2'], lambda x, val: x if x is not None else val)
self.assertEqual(
self.cf['col1'].to_list(),
['AAA', '5', 'CCC', 'CCC', 'DDD'])
| import unittest
import columnarframe as colf
class TestColumn(unittest.TestCase):
def setUp(self):
self.cf = colf.ColumnarFrame({
'col1': ['AAA', None, 'CCC', 'CCC', 'DDD'],
'col2': ['1', '5', '8', '3', None],
'col4': ['True', 'False', 'False', None, 'True'],
})
def test_to_list(self):
self.assertEqual(
self.cf['col1'].to_list(),
['AAA', None, 'CCC', 'CCC', 'DDD'])
def test_unique(self):
self.assertEqual(
self.cf['col1'].apply(
lambda x: x if x != 'AAA' else None).unique().to_list(),
[None, 'CCC', 'DDD'])
def test_apply(self):
self.assertEqual(
self.cf['col1'].apply(
lambda x: x if x != 'AAA' else None).to_list(),
[None, None, 'CCC', 'CCC', 'DDD'])
def test_apply2(self):
self.cf['test1'] = self.cf['col1'].apply(
lambda x: x if x != 'AAA' else None
)
self.assertEqual(
self.cf['col1'].to_list(),
['AAA', None, 'CCC', 'CCC', 'DDD']
)
self.assertEqual(
self.cf['test1'].to_list(),
[None, None, 'CCC', 'CCC', 'DDD']
)
def test_apply3(self):
def conv(value, target):
return value if value == target else None
self.cf['col1'] = self.cf['col1'].apply(
(conv, 'CCC'),
lambda x: x if x is not None else ''
)
self.assertEqual(
self.cf['col1'].to_list(),
['', '', 'CCC', 'CCC', '']
)
def test_fillin(self):
self.cf['col1'] = self.cf['col1'].fillin(self.cf['col2'], lambda x, val: x if x is not None else val)
self.assertEqual(
self.cf['col1'].to_list(),
['AAA', '5', 'CCC', 'CCC', 'DDD'])
| none | 1 | 3.232143 | 3 | |
cdn_static_website/settings/components/templates.py | soulraven/cdn_small | 0 | 6621336 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2018-2021 ProGeek
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# https://docs.djangoproject.com/en/3.2/ref/templates/api/
from cdn_static_website.settings.components import BASE_DIR
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
BASE_DIR.joinpath('templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Default template context processors:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
],
},
}]
| # -*- coding: utf-8 -*-
#
# Copyright (C) 2018-2021 ProGeek
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# https://docs.djangoproject.com/en/3.2/ref/templates/api/
from cdn_static_website.settings.components import BASE_DIR
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
BASE_DIR.joinpath('templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Default template context processors:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
],
},
}]
| en | 0.838745 | # -*- coding: utf-8 -*- # # Copyright (C) 2018-2021 ProGeek # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # https://docs.djangoproject.com/en/3.2/ref/templates/api/ # Default template context processors: | 1.594416 | 2 |
prozhito_app/models.py | apjanco/prozhito_db | 0 | 6621337 | from django.contrib.gis.db import models
from djgeojson.fields import PointField
# Create your models here.
class Place(models.Model):
name = models.CharField(max_length=220, blank=True, null=True)
wiki = models.URLField(max_length=250, blank=True)
geom = models.PointField(null=True, blank=True)
@property
def popupContent(self):
return '<b>{}</b>'.format(
self.name,
)
def __str__(self):
return self.name
class Keyword(models.Model):
name = models.CharField(max_length=220, blank=True, null=True)
def __str__(self):
return self.name
class Person(models.Model):
id = models.AutoField(primary_key=True)
first_name = models.CharField(max_length=220, blank=True, null=True)
patronymic = models.CharField(max_length=220, blank=True, null=True)
family_name = models.CharField(max_length=220, blank=True, null=True)
nickname = models.CharField(max_length=220, blank=True, null=True)
edition = models.TextField(blank=True, null=True)
info = models.TextField(blank=True, null=True)
additional_info = models.TextField(blank=True, null=True)
wiki = models.URLField(max_length=1000, blank=True)
birth_date = models.DateField(blank=True, null=True)
death_date = models.DateField(blank=True, null=True)
gender = models.CharField(max_length=220, blank=True, null=True)
from_natasha = models.BooleanField(default=False)
from_tags = models.BooleanField(default=False)
def __str__(self):
return f'{self.family_name}, {self.first_name} {self.patronymic}'
class Entry(models.Model):
id = models.AutoField(primary_key=True)
text = models.TextField(blank=True, null=True)
lemmatized = models.TextField(blank=True, null=True)
date_start = models.DateField(blank=True, null=True)
date_end = models.DateField(blank=True, null=True)
author = models.ForeignKey(Person, on_delete=models.CASCADE, blank=True, null=True, related_name='entry_author')
people = models.ManyToManyField(Person, blank=True, verbose_name="Person(s)")
keywords = models.ManyToManyField(Keyword, blank=True, verbose_name="Keyword(s)")
places = models.ManyToManyField(Place, blank=True, verbose_name="Place(s)")
diary = models.IntegerField(default=None)
sentiment = models.CharField(max_length=220, blank=True, null=True)
RuBERT = models.BooleanField(default=False)
@property
def popupContent(self):
return '<b>{}</b>'.format(
self.text[:100],
)
def __str__(self):
return self.text[:100]
class Diary(models.Model):
id = models.AutoField(primary_key=True)
author = models.ForeignKey(Person, on_delete=models.CASCADE, blank=True, null=True, related_name='diary_author')
no_entries = models.IntegerField(default=None)
first_note = models.DateField(blank=True, null=True)
last_note = models.DateField(blank=True, null=True)
def __str__(self):
return str(self.id)
| from django.contrib.gis.db import models
from djgeojson.fields import PointField
# Create your models here.
class Place(models.Model):
name = models.CharField(max_length=220, blank=True, null=True)
wiki = models.URLField(max_length=250, blank=True)
geom = models.PointField(null=True, blank=True)
@property
def popupContent(self):
return '<b>{}</b>'.format(
self.name,
)
def __str__(self):
return self.name
class Keyword(models.Model):
name = models.CharField(max_length=220, blank=True, null=True)
def __str__(self):
return self.name
class Person(models.Model):
id = models.AutoField(primary_key=True)
first_name = models.CharField(max_length=220, blank=True, null=True)
patronymic = models.CharField(max_length=220, blank=True, null=True)
family_name = models.CharField(max_length=220, blank=True, null=True)
nickname = models.CharField(max_length=220, blank=True, null=True)
edition = models.TextField(blank=True, null=True)
info = models.TextField(blank=True, null=True)
additional_info = models.TextField(blank=True, null=True)
wiki = models.URLField(max_length=1000, blank=True)
birth_date = models.DateField(blank=True, null=True)
death_date = models.DateField(blank=True, null=True)
gender = models.CharField(max_length=220, blank=True, null=True)
from_natasha = models.BooleanField(default=False)
from_tags = models.BooleanField(default=False)
def __str__(self):
return f'{self.family_name}, {self.first_name} {self.patronymic}'
class Entry(models.Model):
id = models.AutoField(primary_key=True)
text = models.TextField(blank=True, null=True)
lemmatized = models.TextField(blank=True, null=True)
date_start = models.DateField(blank=True, null=True)
date_end = models.DateField(blank=True, null=True)
author = models.ForeignKey(Person, on_delete=models.CASCADE, blank=True, null=True, related_name='entry_author')
people = models.ManyToManyField(Person, blank=True, verbose_name="Person(s)")
keywords = models.ManyToManyField(Keyword, blank=True, verbose_name="Keyword(s)")
places = models.ManyToManyField(Place, blank=True, verbose_name="Place(s)")
diary = models.IntegerField(default=None)
sentiment = models.CharField(max_length=220, blank=True, null=True)
RuBERT = models.BooleanField(default=False)
@property
def popupContent(self):
return '<b>{}</b>'.format(
self.text[:100],
)
def __str__(self):
return self.text[:100]
class Diary(models.Model):
id = models.AutoField(primary_key=True)
author = models.ForeignKey(Person, on_delete=models.CASCADE, blank=True, null=True, related_name='diary_author')
no_entries = models.IntegerField(default=None)
first_note = models.DateField(blank=True, null=True)
last_note = models.DateField(blank=True, null=True)
def __str__(self):
return str(self.id)
| en | 0.963489 | # Create your models here. | 2.587166 | 3 |
discovery-infra/test_infra/helper_classes/kube_helpers/installenv.py | RazRegev/assisted-test-infra | 0 | 6621338 | import re
from typing import Optional, Union, Dict
from pprint import pformat
import yaml
import waiting
from kubernetes.client import ApiClient, CustomObjectsApi
from kubernetes.client.rest import ApiException
from tests.conftest import env_variables
from .base_resource import BaseCustomResource
from .cluster_deployment import ClusterDeployment
from .secret import deploy_default_secret, Secret
from .common import logger
from .global_vars import DEFAULT_WAIT_FOR_CRD_STATUS_TIMEOUT
ISO_URL_PATTERN = re.compile(r"(?P<api_url>.+)/api/assisted-install/v1/clusters/"
r"(?P<cluster_id>[0-9a-z-]+)/downloads/image")
class Proxy:
"""Proxy settings for the installation.
Args:
http_proxy (str): endpoint for accessing in every HTTP request.
https_proxy (str): endpoint for accessing in every HTTPS request.
no_proxy (str): comma separated values of addresses/address ranges/DNS entries
that shouldn't be accessed via proxies.
"""
def __init__(
self,
http_proxy: str,
https_proxy: str,
no_proxy: str
):
self.http_proxy = http_proxy
self.https_proxy = https_proxy
self.no_proxy = no_proxy
def __repr__(self) -> str:
return str(self.as_dict())
def as_dict(self) -> dict:
return {
'httpProxy': self.http_proxy,
'httpsProxy': self.https_proxy,
'noProxy': self.no_proxy,
}
class InstallEnv(BaseCustomResource):
"""
InstallEnv is used to generate cluster iso.
Image is automatically generated on CRD deployment, after InstallEnv is
reconciled. Image download url will be exposed in the status.
"""
_api_group = 'adi.io.my.domain'
_api_version = 'v1alpha1'
_plural = 'installenvs'
def __init__(
self,
kube_api_client: ApiClient,
name: str,
namespace: str = env_variables['namespace']
):
super().__init__(name, namespace)
self.crd_api = CustomObjectsApi(kube_api_client)
def create_from_yaml(self, yaml_data: dict) -> None:
self.crd_api.create_namespaced_custom_object(
group=self._api_group,
version=self._api_version,
plural=self._plural,
body=yaml_data,
namespace=self.ref.namespace
)
logger.info(
'created installEnv %s: %s', self.ref, pformat(yaml_data)
)
def create(
self,
cluster_deployment: ClusterDeployment,
secret: Secret,
proxy: Proxy,
label_selector: Optional[Dict[str, str]] = None,
ignition_config_override: Optional[str] = None,
**kwargs
) -> None:
body = {
'apiVersion': f'{self._api_group}/{self._api_version}',
'kind': 'InstallEnv',
'metadata': self.ref.as_dict(),
'spec': {
'clusterRef': cluster_deployment.ref.as_dict(),
'pullSecretRef': secret.ref.as_dict(),
'proxy': proxy.as_dict(),
'nmStateConfigLabelSelector': { # TODO: set nmstate configuration
"matchLabels": {
"adi.io.my.domain/selector-nmstate-config-name": "abcd"
}
},
'agentLabelSelector': {'matchLabels': label_selector or {}},
'ignitionConfigOverride': ignition_config_override or ''
}
}
body['spec'].update(kwargs)
self.crd_api.create_namespaced_custom_object(
group=self._api_group,
version=self._api_version,
plural=self._plural,
body=body,
namespace=self.ref.namespace
)
logger.info(
'created installEnv %s: %s', self.ref, pformat(body)
)
def patch(
self,
cluster_deployment: Optional[ClusterDeployment],
secret: Optional[Secret],
label_selector: Optional[Dict[str, str]] = None,
ignition_config_override: Optional[str] = None,
**kwargs
) -> None:
body = {'spec': kwargs}
spec = body['spec']
if cluster_deployment:
spec['clusterRef'] = cluster_deployment.ref.as_dict()
if secret:
spec['pullSecretRef'] = secret.ref.as_dict()
if label_selector:
spec['agentLabelSelector'] = {'matchLabels': label_selector}
if ignition_config_override:
spec['ignitionConfigOverride'] = ignition_config_override
self.crd_api.patch_namespaced_custom_object(
group=self._api_group,
version=self._api_version,
plural=self._plural,
name=self.ref.name,
namespace=self.ref.namespace,
body=body
)
logger.info(
'patching installEnv %s: %s', self.ref, pformat(body)
)
def get(self) -> dict:
return self.crd_api.get_namespaced_custom_object(
group=self._api_group,
version=self._api_version,
plural=self._plural,
name=self.ref.name,
namespace=self.ref.namespace
)
def delete(self) -> None:
self.crd_api.delete_namespaced_custom_object(
group=self._api_group,
version=self._api_version,
plural=self._plural,
name=self.ref.name,
namespace=self.ref.namespace
)
logger.info('deleted installEnv %s', self.ref)
def status(
self,
timeout: Union[int, float] = DEFAULT_WAIT_FOR_CRD_STATUS_TIMEOUT
) -> dict:
"""
Status is a section in the CRD that is created after registration to
assisted service and it defines the observed state of InstallEnv.
Since the status key is created only after resource is processed by the
controller in the service, it might take a few seconds before appears.
"""
def _attempt_to_get_status() -> dict:
return self.get()['status']
return waiting.wait(
_attempt_to_get_status,
sleep_seconds=0.5,
timeout_seconds=timeout,
waiting_for=f'installEnv {self.ref} status',
expected_exceptions=KeyError
)
def get_iso_download_url(self):
def _attempt_to_get_image_url() -> str:
return self.get()['status']['isoDownloadURL']
return waiting.wait(
_attempt_to_get_image_url,
sleep_seconds=3,
timeout_seconds=60,
waiting_for=f'image to be created',
expected_exceptions=KeyError)
def get_cluster_id(self):
return ISO_URL_PATTERN.match(self.get_iso_download_url()).group("cluster_id")
def deploy_default_installenv(
kube_api_client: ApiClient,
name: str,
ignore_conflict: bool = True,
cluster_deployment: Optional[ClusterDeployment] = None,
secret: Optional[Secret] = None,
label_selector: Optional[Dict[str, str]] = None,
ignition_config_override: Optional[str] = None,
**kwargs
) -> InstallEnv:
install_env = InstallEnv(kube_api_client, name)
try:
if 'filepath' in kwargs:
_create_installenv_from_yaml_file(
install_env=install_env,
filepath=kwargs['filepath']
)
else:
_create_installenv_from_attrs(
kube_api_client=kube_api_client,
name=name,
ignore_conflict=ignore_conflict,
install_env=install_env,
cluster_deployment=cluster_deployment,
secret=secret,
label_selector=label_selector,
ignition_config_override=ignition_config_override,
**kwargs
)
except ApiException as e:
if not (e.reason == 'Conflict' and ignore_conflict):
raise
# wait until install-env will have status (i.e until resource will be
# processed in assisted-service).
install_env.status()
return install_env
def _create_installenv_from_yaml_file(
install_env: InstallEnv,
filepath: str
) -> None:
with open(filepath) as fp:
yaml_data = yaml.safe_load(fp)
install_env.create_from_yaml(yaml_data)
def _create_installenv_from_attrs(
kube_api_client: ApiClient,
install_env: InstallEnv,
cluster_deployment: ClusterDeployment,
secret: Optional[Secret] = None,
label_selector: Optional[Dict[str, str]] = None,
ignition_config_override: Optional[str] = None,
**kwargs
) -> None:
if not secret:
secret = deploy_default_secret(
kube_api_client=kube_api_client,
name=cluster_deployment.ref.name
)
install_env.create(
cluster_deployment=cluster_deployment,
secret=secret,
label_selector=label_selector,
ignition_config_override=ignition_config_override,
**kwargs
)
| import re
from typing import Optional, Union, Dict
from pprint import pformat
import yaml
import waiting
from kubernetes.client import ApiClient, CustomObjectsApi
from kubernetes.client.rest import ApiException
from tests.conftest import env_variables
from .base_resource import BaseCustomResource
from .cluster_deployment import ClusterDeployment
from .secret import deploy_default_secret, Secret
from .common import logger
from .global_vars import DEFAULT_WAIT_FOR_CRD_STATUS_TIMEOUT
ISO_URL_PATTERN = re.compile(r"(?P<api_url>.+)/api/assisted-install/v1/clusters/"
r"(?P<cluster_id>[0-9a-z-]+)/downloads/image")
class Proxy:
"""Proxy settings for the installation.
Args:
http_proxy (str): endpoint for accessing in every HTTP request.
https_proxy (str): endpoint for accessing in every HTTPS request.
no_proxy (str): comma separated values of addresses/address ranges/DNS entries
that shouldn't be accessed via proxies.
"""
def __init__(
self,
http_proxy: str,
https_proxy: str,
no_proxy: str
):
self.http_proxy = http_proxy
self.https_proxy = https_proxy
self.no_proxy = no_proxy
def __repr__(self) -> str:
return str(self.as_dict())
def as_dict(self) -> dict:
return {
'httpProxy': self.http_proxy,
'httpsProxy': self.https_proxy,
'noProxy': self.no_proxy,
}
class InstallEnv(BaseCustomResource):
"""
InstallEnv is used to generate cluster iso.
Image is automatically generated on CRD deployment, after InstallEnv is
reconciled. Image download url will be exposed in the status.
"""
_api_group = 'adi.io.my.domain'
_api_version = 'v1alpha1'
_plural = 'installenvs'
def __init__(
self,
kube_api_client: ApiClient,
name: str,
namespace: str = env_variables['namespace']
):
super().__init__(name, namespace)
self.crd_api = CustomObjectsApi(kube_api_client)
def create_from_yaml(self, yaml_data: dict) -> None:
self.crd_api.create_namespaced_custom_object(
group=self._api_group,
version=self._api_version,
plural=self._plural,
body=yaml_data,
namespace=self.ref.namespace
)
logger.info(
'created installEnv %s: %s', self.ref, pformat(yaml_data)
)
def create(
self,
cluster_deployment: ClusterDeployment,
secret: Secret,
proxy: Proxy,
label_selector: Optional[Dict[str, str]] = None,
ignition_config_override: Optional[str] = None,
**kwargs
) -> None:
body = {
'apiVersion': f'{self._api_group}/{self._api_version}',
'kind': 'InstallEnv',
'metadata': self.ref.as_dict(),
'spec': {
'clusterRef': cluster_deployment.ref.as_dict(),
'pullSecretRef': secret.ref.as_dict(),
'proxy': proxy.as_dict(),
'nmStateConfigLabelSelector': { # TODO: set nmstate configuration
"matchLabels": {
"adi.io.my.domain/selector-nmstate-config-name": "abcd"
}
},
'agentLabelSelector': {'matchLabels': label_selector or {}},
'ignitionConfigOverride': ignition_config_override or ''
}
}
body['spec'].update(kwargs)
self.crd_api.create_namespaced_custom_object(
group=self._api_group,
version=self._api_version,
plural=self._plural,
body=body,
namespace=self.ref.namespace
)
logger.info(
'created installEnv %s: %s', self.ref, pformat(body)
)
def patch(
self,
cluster_deployment: Optional[ClusterDeployment],
secret: Optional[Secret],
label_selector: Optional[Dict[str, str]] = None,
ignition_config_override: Optional[str] = None,
**kwargs
) -> None:
body = {'spec': kwargs}
spec = body['spec']
if cluster_deployment:
spec['clusterRef'] = cluster_deployment.ref.as_dict()
if secret:
spec['pullSecretRef'] = secret.ref.as_dict()
if label_selector:
spec['agentLabelSelector'] = {'matchLabels': label_selector}
if ignition_config_override:
spec['ignitionConfigOverride'] = ignition_config_override
self.crd_api.patch_namespaced_custom_object(
group=self._api_group,
version=self._api_version,
plural=self._plural,
name=self.ref.name,
namespace=self.ref.namespace,
body=body
)
logger.info(
'patching installEnv %s: %s', self.ref, pformat(body)
)
    def get(self) -> dict:
        """Fetch and return the InstallEnv custom resource as a dict."""
        return self.crd_api.get_namespaced_custom_object(
            group=self._api_group,
            version=self._api_version,
            plural=self._plural,
            name=self.ref.name,
            namespace=self.ref.namespace
        )
    def delete(self) -> None:
        """Delete the InstallEnv custom resource from the cluster."""
        self.crd_api.delete_namespaced_custom_object(
            group=self._api_group,
            version=self._api_version,
            plural=self._plural,
            name=self.ref.name,
            namespace=self.ref.namespace
        )
        logger.info('deleted installEnv %s', self.ref)
    def status(
        self,
        timeout: Union[int, float] = DEFAULT_WAIT_FOR_CRD_STATUS_TIMEOUT
    ) -> dict:
        """
        Return the ``status`` section of the CRD, waiting for it to appear.

        Status is a section in the CRD that is created after registration to
        assisted service and it defines the observed state of InstallEnv.
        Since the status key is created only after resource is processed by the
        controller in the service, it might take a few seconds before appears.

        Raises a waiting timeout error if the status does not appear within
        ``timeout`` seconds.
        """
        def _attempt_to_get_status() -> dict:
            # KeyError (resource exists but has no 'status' yet) is treated
            # as "not ready" by the waiter below.
            return self.get()['status']
        return waiting.wait(
            _attempt_to_get_status,
            sleep_seconds=0.5,
            timeout_seconds=timeout,
            waiting_for=f'installEnv {self.ref} status',
            expected_exceptions=KeyError
        )
def get_iso_download_url(self):
def _attempt_to_get_image_url() -> str:
return self.get()['status']['isoDownloadURL']
return waiting.wait(
_attempt_to_get_image_url,
sleep_seconds=3,
timeout_seconds=60,
waiting_for=f'image to be created',
expected_exceptions=KeyError)
    def get_cluster_id(self):
        """Extract the cluster id from the ISO download URL.

        Parses the URL returned by ``get_iso_download_url`` with the
        module-level ``ISO_URL_PATTERN`` regex and returns its
        ``cluster_id`` group.

        NOTE(review): if the URL does not match the pattern, ``.match()``
        returns None and this raises AttributeError — presumably the URL
        format is guaranteed by the service; verify.
        """
        return ISO_URL_PATTERN.match(self.get_iso_download_url()).group("cluster_id")
def deploy_default_installenv(
    kube_api_client: ApiClient,
    name: str,
    ignore_conflict: bool = True,
    cluster_deployment: Optional[ClusterDeployment] = None,
    secret: Optional[Secret] = None,
    label_selector: Optional[Dict[str, str]] = None,
    ignition_config_override: Optional[str] = None,
    **kwargs
) -> InstallEnv:
    """Create an InstallEnv resource and wait for it to be processed.

    If ``kwargs`` contains a ``filepath`` key, the resource definition is
    loaded from that YAML file; otherwise it is built from the given
    attributes.  A 409 Conflict from the API server is swallowed when
    ``ignore_conflict`` is True (the resource already exists).

    Returns the InstallEnv wrapper once its ``status`` section exists,
    i.e. once the assisted service has registered the resource.
    """
    install_env = InstallEnv(kube_api_client, name)
    try:
        if 'filepath' in kwargs:
            _create_installenv_from_yaml_file(
                install_env=install_env,
                filepath=kwargs['filepath']
            )
        else:
            # BUGFIX: do not forward `name`/`ignore_conflict` here.  The
            # helper's signature has neither parameter, so they used to fall
            # into **kwargs and end up merged into the CRD `spec` by
            # InstallEnv.create(), polluting the custom resource with
            # non-spec fields.
            _create_installenv_from_attrs(
                kube_api_client=kube_api_client,
                install_env=install_env,
                cluster_deployment=cluster_deployment,
                secret=secret,
                label_selector=label_selector,
                ignition_config_override=ignition_config_override,
                **kwargs
            )
    except ApiException as e:
        if not (e.reason == 'Conflict' and ignore_conflict):
            raise
    # wait until install-env will have status (i.e until resource will be
    # processed in assisted-service).
    install_env.status()
    return install_env
def _create_installenv_from_yaml_file(
    install_env: InstallEnv,
    filepath: str
) -> None:
    """Load an InstallEnv definition from a YAML file and create it."""
    with open(filepath) as yaml_file:
        definition = yaml.safe_load(yaml_file)
    install_env.create_from_yaml(definition)
def _create_installenv_from_attrs(
    kube_api_client: ApiClient,
    install_env: InstallEnv,
    cluster_deployment: ClusterDeployment,
    secret: Optional[Secret] = None,
    label_selector: Optional[Dict[str, str]] = None,
    ignition_config_override: Optional[str] = None,
    **kwargs
) -> None:
    """Create an InstallEnv resource from individual attributes.

    Deploys a default pull secret (named after the cluster deployment) when
    none is supplied, then creates the resource.

    NOTE(review): every key in ``kwargs`` flows through
    ``InstallEnv.create`` into the CRD ``spec`` — callers must pass only
    valid spec fields here.
    """
    if not secret:
        # No secret given: deploy the default one, keyed by the cluster
        # deployment's name.
        secret = deploy_default_secret(
            kube_api_client=kube_api_client,
            name=cluster_deployment.ref.name
        )
    install_env.create(
        cluster_deployment=cluster_deployment,
        secret=secret,
        label_selector=label_selector,
        ignition_config_override=ignition_config_override,
        **kwargs
    )
| en | 0.898156 | Proxy settings for the installation. Args: http_proxy (str): endpoint for accessing in every HTTP request. https_proxy (str): endpoint for accessing in every HTTPS request. no_proxy (str): comma separated values of addresses/address ranges/DNS entries that shouldn't be accessed via proxies. InstallEnv is used to generate cluster iso. Image is automatically generated on CRD deployment, after InstallEnv is reconciled. Image download url will be exposed in the status. # TODO: set nmstate configuration Status is a section in the CRD that is created after registration to assisted service and it defines the observed state of InstallEnv. Since the status key is created only after resource is processed by the controller in the service, it might take a few seconds before appears. # wait until install-env will have status (i.e until resource will be # processed in assisted-service). | 1.999655 | 2 |
lib/helpers/FilesWalker.py | PetukhovVictor/ast-set2matrix | 4 | 6621339 | from os import path
import glob
class FilesWalker:
@staticmethod
def walk(folder, callback, extension='json'):
for filename in glob.iglob(folder + '/**/*.' + extension, recursive=True):
if path.isfile(filename):
callback(filename)
| from os import path
import glob
class FilesWalker:
@staticmethod
def walk(folder, callback, extension='json'):
for filename in glob.iglob(folder + '/**/*.' + extension, recursive=True):
if path.isfile(filename):
callback(filename)
| none | 1 | 3.128265 | 3 | |
programs/koinos-types/testme.py | joticajulian/koinos-types | 10 | 6621340 |
from dataclasses_json import dataclass_json
from dataclasses import dataclass, field
from typing import List, Tuple, Optional, Union
@dataclass_json
@dataclass
class Node:
name: str
sub: Union["AlphaNode", "BetaNode"]
@dataclass_json
@dataclass
class AlphaNode:
suba: Node
@dataclass_json
@dataclass
class BetaNode:
subb: Node
n = Node("hello", AlphaNode(Node("world", BetaNode(None))))
print(n.to_json())
|
from dataclasses_json import dataclass_json
from dataclasses import dataclass, field
from typing import List, Tuple, Optional, Union
@dataclass_json
@dataclass
class Node:
name: str
sub: Union["AlphaNode", "BetaNode"]
@dataclass_json
@dataclass
class AlphaNode:
suba: Node
@dataclass_json
@dataclass
class BetaNode:
subb: Node
n = Node("hello", AlphaNode(Node("world", BetaNode(None))))
print(n.to_json())
| none | 1 | 2.763861 | 3 | |
sentilab/sentiment/sentiment_menu.py | Sean-Koval/sentilab | 1 | 6621341 | import argparse
from sentilab import feature_flags as ff
from sentilab.helper_functions import get_flair
from sentilab.menu import session
from sentilab.sentiment import reddit_api
from prompt_toolkit.completion import NestedCompleter
def print_sentiment():
""" Print help """
print("\nSentiment:")
print(" help show this sentiment menu again")
print(" q quit this menu, and shows back to main menu")
print(" quit quit to abandon program")
print("")
print("Reddit:")
print(" wsb show what WSB gang is up to in subreddit wallstreetbets")
print(" watchlist show other users watchlist")
print(" popular show popular tickers")
print(
" spac_c show other users spacs announcements from subreddit SPACs community"
)
print(" spac show other users spacs announcements from other subs")
print("")
print("Twitter:")
print(" infer infer about stock's sentiment from latest tweets")
print(" sentiment in-depth sentiment prediction from tweets over time")
print("")
return
def sentiment_menu(s_ticker, s_start):
# Add list of arguments that the discovery parser accepts
sen_parser = argparse.ArgumentParser(prog="sentiment", add_help=False)
choices = [
"help",
"q",
"quit",
"watchlist",
"spac",
"spac_c",
"wsb",
"popular",
"infer",
"sentiment",
]
sen_parser.add_argument("cmd", choices=choices)
completer = NestedCompleter.from_nested_dict({c: None for c in choices})
print_sentiment()
# Loop forever and ever
while True:
# Get input command from user
if session and ff.USE_PROMPT_TOOLKIT:
as_input = session.prompt(
f"{get_flair()} (sen)> ",
completer=completer,
)
else:
as_input = input(f"{get_flair()} (sen)> ")
# Parse sentiment command of the list of possible commands
try:
(ns_known_args, l_args) = sen_parser.parse_known_args(as_input.split())
except SystemExit:
print("The command selected doesn't exist\n")
continue
if ns_known_args.cmd == "help":
print_sentiment()
elif ns_known_args.cmd == "q":
# Just leave the DISC menu
return False
elif ns_known_args.cmd == "quit":
# Abandon the program
return True
elif ns_known_args.cmd == "watchlist":
reddit_api.watchlist(l_args)
elif ns_known_args.cmd == "spac":
reddit_api.spac(l_args)
elif ns_known_args.cmd == "popular":
reddit_api.spac(l_args)
elif ns_known_args.cmd == "spac_c":
reddit_api.spac_community(l_args)
elif ns_known_args.cmd == "wsb":
reddit_api.wsb_community(l_args)
elif ns_known_args.cmd == "infer":
if not ff.ENABLE_PREDICT:
print("Predict is not enabled in feature_flags.py")
print("Twitter inference menu is disabled")
print("")
continue
try:
# pylint: disable=import-outside-toplevel
from sentilab.sentiment import twitter_api
except ModuleNotFoundError as e:
print("One of the optional packages seems to be missing")
print("Optional packages need to be installed")
print(e)
print("")
continue
except Exception as e:
print(e)
print("")
continue
twitter_api.inference(l_args, s_ticker)
elif ns_known_args.cmd == "sentiment":
if not ff.ENABLE_PREDICT:
print("Predict is not enabled in config_terminal.py")
print("Twitter sentiment menu is disabled")
print("")
continue
try:
# pylint: disable=import-outside-toplevel
from sentilab.sentiment import twitter_api
except ModuleNotFoundError as e:
print("One of the optional packages seems to be missing")
print("Optional packages need to be installed")
print(e)
print("")
continue
except Exception as e:
print(e)
print("")
continue
twitter_api.sentiment(l_args, s_ticker)
else:
print("Command not recognized!") | import argparse
from sentilab import feature_flags as ff
from sentilab.helper_functions import get_flair
from sentilab.menu import session
from sentilab.sentiment import reddit_api
from prompt_toolkit.completion import NestedCompleter
def print_sentiment():
""" Print help """
print("\nSentiment:")
print(" help show this sentiment menu again")
print(" q quit this menu, and shows back to main menu")
print(" quit quit to abandon program")
print("")
print("Reddit:")
print(" wsb show what WSB gang is up to in subreddit wallstreetbets")
print(" watchlist show other users watchlist")
print(" popular show popular tickers")
print(
" spac_c show other users spacs announcements from subreddit SPACs community"
)
print(" spac show other users spacs announcements from other subs")
print("")
print("Twitter:")
print(" infer infer about stock's sentiment from latest tweets")
print(" sentiment in-depth sentiment prediction from tweets over time")
print("")
return
def sentiment_menu(s_ticker, s_start):
# Add list of arguments that the discovery parser accepts
sen_parser = argparse.ArgumentParser(prog="sentiment", add_help=False)
choices = [
"help",
"q",
"quit",
"watchlist",
"spac",
"spac_c",
"wsb",
"popular",
"infer",
"sentiment",
]
sen_parser.add_argument("cmd", choices=choices)
completer = NestedCompleter.from_nested_dict({c: None for c in choices})
print_sentiment()
# Loop forever and ever
while True:
# Get input command from user
if session and ff.USE_PROMPT_TOOLKIT:
as_input = session.prompt(
f"{get_flair()} (sen)> ",
completer=completer,
)
else:
as_input = input(f"{get_flair()} (sen)> ")
# Parse sentiment command of the list of possible commands
try:
(ns_known_args, l_args) = sen_parser.parse_known_args(as_input.split())
except SystemExit:
print("The command selected doesn't exist\n")
continue
if ns_known_args.cmd == "help":
print_sentiment()
elif ns_known_args.cmd == "q":
# Just leave the DISC menu
return False
elif ns_known_args.cmd == "quit":
# Abandon the program
return True
elif ns_known_args.cmd == "watchlist":
reddit_api.watchlist(l_args)
elif ns_known_args.cmd == "spac":
reddit_api.spac(l_args)
elif ns_known_args.cmd == "popular":
reddit_api.spac(l_args)
elif ns_known_args.cmd == "spac_c":
reddit_api.spac_community(l_args)
elif ns_known_args.cmd == "wsb":
reddit_api.wsb_community(l_args)
elif ns_known_args.cmd == "infer":
if not ff.ENABLE_PREDICT:
print("Predict is not enabled in feature_flags.py")
print("Twitter inference menu is disabled")
print("")
continue
try:
# pylint: disable=import-outside-toplevel
from sentilab.sentiment import twitter_api
except ModuleNotFoundError as e:
print("One of the optional packages seems to be missing")
print("Optional packages need to be installed")
print(e)
print("")
continue
except Exception as e:
print(e)
print("")
continue
twitter_api.inference(l_args, s_ticker)
elif ns_known_args.cmd == "sentiment":
if not ff.ENABLE_PREDICT:
print("Predict is not enabled in config_terminal.py")
print("Twitter sentiment menu is disabled")
print("")
continue
try:
# pylint: disable=import-outside-toplevel
from sentilab.sentiment import twitter_api
except ModuleNotFoundError as e:
print("One of the optional packages seems to be missing")
print("Optional packages need to be installed")
print(e)
print("")
continue
except Exception as e:
print(e)
print("")
continue
twitter_api.sentiment(l_args, s_ticker)
else:
print("Command not recognized!") | en | 0.628886 | Print help # Add list of arguments that the discovery parser accepts # Loop forever and ever # Get input command from user # Parse sentiment command of the list of possible commands # Just leave the DISC menu # Abandon the program # pylint: disable=import-outside-toplevel # pylint: disable=import-outside-toplevel | 2.907157 | 3 |
controller/store/counter_controller.py | mallycrip/Flask-DI-example | 0 | 6621342 | from flask import request
from dataclasses import dataclass
from injector import inject
from controller.base_resource import StoreResource
from service.store_service import StoreService
@inject
@dataclass
class CounterController(StoreResource):
store_service: StoreService
def get(self):
store_id = request.args.get("store-id")
menus = self.store_service.get_menu(store_id)
return {
"store_id": store_id,
"menus": [menu.__dict__ for menu in menus]
}
def post(self):
customer_id = request.json['customer_id']
menu_id = request.json['menu_id']
menu = self.store_service.order(customer_id, menu_id)
return {
"customer_id": customer_id,
"menu": menu.__dict__
}
| from flask import request
from dataclasses import dataclass
from injector import inject
from controller.base_resource import StoreResource
from service.store_service import StoreService
@inject
@dataclass
class CounterController(StoreResource):
store_service: StoreService
def get(self):
store_id = request.args.get("store-id")
menus = self.store_service.get_menu(store_id)
return {
"store_id": store_id,
"menus": [menu.__dict__ for menu in menus]
}
def post(self):
customer_id = request.json['customer_id']
menu_id = request.json['menu_id']
menu = self.store_service.order(customer_id, menu_id)
return {
"customer_id": customer_id,
"menu": menu.__dict__
}
| none | 1 | 2.417183 | 2 | |
ipkg/files/backends/http.py | pmuller/ipkg | 3 | 6621343 | <filename>ipkg/files/backends/http.py
import logging
import requests
from . import BaseFile, BackendException
from .. import cache
from ...compat import StringIO
LOGGER = logging.getLogger(__name__)
class HttpFileException(BackendException):
"""An error occurred while accessing a file over HTTP/s."""
class HttpFile(BaseFile):
"""A file on a remote HTTP server.
"""
def __init__(self, *args, **kw):
super(HttpFile, self).__init__(*args, **kw)
self.__file = None
def __download(self):
LOGGER.info('Downloading: %s', self.name)
try:
response = requests.get(self.name, stream=True)
except requests.RequestException as exc:
raise HttpFileException(str(exc))
else:
content = StringIO()
while True:
data = response.raw.read(1024 * 1024)
if data:
content.write(data)
else:
break
content.seek(0)
if cache.is_active():
cache.set(self.name, content.read())
content.seek(0)
self.__file = content
LOGGER.info('Downloaded: %s', self.name)
def __get_file(self):
if self.__file is None:
if cache.has(self.name):
self.__file = cache.get(self.name)
else:
self.__download()
return self.__file
def seek(self, *args):
self.__get_file().seek(*args)
def tell(self):
return self.__get_file().tell()
def read(self, *args):
return self.__get_file().read(*args)
| <filename>ipkg/files/backends/http.py
import logging
import requests
from . import BaseFile, BackendException
from .. import cache
from ...compat import StringIO
LOGGER = logging.getLogger(__name__)
class HttpFileException(BackendException):
"""An error occurred while accessing a file over HTTP/s."""
class HttpFile(BaseFile):
"""A file on a remote HTTP server.
"""
def __init__(self, *args, **kw):
super(HttpFile, self).__init__(*args, **kw)
self.__file = None
def __download(self):
LOGGER.info('Downloading: %s', self.name)
try:
response = requests.get(self.name, stream=True)
except requests.RequestException as exc:
raise HttpFileException(str(exc))
else:
content = StringIO()
while True:
data = response.raw.read(1024 * 1024)
if data:
content.write(data)
else:
break
content.seek(0)
if cache.is_active():
cache.set(self.name, content.read())
content.seek(0)
self.__file = content
LOGGER.info('Downloaded: %s', self.name)
def __get_file(self):
if self.__file is None:
if cache.has(self.name):
self.__file = cache.get(self.name)
else:
self.__download()
return self.__file
def seek(self, *args):
self.__get_file().seek(*args)
def tell(self):
return self.__get_file().tell()
def read(self, *args):
return self.__get_file().read(*args)
| en | 0.847544 | An error occurred while accessing a file over HTTP/s. A file on a remote HTTP server. | 3.011606 | 3 |
decision-science/lab1/task3.py | Foltrex/bsu | 113 | 6621344 | out = open('output.txt', 'w')
H = [[14, -4, 2], [-4, 8, 8], [4, 4, 4], [2, 8, 2]]
p = [1./4, 0, 1./4, 1./2]
q = [1./3, 1./3, 1./3]
ans = 0
for i in range(4):
for j in range(3):
out.write('+ ({0}*{1}*{2:.2f}) '.format(H[i][j], p[i], q[j]))
ans += H[i][j]*p[i]*q[j]
out.write('= {}'.format(ans))
| out = open('output.txt', 'w')
H = [[14, -4, 2], [-4, 8, 8], [4, 4, 4], [2, 8, 2]]
p = [1./4, 0, 1./4, 1./2]
q = [1./3, 1./3, 1./3]
ans = 0
for i in range(4):
for j in range(3):
out.write('+ ({0}*{1}*{2:.2f}) '.format(H[i][j], p[i], q[j]))
ans += H[i][j]*p[i]*q[j]
out.write('= {}'.format(ans))
| none | 1 | 2.428059 | 2 | |
fix_dataset.py | MagazzuGaetano/Face-Detector | 0 | 6621345 | import os
import cv2
import numpy as np
data_path = '/home/lfx/Downloads/dtd/images'
images = []
for subdir, dirs, files in os.walk(data_path):
for file in files:
#print(os.path.join(subdir, file))
image = cv2.imread(os.path.join(subdir, file))
output_path = os.path.join('/home/lfx/Downloads', 'New Folder', file)
print(output_path)
cv2.imwrite(output_path, image)
| import os
import cv2
import numpy as np
data_path = '/home/lfx/Downloads/dtd/images'
images = []
for subdir, dirs, files in os.walk(data_path):
for file in files:
#print(os.path.join(subdir, file))
image = cv2.imread(os.path.join(subdir, file))
output_path = os.path.join('/home/lfx/Downloads', 'New Folder', file)
print(output_path)
cv2.imwrite(output_path, image)
| ceb | 0.172333 | #print(os.path.join(subdir, file)) | 2.766203 | 3 |
NPS Exercise Files/Chapter 7/7-9.py | coderXeno/eric-matthes-py-book-solutions | 0 | 6621346 | sandwich_orders = ['cheese','onion','pastrami','ham','pork','pastrami','tomato','egg','pastrami']
print("The deli has run out of pastrami")
while 'pastrami' in sandwich_orders:
sandwich_orders.remove('pastrami')
finished_sandwiches = []
while sandwich_orders:
current_order = sandwich_orders.pop()
print("I made your "+current_order+" sandwich order")
| sandwich_orders = ['cheese','onion','pastrami','ham','pork','pastrami','tomato','egg','pastrami']
print("The deli has run out of pastrami")
while 'pastrami' in sandwich_orders:
sandwich_orders.remove('pastrami')
finished_sandwiches = []
while sandwich_orders:
current_order = sandwich_orders.pop()
print("I made your "+current_order+" sandwich order")
| none | 1 | 3.753608 | 4 | |
scraper/__init__.py | farjanul-nayem/Web-Scraping-with-Python | 30 | 6621347 | <gh_stars>10-100
"""Scrape metadata from target URL."""
import requests
from bs4 import BeautifulSoup
import pprint
from .scrape import (
get_title,
get_description,
get_image,
get_site_name,
get_favicon,
get_theme_color
)
def scrape_page_metadata(url):
"""Scrape target URL for metadata."""
headers = {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'GET',
'Access-Control-Allow-Headers': 'Content-Type',
'Access-Control-Max-Age': '3600',
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'
}
pp = pprint.PrettyPrinter(indent=4)
r = requests.get(url, headers=headers)
html = BeautifulSoup(r.content, 'html.parser')
metadata = {
'title': get_title(html),
'description': get_description(html),
'image': get_image(html),
'favicon': get_favicon(html, url),
'sitename': get_site_name(html, url),
'color': get_theme_color(html),
'url': url
}
pp.pprint(metadata)
return metadata
| """Scrape metadata from target URL."""
import requests
from bs4 import BeautifulSoup
import pprint
from .scrape import (
get_title,
get_description,
get_image,
get_site_name,
get_favicon,
get_theme_color
)
def scrape_page_metadata(url):
"""Scrape target URL for metadata."""
headers = {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'GET',
'Access-Control-Allow-Headers': 'Content-Type',
'Access-Control-Max-Age': '3600',
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'
}
pp = pprint.PrettyPrinter(indent=4)
r = requests.get(url, headers=headers)
html = BeautifulSoup(r.content, 'html.parser')
metadata = {
'title': get_title(html),
'description': get_description(html),
'image': get_image(html),
'favicon': get_favicon(html, url),
'sitename': get_site_name(html, url),
'color': get_theme_color(html),
'url': url
}
pp.pprint(metadata)
return metadata | en | 0.182802 | Scrape metadata from target URL. Scrape target URL for metadata. | 3.352288 | 3 |
word_gen.py | tonyaajjackson/aloke | 0 | 6621348 | <reponame>tonyaajjackson/aloke
#! usr/bin/ python3
import numpy
import random
def word_gen(prob, n_words):
new_words = []
for x in range(n_words):
end_of_word = False
prev_letter = 0
word = ""
# Add loop counter to catch if loop gets stuck
loops = 0
while not end_of_word:
rand_prob = random.random()
# Find a letter corresponding to this probability
found_letter = False
current_letter = 0
while not found_letter:
if rand_prob <= prob[prev_letter, current_letter]:
found_letter = True
prev_letter = current_letter
else:
current_letter +=1
if current_letter == 0:
end_of_word = True
else:
word += chr(current_letter+96)
loops +=1
if loops > 100:
print("Looped too many times - exiting")
end_of_word = True
new_words.append(word)
return new_words | #! usr/bin/ python3
import numpy
import random
def word_gen(prob, n_words):
new_words = []
for x in range(n_words):
end_of_word = False
prev_letter = 0
word = ""
# Add loop counter to catch if loop gets stuck
loops = 0
while not end_of_word:
rand_prob = random.random()
# Find a letter corresponding to this probability
found_letter = False
current_letter = 0
while not found_letter:
if rand_prob <= prob[prev_letter, current_letter]:
found_letter = True
prev_letter = current_letter
else:
current_letter +=1
if current_letter == 0:
end_of_word = True
else:
word += chr(current_letter+96)
loops +=1
if loops > 100:
print("Looped too many times - exiting")
end_of_word = True
new_words.append(word)
return new_words | en | 0.521974 | #! usr/bin/ python3 # Add loop counter to catch if loop gets stuck # Find a letter corresponding to this probability | 3.708617 | 4 |
496. Next Greater Element I/main.py | Competitive-Programmers-Community/LeetCode | 2 | 6621349 | <reponame>Competitive-Programmers-Community/LeetCode
class Solution:
def nextGreaterElement(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
res=[]
for e in nums1:
m=e
for i in range(len(nums2)):
if nums2[i]==e:
break
for j in range(i+1,len(nums2)):
if nums2[j]>m:
m=nums2[j]
break
if m==e:
res.append(-1)
else:
res.append(m)
return res
| class Solution:
def nextGreaterElement(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
res=[]
for e in nums1:
m=e
for i in range(len(nums2)):
if nums2[i]==e:
break
for j in range(i+1,len(nums2)):
if nums2[j]>m:
m=nums2[j]
break
if m==e:
res.append(-1)
else:
res.append(m)
return res | en | 0.11888 | :type nums1: List[int] :type nums2: List[int] :rtype: List[int] | 3.347814 | 3 |
tests/test_epsilon_nfa.py | cxlvinchau/automata-py | 3 | 6621350 | import unittest
from automatapy.automata import EpsilonNFA, Epsilon
class EpsilonNFATest(unittest.TestCase):
def setUp(self) -> None:
self.eps_nfa = EpsilonNFA()
q1, q2, q3 = self.eps_nfa.add_state(initial=True), self.eps_nfa.add_state(final=True), self.eps_nfa.add_state(final=True)
self.eps_nfa.add_transition(q1, Epsilon(), q2)
self.eps_nfa.add_transition(q1, Epsilon(), q3)
self.eps_nfa.add_transition(q2, "a", q2)
self.eps_nfa.add_transition(q3, "b", q3)
def test_to_nfa(self):
nfa = self.eps_nfa.to_nfa()
self.assertTrue(nfa.accepts(""))
self.assertTrue(nfa.accepts("aaaaaaaaa"))
self.assertTrue(nfa.accepts("bbbbbbbbb"))
self.assertFalse(nfa.accepts("abbbbbbbbb"))
if __name__ == '__main__':
unittest.main()
| import unittest
from automatapy.automata import EpsilonNFA, Epsilon
class EpsilonNFATest(unittest.TestCase):
def setUp(self) -> None:
self.eps_nfa = EpsilonNFA()
q1, q2, q3 = self.eps_nfa.add_state(initial=True), self.eps_nfa.add_state(final=True), self.eps_nfa.add_state(final=True)
self.eps_nfa.add_transition(q1, Epsilon(), q2)
self.eps_nfa.add_transition(q1, Epsilon(), q3)
self.eps_nfa.add_transition(q2, "a", q2)
self.eps_nfa.add_transition(q3, "b", q3)
def test_to_nfa(self):
nfa = self.eps_nfa.to_nfa()
self.assertTrue(nfa.accepts(""))
self.assertTrue(nfa.accepts("aaaaaaaaa"))
self.assertTrue(nfa.accepts("bbbbbbbbb"))
self.assertFalse(nfa.accepts("abbbbbbbbb"))
if __name__ == '__main__':
unittest.main()
| none | 1 | 3.36107 | 3 |