max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
tests/test_ascent.py | yamiacat/buildtest | 0 | 6614951 | <reponame>yamiacat/buildtest
import os
import pytest
from buildtest.menu.build import BuildTest
def test_ascent():
# ascent system has LMOD_SYSTEM_NAME set to "ascent" only run this test if this value is set
if os.getenv("LMOD_SYSTEM_NAME") != "ascent":
pytest.skip("Test runs only on ascent")
here = os.path.dirname(os.path.abspath(__file__))
ascent = os.path.join(here, "settings", "ascent.yml")
buildspec_files = os.path.join(here, "examples", "cori_buildspecs", "hostname.yml")
cmd = BuildTest(config_file=ascent, buildspecs=[buildspec_files])
cmd.build()
| import os
import pytest
from buildtest.menu.build import BuildTest
def test_ascent():
# ascent system has LMOD_SYSTEM_NAME set to "ascent" only run this test if this value is set
if os.getenv("LMOD_SYSTEM_NAME") != "ascent":
pytest.skip("Test runs only on ascent")
here = os.path.dirname(os.path.abspath(__file__))
ascent = os.path.join(here, "settings", "ascent.yml")
buildspec_files = os.path.join(here, "examples", "cori_buildspecs", "hostname.yml")
cmd = BuildTest(config_file=ascent, buildspecs=[buildspec_files])
cmd.build() | en | 0.857686 | # ascent system has LMOD_SYSTEM_NAME set to "ascent" only run this test if this value is set | 2.311821 | 2 |
tools/compiler.online.source.py | ISSOtm/Aevilia-GB | 53 | 6614952 | import urllib
import os
deps_link = "https://stuff-for-my-programs.weebly.com/uploads/1/1/4/2/114279849/dependencies.zip"
extractor_link = "https://stuff-for-my-programs.weebly.com/uploads/1/1/4/2/114279849/7za.exe"
dll_list = ["libiconv2.dll", "libintl3.dll", "msvcp60.dll", "msvcrt.dll"]
"""
Downloaded File List:
dependencies.zip:
{
msvcp60.dll
msvcrt.dll
libiconv2.dll
libintl3.dll
make.exe
}
7za.exe
maybe later i'll use the shitty builtin zip module instead but i doubt it
"""
def in_PATH(fileToCheck):
path = os.getenv("PATH").split(";")
for dir in path:
if os.path.isfile(dir + fileToCheck):
return True
return False
def check_deps():
for dll in dll_list:
if not in_PATH(dll):
return False
return in_PATH("make.exe")
def download_deps():
print "==================================\nAEVILIA GB Online Makefile Wrapper\n==================================\nWritten in Py2 by Parzival\nMaintained by Parzival and ISSOtm\nThis version downloads repacked files from a server in order to run them. Nothing will be permanently installed.\nIf you don't like this, use the Offline Wrapper. It'll tell you what you need to download.\n\nIf you wish to download and unpack the files, type YES and press Enter.\nIf you do not wish to, either close this program or type anything else."
try:
confirm_str = raw_input("\"YES\" to confirm > ")
except EOFError:
return False
if confirm_str.lower() != "yes":
return False
print "\nDownload will begin now. Total size: 3 MB"
print "Downloading dependencies... (1.2MB)"
urllib.urlretrieve(deps_link,"dependencies.zip")
print "Downloading decompression utility... (574KB)"
urllib.urlretrieve(extractor_link,"7za.exe")
print "Decompressing dependencies...\n"
os.system("7za e dependencies.zip -y")
os.system("del dependencies.zip")
os.system("del 7za.exe")
return True
def compile_rom():
if check_deps():
print "Compilation tools detected, skipping download."
else:
if not download_deps():
print "Aborted."
return
print "\nBuilding AEVILIA GB ROM...\n"
os.system("make -f Makefile")
print "\nDo you want to clean up the files you downloaded? If you don't, you won't have to re-download them next time."
print "If you want to keep the files, simply close the program. Otherwise, type anything then hit Enter."
try:
raw_input()
except EOFError:
return
# If we reach here, the user asked for cleanup. Ok.
print "\nCleaning up...\n"
for dll in dll_list:
os.system("del " + dll + " /f")
os.system("del make.exe /f")
os.system("del dependencies.zip /f")
os.system("del 7za.exe /f")
print "Done! Exiting..."
compile_rom()
| import urllib
import os
deps_link = "https://stuff-for-my-programs.weebly.com/uploads/1/1/4/2/114279849/dependencies.zip"
extractor_link = "https://stuff-for-my-programs.weebly.com/uploads/1/1/4/2/114279849/7za.exe"
dll_list = ["libiconv2.dll", "libintl3.dll", "msvcp60.dll", "msvcrt.dll"]
"""
Downloaded File List:
dependencies.zip:
{
msvcp60.dll
msvcrt.dll
libiconv2.dll
libintl3.dll
make.exe
}
7za.exe
maybe later i'll use the shitty builtin zip module instead but i doubt it
"""
def in_PATH(fileToCheck):
path = os.getenv("PATH").split(";")
for dir in path:
if os.path.isfile(dir + fileToCheck):
return True
return False
def check_deps():
for dll in dll_list:
if not in_PATH(dll):
return False
return in_PATH("make.exe")
def download_deps():
print "==================================\nAEVILIA GB Online Makefile Wrapper\n==================================\nWritten in Py2 by Parzival\nMaintained by Parzival and ISSOtm\nThis version downloads repacked files from a server in order to run them. Nothing will be permanently installed.\nIf you don't like this, use the Offline Wrapper. It'll tell you what you need to download.\n\nIf you wish to download and unpack the files, type YES and press Enter.\nIf you do not wish to, either close this program or type anything else."
try:
confirm_str = raw_input("\"YES\" to confirm > ")
except EOFError:
return False
if confirm_str.lower() != "yes":
return False
print "\nDownload will begin now. Total size: 3 MB"
print "Downloading dependencies... (1.2MB)"
urllib.urlretrieve(deps_link,"dependencies.zip")
print "Downloading decompression utility... (574KB)"
urllib.urlretrieve(extractor_link,"7za.exe")
print "Decompressing dependencies...\n"
os.system("7za e dependencies.zip -y")
os.system("del dependencies.zip")
os.system("del 7za.exe")
return True
def compile_rom():
if check_deps():
print "Compilation tools detected, skipping download."
else:
if not download_deps():
print "Aborted."
return
print "\nBuilding AEVILIA GB ROM...\n"
os.system("make -f Makefile")
print "\nDo you want to clean up the files you downloaded? If you don't, you won't have to re-download them next time."
print "If you want to keep the files, simply close the program. Otherwise, type anything then hit Enter."
try:
raw_input()
except EOFError:
return
# If we reach here, the user asked for cleanup. Ok.
print "\nCleaning up...\n"
for dll in dll_list:
os.system("del " + dll + " /f")
os.system("del make.exe /f")
os.system("del dependencies.zip /f")
os.system("del 7za.exe /f")
print "Done! Exiting..."
compile_rom()
| en | 0.869496 | Downloaded File List: dependencies.zip: { msvcp60.dll msvcrt.dll libiconv2.dll libintl3.dll make.exe } 7za.exe maybe later i'll use the shitty builtin zip module instead but i doubt it # If we reach here, the user asked for cleanup. Ok. | 2.799579 | 3 |
django_migration_linter/sql_analyser/utils.py | Tesorio/django-migration-linter | 0 | 6614953 | <reponame>Tesorio/django-migration-linter<filename>django_migration_linter/sql_analyser/utils.py
# Copyright 2019 3YOURMIND GmbH
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from copy import deepcopy
def find_error_dict_with_code(tests, code):
return next((test_dict for test_dict in tests if test_dict["code"] == code), None)
def update_migration_tests(base_tests, specific_tests):
base_tests = deepcopy(base_tests)
for override_test in specific_tests:
migration_test_dict = find_error_dict_with_code(
base_tests, override_test["code"]
)
if migration_test_dict is None or not override_test["code"]:
migration_test_dict = {}
base_tests.append(migration_test_dict)
for key in override_test.keys():
migration_test_dict[key] = override_test[key]
return base_tests
def build_error_dict(migration_test, sql_statement):
table_search = (
re.search("TABLE `([^`]*)`", sql_statement, re.IGNORECASE)
if isinstance(sql_statement, str)
else None
)
col_search = (
re.search("COLUMN `([^`]*)`", sql_statement, re.IGNORECASE)
if isinstance(sql_statement, str)
else None
)
return {
"err_msg": migration_test["err_msg"],
"code": migration_test["code"],
"table": table_search.group(1) if table_search else None,
"column": col_search.group(1) if col_search else None,
}
| # Copyright 2019 3YOURMIND GmbH
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from copy import deepcopy
def find_error_dict_with_code(tests, code):
return next((test_dict for test_dict in tests if test_dict["code"] == code), None)
def update_migration_tests(base_tests, specific_tests):
base_tests = deepcopy(base_tests)
for override_test in specific_tests:
migration_test_dict = find_error_dict_with_code(
base_tests, override_test["code"]
)
if migration_test_dict is None or not override_test["code"]:
migration_test_dict = {}
base_tests.append(migration_test_dict)
for key in override_test.keys():
migration_test_dict[key] = override_test[key]
return base_tests
def build_error_dict(migration_test, sql_statement):
table_search = (
re.search("TABLE `([^`]*)`", sql_statement, re.IGNORECASE)
if isinstance(sql_statement, str)
else None
)
col_search = (
re.search("COLUMN `([^`]*)`", sql_statement, re.IGNORECASE)
if isinstance(sql_statement, str)
else None
)
return {
"err_msg": migration_test["err_msg"],
"code": migration_test["code"],
"table": table_search.group(1) if table_search else None,
"column": col_search.group(1) if col_search else None,
} | en | 0.840169 | # Copyright 2019 3YOURMIND GmbH # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. | 1.930964 | 2 |
setup.py | dealfonso/configupdate | 0 | 6614954 | <gh_stars>0
#!/usr/bin/env python
import os
from distutils.core import setup
ldesc = ""
f = open("README.md","rt")
ldesc = f.read()
f.close()
setup(name='configupdate',
version="0.2",
description='An utility to combine config files',
author='<NAME>',
author_email='<EMAIL>',
url='http://www.grycap.upv.es',
scripts = [ "configupdate" ],
long_description=ldesc,
license = "MIT",
requires = [ 'cpyutils (>= 0.24)' ],
)
| #!/usr/bin/env python
import os
from distutils.core import setup
ldesc = ""
f = open("README.md","rt")
ldesc = f.read()
f.close()
setup(name='configupdate',
version="0.2",
description='An utility to combine config files',
author='<NAME>',
author_email='<EMAIL>',
url='http://www.grycap.upv.es',
scripts = [ "configupdate" ],
long_description=ldesc,
license = "MIT",
requires = [ 'cpyutils (>= 0.24)' ],
) | ru | 0.26433 | #!/usr/bin/env python | 1.45008 | 1 |
back-end/database/admin.py | junaid1460/comment-system | 0 | 6614955 | <filename>back-end/database/admin.py
from django.contrib import admin
# Register your models here.
from database.models import Comment, Post, Reply
for model in [Comment, Post, Reply]:
admin.site.register(model) | <filename>back-end/database/admin.py
from django.contrib import admin
# Register your models here.
from database.models import Comment, Post, Reply
for model in [Comment, Post, Reply]:
admin.site.register(model) | en | 0.968259 | # Register your models here. | 1.621454 | 2 |
Models.py | ariyanzri/NowCasting-nets | 0 | 6614956 | <reponame>ariyanzri/NowCasting-nets
from numpy.core.defchararray import index
from tensorflow import keras
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import h5py
from tensorflow.keras.optimizers import Adam, SGD, RMSprop
from sklearn.model_selection import train_test_split
from tensorflow.keras import Sequential, Model
from tensorflow.keras.layers import Input, ConvLSTM2D, BatchNormalization
from tensorflow.keras.layers import Conv3D, MaxPool3D, Conv3DTranspose, Add
from tensorflow.keras.layers import SpatialDropout3D, UpSampling3D, Dropout, RepeatVector, Average
from tensorflow.keras.models import load_model
from tensorflow.keras.losses import mse, mae, Huber
from tensorflow.keras.optimizers import Adam, SGD, RMSprop
mpl.rcParams['figure.dpi'] = 300
import argparse
import json
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
import tensorflow.keras.layers as layers
import pickle
class UNET_Like_3D_Class:
def __init__(self, dic):
input_layer = Input(dic['input_shape'])
if dic['input_bn']:
x_init = BatchNormalization()(input_layer)
else:
x_init = input_layer
x_conv1_b1 = Conv3D(dic['start_filter'], [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_init)
x_conv2_b1 = Conv3D(dic['start_filter'], [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_conv1_b1)
x_max_b1 = MaxPool3D([1, 2, 2],padding='same')(x_conv2_b1)
x_bn_b1 = BatchNormalization()(x_max_b1)
x_do_b1 = Dropout(dic['dr_rate'])(x_bn_b1)
x_conv1_b2 = Conv3D(dic['start_filter']*2, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_do_b1)
x_conv2_b2 = Conv3D(dic['start_filter']*2, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_conv1_b2)
x_max_b2 = MaxPool3D([1, 2, 2],padding='same')(x_conv2_b2)
x_bn_b2 = BatchNormalization()(x_max_b2)
x_do_b2 = Dropout(dic['dr_rate'])(x_bn_b2)
x_conv1_b3 = Conv3D(dic['start_filter']*4, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_do_b2)
x_conv2_b3 = Conv3D(dic['start_filter']*4, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_conv1_b3)
x_max_b3 = MaxPool3D([1, 2, 2],padding='same')(x_conv2_b3)
x_bn_b3 = BatchNormalization()(x_max_b3)
x_do_b3 = Dropout(dic['dr_rate'])(x_bn_b3)
x_conv1_b4 = Conv3D(dic['start_filter']*8, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_do_b3)
x_conv2_b4 = Conv3D(dic['start_filter']*8, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_conv1_b4)
x_max_b4 = MaxPool3D([1, 2, 2],padding='same')(x_conv2_b4)
x_bn_b4 = BatchNormalization()(x_max_b4)
x_do_b4 = Dropout(dic['dr_rate'])(x_bn_b4)
# ------- Head Normal Output (normal decoder)
x_conv1_b5 = Conv3D(dic['start_filter']*8, [3, 1, 1], activation=dic['activation'])(x_do_b4)
x_conv2_b5 = Conv3D(dic['start_filter']*8, [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b5)
x_deconv_b5 = Conv3DTranspose(dic['start_filter']*8, [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b5)
x_bn_b5 = BatchNormalization()(x_deconv_b5)
x_do_b5 = Dropout(dic['dr_rate'])(x_bn_b5)
cropped_x_conv2_b4 = layers.Cropping3D(cropping=((2,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b4)
x_conv1_b6 = Conv3D(dic['start_filter']*4, [3, 1, 1], activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b4,x_do_b5]))
x_conv2_b6 = Conv3D(dic['start_filter']*4, [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b6)
x_deconv_b6 = Conv3DTranspose(dic['start_filter']*4, [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b6)
x_bn_b6 = BatchNormalization()(x_deconv_b6)
x_do_b6 = Dropout(dic['dr_rate'])(x_bn_b6)
cropped_x_conv2_b3 = layers.Cropping3D(cropping=((4,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b3)
x_conv1_b7 = Conv3D(dic['start_filter']*2, [3, 1, 1], activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b3,x_do_b6]))
x_conv2_b7 = Conv3D(dic['start_filter']*2, [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b7)
x_deconv_b7 = Conv3DTranspose(dic['start_filter']*2, [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b7)
x_bn_b7 = BatchNormalization()(x_deconv_b7)
x_do_b7 = Dropout(dic['dr_rate'])(x_bn_b7)
cropped_x_conv2_b2 = layers.Cropping3D(cropping=((6,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b2)
x_conv1_b8 = Conv3D(dic['start_filter'], [1, 1, 1],padding='same', activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b2,x_do_b7]))
x_conv2_b8 = Conv3D(dic['start_filter'], [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b8)
x_deconv_b8 = Conv3DTranspose(dic['start_filter'], [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b8)
x_bn_b8 = BatchNormalization()(x_deconv_b8)
x_do_b8 = Dropout(dic['dr_rate'])(x_bn_b8)
cropped_x_conv2_b1 = layers.Cropping3D(cropping=((6,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b1)
x_conv1_b9 = Conv3D(int(dic['start_filter']/2), [1, 1, 1],padding='same', activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b1,x_do_b8]))
x_conv2_b9 = Conv3D(int(dic['start_filter']/2), [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b9)
x_bn_b9 = BatchNormalization()(x_conv2_b9)
x_do_b9 = Dropout(dic['dr_rate'])(x_bn_b9)
normal_output = Conv3DTranspose(1, [1, 1, 1], activation='linear')(x_do_b9)
# ----------
model = Model(inputs=[input_layer], outputs=[normal_output])
opt = eval(dic['optimizer'])
if dic['loss'] == 'huber':
loss = Huber()
else:
loss = dic['loss']
model.compile(optimizer=opt(dic['lr']), loss=loss,
metrics=['mse', 'mae'])
self.model = model
class UNET_3D_Residual_Class:
def __init__(self, dic):
input_layer = Input(dic['input_shape'])
if dic['input_bn']:
x_init = BatchNormalization()(input_layer)
else:
x_init = input_layer
x_conv1_b1 = Conv3D(dic['start_filter'], [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_init)
x_conv2_b1 = Conv3D(dic['start_filter'], [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_conv1_b1)
x_max_b1 = MaxPool3D([1, 2, 2],padding='same')(x_conv2_b1)
x_bn_b1 = BatchNormalization()(x_max_b1)
x_do_b1 = Dropout(dic['dr_rate'])(x_bn_b1)
x_conv1_b2 = Conv3D(dic['start_filter']*2, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_do_b1)
x_conv2_b2 = Conv3D(dic['start_filter']*2, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_conv1_b2)
x_max_b2 = MaxPool3D([1, 2, 2],padding='same')(x_conv2_b2)
x_bn_b2 = BatchNormalization()(x_max_b2)
x_do_b2 = Dropout(dic['dr_rate'])(x_bn_b2)
x_conv1_b3 = Conv3D(dic['start_filter']*4, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_do_b2)
x_conv2_b3 = Conv3D(dic['start_filter']*4, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_conv1_b3)
x_max_b3 = MaxPool3D([1, 2, 2],padding='same')(x_conv2_b3)
x_bn_b3 = BatchNormalization()(x_max_b3)
x_do_b3 = Dropout(dic['dr_rate'])(x_bn_b3)
x_conv1_b4 = Conv3D(dic['start_filter']*8, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_do_b3)
x_conv2_b4 = Conv3D(dic['start_filter']*8, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_conv1_b4)
x_max_b4 = MaxPool3D([1, 2, 2],padding='same')(x_conv2_b4)
x_bn_b4 = BatchNormalization()(x_max_b4)
x_do_b4 = Dropout(dic['dr_rate'])(x_bn_b4)
# ------- Head Residual Output (Residual Decoder)
x_conv1_b5 = Conv3D(dic['start_filter']*8, [3, 1, 1], activation=dic['activation'])(x_do_b4)
x_conv2_b5 = Conv3D(dic['start_filter']*8, [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b5)
x_deconv_b5 = Conv3DTranspose(dic['start_filter']*8, [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b5)
x_bn_b5 = BatchNormalization()(x_deconv_b5)
x_do_b5 = Dropout(dic['dr_rate'])(x_bn_b5)
cropped_x_conv2_b4 = layers.Cropping3D(cropping=((8,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b4)
cropped_x_conv2_b4 = layers.concatenate([cropped_x_conv2_b4]*7,axis=1)
x_conv1_b6 = Conv3D(dic['start_filter']*4, [3, 1, 1], activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b4,x_do_b5]))
x_conv2_b6 = Conv3D(dic['start_filter']*4, [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b6)
x_deconv_b6 = Conv3DTranspose(dic['start_filter']*4, [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b6)
x_bn_b6 = BatchNormalization()(x_deconv_b6)
x_do_b6 = Dropout(dic['dr_rate'])(x_bn_b6)
cropped_x_conv2_b3 = layers.Cropping3D(cropping=((8,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b3)
cropped_x_conv2_b3 = layers.concatenate([cropped_x_conv2_b3]*5,axis=1)
x_conv1_b7 = Conv3D(dic['start_filter']*2, [3, 1, 1], activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b3,x_do_b6]))
x_conv2_b7 = Conv3D(dic['start_filter']*2, [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b7)
x_deconv_b7 = Conv3DTranspose(dic['start_filter']*2, [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b7)
x_bn_b7 = BatchNormalization()(x_deconv_b7)
x_do_b7 = Dropout(dic['dr_rate'])(x_bn_b7)
cropped_x_conv2_b2 = layers.Cropping3D(cropping=((8,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b2)
cropped_x_conv2_b2 = layers.concatenate([cropped_x_conv2_b2]*3,axis=1)
x_conv1_b8 = Conv3D(dic['start_filter'], [1, 1, 1],padding='same', activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b2,x_do_b7]))
x_conv2_b8 = Conv3D(dic['start_filter'], [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b8)
x_deconv_b8 = Conv3DTranspose(dic['start_filter'], [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b8)
x_bn_b8 = BatchNormalization()(x_deconv_b8)
x_do_b8 = Dropout(dic['dr_rate'])(x_bn_b8)
cropped_x_conv2_b1 = layers.Cropping3D(cropping=((8,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b1)
cropped_x_conv2_b1 = layers.concatenate([cropped_x_conv2_b1]*3,axis=1)
x_conv1_b9 = Conv3D(int(dic['start_filter']/2), [1, 1, 1],padding='same', activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b1,x_do_b8]))
x_conv2_b9 = Conv3D(int(dic['start_filter']/2), [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b9)
x_bn_b9 = BatchNormalization()(x_conv2_b9)
x_do_b9 = Dropout(dic['dr_rate'])(x_bn_b9)
residual_output = Conv3DTranspose(1, [1, 1, 1], activation='linear')(x_do_b9)
last_timestep_input_residual = layers.Cropping3D(cropping=((8,0),(0,0),(0,0)),data_format="channels_last")(input_layer)
last_timestep_input_residual = layers.concatenate([last_timestep_input_residual]*3,axis=1)
residual_output = Add()([last_timestep_input_residual, residual_output])
# ----------
model = Model(inputs=[input_layer], outputs=[residual_output])
opt = eval(dic['optimizer'])
if dic['loss'] == 'huber':
loss = Huber()
else:
loss = dic['loss']
model.compile(optimizer=opt(dic['lr']), loss=loss,
metrics=['mse', 'mae'])
self.model = model
class UNET_3D_Both_Class:
def __init__(self, dic):
input_layer = Input(dic['input_shape'])
if dic['input_bn']:
x_init = BatchNormalization()(input_layer)
else:
x_init = input_layer
x_conv1_b1 = Conv3D(dic['start_filter'], [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_init)
x_conv2_b1 = Conv3D(dic['start_filter'], [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_conv1_b1)
x_max_b1 = MaxPool3D([1, 2, 2],padding='same')(x_conv2_b1)
x_bn_b1 = BatchNormalization()(x_max_b1)
x_do_b1 = Dropout(dic['dr_rate'])(x_bn_b1)
x_conv1_b2 = Conv3D(dic['start_filter']*2, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_do_b1)
x_conv2_b2 = Conv3D(dic['start_filter']*2, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_conv1_b2)
x_max_b2 = MaxPool3D([1, 2, 2],padding='same')(x_conv2_b2)
x_bn_b2 = BatchNormalization()(x_max_b2)
x_do_b2 = Dropout(dic['dr_rate'])(x_bn_b2)
x_conv1_b3 = Conv3D(dic['start_filter']*4, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_do_b2)
x_conv2_b3 = Conv3D(dic['start_filter']*4, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_conv1_b3)
x_max_b3 = MaxPool3D([1, 2, 2],padding='same')(x_conv2_b3)
x_bn_b3 = BatchNormalization()(x_max_b3)
x_do_b3 = Dropout(dic['dr_rate'])(x_bn_b3)
x_conv1_b4 = Conv3D(dic['start_filter']*8, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_do_b3)
x_conv2_b4 = Conv3D(dic['start_filter']*8, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_conv1_b4)
x_max_b4 = MaxPool3D([1, 2, 2],padding='same')(x_conv2_b4)
x_bn_b4 = BatchNormalization()(x_max_b4)
x_do_b4 = Dropout(dic['dr_rate'])(x_bn_b4)
# ------- Head Normal Output (normal decoder)
x_conv1_b5 = Conv3D(dic['start_filter']*8, [3, 1, 1], activation=dic['activation'])(x_do_b4)
x_conv2_b5 = Conv3D(dic['start_filter']*8, [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b5)
x_deconv_b5 = Conv3DTranspose(dic['start_filter']*8, [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b5)
x_bn_b5 = BatchNormalization()(x_deconv_b5)
x_do_b5 = Dropout(dic['dr_rate'])(x_bn_b5)
cropped_x_conv2_b4 = layers.Cropping3D(cropping=((2,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b4)
x_conv1_b6 = Conv3D(dic['start_filter']*4, [3, 1, 1], activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b4,x_do_b5]))
x_conv2_b6 = Conv3D(dic['start_filter']*4, [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b6)
x_deconv_b6 = Conv3DTranspose(dic['start_filter']*4, [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b6)
x_bn_b6 = BatchNormalization()(x_deconv_b6)
x_do_b6 = Dropout(dic['dr_rate'])(x_bn_b6)
cropped_x_conv2_b3 = layers.Cropping3D(cropping=((4,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b3)
x_conv1_b7 = Conv3D(dic['start_filter']*2, [3, 1, 1], activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b3,x_do_b6]))
x_conv2_b7 = Conv3D(dic['start_filter']*2, [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b7)
x_deconv_b7 = Conv3DTranspose(dic['start_filter']*2, [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b7)
x_bn_b7 = BatchNormalization()(x_deconv_b7)
x_do_b7 = Dropout(dic['dr_rate'])(x_bn_b7)
cropped_x_conv2_b2 = layers.Cropping3D(cropping=((6,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b2)
x_conv1_b8 = Conv3D(dic['start_filter'], [1, 1, 1],padding='same', activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b2,x_do_b7]))
x_conv2_b8 = Conv3D(dic['start_filter'], [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b8)
x_deconv_b8 = Conv3DTranspose(dic['start_filter'], [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b8)
x_bn_b8 = BatchNormalization()(x_deconv_b8)
x_do_b8 = Dropout(dic['dr_rate'])(x_bn_b8)
cropped_x_conv2_b1 = layers.Cropping3D(cropping=((6,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b1)
x_conv1_b9 = Conv3D(int(dic['start_filter']/2), [1, 1, 1],padding='same', activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b1,x_do_b8]))
x_conv2_b9 = Conv3D(int(dic['start_filter']/2), [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b9)
x_bn_b9 = BatchNormalization()(x_conv2_b9)
x_do_b9 = Dropout(dic['dr_rate'])(x_bn_b9)
normal_output = Conv3DTranspose(1, [1, 1, 1], activation='linear')(x_do_b9)
# ------- Head Residual Output (Residual Decoder)
x_conv1_b5 = Conv3D(dic['start_filter']*8, [3, 1, 1], activation=dic['activation'])(x_max_b4)
x_conv2_b5 = Conv3D(dic['start_filter']*8, [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b5)
x_deconv_b5 = Conv3DTranspose(dic['start_filter']*8, [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b5)
x_bn_b5 = BatchNormalization()(x_deconv_b5)
x_do_b5 = Dropout(dic['dr_rate'])(x_bn_b5)
cropped_x_conv2_b4 = layers.Cropping3D(cropping=((8,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b4)
cropped_x_conv2_b4 = layers.concatenate([cropped_x_conv2_b4]*7,axis=1)
x_conv1_b6 = Conv3D(dic['start_filter']*4, [3, 1, 1], activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b4,x_do_b5]))
x_conv2_b6 = Conv3D(dic['start_filter']*4, [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b6)
x_deconv_b6 = Conv3DTranspose(dic['start_filter']*4, [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b6)
x_bn_b6 = BatchNormalization()(x_deconv_b6)
x_do_b6 = Dropout(dic['dr_rate'])(x_bn_b6)
cropped_x_conv2_b3 = layers.Cropping3D(cropping=((8,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b3)
cropped_x_conv2_b3 = layers.concatenate([cropped_x_conv2_b3]*5,axis=1)
x_conv1_b7 = Conv3D(dic['start_filter']*2, [3, 1, 1], activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b3,x_do_b6]))
x_conv2_b7 = Conv3D(dic['start_filter']*2, [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b7)
x_deconv_b7 = Conv3DTranspose(dic['start_filter']*2, [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b7)
x_bn_b7 = BatchNormalization()(x_deconv_b7)
x_do_b7 = Dropout(dic['dr_rate'])(x_bn_b7)
cropped_x_conv2_b2 = layers.Cropping3D(cropping=((8,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b2)
cropped_x_conv2_b2 = layers.concatenate([cropped_x_conv2_b2]*3,axis=1)
x_conv1_b8 = Conv3D(dic['start_filter'], [1, 1, 1],padding='same', activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b2,x_do_b7]))
x_conv2_b8 = Conv3D(dic['start_filter'], [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b8)
x_deconv_b8 = Conv3DTranspose(dic['start_filter'], [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b8)
x_bn_b8 = BatchNormalization()(x_deconv_b8)
x_do_b8 = Dropout(dic['dr_rate'])(x_bn_b8)
cropped_x_conv2_b1 = layers.Cropping3D(cropping=((8,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b1)
cropped_x_conv2_b1 = layers.concatenate([cropped_x_conv2_b1]*3,axis=1)
x_conv1_b9 = Conv3D(int(dic['start_filter']/2), [1, 1, 1],padding='same', activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b1,x_do_b8]))
x_conv2_b9 = Conv3D(int(dic['start_filter']/2), [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b9)
x_bn_b9 = BatchNormalization()(x_conv2_b9)
x_do_b9 = Dropout(dic['dr_rate'])(x_bn_b9)
residual_output = Conv3DTranspose(1, [1, 1, 1], activation='linear')(x_do_b9)
last_timestep_input_residual = layers.Cropping3D(cropping=((8,0),(0,0),(0,0)),data_format="channels_last")(input_layer)
last_timestep_input_residual = layers.concatenate([last_timestep_input_residual]*3,axis=1)
residual_output = Add()([last_timestep_input_residual, residual_output])
# ---------- Averaging the two output
output = Average()([normal_output,residual_output])
model = Model(inputs=[input_layer], outputs=[output])
opt = eval(dic['optimizer'])
if dic['loss'] == 'huber':
loss = Huber()
else:
loss = dic['loss']
model.compile(optimizer=opt(dic['lr']), loss=loss,
metrics=['mse', 'mae'])
self.model = model
class CONV_LSTM_Class:
    """Stacked ConvLSTM2D forecaster with a direct ("normal") output head:
    the network maps the input sequence straight to the predicted frames,
    with no residual shortcut from the input.
    """

    def __init__(self, dic):
        """Build and compile the model from the hyper-parameter dict *dic*.

        Keys read: 'input_shape', 'start_filter', 'conv_kernel_size',
        'optimizer' (string evaluated with eval -- see NOTE below), 'lr',
        'loss' ('huber' selects the Keras Huber loss; any other value is
        passed through to model.compile unchanged).
        """
        input_layer = Input(dic['input_shape'])
        x = input_layer
        # Three ConvLSTM2D stages with halving filter counts; each stage is
        # followed by a valid-padding Conv3D with a [3, 1, 1] kernel, which
        # shortens the time axis by 2 per stage (6 steps in total).
        x = ConvLSTM2D(filters=dic['start_filter'], kernel_size=(dic['conv_kernel_size'], dic['conv_kernel_size']), padding="same", activation='relu', return_sequences=True)(x)
        x = Conv3D(dic['start_filter'], [3, 1, 1], activation='relu')(x)
        x = ConvLSTM2D(filters=int(dic['start_filter']/2), kernel_size=(dic['conv_kernel_size'], dic['conv_kernel_size']), padding="same", activation='relu', return_sequences=True)(x)
        x = Conv3D(int(dic['start_filter']/2), [3, 1, 1], activation='relu')(x)
        x = ConvLSTM2D(filters=int(dic['start_filter']/4), kernel_size=(dic['conv_kernel_size'], dic['conv_kernel_size']), padding="same", activation='relu', return_sequences=True)(x)
        x = Conv3D(int(dic['start_filter']/4), [3, 1, 1], activation='relu')(x)
        # 1x1x1 projection down to a single output channel.
        x = Conv3D(1, [1, 1, 1], padding='same', activation='relu')(x)
        model = Model(inputs=[input_layer], outputs=[x])
        # NOTE(review): eval() executes arbitrary code from the config dict;
        # only use with trusted configuration sources.
        opt = eval(dic['optimizer'])
        if dic['loss'] == 'huber':
            loss = Huber()
        else:
            loss = dic['loss']
        model.compile(optimizer=opt(dic['lr']), loss=loss,
                      metrics=['mse', 'mae'])
        self.model = model
class CONV_LSTM_Residual_Class:
    """Stacked ConvLSTM2D forecaster with a residual shortcut: the network
    predicts a correction that is added to the trailing timesteps of the
    raw input sequence.
    """

    def __init__(self, dic):
        """Build and compile the model from the hyper-parameter dict *dic*.

        Keys read: 'input_shape', 'start_filter', 'conv_kernel_size',
        'optimizer' (string evaluated with eval -- see NOTE below), 'lr',
        'loss' ('huber' selects the Keras Huber loss).
        """
        input_layer = Input(dic['input_shape'])
        x = input_layer
        # Three ConvLSTM2D stages; each [3, 1, 1] valid-padding Conv3D
        # shortens the time axis by 2 (6 steps in total over the branch).
        x = ConvLSTM2D(filters=dic['start_filter'], kernel_size=(dic['conv_kernel_size'], dic['conv_kernel_size']), padding="same", activation='relu', return_sequences=True)(x)
        x = Conv3D(dic['start_filter'], [3, 1, 1], activation='relu')(x)
        x = ConvLSTM2D(filters=int(dic['start_filter']/2), kernel_size=(dic['conv_kernel_size'], dic['conv_kernel_size']), padding="same", activation='relu', return_sequences=True)(x)
        x = Conv3D(int(dic['start_filter']/2), [3, 1, 1], activation='relu')(x)
        x = ConvLSTM2D(filters=int(dic['start_filter']/4), kernel_size=(dic['conv_kernel_size'], dic['conv_kernel_size']), padding="same", activation='relu', return_sequences=True)(x)
        x = Conv3D(int(dic['start_filter']/4), [3, 1, 1], activation='relu')(x)
        x = Conv3D(1, [1, 1, 1], padding='same', activation='relu')(x)
        # Residual shortcut: drop the first 8 timesteps of the input (with
        # channels_last 5D tensors, Cropping3D's first pair crops the time
        # axis) and add the predicted correction on top.
        # NOTE(review): the branch shortens time by 6 while the shortcut
        # crops 8, so Add() only builds with an unspecified time dimension
        # and would fail at runtime unless shapes line up -- verify against
        # the actual input_shape used; possible off-by-two.
        last_ts = layers.Cropping3D(cropping=((8,0),(0,0),(0,0)),data_format="channels_last")(input_layer)
        output = Add()([last_ts, x])
        model = Model(inputs=[input_layer], outputs=[output])
        # NOTE(review): eval() executes arbitrary code from the config dict.
        opt = eval(dic['optimizer'])
        if dic['loss'] == 'huber':
            loss = Huber()
        else:
            loss = dic['loss']
        model.compile(optimizer=opt(dic['lr']), loss=loss,
                      metrics=['mse', 'mae'])
        self.model = model
class CONV_LSTM_Both_Class:
    """Two-headed stacked-ConvLSTM forecaster: one head predicts the frames
    directly ("normal"), the other predicts a residual added to the trailing
    input timesteps; the final output is the average of both heads.
    """

    def __init__(self, dic):
        """Build and compile the model from the hyper-parameter dict *dic*.

        Keys read: 'input_shape', 'start_filter', 'conv_kernel_size',
        'optimizer' (string evaluated with eval -- see NOTE below), 'lr',
        'loss' ('huber' selects the Keras Huber loss).
        """
        input_layer = Input(dic['input_shape'])
        # Normal Model -- direct prediction head (its own weights).
        x_normal = input_layer
        x_normal = ConvLSTM2D(filters=dic['start_filter'], kernel_size=(dic['conv_kernel_size'], dic['conv_kernel_size']), padding="same", activation='relu', return_sequences=True)(x_normal)
        x_normal = Conv3D(dic['start_filter'], [3, 1, 1], activation='relu')(x_normal)
        x_normal = ConvLSTM2D(filters=int(dic['start_filter']/2), kernel_size=(dic['conv_kernel_size'], dic['conv_kernel_size']), padding="same", activation='relu', return_sequences=True)(x_normal)
        x_normal = Conv3D(int(dic['start_filter']/2), [3, 1, 1], activation='relu')(x_normal)
        x_normal = ConvLSTM2D(filters=int(dic['start_filter']/4), kernel_size=(dic['conv_kernel_size'], dic['conv_kernel_size']), padding="same", activation='relu', return_sequences=True)(x_normal)
        x_normal = Conv3D(int(dic['start_filter']/4), [3, 1, 1], activation='relu')(x_normal)
        output_normal = Conv3D(1, [1, 1, 1], padding='same', activation='relu')(x_normal)
        # Residual Model -- same topology, separate weights; predicts a
        # correction added to the cropped input below.
        x_residual = input_layer
        x_residual = ConvLSTM2D(filters=dic['start_filter'], kernel_size=(dic['conv_kernel_size'], dic['conv_kernel_size']), padding="same", activation='relu', return_sequences=True)(x_residual)
        x_residual = Conv3D(dic['start_filter'], [3, 1, 1], activation='relu')(x_residual)
        x_residual = ConvLSTM2D(filters=int(dic['start_filter']/2), kernel_size=(dic['conv_kernel_size'], dic['conv_kernel_size']), padding="same", activation='relu', return_sequences=True)(x_residual)
        x_residual = Conv3D(int(dic['start_filter']/2), [3, 1, 1], activation='relu')(x_residual)
        x_residual = ConvLSTM2D(filters=int(dic['start_filter']/4), kernel_size=(dic['conv_kernel_size'], dic['conv_kernel_size']), padding="same", activation='relu', return_sequences=True)(x_residual)
        x_residual = Conv3D(int(dic['start_filter']/4), [3, 1, 1], activation='relu')(x_residual)
        x_residual = Conv3D(1, [1, 1, 1], padding='same', activation='relu')(x_residual)
        # Crop the first 8 timesteps of the input as the residual baseline.
        # NOTE(review): the branch shortens time by 6 ([3,1,1] valid convs)
        # while the crop removes 8 -- verify shapes against input_shape.
        last_ts = layers.Cropping3D(cropping=((8,0),(0,0),(0,0)),data_format="channels_last")(input_layer)
        output_residual = Add()([last_ts, x_residual])
        # Model -- average the two heads.
        output = Average()([output_normal,output_residual])
        model = Model(inputs=[input_layer], outputs=[output])
        # NOTE(review): eval() executes arbitrary code from the config dict.
        opt = eval(dic['optimizer'])
        if dic['loss'] == 'huber':
            loss = Huber()
        else:
            loss = dic['loss']
        model.compile(optimizer=opt(dic['lr']), loss=loss,
                      metrics=['mse', 'mae'])
        self.model = model
class LinearRegression_Class:
    """scikit-learn LinearRegression wrapped behind the same fit/predict
    interface as the Keras model classes in this module.

    Samples are pixels: a (T, H, W) cube is flattened to (H*W, T) so that
    each pixel's time series becomes one regression row.
    """

    def __init__(self):
        # Populated by fit() or load_model().
        self.sk_model = None

    @staticmethod
    def load_model(path):
        """Load a pickled sklearn model from *path* and return a wrapper.

        NOTE(review): pickle.load executes arbitrary code; only load files
        from trusted sources.
        """
        mdl = LinearRegression_Class()
        with open(path, 'rb') as f:
            mdl.sk_model = pickle.load(f)
        return mdl

    @staticmethod
    def reshape(arr):
        """Squeeze to (T, H, W) and flatten to (H*W, T) sample rows."""
        arr = arr.squeeze()
        arr = arr.reshape(arr.shape[0], arr.shape[1] * arr.shape[2])
        arr = arr.swapaxes(0, 1)
        return arr

    @staticmethod
    def reshape_back(arr, s):
        """Inverse of reshape(): fold (H*W, T) rows back into shape *s*."""
        arr = arr.swapaxes(0, 1)
        arr = arr.reshape(s[0], s[1], s[2])
        return arr

    def fit(self, X_train, y_train):
        """Fit on already-flattened (n_samples, n_features) arrays."""
        self.sk_model = LinearRegression().fit(X_train, y_train)

    def predict(self, X):
        """Predict a (3, H, W) cube from the input cube *X*."""
        s = X.squeeze().shape
        reshaped_X = LinearRegression_Class.reshape(X)
        reshaped_Y = self.sk_model.predict(reshaped_X)
        Y = LinearRegression_Class.reshape_back(reshaped_Y, (3, s[1], s[2]))
        return Y
class RandomForrest_Class:
    """scikit-learn RandomForestRegressor wrapped behind the same
    fit/predict interface as the Keras model classes in this module.

    Because random forests are expensive, fit() trains on a large random
    subsample (with replacement) of the flattened pixel rows.
    """

    def __init__(self):
        self.sk_model = RandomForestRegressor(max_depth=10, random_state=0, n_estimators=20)

    @staticmethod
    def load_model(path):
        """Load a pickled sklearn model from *path* and return a wrapper.

        NOTE(review): pickle.load executes arbitrary code; only load files
        from trusted sources.
        """
        mdl = RandomForrest_Class()
        with open(path, 'rb') as f:
            mdl.sk_model = pickle.load(f)
        return mdl

    @staticmethod
    def reshape(arr):
        """Squeeze to (T, H, W) and flatten to (H*W, T) sample rows."""
        arr = arr.squeeze()
        arr = arr.reshape(arr.shape[0], arr.shape[1] * arr.shape[2])
        arr = arr.swapaxes(0, 1)
        return arr

    @staticmethod
    def reshape_back(arr, s):
        """Inverse of reshape(): fold (H*W, T) rows back into shape *s*."""
        arr = arr.swapaxes(0, 1)
        arr = arr.reshape(s[0], s[1], s[2])
        return arr

    @staticmethod
    def get_sample_indexes(arr, n_samples=10000000):
        """Draw *n_samples* row indices of *arr* uniformly, with replacement.

        The default keeps the original hard-coded 10,000,000-row subsample.
        """
        indexes = np.random.choice(np.array(range(arr.shape[0])), n_samples)
        return indexes

    def fit(self, X_train, y_train):
        """Fit the forest on a random subsample of the training rows."""
        indexes = RandomForrest_Class.get_sample_indexes(X_train)
        X = X_train[indexes]
        y = y_train[indexes]
        self.sk_model = self.sk_model.fit(X, y)

    def predict(self, X):
        """Predict a (3, H, W) cube from the input cube *X*."""
        s = X.squeeze().shape
        reshaped_X = RandomForrest_Class.reshape(X)
        reshaped_Y = self.sk_model.predict(reshaped_X)
        Y = RandomForrest_Class.reshape_back(reshaped_Y, (3, s[1], s[2]))
        return Y
from numpy.core.defchararray import index
from tensorflow import keras
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import h5py
from tensorflow.keras.optimizers import Adam, SGD, RMSprop
from sklearn.model_selection import train_test_split
from tensorflow.keras import Sequential, Model
from tensorflow.keras.layers import Input, ConvLSTM2D, BatchNormalization
from tensorflow.keras.layers import Conv3D, MaxPool3D, Conv3DTranspose, Add
from tensorflow.keras.layers import SpatialDropout3D, UpSampling3D, Dropout, RepeatVector, Average
from tensorflow.keras.models import load_model
from tensorflow.keras.losses import mse, mae, Huber
from tensorflow.keras.optimizers import Adam, SGD, RMSprop
mpl.rcParams['figure.dpi'] = 300
import argparse
import json
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
import tensorflow.keras.layers as layers
import pickle
class UNET_Like_3D_Class:
    """3D U-Net-style encoder/decoder with a single direct ("normal") head.

    The encoder stacks four Conv3D blocks with spatial-only [1, k, k]
    kernels and 2x spatial max-pooling; the decoder mirrors it with
    Conv3DTranspose upsampling and encoder skip connections.  The
    [3, 1, 1] valid-padding convolutions in the decoder shorten the time
    axis step by step, which is why each skip tensor is cropped in time
    before concatenation.
    """

    def __init__(self, dic):
        """Build and compile the model from the hyper-parameter dict *dic*.

        Keys read: 'input_shape', 'input_bn', 'start_filter',
        'conv_kernel_size', 'deconv_kernel_size', 'activation', 'dr_rate',
        'optimizer' (string passed to eval -- see NOTE below), 'lr', 'loss'.
        """
        input_layer = Input(dic['input_shape'])
        # Optional batch-norm directly on the raw input.
        if dic['input_bn']:
            x_init = BatchNormalization()(input_layer)
        else:
            x_init = input_layer
        # ----- Encoder block 1 (start_filter filters) -----
        x_conv1_b1 = Conv3D(dic['start_filter'], [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_init)
        x_conv2_b1 = Conv3D(dic['start_filter'], [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_conv1_b1)
        x_max_b1 = MaxPool3D([1, 2, 2],padding='same')(x_conv2_b1)
        x_bn_b1 = BatchNormalization()(x_max_b1)
        x_do_b1 = Dropout(dic['dr_rate'])(x_bn_b1)
        # ----- Encoder block 2 (2x filters) -----
        x_conv1_b2 = Conv3D(dic['start_filter']*2, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_do_b1)
        x_conv2_b2 = Conv3D(dic['start_filter']*2, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_conv1_b2)
        x_max_b2 = MaxPool3D([1, 2, 2],padding='same')(x_conv2_b2)
        x_bn_b2 = BatchNormalization()(x_max_b2)
        x_do_b2 = Dropout(dic['dr_rate'])(x_bn_b2)
        # ----- Encoder block 3 (4x filters) -----
        x_conv1_b3 = Conv3D(dic['start_filter']*4, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_do_b2)
        x_conv2_b3 = Conv3D(dic['start_filter']*4, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_conv1_b3)
        x_max_b3 = MaxPool3D([1, 2, 2],padding='same')(x_conv2_b3)
        x_bn_b3 = BatchNormalization()(x_max_b3)
        x_do_b3 = Dropout(dic['dr_rate'])(x_bn_b3)
        # ----- Encoder block 4 (8x filters) -----
        x_conv1_b4 = Conv3D(dic['start_filter']*8, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_do_b3)
        x_conv2_b4 = Conv3D(dic['start_filter']*8, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_conv1_b4)
        x_max_b4 = MaxPool3D([1, 2, 2],padding='same')(x_conv2_b4)
        x_bn_b4 = BatchNormalization()(x_max_b4)
        x_do_b4 = Dropout(dic['dr_rate'])(x_bn_b4)
        # ------- Head Normal Output (normal decoder)
        x_conv1_b5 = Conv3D(dic['start_filter']*8, [3, 1, 1], activation=dic['activation'])(x_do_b4)
        x_conv2_b5 = Conv3D(dic['start_filter']*8, [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b5)
        x_deconv_b5 = Conv3DTranspose(dic['start_filter']*8, [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b5)
        x_bn_b5 = BatchNormalization()(x_deconv_b5)
        x_do_b5 = Dropout(dic['dr_rate'])(x_bn_b5)
        # Skip connection: crop 2 leading timesteps so the encoder feature
        # lines up with the decoder's time axis (shrunk by the [3,1,1] conv).
        cropped_x_conv2_b4 = layers.Cropping3D(cropping=((2,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b4)
        x_conv1_b6 = Conv3D(dic['start_filter']*4, [3, 1, 1], activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b4,x_do_b5]))
        x_conv2_b6 = Conv3D(dic['start_filter']*4, [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b6)
        x_deconv_b6 = Conv3DTranspose(dic['start_filter']*4, [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b6)
        x_bn_b6 = BatchNormalization()(x_deconv_b6)
        x_do_b6 = Dropout(dic['dr_rate'])(x_bn_b6)
        # Skip connection from encoder block 3 (4 timesteps consumed so far).
        cropped_x_conv2_b3 = layers.Cropping3D(cropping=((4,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b3)
        x_conv1_b7 = Conv3D(dic['start_filter']*2, [3, 1, 1], activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b3,x_do_b6]))
        x_conv2_b7 = Conv3D(dic['start_filter']*2, [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b7)
        x_deconv_b7 = Conv3DTranspose(dic['start_filter']*2, [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b7)
        x_bn_b7 = BatchNormalization()(x_deconv_b7)
        x_do_b7 = Dropout(dic['dr_rate'])(x_bn_b7)
        # Skip connection from encoder block 2 (6 timesteps consumed).
        cropped_x_conv2_b2 = layers.Cropping3D(cropping=((6,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b2)
        x_conv1_b8 = Conv3D(dic['start_filter'], [1, 1, 1],padding='same', activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b2,x_do_b7]))
        x_conv2_b8 = Conv3D(dic['start_filter'], [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b8)
        x_deconv_b8 = Conv3DTranspose(dic['start_filter'], [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b8)
        x_bn_b8 = BatchNormalization()(x_deconv_b8)
        x_do_b8 = Dropout(dic['dr_rate'])(x_bn_b8)
        # Skip connection from encoder block 1 (no [3,1,1] conv in block 8,
        # so the time offset stays at 6).
        cropped_x_conv2_b1 = layers.Cropping3D(cropping=((6,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b1)
        x_conv1_b9 = Conv3D(int(dic['start_filter']/2), [1, 1, 1],padding='same', activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b1,x_do_b8]))
        x_conv2_b9 = Conv3D(int(dic['start_filter']/2), [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b9)
        x_bn_b9 = BatchNormalization()(x_conv2_b9)
        x_do_b9 = Dropout(dic['dr_rate'])(x_bn_b9)
        # Linear 1x1x1 projection to a single output channel.
        normal_output = Conv3DTranspose(1, [1, 1, 1], activation='linear')(x_do_b9)
        # ----------
        model = Model(inputs=[input_layer], outputs=[normal_output])
        # NOTE(review): eval() executes arbitrary code from the config dict;
        # only use with trusted configuration sources.
        opt = eval(dic['optimizer'])
        if dic['loss'] == 'huber':
            loss = Huber()
        else:
            loss = dic['loss']
        model.compile(optimizer=opt(dic['lr']), loss=loss,
                      metrics=['mse', 'mae'])
        self.model = model
class UNET_3D_Residual_Class:
    """3D U-Net-style encoder/decoder whose single head predicts a residual
    that is added to the trailing timesteps of the raw input.

    Unlike UNET_Like_3D_Class, the decoder crops every skip tensor down to
    its last timesteps (crop 8 at the start) and then tiles it along the
    time axis with layers.concatenate([...]*n, axis=1) to match the
    decoder's time length.
    """

    def __init__(self, dic):
        """Build and compile the model from the hyper-parameter dict *dic*.

        Keys read: 'input_shape', 'input_bn', 'start_filter',
        'conv_kernel_size', 'deconv_kernel_size', 'activation', 'dr_rate',
        'optimizer' (string passed to eval -- see NOTE below), 'lr', 'loss'.
        """
        input_layer = Input(dic['input_shape'])
        # Optional batch-norm directly on the raw input.
        if dic['input_bn']:
            x_init = BatchNormalization()(input_layer)
        else:
            x_init = input_layer
        # ----- Encoder: four Conv3D blocks with 2x spatial pooling -----
        x_conv1_b1 = Conv3D(dic['start_filter'], [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_init)
        x_conv2_b1 = Conv3D(dic['start_filter'], [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_conv1_b1)
        x_max_b1 = MaxPool3D([1, 2, 2],padding='same')(x_conv2_b1)
        x_bn_b1 = BatchNormalization()(x_max_b1)
        x_do_b1 = Dropout(dic['dr_rate'])(x_bn_b1)
        x_conv1_b2 = Conv3D(dic['start_filter']*2, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_do_b1)
        x_conv2_b2 = Conv3D(dic['start_filter']*2, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_conv1_b2)
        x_max_b2 = MaxPool3D([1, 2, 2],padding='same')(x_conv2_b2)
        x_bn_b2 = BatchNormalization()(x_max_b2)
        x_do_b2 = Dropout(dic['dr_rate'])(x_bn_b2)
        x_conv1_b3 = Conv3D(dic['start_filter']*4, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_do_b2)
        x_conv2_b3 = Conv3D(dic['start_filter']*4, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_conv1_b3)
        x_max_b3 = MaxPool3D([1, 2, 2],padding='same')(x_conv2_b3)
        x_bn_b3 = BatchNormalization()(x_max_b3)
        x_do_b3 = Dropout(dic['dr_rate'])(x_bn_b3)
        x_conv1_b4 = Conv3D(dic['start_filter']*8, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_do_b3)
        x_conv2_b4 = Conv3D(dic['start_filter']*8, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_conv1_b4)
        x_max_b4 = MaxPool3D([1, 2, 2],padding='same')(x_conv2_b4)
        x_bn_b4 = BatchNormalization()(x_max_b4)
        x_do_b4 = Dropout(dic['dr_rate'])(x_bn_b4)
        # ------- Head Residual Output (Residual Decoder)
        x_conv1_b5 = Conv3D(dic['start_filter']*8, [3, 1, 1], activation=dic['activation'])(x_do_b4)
        x_conv2_b5 = Conv3D(dic['start_filter']*8, [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b5)
        x_deconv_b5 = Conv3DTranspose(dic['start_filter']*8, [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b5)
        x_bn_b5 = BatchNormalization()(x_deconv_b5)
        x_do_b5 = Dropout(dic['dr_rate'])(x_bn_b5)
        # Skip connection: keep the encoder feature's trailing timesteps
        # (crop 8 at the start), then tile 7x in time to match the decoder.
        # NOTE(review): tiling assumes specific time lengths (e.g. T=11 in
        # the input) -- verify against the input_shape actually used.
        cropped_x_conv2_b4 = layers.Cropping3D(cropping=((8,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b4)
        cropped_x_conv2_b4 = layers.concatenate([cropped_x_conv2_b4]*7,axis=1)
        x_conv1_b6 = Conv3D(dic['start_filter']*4, [3, 1, 1], activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b4,x_do_b5]))
        x_conv2_b6 = Conv3D(dic['start_filter']*4, [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b6)
        x_deconv_b6 = Conv3DTranspose(dic['start_filter']*4, [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b6)
        x_bn_b6 = BatchNormalization()(x_deconv_b6)
        x_do_b6 = Dropout(dic['dr_rate'])(x_bn_b6)
        cropped_x_conv2_b3 = layers.Cropping3D(cropping=((8,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b3)
        cropped_x_conv2_b3 = layers.concatenate([cropped_x_conv2_b3]*5,axis=1)
        x_conv1_b7 = Conv3D(dic['start_filter']*2, [3, 1, 1], activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b3,x_do_b6]))
        x_conv2_b7 = Conv3D(dic['start_filter']*2, [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b7)
        x_deconv_b7 = Conv3DTranspose(dic['start_filter']*2, [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b7)
        x_bn_b7 = BatchNormalization()(x_deconv_b7)
        x_do_b7 = Dropout(dic['dr_rate'])(x_bn_b7)
        cropped_x_conv2_b2 = layers.Cropping3D(cropping=((8,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b2)
        cropped_x_conv2_b2 = layers.concatenate([cropped_x_conv2_b2]*3,axis=1)
        x_conv1_b8 = Conv3D(dic['start_filter'], [1, 1, 1],padding='same', activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b2,x_do_b7]))
        x_conv2_b8 = Conv3D(dic['start_filter'], [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b8)
        x_deconv_b8 = Conv3DTranspose(dic['start_filter'], [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b8)
        x_bn_b8 = BatchNormalization()(x_deconv_b8)
        x_do_b8 = Dropout(dic['dr_rate'])(x_bn_b8)
        cropped_x_conv2_b1 = layers.Cropping3D(cropping=((8,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b1)
        cropped_x_conv2_b1 = layers.concatenate([cropped_x_conv2_b1]*3,axis=1)
        x_conv1_b9 = Conv3D(int(dic['start_filter']/2), [1, 1, 1],padding='same', activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b1,x_do_b8]))
        x_conv2_b9 = Conv3D(int(dic['start_filter']/2), [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b9)
        x_bn_b9 = BatchNormalization()(x_conv2_b9)
        x_do_b9 = Dropout(dic['dr_rate'])(x_bn_b9)
        residual_output = Conv3DTranspose(1, [1, 1, 1], activation='linear')(x_do_b9)
        # Residual shortcut: last input timesteps (crop 8), tiled 3x in
        # time, added to the predicted correction.
        last_timestep_input_residual = layers.Cropping3D(cropping=((8,0),(0,0),(0,0)),data_format="channels_last")(input_layer)
        last_timestep_input_residual = layers.concatenate([last_timestep_input_residual]*3,axis=1)
        residual_output = Add()([last_timestep_input_residual, residual_output])
        # ----------
        model = Model(inputs=[input_layer], outputs=[residual_output])
        # NOTE(review): eval() executes arbitrary code from the config dict.
        opt = eval(dic['optimizer'])
        if dic['loss'] == 'huber':
            loss = Huber()
        else:
            loss = dic['loss']
        model.compile(optimizer=opt(dic['lr']), loss=loss,
                      metrics=['mse', 'mae'])
        self.model = model
class UNET_3D_Both_Class:
    """3D U-Net-style model with a shared encoder and two decoder heads:
    a direct ("normal") head and a residual head whose prediction is added
    to the trailing input timesteps.  The final output is the average of
    the two heads.
    """

    def __init__(self, dic):
        """Build and compile the model from the hyper-parameter dict *dic*.

        Keys read: 'input_shape', 'input_bn', 'start_filter',
        'conv_kernel_size', 'deconv_kernel_size', 'activation', 'dr_rate',
        'optimizer' (string passed to eval -- see NOTE below), 'lr', 'loss'.
        """
        input_layer = Input(dic['input_shape'])
        # Optional batch-norm directly on the raw input.
        if dic['input_bn']:
            x_init = BatchNormalization()(input_layer)
        else:
            x_init = input_layer
        # ----- Shared encoder: four Conv3D blocks with 2x spatial pooling -----
        x_conv1_b1 = Conv3D(dic['start_filter'], [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_init)
        x_conv2_b1 = Conv3D(dic['start_filter'], [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_conv1_b1)
        x_max_b1 = MaxPool3D([1, 2, 2],padding='same')(x_conv2_b1)
        x_bn_b1 = BatchNormalization()(x_max_b1)
        x_do_b1 = Dropout(dic['dr_rate'])(x_bn_b1)
        x_conv1_b2 = Conv3D(dic['start_filter']*2, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_do_b1)
        x_conv2_b2 = Conv3D(dic['start_filter']*2, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_conv1_b2)
        x_max_b2 = MaxPool3D([1, 2, 2],padding='same')(x_conv2_b2)
        x_bn_b2 = BatchNormalization()(x_max_b2)
        x_do_b2 = Dropout(dic['dr_rate'])(x_bn_b2)
        x_conv1_b3 = Conv3D(dic['start_filter']*4, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_do_b2)
        x_conv2_b3 = Conv3D(dic['start_filter']*4, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_conv1_b3)
        x_max_b3 = MaxPool3D([1, 2, 2],padding='same')(x_conv2_b3)
        x_bn_b3 = BatchNormalization()(x_max_b3)
        x_do_b3 = Dropout(dic['dr_rate'])(x_bn_b3)
        x_conv1_b4 = Conv3D(dic['start_filter']*8, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_do_b3)
        x_conv2_b4 = Conv3D(dic['start_filter']*8, [1, dic['conv_kernel_size'], dic['conv_kernel_size']],padding='same', activation=dic['activation'])(x_conv1_b4)
        x_max_b4 = MaxPool3D([1, 2, 2],padding='same')(x_conv2_b4)
        x_bn_b4 = BatchNormalization()(x_max_b4)
        x_do_b4 = Dropout(dic['dr_rate'])(x_bn_b4)
        # ------- Head Normal Output (normal decoder)
        x_conv1_b5 = Conv3D(dic['start_filter']*8, [3, 1, 1], activation=dic['activation'])(x_do_b4)
        x_conv2_b5 = Conv3D(dic['start_filter']*8, [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b5)
        x_deconv_b5 = Conv3DTranspose(dic['start_filter']*8, [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b5)
        x_bn_b5 = BatchNormalization()(x_deconv_b5)
        x_do_b5 = Dropout(dic['dr_rate'])(x_bn_b5)
        # Skip connections: crop leading timesteps (2/4/6/6) so encoder
        # features line up with the decoder's shrinking time axis.
        cropped_x_conv2_b4 = layers.Cropping3D(cropping=((2,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b4)
        x_conv1_b6 = Conv3D(dic['start_filter']*4, [3, 1, 1], activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b4,x_do_b5]))
        x_conv2_b6 = Conv3D(dic['start_filter']*4, [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b6)
        x_deconv_b6 = Conv3DTranspose(dic['start_filter']*4, [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b6)
        x_bn_b6 = BatchNormalization()(x_deconv_b6)
        x_do_b6 = Dropout(dic['dr_rate'])(x_bn_b6)
        cropped_x_conv2_b3 = layers.Cropping3D(cropping=((4,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b3)
        x_conv1_b7 = Conv3D(dic['start_filter']*2, [3, 1, 1], activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b3,x_do_b6]))
        x_conv2_b7 = Conv3D(dic['start_filter']*2, [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b7)
        x_deconv_b7 = Conv3DTranspose(dic['start_filter']*2, [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b7)
        x_bn_b7 = BatchNormalization()(x_deconv_b7)
        x_do_b7 = Dropout(dic['dr_rate'])(x_bn_b7)
        cropped_x_conv2_b2 = layers.Cropping3D(cropping=((6,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b2)
        x_conv1_b8 = Conv3D(dic['start_filter'], [1, 1, 1],padding='same', activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b2,x_do_b7]))
        x_conv2_b8 = Conv3D(dic['start_filter'], [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b8)
        x_deconv_b8 = Conv3DTranspose(dic['start_filter'], [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b8)
        x_bn_b8 = BatchNormalization()(x_deconv_b8)
        x_do_b8 = Dropout(dic['dr_rate'])(x_bn_b8)
        cropped_x_conv2_b1 = layers.Cropping3D(cropping=((6,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b1)
        x_conv1_b9 = Conv3D(int(dic['start_filter']/2), [1, 1, 1],padding='same', activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b1,x_do_b8]))
        x_conv2_b9 = Conv3D(int(dic['start_filter']/2), [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b9)
        x_bn_b9 = BatchNormalization()(x_conv2_b9)
        x_do_b9 = Dropout(dic['dr_rate'])(x_bn_b9)
        normal_output = Conv3DTranspose(1, [1, 1, 1], activation='linear')(x_do_b9)
        # ------- Head Residual Output (Residual Decoder)
        # NOTE(review): this head branches from x_max_b4 (before the
        # encoder's final BatchNormalization/Dropout), unlike the normal
        # head which uses x_do_b4 -- possibly intentional, verify.
        # Decoder variable names b5..b9 are intentionally reused here.
        x_conv1_b5 = Conv3D(dic['start_filter']*8, [3, 1, 1], activation=dic['activation'])(x_max_b4)
        x_conv2_b5 = Conv3D(dic['start_filter']*8, [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b5)
        x_deconv_b5 = Conv3DTranspose(dic['start_filter']*8, [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b5)
        x_bn_b5 = BatchNormalization()(x_deconv_b5)
        x_do_b5 = Dropout(dic['dr_rate'])(x_bn_b5)
        # Residual-style skips: keep trailing timesteps (crop 8) and tile
        # along time (7x/5x/3x/3x) to match the decoder length.
        cropped_x_conv2_b4 = layers.Cropping3D(cropping=((8,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b4)
        cropped_x_conv2_b4 = layers.concatenate([cropped_x_conv2_b4]*7,axis=1)
        x_conv1_b6 = Conv3D(dic['start_filter']*4, [3, 1, 1], activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b4,x_do_b5]))
        x_conv2_b6 = Conv3D(dic['start_filter']*4, [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b6)
        x_deconv_b6 = Conv3DTranspose(dic['start_filter']*4, [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b6)
        x_bn_b6 = BatchNormalization()(x_deconv_b6)
        x_do_b6 = Dropout(dic['dr_rate'])(x_bn_b6)
        cropped_x_conv2_b3 = layers.Cropping3D(cropping=((8,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b3)
        cropped_x_conv2_b3 = layers.concatenate([cropped_x_conv2_b3]*5,axis=1)
        x_conv1_b7 = Conv3D(dic['start_filter']*2, [3, 1, 1], activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b3,x_do_b6]))
        x_conv2_b7 = Conv3D(dic['start_filter']*2, [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b7)
        x_deconv_b7 = Conv3DTranspose(dic['start_filter']*2, [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b7)
        x_bn_b7 = BatchNormalization()(x_deconv_b7)
        x_do_b7 = Dropout(dic['dr_rate'])(x_bn_b7)
        cropped_x_conv2_b2 = layers.Cropping3D(cropping=((8,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b2)
        cropped_x_conv2_b2 = layers.concatenate([cropped_x_conv2_b2]*3,axis=1)
        x_conv1_b8 = Conv3D(dic['start_filter'], [1, 1, 1],padding='same', activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b2,x_do_b7]))
        x_conv2_b8 = Conv3D(dic['start_filter'], [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b8)
        x_deconv_b8 = Conv3DTranspose(dic['start_filter'], [1, dic['deconv_kernel_size'], dic['deconv_kernel_size']],(1,2,2),padding='same', activation=dic['activation'])(x_conv2_b8)
        x_bn_b8 = BatchNormalization()(x_deconv_b8)
        x_do_b8 = Dropout(dic['dr_rate'])(x_bn_b8)
        cropped_x_conv2_b1 = layers.Cropping3D(cropping=((8,0),(0,0),(0,0)),data_format="channels_last")(x_conv2_b1)
        cropped_x_conv2_b1 = layers.concatenate([cropped_x_conv2_b1]*3,axis=1)
        x_conv1_b9 = Conv3D(int(dic['start_filter']/2), [1, 1, 1],padding='same', activation=dic['activation'])(layers.concatenate([cropped_x_conv2_b1,x_do_b8]))
        x_conv2_b9 = Conv3D(int(dic['start_filter']/2), [1, 1, 1],padding='same', activation=dic['activation'])(x_conv1_b9)
        x_bn_b9 = BatchNormalization()(x_conv2_b9)
        x_do_b9 = Dropout(dic['dr_rate'])(x_bn_b9)
        residual_output = Conv3DTranspose(1, [1, 1, 1], activation='linear')(x_do_b9)
        # Residual shortcut: last input timesteps (crop 8), tiled 3x.
        last_timestep_input_residual = layers.Cropping3D(cropping=((8,0),(0,0),(0,0)),data_format="channels_last")(input_layer)
        last_timestep_input_residual = layers.concatenate([last_timestep_input_residual]*3,axis=1)
        residual_output = Add()([last_timestep_input_residual, residual_output])
        # ---------- Averaging the two output
        output = Average()([normal_output,residual_output])
        model = Model(inputs=[input_layer], outputs=[output])
        # NOTE(review): eval() executes arbitrary code from the config dict.
        opt = eval(dic['optimizer'])
        if dic['loss'] == 'huber':
            loss = Huber()
        else:
            loss = dic['loss']
        model.compile(optimizer=opt(dic['lr']), loss=loss,
                      metrics=['mse', 'mae'])
        self.model = model
class CONV_LSTM_Class:
    """Plain stacked-ConvLSTM forecaster: three ConvLSTM2D/Conv3D stages
    with halving filter counts, followed by a 1x1x1 Conv3D projection to a
    single output channel.
    """

    def __init__(self, dic):
        """Construct and compile the network from the config dict *dic*."""
        ksize = dic['conv_kernel_size']
        base = dic['start_filter']
        inp = Input(dic['input_shape'])
        tensor = inp
        # Each stage: ConvLSTM2D (same padding) then a [3,1,1] valid Conv3D.
        for nfilt in (base, int(base / 2), int(base / 4)):
            tensor = ConvLSTM2D(filters=nfilt, kernel_size=(ksize, ksize),
                                padding="same", activation='relu',
                                return_sequences=True)(tensor)
            tensor = Conv3D(nfilt, [3, 1, 1], activation='relu')(tensor)
        tensor = Conv3D(1, [1, 1, 1], padding='same', activation='relu')(tensor)
        net = Model(inputs=[inp], outputs=[tensor])
        optimizer_cls = eval(dic['optimizer'])
        loss = Huber() if dic['loss'] == 'huber' else dic['loss']
        net.compile(optimizer=optimizer_cls(dic['lr']), loss=loss,
                    metrics=['mse', 'mae'])
        self.model = net
class CONV_LSTM_Residual_Class:
    """Stacked-ConvLSTM forecaster with a residual shortcut: the network
    predicts a correction that is added to the trailing timesteps of the
    raw input sequence.
    """

    def __init__(self, dic):
        """Construct and compile the network from the config dict *dic*."""
        ksize = dic['conv_kernel_size']
        base = dic['start_filter']
        inp = Input(dic['input_shape'])
        delta = inp
        # Each stage: ConvLSTM2D (same padding) then a [3,1,1] valid Conv3D.
        for nfilt in (base, int(base / 2), int(base / 4)):
            delta = ConvLSTM2D(filters=nfilt, kernel_size=(ksize, ksize),
                               padding="same", activation='relu',
                               return_sequences=True)(delta)
            delta = Conv3D(nfilt, [3, 1, 1], activation='relu')(delta)
        delta = Conv3D(1, [1, 1, 1], padding='same', activation='relu')(delta)
        # Residual shortcut: drop the first 8 timesteps of the input.
        shortcut = layers.Cropping3D(cropping=((8, 0), (0, 0), (0, 0)),
                                     data_format="channels_last")(inp)
        combined = Add()([shortcut, delta])
        net = Model(inputs=[inp], outputs=[combined])
        optimizer_cls = eval(dic['optimizer'])
        loss = Huber() if dic['loss'] == 'huber' else dic['loss']
        net.compile(optimizer=optimizer_cls(dic['lr']), loss=loss,
                    metrics=['mse', 'mae'])
        self.model = net
class CONV_LSTM_Both_Class:
    """Two-headed stacked-ConvLSTM forecaster: a direct ("normal") head and
    a residual head whose correction is added to the trailing timesteps of
    the input; the final output is the average of both heads.
    """

    def __init__(self, dic):
        """Construct and compile the network from the config dict *dic*."""
        ksize = dic['conv_kernel_size']
        base = dic['start_filter']
        inp = Input(dic['input_shape'])

        def build_branch(tensor):
            # Shared topology of both heads (each call creates new weights).
            for nfilt in (base, int(base / 2), int(base / 4)):
                tensor = ConvLSTM2D(filters=nfilt, kernel_size=(ksize, ksize),
                                    padding="same", activation='relu',
                                    return_sequences=True)(tensor)
                tensor = Conv3D(nfilt, [3, 1, 1], activation='relu')(tensor)
            return Conv3D(1, [1, 1, 1], padding='same',
                          activation='relu')(tensor)

        direct_head = build_branch(inp)
        residual_head = build_branch(inp)
        # Residual baseline: the input with its first 8 timesteps cropped.
        shortcut = layers.Cropping3D(cropping=((8, 0), (0, 0), (0, 0)),
                                     data_format="channels_last")(inp)
        residual_head = Add()([shortcut, residual_head])
        merged = Average()([direct_head, residual_head])
        net = Model(inputs=[inp], outputs=[merged])
        optimizer_cls = eval(dic['optimizer'])
        loss = Huber() if dic['loss'] == 'huber' else dic['loss']
        net.compile(optimizer=optimizer_cls(dic['lr']), loss=loss,
                    metrics=['mse', 'mae'])
        self.model = net
class LinearRegression_Class:
    """scikit-learn LinearRegression wrapped behind the same fit/predict
    interface as the Keras model classes in this module.

    Samples are pixels: a (T, H, W) cube is flattened to (H*W, T) so that
    each pixel's time series becomes one regression row.
    """

    def __init__(self):
        # Populated by fit() or load_model().
        self.sk_model = None

    @staticmethod
    def load_model(path):
        """Load a pickled sklearn model from *path* and return a wrapper.

        NOTE(review): pickle.load executes arbitrary code; only load files
        from trusted sources.
        """
        mdl = LinearRegression_Class()
        with open(path, 'rb') as f:
            mdl.sk_model = pickle.load(f)
        return mdl

    @staticmethod
    def reshape(arr):
        """Squeeze to (T, H, W) and flatten to (H*W, T) sample rows."""
        arr = arr.squeeze()
        arr = arr.reshape(arr.shape[0], arr.shape[1] * arr.shape[2])
        arr = arr.swapaxes(0, 1)
        return arr

    @staticmethod
    def reshape_back(arr, s):
        """Inverse of reshape(): fold (H*W, T) rows back into shape *s*."""
        arr = arr.swapaxes(0, 1)
        arr = arr.reshape(s[0], s[1], s[2])
        return arr

    def fit(self, X_train, y_train):
        """Fit on already-flattened (n_samples, n_features) arrays."""
        self.sk_model = LinearRegression().fit(X_train, y_train)

    def predict(self, X):
        """Predict a (3, H, W) cube from the input cube *X*."""
        s = X.squeeze().shape
        reshaped_X = LinearRegression_Class.reshape(X)
        reshaped_Y = self.sk_model.predict(reshaped_X)
        Y = LinearRegression_Class.reshape_back(reshaped_Y, (3, s[1], s[2]))
        return Y
class RandomForrest_Class:
    """Per-pixel random-forest regression over flattened frames."""

    def __init__(self):
        # Fixed hyper-parameters mirror the original training setup.
        self.sk_model = RandomForestRegressor(max_depth=10, random_state=0, n_estimators=20)

    @staticmethod
    def load_model(path):
        """Load a pickled forest from *path* and wrap it.

        BUG FIX: helpers lacked @staticmethod, so instance calls would
        have passed `self` as the first argument and failed.
        """
        mdl = RandomForrest_Class()
        with open(path, 'rb') as f:
            mdl.sk_model = pickle.load(f)
        return mdl

    @staticmethod
    def reshape(arr):
        """(T, H, W[, 1]) array -> (H*W, T): one row per pixel."""
        arr = arr.squeeze()
        arr = arr.reshape(arr.shape[0], arr.shape[1] * arr.shape[2])
        arr = arr.swapaxes(0, 1)
        return arr

    @staticmethod
    def reshape_back(arr, s):
        """Inverse of reshape(): (H*W, T) back to shape *s*."""
        arr = arr.swapaxes(0, 1)
        arr = arr.reshape(s[0], s[1], s[2])
        return arr

    @staticmethod
    def get_sample_indexes(arr):
        """Draw 10,000,000 row indices (with replacement) for subsampling.

        NOTE(review): the draw is with replacement and typically larger
        than the training set -- confirm the intended subsample size.
        """
        indexes = np.random.choice(np.array(range(arr.shape[0])), 10000000)
        return indexes

    def fit(self, X_train, y_train):
        """Fit the forest on a bootstrap subsample of the training rows."""
        indexes = RandomForrest_Class.get_sample_indexes(X_train)
        X = X_train[indexes]
        y = y_train[indexes]
        self.sk_model = self.sk_model.fit(X, y)

    def predict(self, X):
        """Predict from frames *X*; returns (3, H, W) -- 3 is hard-coded."""
        s = X.squeeze().shape
        reshaped_X = RandomForrest_Class.reshape(X)
        reshaped_Y = self.sk_model.predict(reshaped_X)
        Y = RandomForrest_Class.reshape_back(reshaped_Y, (3, s[1], s[2]))
        return Y
test/test_ip_pools.py | Radico/python-sparkpost | 0 | 6614957 | <reponame>Radico/python-sparkpost
import pytest
import responses
from sparkpost import SparkPost
from sparkpost.exceptions import SparkPostAPIException
@responses.activate
def test_success_list_ip_pools():
    """list() unwraps the `results` array returned by GET /ip-pools."""
    responses.add(
        responses.GET,
        "https://api.sparkpost.com/api/v1/ip-pools",
        status=200,
        content_type="application/json",
        body="""{
            "results": [
                {
                    "id": "marketing_ip_pool",
                    "name": "Marketing IP Pool",
                    "ips": [],
                    "signing_domain": "example.com",
                    "fbl_signing_domain": "sparkpostmail.com",
                    "auto_warmup_overflow_pool": "overflow_pool"
                },
                {
                    "id": "default",
                    "name": "Default",
                    "ips": [
                        {
                            "external_ip": "172.16.17.32",
                            "hostname": "mta472a.sparkpostmail.com",
                            "auto_warmup_enabled": true,
                            "auto_warmup_stage": 5
                        }
                    ]
                }
            ]
        }"""
    )
    sp = SparkPost("fake-key")
    results = sp.ip_pools.list()
    assert len(results) == 2
    assert results[0]["id"] == "marketing_ip_pool"
@responses.activate
def test_fail_list_ip_pools():
    """A 5xx from the API makes list() raise SparkPostAPIException."""
    responses.add(
        responses.GET,
        "https://api.sparkpost.com/api/v1/ip-pools",
        status=500,
        content_type="application/json",
        body="""
        {"errors": [{"message": "You failed", "description": "More Info"}]}
        """
    )
    with pytest.raises(SparkPostAPIException):
        sp = SparkPost("fake-key")
        sp.ip_pools.list()
@responses.activate
def test_success_get_ip_pool():
    """get(id) returns the single pool object under `results`."""
    responses.add(
        responses.GET,
        "https://api.sparkpost.com/api/v1/ip-pools/marketing_ip_pool",
        status=200,
        content_type="application/json",
        body="""{
            "results": {
                "id": "marketing_ip_pool",
                "name": "Marketing IP Pool",
                "fbl_signing_domain": "sparkpostmail.com",
                "ips": [
                    {
                        "external_ip": "172.16.17.32",
                        "hostname": "mta472a.sparkpostmail.com",
                        "auto_warmup_enabled": true,
                        "auto_warmup_stage": 5
                    },
                    {
                        "external_ip": "172.16.31.10",
                        "hostname": "mta474a.sparkpostmail.com",
                        "auto_warmup_enabled": false
                    }
                ],
                "signing_domain": "example.com",
                "auto_warmup_overflow_pool": "overflow_pool"
            }
        }"""
    )
    sp = SparkPost("fake-key")
    result = sp.ip_pools.get("marketing_ip_pool")
    assert result is not None
@responses.activate
def test_not_found_get_ip_pool():
    """A 404 from GET /ip-pools/<id> raises SparkPostAPIException."""
    responses.add(
        responses.GET,
        "https://api.sparkpost.com/api/v1/ip-pools/foo.com",
        status=404,
        content_type="application/json",
        body="""{"errors": [{"message": "Resource could not be found"}]}"""
    )
    with pytest.raises(SparkPostAPIException):
        sp = SparkPost("fake-key")
        sp.ip_pools.get("foo.com")
@responses.activate
def test_success_delete_ip_pool():
    """delete(id) returns True on a 204 No Content response."""
    responses.add(
        responses.DELETE,
        "https://api.sparkpost.com/api/v1/ip-pools/marketing_ip_pool",
        status=204,
        content_type="application/json"
    )
    sp = SparkPost("fake-key")
    results = sp.ip_pools.delete("marketing_ip_pool")
    assert results is True
@responses.activate
def test_not_found_delete_ip_pool():
    """Deleting a missing pool (404) raises SparkPostAPIException."""
    responses.add(
        responses.DELETE,
        "https://api.sparkpost.com/api/v1/ip-pools/foo.com",
        status=404,
        content_type="application/json",
        body="""{"errors": [{"message": "Resource could not be found"}]}"""
    )
    with pytest.raises(SparkPostAPIException):
        sp = SparkPost("fake-key")
        sp.ip_pools.delete("foo.com")
@responses.activate
def test_success_update_ip_pool():
    """update() PUTs the new fields and returns the parsed response.

    NOTE(review): unlike the other fixtures this body has no `results`
    wrapper -- confirm it matches what the client expects from PUT.
    """
    responses.add(
        responses.PUT,
        "https://api.sparkpost.com/api/v1/ip-pools/marketing",
        status=200,
        content_type="application/json",
        body="""{
            "name": "Updated Marketing Pool",
            "fbl_signing_domain": "sparkpostmail.com",
            "auto_warmup_overflow_pool": "overflow_pool"
        }"""
    )
    sp = SparkPost("fake-key")
    results = sp.ip_pools.update("marketing",
                                 name="Updated Marketing Pool",
                                 fbl_signing_domain="sparkpostmail.com",
                                 auto_warmup_overflow_pool="overflow_pool")
    assert results is not None
    assert results["name"] == "Updated Marketing Pool"
@responses.activate
def test_success_create_ip_pool():
    """create() POSTs the pool definition and returns `results`."""
    responses.add(
        responses.POST,
        "https://api.sparkpost.com/api/v1/ip-pools",
        status=200,
        content_type="application/json",
        body="""{
            "results": {
                "id": "marketing_ip_pool"
            }
        }"""
    )
    sp = SparkPost("fake-key")
    results = sp.ip_pools.create(name="marketing IP Pool",
                                 fbl_signing_domain="sparkpostmail.com",
                                 auto_warmup_overflow_pool="overflow_pool")
    assert results is not None
    assert results["id"] == "marketing_ip_pool"
| import pytest
import responses
from sparkpost import SparkPost
from sparkpost.exceptions import SparkPostAPIException
@responses.activate
def test_success_list_ip_pools():
responses.add(
responses.GET,
"https://api.sparkpost.com/api/v1/ip-pools",
status=200,
content_type="application/json",
body="""{
"results": [
{
"id": "marketing_ip_pool",
"name": "Marketing IP Pool",
"ips": [],
"signing_domain": "example.com",
"fbl_signing_domain": "sparkpostmail.com",
"auto_warmup_overflow_pool": "overflow_pool"
},
{
"id": "default",
"name": "Default",
"ips": [
{
"external_ip": "172.16.17.32",
"hostname": "mta472a.sparkpostmail.com",
"auto_warmup_enabled": true,
"auto_warmup_stage": 5
}
]
}
]
}"""
)
sp = SparkPost("fake-key")
results = sp.ip_pools.list()
assert len(results) == 2
assert results[0]["id"] == "marketing_ip_pool"
@responses.activate
def test_fail_list_ip_pools():
responses.add(
responses.GET,
"https://api.sparkpost.com/api/v1/ip-pools",
status=500,
content_type="application/json",
body="""
{"errors": [{"message": "You failed", "description": "More Info"}]}
"""
)
with pytest.raises(SparkPostAPIException):
sp = SparkPost("fake-key")
sp.ip_pools.list()
@responses.activate
def test_success_get_ip_pool():
responses.add(
responses.GET,
"https://api.sparkpost.com/api/v1/ip-pools/marketing_ip_pool",
status=200,
content_type="application/json",
body="""{
"results": {
"id": "marketing_ip_pool",
"name": "Marketing IP Pool",
"fbl_signing_domain": "sparkpostmail.com",
"ips": [
{
"external_ip": "172.16.17.32",
"hostname": "mta472a.sparkpostmail.com",
"auto_warmup_enabled": true,
"auto_warmup_stage": 5
},
{
"external_ip": "172.16.31.10",
"hostname": "mta474a.sparkpostmail.com",
"auto_warmup_enabled": false
}
],
"signing_domain": "example.com",
"auto_warmup_overflow_pool": "overflow_pool"
}
}"""
)
sp = SparkPost("fake-key")
result = sp.ip_pools.get("marketing_ip_pool")
assert result is not None
@responses.activate
def test_not_found_get_ip_pool():
responses.add(
responses.GET,
"https://api.sparkpost.com/api/v1/ip-pools/foo.com",
status=404,
content_type="application/json",
body="""{"errors": [{"message": "Resource could not be found"}]}"""
)
with pytest.raises(SparkPostAPIException):
sp = SparkPost("fake-key")
sp.ip_pools.get("foo.com")
@responses.activate
def test_success_delete_ip_pool():
responses.add(
responses.DELETE,
"https://api.sparkpost.com/api/v1/ip-pools/marketing_ip_pool",
status=204,
content_type="application/json"
)
sp = SparkPost("fake-key")
results = sp.ip_pools.delete("marketing_ip_pool")
assert results is True
@responses.activate
def test_not_found_delete_ip_pool():
responses.add(
responses.DELETE,
"https://api.sparkpost.com/api/v1/ip-pools/foo.com",
status=404,
content_type="application/json",
body="""{"errors": [{"message": "Resource could not be found"}]}"""
)
with pytest.raises(SparkPostAPIException):
sp = SparkPost("fake-key")
sp.ip_pools.delete("foo.com")
@responses.activate
def test_success_update_ip_pool():
responses.add(
responses.PUT,
"https://api.sparkpost.com/api/v1/ip-pools/marketing",
status=200,
content_type="application/json",
body="""{
"name": "Updated Marketing Pool",
"fbl_signing_domain": "sparkpostmail.com",
"auto_warmup_overflow_pool": "overflow_pool"
}"""
)
sp = SparkPost("fake-key")
results = sp.ip_pools.update("marketing",
name="Updated Marketing Pool",
fbl_signing_domain="sparkpostmail.com",
auto_warmup_overflow_pool="overflow_pool")
assert results is not None
assert results["name"] == "Updated Marketing Pool"
@responses.activate
def test_success_create_ip_pool():
responses.add(
responses.POST,
"https://api.sparkpost.com/api/v1/ip-pools",
status=200,
content_type="application/json",
body="""{
"results": {
"id": "marketing_ip_pool"
}
}"""
)
sp = SparkPost("fake-key")
results = sp.ip_pools.create(name="marketing IP Pool",
fbl_signing_domain="sparkpostmail.com",
auto_warmup_overflow_pool="overflow_pool")
assert results is not None
assert results["id"] == "marketing_ip_pool" | en | 0.204681 | { "results": [ { "id": "marketing_ip_pool", "name": "Marketing IP Pool", "ips": [], "signing_domain": "example.com", "fbl_signing_domain": "sparkpostmail.com", "auto_warmup_overflow_pool": "overflow_pool" }, { "id": "default", "name": "Default", "ips": [ { "external_ip": "172.16.17.32", "hostname": "mta472a.sparkpostmail.com", "auto_warmup_enabled": true, "auto_warmup_stage": 5 } ] } ] } {"errors": [{"message": "You failed", "description": "More Info"}]} { "results": { "id": "marketing_ip_pool", "name": "Marketing IP Pool", "fbl_signing_domain": "sparkpostmail.com", "ips": [ { "external_ip": "172.16.17.32", "hostname": "mta472a.sparkpostmail.com", "auto_warmup_enabled": true, "auto_warmup_stage": 5 }, { "external_ip": "172.16.31.10", "hostname": "mta474a.sparkpostmail.com", "auto_warmup_enabled": false } ], "signing_domain": "example.com", "auto_warmup_overflow_pool": "overflow_pool" } } {"errors": [{"message": "Resource could not be found"}]} {"errors": [{"message": "Resource could not be found"}]} { "name": "Updated Marketing Pool", "fbl_signing_domain": "sparkpostmail.com", "auto_warmup_overflow_pool": "overflow_pool" } { "results": { "id": "marketing_ip_pool" } } | 2.226792 | 2 |
services/lst_dates_service.py | fudo-myo/LST_BBDD | 0 | 6614958 | <reponame>fudo-myo/LST_BBDD<filename>services/lst_dates_service.py
from typing import List
from sqlalchemy.exc import InvalidRequestError, OperationalError
from sqlalchemy.orm import Session
from DTO.dates_dto import DatesDto, create_date
from config.base import getSession
from utils.checkers import Checkers
try:
from entities.lst_dates import LstDates
except ImportError as error:
Checkers.print_exception_one_param(error)
class LstDatesService:
    """CRUD service for the LstDates table.

    Thin wrapper over a SQLAlchemy session that converts rows to
    DatesDto objects and prints the outcome of every operation.

    NOTE(review): `LstDates.__tablename__.name` is used throughout; with
    a plain `__tablename__: str` that would raise AttributeError --
    confirm how LstDates declares it.
    """

    def __init__(self):
        # One session per service instance (name-mangled attributes).
        self.__session: Session = getSession()
        self.__all_dates = None
        self.__date_by_id = None

    def insert_dates(self, dates_insert: DatesDto):
        """Insert a new row built from *dates_insert* and report the result."""
        try:
            dates_aux = LstDates(date=dates_insert.date)
            self.__session.add(dates_aux)
            self.__session.commit()
            # A populated autoincrement id is taken as proof of insertion.
            if dates_aux.id_date is not None:
                print("RECORD INSERTED IN TABLE '{}' WITH ID '{}'".format(LstDates.__tablename__.name,
                                                                          dates_aux.id_date))
            else:
                print(" THE RECORD OF TABLE '{}' HAS NOT BEEN INSERTED".format(LstDates.__tablename__.name))
        except (InvalidRequestError, NameError) as error_request:
            Checkers.print_exception_one_param(error_request)
        except OperationalError as error_request2:
            Checkers.print_exception_two_params(error_request2.orig.args[1], error_request2.orig.args[0])

    def update_dates(self, date_to_serach, id_date, date_to_update=None):
        """Update the row matching (*date_to_serach*, *id_date*) to *date_to_update*.

        Success is detected by comparing the DTO fetched before and after
        the UPDATE statement.
        """
        try:
            date_before: DatesDto = self.get_date_by_id(id_date)
            if Checkers.validate_int(id_date, LstDates.id_date.name) and Checkers.validate_datetime(
                    date_to_serach, LstDates.date) and date_before.id_date is not None and date_before.date is not None:
                # NOTE(review): LIKE is used for what looks like an exact
                # match on id/date -- confirm this is intentional.
                self.__session.query(LstDates).filter(LstDates.date.like(date_to_serach),
                                                      LstDates.id_date.like(id_date)) \
                    .update({
                        LstDates.date: Checkers.check_field_not_null(LstDates.date, date_to_update)},
                        synchronize_session=False
                    )
                self.__session.commit()
                date_after: DatesDto = self.get_date_by_id(id_date)
                if date_before.__dict__ != date_after.__dict__:
                    print("RECORD UPDATE IN TABLE '{}' WITH ID '{}'".format(LstDates.__tablename__.name,
                                                                            id_date))
                else:
                    print(" THE RECORD OF TABLE '{}' HAS NOT BEEN UPDATED".format(LstDates.__tablename__.name))
            else:
                print(" THE RECORD OF TABLE '{}' COULD NOT BE UPDATED ".format(LstDates.__tablename__.name))
        except (InvalidRequestError, NameError) as error_request:
            Checkers.print_exception_one_param(error_request)
        except OperationalError as error_request2:
            Checkers.print_exception_two_params(error_request2.orig.args[1], error_request2.orig.args[0])

    def delete_date(self, date_to_delete, id_date):
        """Delete the row matching (*date_to_delete*, *id_date*)."""
        try:
            date_before: DatesDto = self.get_date_by_id(id_date)
            if Checkers.validate_int(id_date, LstDates.id_date.name) and Checkers.validate_datetime(
                    date_to_delete, LstDates.date) and date_before.id_date is not None and date_before.date is not None:
                self.__session.query(LstDates).filter(LstDates.date.like(date_to_delete),
                                                      LstDates.id_date.like(id_date)) \
                    .delete(synchronize_session=False)
                self.__session.commit()
                # Row is gone when it existed before and no longer resolves.
                date_after: DatesDto = self.get_date_by_id(id_date)
                if date_before.id_date is not None and date_after.id_date is None:
                    print("RECORD DELETE IN TABLE '{}' WITH ID '{}'".format(LstDates.__tablename__.name,
                                                                            id_date))
                else:
                    print(" THE RECORD OF TABLE '{}' WITH ID '{}' HAS NOT BEEN DELETED BECAUSE IT DID NOT EXIST".format(
                        LstDates.__tablename__.name,
                        id_date))
            else:
                print(" THE RECORD OF TABLE '{}' COULD NOT BE DELETED".format(LstDates.__tablename__.name))
        except (InvalidRequestError, NameError) as error_request:
            Checkers.print_exception_one_param(error_request)
        except OperationalError as error_request2:
            Checkers.print_exception_two_params(error_request2.orig.args[1], error_request2.orig.args[0])

    def get_all_dates(self):
        """Return every row as a list of DatesDto (empty on error)."""
        dates_dto_list = []
        try:
            self.__all_dates: List[DatesDto] = self.__session.query(LstDates).all()
            if len(self.__all_dates) != 0:
                for row in self.__all_dates:
                    date_aux = create_date(
                        row.id_date,
                        row.date
                    )
                    dates_dto_list.append(date_aux)
            else:
                Checkers.empty_list(LstDates.__tablename__.name)
        except (InvalidRequestError, NameError) as error_request:
            Checkers.print_exception_one_param(error_request)
        except OperationalError as error_request2:
            Checkers.print_exception_two_params(error_request2.orig.args[1], error_request2.orig.args[0])
        return dates_dto_list

    def get_date_by_id(self, id_date):
        """Return the row with *id_date* as a DatesDto.

        Returns a DTO of Nones when the row is missing or a DB error occurs.
        """
        try:
            self.__date_by_id: DatesDto = self.__session.query(LstDates).filter(LstDates.id_date.like(id_date)).first()
            if self.__date_by_id is not None:
                return create_date(
                    self.__date_by_id.id_date,
                    self.__date_by_id.date,
                )
            else:
                Checkers.print_object_filter_null(LstDates.id_date, str(id_date))
                return create_date(None, None)
        except (InvalidRequestError, NameError) as error_request:
            Checkers.print_exception_one_param(error_request)
        except OperationalError as error_request2:
            Checkers.print_exception_two_params(error_request2.orig.args[1], error_request2.orig.args[0])
        return create_date(None, None)
| from typing import List
from sqlalchemy.exc import InvalidRequestError, OperationalError
from sqlalchemy.orm import Session
from DTO.dates_dto import DatesDto, create_date
from config.base import getSession
from utils.checkers import Checkers
try:
from entities.lst_dates import LstDates
except ImportError as error:
Checkers.print_exception_one_param(error)
class LstDatesService:
def __init__(self):
self.__session: Session = getSession()
self.__all_dates = None
self.__date_by_id = None
def insert_dates(self, dates_insert: DatesDto):
try:
dates_aux = LstDates(date=dates_insert.date)
self.__session.add(dates_aux)
self.__session.commit()
if dates_aux.id_date is not None:
print("RECORD INSERTED IN TABLE '{}' WITH ID '{}'".format(LstDates.__tablename__.name,
dates_aux.id_date))
else:
print(" THE RECORD OF TABLE '{}' HAS NOT BEEN INSERTED".format(LstDates.__tablename__.name))
except (InvalidRequestError, NameError) as error_request:
Checkers.print_exception_one_param(error_request)
except OperationalError as error_request2:
Checkers.print_exception_two_params(error_request2.orig.args[1], error_request2.orig.args[0])
def update_dates(self, date_to_serach, id_date, date_to_update=None):
try:
date_before: DatesDto = self.get_date_by_id(id_date)
if Checkers.validate_int(id_date, LstDates.id_date.name) and Checkers.validate_datetime(
date_to_serach, LstDates.date) and date_before.id_date is not None and date_before.date is not None:
self.__session.query(LstDates).filter(LstDates.date.like(date_to_serach),
LstDates.id_date.like(id_date)) \
.update({
LstDates.date: Checkers.check_field_not_null(LstDates.date, date_to_update)},
synchronize_session=False
)
self.__session.commit()
date_after: DatesDto = self.get_date_by_id(id_date)
if date_before.__dict__ != date_after.__dict__:
print("RECORD UPDATE IN TABLE '{}' WITH ID '{}'".format(LstDates.__tablename__.name,
id_date))
else:
print(" THE RECORD OF TABLE '{}' HAS NOT BEEN UPDATED".format(LstDates.__tablename__.name))
else:
print(" THE RECORD OF TABLE '{}' COULD NOT BE UPDATED ".format(LstDates.__tablename__.name))
except (InvalidRequestError, NameError) as error_request:
Checkers.print_exception_one_param(error_request)
except OperationalError as error_request2:
Checkers.print_exception_two_params(error_request2.orig.args[1], error_request2.orig.args[0])
def delete_date(self, date_to_delete, id_date):
try:
date_before: DatesDto = self.get_date_by_id(id_date)
if Checkers.validate_int(id_date, LstDates.id_date.name) and Checkers.validate_datetime(
date_to_delete, LstDates.date) and date_before.id_date is not None and date_before.date is not None:
self.__session.query(LstDates).filter(LstDates.date.like(date_to_delete),
LstDates.id_date.like(id_date)) \
.delete(synchronize_session=False)
self.__session.commit()
date_after: DatesDto = self.get_date_by_id(id_date)
if date_before.id_date is not None and date_after.id_date is None:
print("RECORD DELETE IN TABLE '{}' WITH ID '{}'".format(LstDates.__tablename__.name,
id_date))
else:
print(" THE RECORD OF TABLE '{}' WITH ID '{}' HAS NOT BEEN DELETED BECAUSE IT DID NOT EXIST".format(
LstDates.__tablename__.name,
id_date))
else:
print(" THE RECORD OF TABLE '{}' COULD NOT BE DELETED".format(LstDates.__tablename__.name))
except (InvalidRequestError, NameError) as error_request:
Checkers.print_exception_one_param(error_request)
except OperationalError as error_request2:
Checkers.print_exception_two_params(error_request2.orig.args[1], error_request2.orig.args[0])
def get_all_dates(self):
dates_dto_list = []
try:
self.__all_dates: List[DatesDto] = self.__session.query(LstDates).all()
if len(self.__all_dates) != 0:
for row in self.__all_dates:
date_aux = create_date(
row.id_date,
row.date
)
dates_dto_list.append(date_aux)
else:
Checkers.empty_list(LstDates.__tablename__.name)
except (InvalidRequestError, NameError) as error_request:
Checkers.print_exception_one_param(error_request)
except OperationalError as error_request2:
Checkers.print_exception_two_params(error_request2.orig.args[1], error_request2.orig.args[0])
return dates_dto_list
def get_date_by_id(self, id_date):
try:
self.__date_by_id: DatesDto = self.__session.query(LstDates).filter(LstDates.id_date.like(id_date)).first()
if self.__date_by_id is not None:
return create_date(
self.__date_by_id.id_date,
self.__date_by_id.date,
)
else:
Checkers.print_object_filter_null(LstDates.id_date, str(id_date))
return create_date(None, None)
except (InvalidRequestError, NameError) as error_request:
Checkers.print_exception_one_param(error_request)
except OperationalError as error_request2:
Checkers.print_exception_two_params(error_request2.orig.args[1], error_request2.orig.args[0])
return create_date(None, None) | none | 1 | 2.450868 | 2 | |
Tarea11.py | crltsnch/calentamiento | 0 | 6614959 | import random
import seaborn as sns
import matplotlib.pyplot as plt
iterations = 10000  # number of Monte-Carlo samples per job offer
# Decision criteria, in the same order as the weight vector and the
# [low, high] grade intervals passed to Monte_Carlo().
variables = ["work", "salary","conditions", "career_advancement", "social_impact", "job_security", "life_balance", "locations", "atmosphere", "travel" ]
def Monte_Carlo(grande, n_iter=10000):
    """Monte-Carlo simulation of a job offer's weighted total score.

    In every iteration each criterion gets a score drawn uniformly from
    its [low, high] interval and the weighted scores are summed.

    Args:
        grande: list of [low, high] pairs, one per criterion, in the same
            order as the module-level ``variables`` list.
        n_iter: number of simulated samples; defaults to 10000, matching
            the original module-level ``iterations`` constant.

    Returns:
        list[float]: one weighted total score per iteration.
    """
    # Fixed criterion weights, aligned with `variables`.
    weight = [0.15, 0.09, 0.12, 0.11, 0.12, 0.02, 0.11, 0.07, 0.1, 0.11]
    final_results = []
    for _ in range(n_iter):
        # BUG FIX: the body referenced the undefined name `grade` while
        # the parameter is `grande`, so every call raised NameError.
        total = sum(w * random.uniform(lo, hi)
                    for w, (lo, hi) in zip(weight, grande))
        final_results.append(total)
    return final_results
# Simulate the weighted score distribution of three job offers; each list
# gives the [low, high] grade interval per criterion, in `variables` order.
a = Monte_Carlo([[4,9], [8.5, 10], [5, 9], [8.5, 9.5], [3, 7], [4, 9], [3, 8], [7.5, 8], [5, 9], [0, 6]])
b = Monte_Carlo([[5, 10], [4, 4], [7, 9], [2, 8], [6, 9.5], [8.5, 10], [8, 10], [0, 7], [3, 9], [0, 3]])
c = Monte_Carlo([[4, 7], [6, 8], [6, 9], [6.5, 9], [2, 6], [6.5, 9], [5.5, 9], [9.5, 9.5], [5, 9], [4, 9]])
fig = plt.figure(figsize=(10, 6))
# NOTE(review): seaborn.distplot is deprecated (removed in seaborn >= 0.14;
# use histplot/displot) -- confirm the pinned seaborn version.
sns.distplot(a)
sns.distplot(b)
sns.distplot(c)
fig.legend(labels=["Job A", "Job B", "Job C"])
plt.title("Monte_Carlo Distributions")
plt.show()
import seaborn as sns
import matplotlib.pyplot as plt
iterations = 10000
variables = ["work", "salary","conditions", "career_advancement", "social_impact", "job_security", "life_balance", "locations", "atmosphere", "travel" ]
def Monte_Carlo(grande):
final_results = []
weight = [0.15, 0.09, 0.12, 0.11, 0.12, 0.02, 0.11, 0.07, 0.1, 0.11]
for n in range(iterations):
results = []
for i in range(len(variables)):
value = weight[i] * (random.uniform(grade[i][0], grade[i][1]))
results.append(value)
final_results.append(sum(results))
return(final_results)
a = Monte_Carlo([[4,9], [8.5, 10], [5, 9], [8.5, 9.5], [3, 7], [4, 9], [3, 8], [7.5, 8], [5, 9], [0, 6]])
b = Monte_Carlo([[5, 10], [4, 4], [7, 9], [2, 8], [6, 9.5], [8.5, 10], [8, 10], [0, 7], [3, 9], [0, 3]])
c = Monte_Carlo([[4, 7], [6, 8], [6, 9], [6.5, 9], [2, 6], [6.5, 9], [5.5, 9], [9.5, 9.5], [5, 9], [4, 9]])
fig = plt.figure(figsize=(10, 6))
sns.distplot(a)
sns.distplot(b)
sns.distplot(c)
fig.legend(labels=["Job A", "Job B", "Job C"])
plt.title("Monte_Carlo Distributions")
plt.show() | none | 1 | 3.297634 | 3 | |
elevenclock/lang/LOCALE_UPDATER.py | ShintakuNobuhiro/ElevenClock | 0 | 6614960 | <filename>elevenclock/lang/LOCALE_UPDATER.py
import glob, os
# Marker each lang_*.py file should contain, and the replacement block that
# prepends the new 2.8 strings before it.
OLDSTR = b"lang2_7_bis = {"
NEWSTR = b"""lang_2_8 = {
    "Force the clock to be at the top of the screen": "",
    "Show the clock on the primary screen": "",
    "Use a custom font color": "",
    "Use a custom background color": "",
    "Align the clock text to the center": "",
    "Select custom color": "",
    "Hide the clock when a program occupies all screens": "",
}
lang2_7_bis = lang_2_8 | {"""
input(f"Path is \"{os.getcwd()}\" Press [INTRO] to contniue")
print()
print()
print("Lang files to update: ")
for file in glob.glob("lang_*.py"):
    print(" -", file)
print()
input("Press [INTRO] to contniue")
print()
print()
print("old string:", OLDSTR)
print("new string:", NEWSTR)
print()
input("Press [INTRO] to contniue")
for file in glob.glob("lang_*.py"):
    print("Processing", file, "...")
    try:
        # Read-modify-write in binary mode so each file's encoding is
        # preserved exactly.
        with open(file, "rb") as f:
            contents = f.read()
        with open(file, "wb") as f:
            f.write(contents.replace(OLDSTR, NEWSTR))
        print(file, "has been updated successfully")
    except Exception as e:
        # BUG FIX: the original bare `except:` printed "... has been updated
        # successfully" on failure; report the failure and its cause instead.
        print("🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥", file, "could not be updated:", e)
input("Finished, press [INTRO] to close")
| <filename>elevenclock/lang/LOCALE_UPDATER.py
import glob, os
OLDSTR = b"lang2_7_bis = {"
NEWSTR = b"""lang_2_8 = {
"Force the clock to be at the top of the screen": "",
"Show the clock on the primary screen": "",
"Use a custom font color": "",
"Use a custom background color": "",
"Align the clock text to the center": "",
"Select custom color": "",
"Hide the clock when a program occupies all screens": "",
}
lang2_7_bis = lang_2_8 | {"""
input(f"Path is \"{os.getcwd()}\" Press [INTRO] to contniue")
print()
print()
print("Lang files to update: ")
for file in glob.glob("lang_*.py"):
print(" -", file)
print()
input("Press [INTRO] to contniue")
print()
print()
print("old string:", OLDSTR)
print("new string:", NEWSTR)
print()
input("Press [INTRO] to contniue")
for file in glob.glob("lang_*.py"):
print("Processing", file, "...")
try:
with open(file, "rb") as f:
contents = f.read()
with open(file, "wb") as f:
f.write(contents.replace(OLDSTR, NEWSTR))
print(file, "has been updated successfully")
except:
print("🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥🟥", file, "has been updated successfully")
input("Finished, press [INTRO] to close")
| en | 0.559212 | lang_2_8 = { "Force the clock to be at the top of the screen": "", "Show the clock on the primary screen": "", "Use a custom font color": "", "Use a custom background color": "", "Align the clock text to the center": "", "Select custom color": "", "Hide the clock when a program occupies all screens": "", } lang2_7_bis = lang_2_8 | { | 2.777575 | 3 |
taxinnovation/apps/users/migrations/0010_usertemporalmedia_validation_video.py | rootUserM/Docekerfiles-examples | 0 | 6614961 | # Generated by Django 3.0.8 on 2020-08-21 16:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional `validation_video` file field to UserTemporalMedia."""

    dependencies = [
        ('users', '0009_usertemporalmedia'),
    ]

    operations = [
        migrations.AddField(
            model_name='usertemporalmedia',
            name='validation_video',
            # Optional upload, stored under a date-based directory tree.
            field=models.FileField(blank=True, max_length=300, null=True, upload_to='users/documents/validation_video/%Y/%m/%d/', verbose_name='Video de validacion'),
        ),
    ]
| # Generated by Django 3.0.8 on 2020-08-21 16:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0009_usertemporalmedia'),
]
operations = [
migrations.AddField(
model_name='usertemporalmedia',
name='validation_video',
field=models.FileField(blank=True, max_length=300, null=True, upload_to='users/documents/validation_video/%Y/%m/%d/', verbose_name='Video de validacion'),
),
]
| en | 0.808635 | # Generated by Django 3.0.8 on 2020-08-21 16:00 | 1.527946 | 2 |
rationale_net/models/encoder.py | ravenscroftj/text_nn | 62 | 6614962 | import torch
import torch.nn as nn
import torch.autograd as autograd
import torch.nn.functional as F
import rationale_net.models.cnn as cnn
import pdb
class Encoder(nn.Module):
    """CNN-based text encoder.

    Embeds word indices, optionally masks the embeddings with a rationale
    mask, runs a CNN over the sequence and projects to class logits.
    """

    def __init__(self, embeddings, args):
        """
        Args:
            embeddings: numpy array (vocab_size, hidden_dim) of pretrained
                word embeddings used to initialise the embedding layer.
            args: namespace with at least `model_form`, `dropout`,
                `hidden_dim`, `num_class`, `filters`, `filter_num`,
                `use_as_tagger` and `cuda`.

        Raises:
            NotImplementedError: if ``args.model_form`` is not 'cnn'.
        """
        super(Encoder, self).__init__()
        ### Encoder
        self.args = args
        vocab_size, hidden_dim = embeddings.shape
        self.embedding_dim = hidden_dim
        self.embedding_layer = nn.Embedding(vocab_size, hidden_dim)
        self.embedding_layer.weight.data = torch.from_numpy(embeddings)
        self.embedding_layer.weight.requires_grad = True
        self.embedding_fc = nn.Linear(hidden_dim, hidden_dim)
        # NOTE(review): embedding_bn is never used in forward(); kept so
        # existing checkpoints that include this parameter still load.
        self.embedding_bn = nn.BatchNorm1d(hidden_dim)

        if args.model_form == 'cnn':
            self.cnn = cnn.CNN(args, max_pool_over_time=(not args.use_as_tagger))
            self.fc = nn.Linear(len(args.filters) * args.filter_num, args.hidden_dim)
        else:
            raise NotImplementedError("Model form {} not yet supported for encoder!".format(args.model_form))

        self.dropout = nn.Dropout(args.dropout)
        self.hidden = nn.Linear(args.hidden_dim, args.num_class)

    def forward(self, x_indx, mask=None):
        '''
        x_indx: batch of word indices
        mask: Mask to apply over embeddings for tao rationales

        Returns:
            (logit, hidden): class logits and the pre-classifier features.
        '''
        x = self.embedding_layer(x_indx.squeeze(1))
        if self.args.cuda:
            x = x.cuda()
        if mask is not None:
            x = x * mask.unsqueeze(-1)
        x = F.relu(self.embedding_fc(x))
        x = self.dropout(x)
        if self.args.model_form == 'cnn':
            x = torch.transpose(x, 1, 2)  # Switch X to (Batch, Embed, Length)
            hidden = self.cnn(x)
            hidden = F.relu(self.fc(hidden))
        else:
            # BUG FIX: the original referenced the undefined name `args`
            # here, raising NameError instead of the intended message.
            raise Exception("Model form {} not yet supported for encoder!".format(self.args.model_form))
        hidden = self.dropout(hidden)
        logit = self.hidden(hidden)
        return logit, hidden
| import torch
import torch.nn as nn
import torch.autograd as autograd
import torch.nn.functional as F
import rationale_net.models.cnn as cnn
import pdb
class Encoder(nn.Module):
def __init__(self, embeddings, args):
super(Encoder, self).__init__()
### Encoder
self.args = args
vocab_size, hidden_dim = embeddings.shape
self.embedding_dim = hidden_dim
self.embedding_layer = nn.Embedding( vocab_size, hidden_dim)
self.embedding_layer.weight.data = torch.from_numpy( embeddings )
self.embedding_layer.weight.requires_grad = True
self.embedding_fc = nn.Linear( hidden_dim, hidden_dim )
self.embedding_bn = nn.BatchNorm1d( hidden_dim)
if args.model_form == 'cnn':
self.cnn = cnn.CNN(args, max_pool_over_time=(not args.use_as_tagger))
self.fc = nn.Linear( len(args.filters)*args.filter_num, args.hidden_dim)
else:
raise NotImplementedError("Model form {} not yet supported for encoder!".format(args.model_form))
self.dropout = nn.Dropout(args.dropout)
self.hidden = nn.Linear(args.hidden_dim, args.num_class)
def forward(self, x_indx, mask=None):
'''
x_indx: batch of word indices
mask: Mask to apply over embeddings for tao ratioanles
'''
x = self.embedding_layer(x_indx.squeeze(1))
if self.args.cuda:
x = x.cuda()
if not mask is None:
x = x * mask.unsqueeze(-1)
x = F.relu( self.embedding_fc(x))
x = self.dropout(x)
if self.args.model_form == 'cnn':
x = torch.transpose(x, 1, 2) # Switch X to (Batch, Embed, Length)
hidden = self.cnn(x)
hidden = F.relu( self.fc(hidden) )
else:
raise Exception("Model form {} not yet supported for encoder!".format(args.model_form))
hidden = self.dropout(hidden)
logit = self.hidden(hidden)
return logit, hidden
| en | 0.72174 | ### Encoder x_indx: batch of word indices mask: Mask to apply over embeddings for tao ratioanles # Switch X to (Batch, Embed, Length) | 2.639603 | 3 |
fields/meta_field.py | Forevka/PyOrm | 0 | 6614963 | from pypika import Field
import typing
from loguru import logger
T = typing.TypeVar('T')
class MetaField(Field):
    """A pypika Field subclass that also carries a marshalled value.

    Used as a declarative ORM field: `_setup` binds the attribute name
    and `_marshall` stores the raw value loaded from the database.
    """

    name: str
    # Holds the marshalled value; None until _marshall() is called.
    value: typing.Optional[T]
    _required: bool = True
    primary_key: bool

    def __init__(self, *args, required: bool = True, pk: bool = False):
        self._required = required
        self.primary_key = pk
        self.value = None
        # NOTE(review): super(Field, self) skips Field.__init__ entirely
        # and calls its base class with `self` as the first positional
        # argument -- looks unintentional; confirm against the pypika
        # Field/Term constructor signatures.
        super(Field, self).__init__(self, *args)

    def _setup(self, varname: str):
        # Called by the owning model to record the attribute name.
        self.name = varname

    def _marshall(self, value: str):
        # Store the raw DB value on the field; returns self for chaining.
        logger.info(value)
        self.value = value
        return self

    def __str__(self,):
        # NOTE(review): returns self.value unchanged; when value is None
        # (or any non-str) this violates the __str__ contract -- confirm.
        return self.value
| from pypika import Field
import typing
from loguru import logger
T = typing.TypeVar('T')
class MetaField(Field):
name: str
value: typing.Generic[T]
_required: bool = True
primary_key: bool
def __init__(self, *args, required: bool = True, pk: bool = False):
self._required = required
self.primary_key = pk
self.value = None
super(Field, self).__init__(self, *args)
def _setup(self, varname: str):
self.name = varname
def _marshall(self, value: str):
logger.info(value)
self.value = value
return self
def __str__(self,):
return self.value
| none | 1 | 2.469093 | 2 | |
assembler/__main__.py | paulscottrobson/eris | 13 | 6614964 | <reponame>paulscottrobson/eris
# *****************************************************************************
# *****************************************************************************
#
# Name: __main__.py
# Purpose: Macro Assembler main program
# Created: 8th March 2020
# Author: <NAME> (<EMAIL>)
#
# *****************************************************************************
# *****************************************************************************
import re,sys,os
from assembler import *
if __name__ == "__main__":
    # The project file lists one source path per line; blank lines and
    # ";"-prefixed comment lines are ignored.
    srcFiles = [x.strip() for x in open("asm.project").readlines() if x.strip() != "" and not x.startswith(";")]
    asm = Assembler()
    print("EAS : Eris Assembler (21-03-2020)")
    # Two-pass assembly: pass 1 collects symbols, pass 2 emits code plus a listing.
    for passNumber in (1, 2):
        listing = open("bin"+os.sep+"listing.eas", "w") if passNumber == 2 else None
        asm.startPass(passNumber, listing)
        for sourcePath in srcFiles:
            sourceLines = open(sourcePath).readlines()
            try:
                asm.assemble(sourcePath, sourceLines)
            except Exception as e:
                # Report the failure with its file/line context, then abort.
                err = "\t{0} ({1}:{2})".format(str(e), AssemblerException.FILE, AssemblerException.LINE)
                print(err)
                sys.exit(1)
        if listing is not None:
            listing.close()
        asm.complete()
        print("\tAssembled {0} words.".format(asm.getSize()))
    sys.exit(0)
| # *****************************************************************************
# *****************************************************************************
#
# Name: __main__.py
# Purpose: Macro Assembler main program
# Created: 8th March 2020
# Author: <NAME> (<EMAIL>)
#
# *****************************************************************************
# *****************************************************************************
import re,sys,os
from assembler import *
if __name__ == "__main__":
srcFiles = [x.strip() for x in open("asm.project").readlines() if x.strip() != "" and not x.startswith(";")]
asm = Assembler()
print("EAS : Eris Assembler (21-03-2020)")
for passNumber in [1,2]:
h = None if passNumber == 1 else open("bin"+os.sep+"listing.eas","w")
asm.startPass(passNumber,h)
for f in srcFiles:
src = open(f).readlines()
try:
asm.assemble(f,src)
except Exception as e:
err = "\t{0} ({1}:{2})".format(str(e),AssemblerException.FILE,AssemblerException.LINE)
print(err)
sys.exit(1)
if h is not None:
h.close()
asm.complete()
print("\tAssembled {0} words.".format(asm.getSize()))
sys.exit(0) | en | 0.323976 | # ***************************************************************************** # ***************************************************************************** # # Name: __main__.py # Purpose: Macro Assembler main program # Created: 8th March 2020 # Author: <NAME> (<EMAIL>) # # ***************************************************************************** # ***************************************************************************** | 2.280182 | 2 |
src/elementary_flask/cron/cron_entry.py | xaled/flaskup | 0 | 6614965 | <reponame>xaled/flaskup
from croniter import croniter
from elementary_flask.typing import Callable
class CronEntry:
    """A named cron schedule bound to a callable task."""

    def __init__(self, name: str, expr_format: str, /, *, task: Callable, hash_id=None, args=None, kwargs=None):
        # The hash id (used by croniter for hashed cron expressions)
        # defaults to the entry's own name.
        self.name = name
        self.croniter = croniter(expr_format, hash_id=hash_id or name, ret_type=float)
        self.task = task
        self.args = args or ()
        self.kwargs = kwargs or {}

    def __call__(self, cron_context=None):
        return self.fire(cron_context=cron_context)

    def fire(self, cron_context=None):
        # cron_context is accepted but deliberately not forwarded to the task.
        return self.task(*self.args, **self.kwargs)

    def get_next(self, start_time=None):
        """Return the next fire time (float timestamp) after *start_time*."""
        return self.croniter.get_next(start_time=start_time)
| from croniter import croniter
from elementary_flask.typing import Callable
class CronEntry:
def __init__(self, name: str, expr_format: str, /, *, task: Callable, hash_id=None, args=None, kwargs=None):
hash_id = hash_id or name
self.name = name
self.croniter = croniter(expr_format, hash_id=hash_id, ret_type=float)
self.task = task
self.args = args or tuple()
self.kwargs = kwargs or dict()
def __call__(self, cron_context=None):
return self.fire(cron_context=cron_context)
def fire(self, cron_context=None):
# return self.task(cron_context=cron_context, *self.args, **self.kwargs)
return self.task(*self.args, **self.kwargs)
def get_next(self, start_time=None):
return self.croniter.get_next(start_time=start_time) | ru | 0.067216 | # return self.task(cron_context=cron_context, *self.args, **self.kwargs) | 2.606663 | 3 |
herder/tests/functional/test_rss.py | cc-archive/herder | 0 | 6614966 | # -*- coding: utf-8 -*-
import herder.tests
from pylons import config
import os.path
from herder.tests import *
from herder.tests.functional.test_language import TestLanguageController
import datetime
import feedparser
class TestFeed(TestController):
    # Note: the RSS architecture is currently very coarse -- without
    # versioning we can only observe *that* the feed noticed an event,
    # not what actually changed. Eventually events should carry
    # version-control IDs (or the full event data and metadata).
    def test_make_event_and_watch_rss_feed_update_properly(self):
        # Remember "now" truncated to whole seconds, and locate the feed file.
        raw_now = datetime.datetime.now()
        threshold = datetime.datetime(*raw_now.timetuple()[:7])
        feed_path = os.path.join(
            config['herder.feed_dir'], 'cc_org', 'en_US', 'index.xml')
        # Start from a clean slate if a previous run left a feed behind.
        if os.path.exists(feed_path):
            os.unlink(feed_path)
        # Editing and then un-editing a string should emit two feed events.
        lang_ctrl = TestLanguageController()
        lang_ctrl.test_edit_string_as_bureau()
        # The regenerated feed must contain exactly those two fresh entries.
        feed = feedparser.parse(open(feed_path))
        assert len(feed.entries) == 2
        fresh = []
        for entry in feed.entries:
            if datetime.datetime(*entry.updated_parsed[:7]) >= threshold:
                fresh.append(entry)
        assert len(fresh) == 2
        for entry in fresh:
            assert 'Untied States' in entry.summary
| # -*- coding: utf-8 -*-
import herder.tests
from pylons import config
import os.path
from herder.tests import *
from herder.tests.functional.test_language import TestLanguageController
import datetime
import feedparser
class TestFeed(TestController):
# Note: Architecture of RSS feeds is currently very lame,
# for now printing just that the RSS feed noticed an event,
# since without versioning there's no way to be sure of what
# actually changed.
# Later we should pass around version control system IDs in
# the event, or the whole event's data and metadata.
def test_make_event_and_watch_rss_feed_update_properly(self):
# check the date and calc. the filename
with_nanosec = datetime.datetime.now()
now = datetime.datetime(*with_nanosec.timetuple()[:7])
filename = os.path.join(
config['herder.feed_dir'], 'cc_org', 'en_US', 'index.xml')
# clear it if it's there
if os.path.exists(filename):
os.unlink(filename)
# edit and un-edit a string
tlc = TestLanguageController()
tlc.test_edit_string_as_bureau()
# Now we parse the feed to show it got updated twice
parsed = feedparser.parse(open(filename))
assert len(parsed.entries) == 2
relevant = [ entry for entry in parsed.entries if
datetime.datetime(*entry.updated_parsed[:7]) >=
now ]
assert len(relevant) == 2
for entry in relevant:
assert 'Untied States' in entry.summary
| en | 0.912703 | # -*- coding: utf-8 -*- # Note: Architecture of RSS feeds is currently very lame, # for now printing just that the RSS feed noticed an event, # since without versioning there's no way to be sure of what # actually changed. # Later we should pass around version control system IDs in # the event, or the whole event's data and metadata. # check the date and calc. the filename # clear it if it's there # edit and un-edit a string # Now we parse the feed to show it got updated twice | 2.223933 | 2 |
turtlePlayer.py | Ashnwor/Python-beginner-projects | 0 | 6614967 | <filename>turtlePlayer.py
# @Author: ashnwor
# @Date: 03-Nov-2018
# @Email: <EMAIL>
# @Last modified by: ashnwor
# @Last modified time: 03-Nov-2018
import turtle
window = turtle.Screen()  # drawing window
window.bgcolor("black")  # black background
player = turtle.Turtle()  # the turtle the player steers
player.speed(0)  # fastest animation setting (no per-move delay)
def turnLeft():
    # Rotate the player 15 degrees counter-clockwise (bound to "a" in main()).
    player.left(15)
def turnRight():
    # Rotate the player 15 degrees clockwise (bound to "d" in main()).
    player.right(15)
def goForward():
    # Move the player 10 units in its current heading (bound to "w" in main()).
    player.forward(10)
def drawSonDraw():
    # Lower the PLAYER's pen so its movement leaves a trail.
    # Fix: the original called turtle.pendown(), which lowers the pen of the
    # module-level *default* turtle -- not `player`, the only turtle that
    # actually moves -- so holding space never drew anything.
    player.pendown()
def dontDraw():
    # Raise the PLAYER's pen so movement stops drawing.
    # Fix: the original called turtle.penup(), which acts on the module-level
    # default turtle instead of `player` (same wrong-turtle bug as drawSonDraw).
    player.penup()
def main():
    """Configure the player turtle and wire up the keyboard controls.

    Controls: "a"/"d" rotate, "w" moves forward, holding "space" draws.
    """
    player.color("white")
    player.shape("triangle")
    player.penup()  # start with the pen up (no trail until space is held)
    turtle.listen()  # give the window keyboard focus
    turtle.onkeypress(turnLeft, "a")
    turtle.onkeypress(turnRight, "d")
    turtle.onkeypress(goForward, "w")
    # Fix: turtle.onkey() is an alias for onkeyrelease(), so the original
    # bound BOTH space handlers to the key-release event and the second
    # binding overwrote the first -- the pen never went down. Intended
    # behavior: press = pen down, release = pen up.
    turtle.onkeypress(drawSonDraw, "space")
    turtle.onkeyrelease(dontDraw, "space")
    # Block on console input so the script (and window) stays alive.
    # NOTE(review): turtle.mainloop() is the conventional way to keep the
    # window responsive -- confirm input() is intentional here.
    input()


main()
| <filename>turtlePlayer.py
# @Author: ashnwor
# @Date: 03-Nov-2018
# @Email: <EMAIL>
# @Last modified by: ashnwor
# @Last modified time: 03-Nov-2018
import turtle
window = turtle.Screen()
window.bgcolor("black")
player = turtle.Turtle()
player.speed(0)
def turnLeft():
player.left(15)
def turnRight():
player.right(15)
def goForward():
player.forward(10)
def drawSonDraw():
turtle.pendown()
def dontDraw():
turtle.penup()
def main():
player.color("white")
player.shape("triangle")
player.penup()
turtle.listen()
turtle.onkeypress(turnLeft, "a")
turtle.onkeypress(turnRight, "d")
turtle.onkeypress(goForward, "w")
turtle.onkey(drawSonDraw, "space")
turtle.onkeyrelease(dontDraw, "space")
input()
main()
| en | 0.460942 | # @Author: ashnwor # @Date: 03-Nov-2018 # @Email: <EMAIL> # @Last modified by: ashnwor # @Last modified time: 03-Nov-2018 | 3.594949 | 4 |
day3/nestedset.py | nikhilsamninan/python-files | 0 | 6614968 | #L=[[1,2,3],['a','b','c']]
#print(L[1][1])
# Nested dict: maps a person's name to a dict of their attributes.
k={'Ligin':{'roll no':24}}
print(k['Ligin']['roll no']) | #L=[[1,2,3],['a','b','c']]
#print(L[1][1])
k={'Ligin':{'roll no':24}}
print(k['Ligin']['roll no']) | en | 0.158028 | #L=[[1,2,3],['a','b','c']] #print(L[1][1]) | 3.070864 | 3 |
handlers/House.py | qq453388937/Tornado_home_Git | 0 | 6614969 | <reponame>qq453388937/Tornado_home_Git
# -*- coding:utf-8 -*-
from .BaseHandler import BaseHandler
from utils.response_code import RET
import logging
import json
import constants
import math
from utils.commons import required_login
class AreaInfoHandler(BaseHandler):
    """Return the list of areas (district id + name), cached in redis.

    Response body: {"errcode": ..., "errmsg": ..., "data": [{"area_id": ..., "name": ...}, ...]}
    """

    def get(self):
        # Try the redis cache first; a cache failure only means we fall back
        # to the database, so the error is logged and swallowed.
        try:
            res = self.redis.get("area_info")
        except Exception as e:
            res = None
            logging.error(e)
        if res:
            # `res` is the JSON-serialized area list stored below, so it can
            # be embedded directly without a decode/re-encode round trip.
            # NOTE: the redis client may hand back bytes under Python 3.
            if isinstance(res, bytes):
                res = res.decode("utf-8")
            # Fix: build *valid* JSON. The previous hand-rolled string used
            # single-quoted braces and unquoted keys, which no client could parse.
            return self.write('{"errcode": "%s", "errmsg": "ok!!", "data": %s}' % (RET.OK, res))
        logging.debug(res)
        # Cache miss: query the database.
        try:
            ret = self.db.query("select ai_area_id,ai_name from ih_area_info")
        except Exception as e:
            logging.error(e)
            return self.write(dict(errcode=RET.DBERR, errmsg="查询出错了!!"))
        if not ret:
            return self.write(dict(errcode=RET.NODATA, errmsg="no data"))
        areas = [{"area_id": x["ai_area_id"], "name": x["ai_name"]} for x in ret]
        # Refresh the cache; failing to cache is not fatal.
        try:
            self.redis.setex("area_info", constants.REDIS_AREA_INFO_EXPIRES_SECONDES, json.dumps(areas))
        except Exception as e:
            logging.error(e)
        self.write(dict(errcode=RET.OK, errmsg="ok!!", data=areas))
class MyHousehandler(BaseHandler):
    """List the houses published by the logged-in user."""

    @required_login
    def get(self):
        """The user_id comes from the session rather than the client, which is safer."""
        # self.session is available here because @required_login invoked
        # get_current_user(), which attaches the session to the handler.
        user_id = self.session.data["user_id"]
        try:
            sql = "select a.hi_house_id,a.hi_title,a.hi_price,a.hi_ctime,b.ai_name,a.hi_index_image_url " \
                  "from ih_house_info a inner join ih_area_info b on a.hi_area_id=b.ai_area_id where a.hi_user_id=%s;"
            ret = self.db.query(sql, user_id)
        except Exception as e:
            logging.error(e)
            return self.write(dict(errcode=RET.DBERR, errmsg="查询出错了!!"))
        houses = []
        if ret:
            for line in ret:
                houses.append({
                    "house_id": line["hi_house_id"],
                    "title": line["hi_title"],
                    "price": line["hi_price"],
                    "ctime": line["hi_ctime"].strftime("%Y-%m-%d"),  # datetime -> display string
                    "area_name": line["ai_name"],
                    "img_url": constants.QINIU_URL_PREFIX + line["hi_index_image_url"] if line[
                        "hi_index_image_url"] else ""
                })
        # Fix: removed the hard-coded "测试" fixture house that was
        # unconditionally appended here -- it was debug residue and leaked
        # into every production response.
        self.write(dict(errcode=RET.OK, errmsg="ok", houses=houses))
class HouseInfoHandler(BaseHandler):
    """Create (POST) a house listing; GET is a placeholder."""

    def get(self):
        """Fetch house information (placeholder implementation)."""
        self.write("ok")

    @required_login
    def post(self):
        """Create a new house listing owned by the logged-in user.

        (For easier manual testing the login decorator and the xsrf check
        can be commented out.)
        """
        user_id = self.session.data["user_id"]
        title = self.json_args.get("title")
        price = self.json_args.get("price")
        area_id = self.json_args.get("area_id")
        address = self.json_args.get("address")
        room_count = self.json_args.get("room_count")
        acreage = self.json_args.get("acreage")
        unit = self.json_args.get("unit")
        capacity = self.json_args.get("capacity")
        beds = self.json_args.get("beds")
        deposit = self.json_args.get("deposit")
        min_days = self.json_args.get("min_days")
        max_days = self.json_args.get("max_days")
        facility = self.json_args.get("facility")  # list of facility ids for this house
        # Required-field validation.
        if not all((title, price, area_id, address, room_count, acreage, unit, capacity, beds, deposit, min_days,
                    max_days)):
            return self.write(dict(errcode=RET.PARAMERR, errmsg="缺少参数"))
        try:
            # Money is stored in cents.
            price = int(price) * 100
            deposit = int(deposit) * 100
        except Exception as e:
            return self.write(dict(errcode=RET.PARAMERR, errmsg="参数错误"))
        # Insert the base row into ih_house_info.
        try:
            sql = "insert into ih_house_info(hi_user_id,hi_title,hi_price,hi_area_id,hi_address,hi_room_count," \
                  "hi_acreage,hi_house_unit,hi_capacity,hi_beds,hi_deposit,hi_min_days,hi_max_days)" \
                  "values(%(user_id)s,%(title)s,%(price)s,%(area_id)s,%(address)s,%(room_count)s,%(acreage)s," \
                  "%(house_unit)s,%(capacity)s,%(beds)s,%(deposit)s,%(min_days)s,%(max_days)s)"
            house_id = self.db.execute(sql, user_id=user_id, title=title, price=price, area_id=area_id, address=address,
                                       room_count=room_count, acreage=acreage, house_unit=unit, capacity=capacity,
                                       beds=beds, deposit=deposit, min_days=min_days, max_days=max_days)
        except Exception as e:
            logging.error(e)
            return self.write(dict(errcode=RET.DBERR, errmsg="数据错误!"))
        # Insert the facility rows, if any were supplied.
        # Fix: guard against an empty/missing facility list -- the original
        # built "insert ... values" with zero tuples (a SQL syntax error) or
        # crashed iterating None, which then rolled the whole house back.
        if facility:
            try:
                sql = "insert into ih_house_facility(hf_house_id,hf_facility_id) values"
                placeholders = []  # one "(%s,%s)" per facility
                params = []  # flattened (house_id, facility_id) pairs
                for facility_id in facility:
                    placeholders.append("(%s,%s)")
                    params.append(house_id)
                    params.append(facility_id)
                sql += ",".join(placeholders)
                logging.debug(sql)
                self.db.execute(sql, *params)
            except Exception as e:
                logging.error(e)
                # torndb provides no transactions, so undo the base row by hand.
                try:
                    self.db.execute("delete from ih_house_info WHERE hi_house_id=%s", house_id)
                except Exception as e:
                    logging.error(e)
                    return self.write(dict(errcode=RET.DBERR, errmsg="删除出错"))
                else:
                    return self.write(dict(errcode=RET.DBERR, errmsg="存储房屋基本信息失败!!"))
        self.write(dict(errcode=RET.OK, errmsg="OK", house_id=house_id))
class HouseListHandler(BaseHandler):
    """Search/browse endpoint with date, area, sort and paging filters."""

    def get(self):
        """Return one page of houses matching the query filters.

        GET has no effect on the data itself, so no xsrf concerns here.
        Query parameters (all optional):
          sd -- desired stay start date, e.g. "2017-02-28"
          ed -- desired stay end date
          aid -- area id to filter on
          sk -- sort key: "new" (default), "booking", "price-inc", "price-des"
          p -- 1-based page number (default "1")
        """
        start_date = self.get_argument("sd", "")  # defaults avoid tornado's 400 on missing args
        end_date = self.get_argument("ed", "")
        area_id = self.get_argument("aid", "")
        sort_key = self.get_argument("sk", "new")
        page = self.get_argument("p", "1")
        # Fix: normalize the page number once, up front. Previously it was
        # only converted inside the count-query success path, so the "limit"
        # arithmetic below crashed on a str page whenever the count query failed.
        try:
            page = int(page)
        except ValueError:
            page = 1
        # Tables involved: ih_house_info (house), ih_user_profile (owner),
        # ih_order_info (orders; used for availability filtering).
        # DISTINCT collapses the row-per-order duplication from the LEFT JOIN;
        # only distinct-listed columns may appear, so order dates stay out.
        sql = "select distinct hi_title,hi_house_id,hi_price,hi_room_count,hi_address,hi_order_count,up_avatar,hi_index_image_url,hi_ctime" \
              " from ih_house_info inner join ih_user_profile on hi_user_id=up_user_id left join ih_order_info" \
              " on hi_house_id=oi_house_id"
        sql_total_count = "select count(distinct hi_house_id) count from ih_house_info inner join ih_user_profile on hi_user_id=up_user_id " \
                          "left join ih_order_info on hi_house_id=oi_house_id"
        sql_where = []  # where-clause fragments
        sql_params = {}  # named parameters for those fragments
        if start_date and end_date:
            # Keep houses whose orders do not overlap [start_date, end_date],
            # plus houses with no orders at all (NULL dates from the LEFT JOIN).
            sql_where.append(
                "((oi_begin_date>%(end_date)s or oi_end_date<%(start_date)s)) or (oi_begin_date is null and oi_end_date is null)")
            sql_params["start_date"] = start_date
            sql_params["end_date"] = end_date
        elif start_date:
            sql_where.append("oi_end_date<%(start_date)s")
            sql_params["start_date"] = start_date
        elif end_date:
            sql_where.append("oi_begin_date>%(end_date)s")
            sql_params["end_date"] = end_date
        if area_id:
            sql_where.append("hi_area_id=%(area_id)s")
            sql_params["area_id"] = area_id
        if sql_where:
            # Fix: the original appended the joined conditions to `sql` TWICE
            # and never appended them to `sql_total_count`, leaving the count
            # query with a dangling "where" and the main query malformed.
            where_clause = " where " + " and ".join(sql_where)
            sql += where_clause
            sql_total_count += where_clause
        # Ordering.
        if "new" == sort_key:  # newest upload first
            sql += " order by hi_ctime desc"
        elif "booking" == sort_key:  # most booked first
            sql += " order by hi_order_count desc"
        elif "price-inc" == sort_key:  # cheapest first
            sql += " order by hi_price asc"
        elif "price-des" == sort_key:  # most expensive first
            sql += " order by hi_price desc"
        # Total count first, to compute the page count.
        try:
            ret = self.db.get(sql_total_count, **sql_params)  # dict-like row
        except Exception as e:
            logging.error(e)
            total_page = -1
        else:
            total_page = int(math.ceil(ret["count"] / float(constants.HOUSE_LIST_PAGE_CAPACITY)))
            if page > total_page:
                return self.write(dict(errcode=RET.OK, errmsg="OK", data=[], total_page=total_page))
        # Paging: limit (page_index - 1) * page_size, page_size.
        if 1 == page:
            sql += " limit %s" % constants.HOUSE_LIST_PAGE_CAPACITY
        else:
            sql += " limit %s,%s" % (
                (page - 1) * constants.HOUSE_LIST_PAGE_CAPACITY, constants.HOUSE_LIST_PAGE_CAPACITY)
        logging.debug(sql)
        try:
            ret = self.db.query(sql, **sql_params)
        except Exception as e:
            logging.error(e)
            return self.write(dict(errcode=RET.DBERR, errmsg="查询出错"))
        data = []
        if ret:
            for l in ret:
                house = dict(
                    house_id=l["hi_house_id"],
                    title=l["hi_title"],
                    price=l["hi_price"],
                    room_count=l["hi_room_count"],
                    address=l["hi_address"],
                    order_count=l["hi_order_count"],
                    avatar=constants.QINIU_URL_PREFIX + l["up_avatar"] if l.get("up_avatar") else "",
                    image_url=constants.QINIU_URL_PREFIX + l["hi_index_image_url"] if l.get(
                        "hi_index_image_url") else ""
                )
                data.append(house)
self.write(dict(errcode=RET.OK, errmsg="OK", data=data, total_page=total_page)) | # -*- coding:utf-8 -*-
from .BaseHandler import BaseHandler
from utils.response_code import RET
import logging
import json
import constants
import math
from utils.commons import required_login
class AreaInfoHandler(BaseHandler):
"""获取区域信息"""
def get(self):
# 先查redis
try:
res = self.redis.get("area_info")
# res = json.loads(res)
except Exception as e:
res = None
logging.error(e)
if res:
# return self.write(dict(errcode=RET.OK, errmsg="ok!!", data=res))
return self.write("{'errcode':%s,errmsg:%s,data:%s}" % (RET.OK, "ok!!!", res)) # 少一次序列化操作
print(res)
logging.debug(res)
# 继续执行查询数据库
try:
ret = self.db.query("select ai_area_id,ai_name from ih_area_info")
except Exception as e:
logging.error(e)
return self.write(dict(errcode=RET.DBERR, errmsg="查询出错了!!"))
if not ret:
return self.write(dict(errcode=RET.NODATA, errmsg="no data"))
areas = []
for x in ret:
model = {
"area_id": x["ai_area_id"],
"name": x["ai_name"],
}
areas.append(model)
# 存储到redis
try:
self.redis.setex("area_info", constants.REDIS_AREA_INFO_EXPIRES_SECONDES, json.dumps(areas))
except Exception as e:
logging.error(e)
self.write(dict(errcode=RET.OK, errmsg="ok!!", data=areas))
class MyHousehandler(BaseHandler):
@required_login
def get(self):
"""user_id从session获取而不是前端穿过来更加安全"""
# 这里能使用self.session 的原因是 @required_login装饰器调用了get_current_user()方法
# 而且get_current_user方法里面动态的添加了属性给BaseHandler()
user_id = self.session.data["user_id"]
try:
sql = "select a.hi_house_id,a.hi_title,a.hi_price,a.hi_ctime,b.ai_name,a.hi_index_image_url " \
"from ih_house_info a inner join ih_area_info b on a.hi_area_id=b.ai_area_id where a.hi_user_id=%s;"
ret = self.db.query(sql, user_id)
except Exception as e:
logging.error(e)
return self.write(dict(errcode=RET.DBERR, errmsg="查询出错了!!"))
houses = []
if ret:
for line in ret:
house = {
"house_id": line["hi_house_id"],
"title": line["hi_title"],
"price": line["hi_price"],
"ctime": line["hi_ctime"].strftime("%Y-%m-%d"), # 时间转为字符串
"area_name": line["ai_name"],
"img_url": constants.QINIU_URL_PREFIX + line["hi_index_image_url"] if line[
"hi_index_image_url"] else ""
}
houses.append(house)
house_test = {
"house_id": "测试",
"title": "测试title",
"price": 998,
"ctime": '2018-03-21', # 时间转为字符串
"area_name": '北京',
"img_url": 'http://p5ufc44c8.bkt.clouddn.com/FlsI6fRX-RJ_FFF39hgGT0zb_zlp'
}
houses.append(house_test)
self.write(dict(errcode=RET.OK, errmsg="ok", houses=houses))
class HouseInfoHandler(BaseHandler):
def get(self):
"""拉取房源信息"""
self.write("ok")
@required_login
def post(self):
"""新增房源信息
方便测试
注释登陆装饰器和xsrf校验
"""
user_id = self.session.data["user_id"]
# 测试
# user_id = 11
title = self.json_args.get("title")
price = self.json_args.get("price")
area_id = self.json_args.get("area_id")
address = self.json_args.get("address")
room_count = self.json_args.get("room_count")
acreage = self.json_args.get("acreage")
unit = self.json_args.get("unit")
capacity = self.json_args.get("capacity")
beds = self.json_args.get("beds")
deposit = self.json_args.get("deposit")
min_days = self.json_args.get("min_days")
max_days = self.json_args.get("max_days")
facility = self.json_args.get("facility") # 对一个房屋的设施,是列表类型
# 校验
if not all((title, price, area_id, address, room_count, acreage, unit, capacity, beds, deposit, min_days,
max_days)):
return self.write(dict(errcode=RET.PARAMERR, errmsg="缺少参数"))
try:
price = int(price) * 100
deposit = int(deposit) * 100
except Exception as e:
return self.write(dict(errcode=RET.PARAMERR, errmsg="参数错误"))
# 存储ih_house_info 基本表
try:
sql = "insert into ih_house_info(hi_user_id,hi_title,hi_price,hi_area_id,hi_address,hi_room_count," \
"hi_acreage,hi_house_unit,hi_capacity,hi_beds,hi_deposit,hi_min_days,hi_max_days)" \
"values(%(user_id)s,%(title)s,%(price)s,%(area_id)s,%(address)s,%(room_count)s,%(acreage)s," \
"%(house_unit)s,%(capacity)s,%(beds)s,%(deposit)s,%(min_days)s,%(max_days)s)"
# ret = self.db.execute_rowcount()
house_id = self.db.execute(sql, user_id=user_id, title=title, price=price, area_id=area_id, address=address,
room_count=room_count, acreage=acreage, house_unit=unit, capacity=capacity,
beds=beds, deposit=deposit, min_days=min_days, max_days=max_days)
except Exception as e:
logging.error(e)
return self.write(dict(errcode=RET.DBERR, errmsg="数据错误!"))
# 存储配套设置
try:
sql = "insert into ih_house_facility(hf_house_id,hf_facility_id) values"
sql_tuple = [] # 临时列表容器
temp = [] # 最后转换为元祖
for facility_id in facility:
sql_tuple.append("(%s,%s)") # 2.存到列表里
# sql += "(%s,%s),"
temp.append(house_id) # 元素添加
temp.append(facility_id)
# sql = sql[:-1]
# 2.存到列表里取出来拼接sql
sql += ",".join(sql_tuple)
logging.debug(sql)
temp = tuple(temp) # 最后转换为元祖 元祖解包
new_id = self.db.execute(sql, *temp) # 元祖解包
except Exception as e:
logging.error(e)
try:
# torndb 没有提供事务机制所以必须手动删除
self.db.execute("delete from ih_house_info WHERE hi_house_id=%s", house_id)
except Exception as e:
logging.error(e)
return self.write(dict(errcode=RET.DBERR, errmsg="删除出错"))
else:
return self.write(dict(errcode=RET.DBERR, errmsg="存储房屋基本信息失败!!"))
self.write(dict(errcode=RET.OK, errmsg="OK", house_id=house_id))
class HouseListHandler(BaseHandler):
def get(self):
"""
get 方式 对数据本身不会有什么影响不存在安全问题
传入参数说明
start_date 用户查询的起始时间 sd 非必传 "" "2017-02-28"
end_date 用户查询的终止时间 ed 非必传 ""
area_id 用户查询的区域条件 aid 非必传 ""
sort_key 排序的关键词 sk 非必传 "new" "new" "booking" "price-inc" "price-des"
page 返回的数据页数 p 非必传 1
"""
start_date = self.get_argument("sd", "") # 不设置默认值会报400
end_date = self.get_argument("ed", "")
area_id = self.get_argument("aid", "")
sort_key = self.get_argument("sk", "new")
page = self.get_argument("p", "1")
# 校验参数
# 数据查询
# 涉及到表: ih_house_info 房屋的基本信息 ih_user_profile 房东的用户信息 ih_order_info 房屋订单数据
sql = "select * from ih_house_info AS ihi INNER JOIN ih_user_profile AS iup ON ihi.hi_user_id=iup.up_user_id" \
"left JOIN ih_order_info AS ioi ON ioi.oi_house_id = ihi.hi_house_id "
"""出现在order by 后面的必须出现在distinct 里面
房屋基本信息都一样只要distinct 后面不要出现start_date end_date 就好,理解为联合去重
"""
sql = "select distinct hi_title,hi_house_id,hi_price,hi_room_count,hi_address,hi_order_count,up_avatar,hi_index_image_url,hi_ctime" \
" from ih_house_info inner join ih_user_profile on hi_user_id=up_user_id left join ih_order_info" \
" on hi_house_id=oi_house_id"
sql_total_count = "select count(distinct hi_house_id) count from ih_house_info inner join ih_user_profile on hi_user_id=up_user_id " \
"left join ih_order_info on hi_house_id=oi_house_id"
sql_where = [] # 存储where 后面的条件语句容器
sql_params = {} # 存储参数
if start_date and end_date:
# sql_where.append("(not (oi_begin_date<%(end_date)s and oi_end_date>%(start_date)s))")
sql_where.append(
"((oi_begin_date>%(end_date)s or oi_end_date<%(start_date)s)) or (oi_begin_date is null and oi_end_date is null)")
sql_params["start_date"] = start_date
sql_params["end_date"] = end_date
elif start_date:
sql_where.append("oi_end_date<%(start_date)s")
sql_params["start_date"] = start_date
elif end_date:
sql_where.append("oi_begin_date>%(end_date)s")
sql_params["end_date"] = end_date
if area_id:
sql_where.append("hi_area_id=%(area_id)s")
sql_params["area_id"] = area_id
if sql_where:
sql += " where "
sql += " and ".join(sql_where)
sql_total_count += " where "
sql += " and ".join(sql_where)
# 排序
if "new" == sort_key: # 按最新上传时间排序
sql += " order by hi_ctime desc"
elif "booking" == sort_key: # 最受欢迎
sql += " order by hi_order_count desc"
elif "price-inc" == sort_key: # 价格由低到高
sql += " order by hi_price asc"
elif "price-des" == sort_key: # 价格由高到低
sql += " order by hi_price desc"
# 有了查询条件开始查询数据库
try:
# 先查询总条数
ret = self.db.get(sql_total_count, **sql_params) # 类似字典的
except Exception as e:
logging.error(e)
total_page = -1
else:
total_page = int(math.ceil(ret["count"] / float(constants.HOUSE_LIST_PAGE_CAPACITY)))
page = int(page)
if page > total_page:
return self.write(dict(errcode=RET.OK, errmsg="OK", data=[], total_page=total_page))
# 分页
if 1 == page:
sql += " limit %s" % constants.HOUSE_LIST_PAGE_CAPACITY
else:
sql += " limit %s,%s" % (
# limit (page_Index -1)*pagesize, pagesize
(page - 1) * constants.HOUSE_LIST_PAGE_CAPACITY, constants.HOUSE_LIST_PAGE_CAPACITY)
logging.debug(sql)
try:
ret = self.db.query(sql, **sql_params)
except Exception as e:
logging.error(e)
return self.write(dict(errcode=RET.DBERR, errmsg="查询出错"))
data = []
if ret:
for l in ret:
house = dict(
house_id=l["hi_house_id"],
title=l["hi_title"],
price=l["hi_price"],
room_count=l["hi_room_count"],
address=l["hi_address"],
order_count=l["hi_order_count"],
avatar=constants.QINIU_URL_PREFIX + l["up_avatar"] if l.get("up_avatar") else "",
image_url=constants.QINIU_URL_PREFIX + l["hi_index_image_url"] if l.get(
"hi_index_image_url") else ""
)
data.append(house)
self.write(dict(errcode=RET.OK, errmsg="OK", data=data, total_page=total_page)) | zh | 0.855407 | # -*- coding:utf-8 -*- 获取区域信息 # 先查redis # res = json.loads(res) # return self.write(dict(errcode=RET.OK, errmsg="ok!!", data=res)) # 少一次序列化操作 # 继续执行查询数据库 # 存储到redis user_id从session获取而不是前端穿过来更加安全 # 这里能使用self.session 的原因是 @required_login装饰器调用了get_current_user()方法 # 而且get_current_user方法里面动态的添加了属性给BaseHandler() # 时间转为字符串 # 时间转为字符串 拉取房源信息 新增房源信息 方便测试 注释登陆装饰器和xsrf校验 # 测试 # user_id = 11 # 对一个房屋的设施,是列表类型 # 校验 # 存储ih_house_info 基本表 # ret = self.db.execute_rowcount() # 存储配套设置 # 临时列表容器 # 最后转换为元祖 # 2.存到列表里 # sql += "(%s,%s)," # 元素添加 # sql = sql[:-1] # 2.存到列表里取出来拼接sql # 最后转换为元祖 元祖解包 # 元祖解包 # torndb 没有提供事务机制所以必须手动删除 get 方式 对数据本身不会有什么影响不存在安全问题 传入参数说明 start_date 用户查询的起始时间 sd 非必传 "" "2017-02-28" end_date 用户查询的终止时间 ed 非必传 "" area_id 用户查询的区域条件 aid 非必传 "" sort_key 排序的关键词 sk 非必传 "new" "new" "booking" "price-inc" "price-des" page 返回的数据页数 p 非必传 1 # 不设置默认值会报400 # 校验参数 # 数据查询 # 涉及到表: ih_house_info 房屋的基本信息 ih_user_profile 房东的用户信息 ih_order_info 房屋订单数据 出现在order by 后面的必须出现在distinct 里面 房屋基本信息都一样只要distinct 后面不要出现start_date end_date 就好,理解为联合去重 # 存储where 后面的条件语句容器 # 存储参数 # sql_where.append("(not (oi_begin_date<%(end_date)s and oi_end_date>%(start_date)s))") # 排序 # 按最新上传时间排序 # 最受欢迎 # 价格由低到高 # 价格由高到低 # 有了查询条件开始查询数据库 # 先查询总条数 # 类似字典的 # 分页 # limit (page_Index -1)*pagesize, pagesize | 2.400539 | 2 |
Util/feature_extract.py | XueQiangFan/I-RNAsol | 0 | 6614970 | #!/Users/11834/.conda/envs/Pytorch_GPU/python.exe
# -*- coding: UTF-8 -*-
'''=================================================
@Project -> File :RNASolventAccessibility -> feature_extract
@IDE :PyCharm
@Date :2021/5/9 15:54
=================================================='''
import os, torch, random
import numpy as np
from numba import jit
import warnings
warnings.filterwarnings('ignore')
def set_seed(seed=8):
    """Seed every RNG in use (random, numpy, torch CPU and CUDA) for reproducibility."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed):
        seeder(seed)


set_seed()
class OneHotPSFMSSGetWindowPadheadfoot():
    """Builds sliding-window features (one-hot, PSFM, secondary structure)
    for one RNA sequence, padding the window by repeating head/foot rows.

    NOTE(review): a bare numba ``@jit`` on bound methods that do file I/O
    will fall back to object mode (or effectively be a no-op) -- confirm
    the decorators actually help here.
    """

    def __init__(self, nucle_name: str, result_dir, win_size=25):
        super(OneHotPSFMSSGetWindowPadheadfoot, self).__init__()
        self.nucle_name = nucle_name
        # All input files live under result_dir and are named after the sequence.
        self.fa_path = os.path.join(result_dir, nucle_name + ".fasta")
        self.result_dir = result_dir
        self.psfm_path = os.path.join(self.result_dir, self.nucle_name + ".psfm")
        self.ss_path = os.path.join(self.result_dir, self.nucle_name + ".ss")
        self.onehot_path = os.path.join(self.result_dir, self.nucle_name)
        self.win_size = win_size
        self.stride = int(win_size / 2)  # half-window used for padding and centering

    @jit
    def getIthProteinLen(self):
        # The sequence is the second line of the FASTA file.
        seq = np.loadtxt(self.fa_path, dtype=str)[1]
        nucle_length = len(seq)
        return nucle_length

    @jit
    def feature(self):
        """Assemble per-position window features and their left-right mirrors.

        Returns a dict with keys 'fea' and 'fea_reverse' (each a 4-tuple of
        arrays: windowed one-hot, psfm, ss, plus the concatenated per-residue
        matrix) and 'nucleotide' (the sequence name).
        """
        nucle_length = self.getIthProteinLen()
        one_hot = np.loadtxt(self.onehot_path, dtype=float)
        psfm = np.loadtxt(self.psfm_path, dtype=float)
        ss = np.expand_dims(np.loadtxt(self.ss_path, dtype=float), 1)
        # Per-residue concatenation: [one_hot | psfm | ss]
        one_hot_psfm_ss = np.append(one_hot, psfm, axis=1)
        one_hot_psfm_ss = np.append(one_hot_psfm_ss, ss, axis=1)
        # Pad each matrix by repeating its first/last `stride` rows.
        nucle_length, fea_num_one_hot = one_hot.shape
        paddingheader = one_hot[:self.stride, :]
        paddingfooter = one_hot[-self.stride:, :]
        one_hot = np.append(paddingheader, one_hot, axis=0)
        one_hot = np.append(one_hot, paddingfooter, axis=0)
        nucle_length, fea_num_psfm = psfm.shape
        paddingheader = psfm[:self.stride, :]
        paddingfooter = psfm[-self.stride:, :]
        psfm = np.append(paddingheader, psfm, axis=0)
        psfm = np.append(psfm, paddingfooter, axis=0)
        nucle_length, fea_num_ss = ss.shape
        paddingheader = ss[:self.stride, :]
        paddingfooter = ss[-self.stride:, :]
        ss = np.append(paddingheader, ss, axis=0)
        ss = np.append(ss, paddingfooter, axis=0)
        # One flattened window (win_size consecutive rows) per original position.
        feature_one_hot = np.zeros((nucle_length, self.win_size * fea_num_one_hot))
        feature_psfm = np.zeros((nucle_length, self.win_size * fea_num_psfm))
        feature_ss = np.zeros((nucle_length, self.win_size * fea_num_ss))
        feature_one_hot_reverse = np.zeros((nucle_length, self.win_size * fea_num_one_hot))
        feature_psfm_reverse = np.zeros((nucle_length, self.win_size * fea_num_psfm))
        feature_ss_reverse = np.zeros((nucle_length, self.win_size * fea_num_ss))
        for i in range(self.stride, nucle_length + self.stride):
            feature_one_hot[i - self.stride, :] = one_hot[i - self.stride:i + self.stride + 1, :].flatten()
            feature_psfm[i - self.stride, :] = psfm[i - self.stride:i + self.stride + 1, :].flatten()
            feature_ss[i - self.stride, :] = ss[i - self.stride:i + self.stride + 1, :].flatten()
            feature_one_hot_reverse[i - self.stride, :] = one_hot[i - self.stride:i + self.stride + 1, :].flatten()
            feature_psfm_reverse[i - self.stride, :] = psfm[i - self.stride:i + self.stride + 1, :].flatten()
            feature_ss_reverse[i - self.stride, :] = ss[i - self.stride:i + self.stride + 1, :].flatten()
        # The "reverse" variants are the same windows flipped left-to-right.
        feature_one_hot_reverse, feature_psfm_reverse, feature_ss_reverse = np.fliplr(
            feature_one_hot_reverse), np.fliplr(feature_psfm_reverse), np.fliplr(feature_ss_reverse)
        feature_one_hot_reverse, feature_psfm_reverse, feature_ss_reverse = np.ascontiguousarray(
            feature_one_hot_reverse), np.ascontiguousarray(feature_psfm_reverse), np.ascontiguousarray(
            feature_ss_reverse)
        # Column-reversed copy of the concatenated per-residue matrix.
        one_hot_psfm_ss_reverse = np.flip(one_hot_psfm_ss, axis=1)
        one_hot_psfm_ss_reverse = np.ascontiguousarray(one_hot_psfm_ss_reverse)
        sample = {'fea': (feature_one_hot, feature_psfm, feature_ss, one_hot_psfm_ss), 'fea_reverse': (
            feature_one_hot_reverse, feature_psfm_reverse, feature_ss_reverse, one_hot_psfm_ss_reverse),
                  'nucleotide': self.nucle_name}  # construct the dictionary
        return sample

    @jit
    def getIthSampleFea(self):
        # Convenience unpacking of feature()'s dict into a flat tuple.
        sample = self.feature()
        fea = sample['fea']
        fea_reverse = sample['fea_reverse']
        nucleotide = sample['nucleotide']
        return fea, fea_reverse, nucleotide
| #!/Users/11834/.conda/envs/Pytorch_GPU/python.exe
# -*- coding: UTF-8 -*-
'''=================================================
@Project -> File :RNASolventAccessibility -> feature_extract
@IDE :PyCharm
@Date :2021/5/9 15:54
=================================================='''
import os, torch, random
import numpy as np
from numba import jit
import warnings
warnings.filterwarnings('ignore')
def set_seed(seed=8):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
set_seed()
class OneHotPSFMSSGetWindowPadheadfoot():
def __init__(self, nucle_name: str, result_dir, win_size=25):
super(OneHotPSFMSSGetWindowPadheadfoot, self).__init__()
self.nucle_name = nucle_name
self.fa_path = os.path.join(result_dir, nucle_name + ".fasta")
self.result_dir = result_dir
self.psfm_path = os.path.join(self.result_dir, self.nucle_name + ".psfm")
self.ss_path = os.path.join(self.result_dir, self.nucle_name + ".ss")
self.onehot_path = os.path.join(self.result_dir, self.nucle_name)
self.win_size = win_size
self.stride = int(win_size / 2)
@jit
def getIthProteinLen(self):
seq = np.loadtxt(self.fa_path, dtype=str)[1]
nucle_length = len(seq)
return nucle_length
@jit
def feature(self):
nucle_length = self.getIthProteinLen()
one_hot = np.loadtxt(self.onehot_path, dtype=float)
psfm = np.loadtxt(self.psfm_path, dtype=float)
ss = np.expand_dims(np.loadtxt(self.ss_path, dtype=float), 1)
one_hot_psfm_ss = np.append(one_hot, psfm, axis=1)
one_hot_psfm_ss = np.append(one_hot_psfm_ss, ss, axis=1)
nucle_length, fea_num_one_hot = one_hot.shape
paddingheader = one_hot[:self.stride, :]
paddingfooter = one_hot[-self.stride:, :]
one_hot = np.append(paddingheader, one_hot, axis=0)
one_hot = np.append(one_hot, paddingfooter, axis=0)
nucle_length, fea_num_psfm = psfm.shape
paddingheader = psfm[:self.stride, :]
paddingfooter = psfm[-self.stride:, :]
psfm = np.append(paddingheader, psfm, axis=0)
psfm = np.append(psfm, paddingfooter, axis=0)
nucle_length, fea_num_ss = ss.shape
paddingheader = ss[:self.stride, :]
paddingfooter = ss[-self.stride:, :]
ss = np.append(paddingheader, ss, axis=0)
ss = np.append(ss, paddingfooter, axis=0)
feature_one_hot = np.zeros((nucle_length, self.win_size * fea_num_one_hot))
feature_psfm = np.zeros((nucle_length, self.win_size * fea_num_psfm))
feature_ss = np.zeros((nucle_length, self.win_size * fea_num_ss))
feature_one_hot_reverse = np.zeros((nucle_length, self.win_size * fea_num_one_hot))
feature_psfm_reverse = np.zeros((nucle_length, self.win_size * fea_num_psfm))
feature_ss_reverse = np.zeros((nucle_length, self.win_size * fea_num_ss))
for i in range(self.stride, nucle_length + self.stride):
feature_one_hot[i - self.stride, :] = one_hot[i - self.stride:i + self.stride + 1, :].flatten()
feature_psfm[i - self.stride, :] = psfm[i - self.stride:i + self.stride + 1, :].flatten()
feature_ss[i - self.stride, :] = ss[i - self.stride:i + self.stride + 1, :].flatten()
feature_one_hot_reverse[i - self.stride, :] = one_hot[i - self.stride:i + self.stride + 1, :].flatten()
feature_psfm_reverse[i - self.stride, :] = psfm[i - self.stride:i + self.stride + 1, :].flatten()
feature_ss_reverse[i - self.stride, :] = ss[i - self.stride:i + self.stride + 1, :].flatten()
feature_one_hot_reverse, feature_psfm_reverse, feature_ss_reverse = np.fliplr(
feature_one_hot_reverse), np.fliplr(feature_psfm_reverse), np.fliplr(feature_ss_reverse)
feature_one_hot_reverse, feature_psfm_reverse, feature_ss_reverse = np.ascontiguousarray(
feature_one_hot_reverse), np.ascontiguousarray(feature_psfm_reverse), np.ascontiguousarray(
feature_ss_reverse)
one_hot_psfm_ss_reverse = np.flip(one_hot_psfm_ss, axis=1)
one_hot_psfm_ss_reverse = np.ascontiguousarray(one_hot_psfm_ss_reverse)
sample = {'fea': (feature_one_hot, feature_psfm, feature_ss, one_hot_psfm_ss), 'fea_reverse': (
feature_one_hot_reverse, feature_psfm_reverse, feature_ss_reverse, one_hot_psfm_ss_reverse),
'nucleotide': self.nucle_name} # construct the dictionary
return sample
@jit
def getIthSampleFea(self):
sample = self.feature()
fea = sample['fea']
fea_reverse = sample['fea_reverse']
nucleotide = sample['nucleotide']
return fea, fea_reverse, nucleotide
| en | 0.347109 | #!/Users/11834/.conda/envs/Pytorch_GPU/python.exe # -*- coding: UTF-8 -*- ================================================= @Project -> File :RNASolventAccessibility -> feature_extract @IDE :PyCharm @Date :2021/5/9 15:54 ================================================== # construct the dictionary | 2.119788 | 2 |
iprestrict/models.py | TIPvermogensbeheer/django-iprestrict | 30 | 6614971 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from django.db import models
from django.utils import timezone
from django.utils.safestring import mark_safe
try:
from django.urls import reverse
except ImportError:
# Pre Django 2.x
from django.core.urlresolvers import reverse
from . import ip_utils as ipu
from .geoip import get_geoip, NO_COUNTRY
TYPE_LOCATION = 'location'
TYPE_RANGE = 'range'
geoip = get_geoip()
class IPGroupManager(models.Manager):
def get_queryset(self):
qs = super(IPGroupManager, self).get_queryset()
if self.model.TYPE is not None:
return qs.filter(type=self.model.TYPE)
return qs
class IPGroup(models.Model):
TYPE_CHOICES = ((TYPE_LOCATION, 'Location based'),
(TYPE_RANGE, 'Range based'))
TYPE = None
name = models.CharField(max_length=100)
description = models.TextField(null=True, blank=True)
type = models.CharField(max_length=10, default=TYPE_RANGE, choices=TYPE_CHOICES)
class Meta:
verbose_name = 'IP Group'
objects = IPGroupManager()
def __init__(self, *args, **kwargs):
super(IPGroup, self).__init__(*args, **kwargs)
self.load()
def load(self):
pass
def save(self, *args, **kwargs):
if self.TYPE is not None:
self.type = self.TYPE
super(IPGroup, self).save(*args, **kwargs)
def __str__(self):
return self.name
__unicode__ = __str__
def typed_ip_group(ip_group):
obj = None
if ip_group.type == TYPE_RANGE:
obj = RangeBasedIPGroup(pk=ip_group.pk)
elif ip_group.type == TYPE_LOCATION:
obj = LocationBasedIPGroup(pk=ip_group.pk)
else:
raise ValueError("Invalid type '%s'" % ip_group.type)
obj.__dict__.update(ip_group.__dict__)
return obj
class RangeBasedIPGroup(IPGroup):
TYPE = TYPE_RANGE
class Meta:
proxy = True
verbose_name = 'IP Group'
def load_ranges(self):
self._ranges = {ipu.IPv4: [], ipu.IPv6: []}
for r in self.iprange_set.all():
self._ranges[r.ip_type].append(r)
load = load_ranges
def ranges(self, ip_type=None):
if ip_type is None:
return self._ranges[ipu.IPv4] + self._ranges[ipu.IPv6]
return self._ranges[ip_type]
def matches(self, ip):
ip_type = ipu.get_version(ip)
for r in self.ranges(ip_type):
if ip in r:
return True
return False
def details_str(self):
return ', '.join([str(r) for r in self.ranges()])
class LocationBasedIPGroup(IPGroup):
TYPE = TYPE_LOCATION
class Meta:
proxy = True
verbose_name = 'Location Based IP Group'
def load_locations(self):
countries = ", ".join(self.iplocation_set.values_list('country_codes', flat=True)).split(', ')
countries.sort()
self._countries = ', '.join(countries)
load = load_locations
def matches(self, ip):
country_code = geoip.country_code(ip) or NO_COUNTRY
return country_code in self._countries
def details_str(self):
return self._countries
class IPRange(models.Model):
class Meta:
verbose_name = "IP Range"
ip_group = models.ForeignKey(IPGroup, on_delete=models.CASCADE)
first_ip = models.GenericIPAddressField()
cidr_prefix_length = models.PositiveSmallIntegerField(null=True, blank=True)
last_ip = models.GenericIPAddressField(null=True, blank=True)
description = models.CharField(max_length=500, blank=True)
@property
def start(self):
if self.cidr_prefix_length is not None:
start, end = ipu.cidr_to_range(self.first_ip,
self.cidr_prefix_length)
return start
else:
return ipu.to_number(self.first_ip)
@property
def end(self):
if self.last_ip is not None:
return ipu.to_number(self.last_ip)
if self.cidr_prefix_length is not None:
start, end = ipu.cidr_to_range(self.first_ip,
self.cidr_prefix_length)
return end
return self.start
@property
def ip_type(self):
if not self.first_ip:
return ''
return ipu.get_version(self.first_ip)
def __contains__(self, ip):
ip_nr = ipu.to_number(ip)
return self.start <= ip_nr <= self.end
def __str__(self):
result = str(self.first_ip)
if self.cidr_prefix_length is not None:
result += '/' + str(self.cidr_prefix_length)
elif self.last_ip is not None:
result += '-' + str(self.last_ip)
return result
__unicode__ = __str__
class IPLocation(models.Model):
class Meta:
verbose_name = "IP Location"
ip_group = models.ForeignKey(IPGroup, on_delete=models.CASCADE)
country_codes = models.CharField(max_length=2000, help_text='Comma-separated list of 2 character country codes')
def __contains__(self, country_code):
return country_code in re.split(r'[^A-Z]+', self.country_codes)
def __str__(self):
return self.country_codes
__unicode__ = __str__
class Rule(models.Model):
class Meta:
ordering = ['rank', 'id']
ACTION_CHOICES = (
('A', 'ALLOW'),
('D', 'DENY')
)
url_pattern = models.CharField(max_length=500)
ip_group = models.ForeignKey(IPGroup, default=1, on_delete=models.CASCADE)
reverse_ip_group = models.BooleanField(default=False)
action = models.CharField(max_length=1, choices=ACTION_CHOICES, default='D')
rank = models.IntegerField(blank=True)
def __init__(self, *args, **kwargs):
super(Rule, self).__init__(*args, **kwargs)
self.ip_group = typed_ip_group(self.ip_group)
@property
def regex(self):
if not hasattr(self, '_regex'):
self._regex = re.compile(self.url_pattern)
return self._regex
def matches_url(self, url):
if self.url_pattern == 'ALL':
return True
else:
return self.regex.match(url) is not None
def matches_ip(self, ip):
match = typed_ip_group(self.ip_group).matches(ip)
if self.reverse_ip_group:
return not match
return match
def is_restricted(self):
return self.action != 'A'
def is_allowed(self):
return self.action == 'A'
is_allowed.boolean = True
is_allowed.short_description = 'Is allowed?'
def action_str(self):
return 'Allowed' if self.is_allowed() else 'Denied'
def swap_with_rule(self, other):
other.rank, self.rank = self.rank, other.rank
other.save()
self.save()
def move_up(self):
rules_above = Rule.objects.filter(rank__lt=self.rank).order_by('-rank')
if len(rules_above) == 0:
return
self.swap_with_rule(rules_above[0])
def move_up_url(self):
url = reverse('iprestrict:move_rule_up', args=[self.pk])
return mark_safe('<a href="%s">Move Up</a>' % url)
move_up_url.short_description = 'Move Up'
def move_down_url(self):
url = reverse('iprestrict:move_rule_down', args=[self.pk])
return mark_safe('<a href="%s">Move Down</a>' % url)
move_down_url.short_description = 'Move Down'
def move_down(self):
rules_below = Rule.objects.filter(rank__gt=self.rank)
if len(rules_below) == 0:
return
self.swap_with_rule(rules_below[0])
def save(self, *args, **kwargs):
if self.rank is None:
max_aggr = Rule.objects.filter(rank__lt=65000).aggregate(models.Max('rank'))
max_rank = max_aggr.get('rank__max')
if max_rank is None:
max_rank = 0
self.rank = max_rank + 1
super(Rule, self).save(*args, **kwargs)
class ReloadRulesRequest(models.Model):
at = models.DateTimeField(auto_now_add=True)
@classmethod
def request_reload(cls):
rrs = ReloadRulesRequest.objects.all()
if len(rrs) > 0:
obj = rrs[0]
obj.at = timezone.now()
obj.save()
else:
cls.objects.create()
@staticmethod
def last_request():
result = None
rrs = ReloadRulesRequest.objects.all()
if len(rrs) > 0:
result = rrs[0].at
return result
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from django.db import models
from django.utils import timezone
from django.utils.safestring import mark_safe
try:
from django.urls import reverse
except ImportError:
# Pre Django 2.x
from django.core.urlresolvers import reverse
from . import ip_utils as ipu
from .geoip import get_geoip, NO_COUNTRY
TYPE_LOCATION = 'location'
TYPE_RANGE = 'range'
geoip = get_geoip()
class IPGroupManager(models.Manager):
def get_queryset(self):
qs = super(IPGroupManager, self).get_queryset()
if self.model.TYPE is not None:
return qs.filter(type=self.model.TYPE)
return qs
class IPGroup(models.Model):
TYPE_CHOICES = ((TYPE_LOCATION, 'Location based'),
(TYPE_RANGE, 'Range based'))
TYPE = None
name = models.CharField(max_length=100)
description = models.TextField(null=True, blank=True)
type = models.CharField(max_length=10, default=TYPE_RANGE, choices=TYPE_CHOICES)
class Meta:
verbose_name = 'IP Group'
objects = IPGroupManager()
def __init__(self, *args, **kwargs):
super(IPGroup, self).__init__(*args, **kwargs)
self.load()
def load(self):
pass
def save(self, *args, **kwargs):
if self.TYPE is not None:
self.type = self.TYPE
super(IPGroup, self).save(*args, **kwargs)
def __str__(self):
return self.name
__unicode__ = __str__
def typed_ip_group(ip_group):
obj = None
if ip_group.type == TYPE_RANGE:
obj = RangeBasedIPGroup(pk=ip_group.pk)
elif ip_group.type == TYPE_LOCATION:
obj = LocationBasedIPGroup(pk=ip_group.pk)
else:
raise ValueError("Invalid type '%s'" % ip_group.type)
obj.__dict__.update(ip_group.__dict__)
return obj
class RangeBasedIPGroup(IPGroup):
TYPE = TYPE_RANGE
class Meta:
proxy = True
verbose_name = 'IP Group'
def load_ranges(self):
self._ranges = {ipu.IPv4: [], ipu.IPv6: []}
for r in self.iprange_set.all():
self._ranges[r.ip_type].append(r)
load = load_ranges
def ranges(self, ip_type=None):
if ip_type is None:
return self._ranges[ipu.IPv4] + self._ranges[ipu.IPv6]
return self._ranges[ip_type]
def matches(self, ip):
ip_type = ipu.get_version(ip)
for r in self.ranges(ip_type):
if ip in r:
return True
return False
def details_str(self):
return ', '.join([str(r) for r in self.ranges()])
class LocationBasedIPGroup(IPGroup):
TYPE = TYPE_LOCATION
class Meta:
proxy = True
verbose_name = 'Location Based IP Group'
def load_locations(self):
countries = ", ".join(self.iplocation_set.values_list('country_codes', flat=True)).split(', ')
countries.sort()
self._countries = ', '.join(countries)
load = load_locations
def matches(self, ip):
country_code = geoip.country_code(ip) or NO_COUNTRY
return country_code in self._countries
def details_str(self):
return self._countries
class IPRange(models.Model):
class Meta:
verbose_name = "IP Range"
ip_group = models.ForeignKey(IPGroup, on_delete=models.CASCADE)
first_ip = models.GenericIPAddressField()
cidr_prefix_length = models.PositiveSmallIntegerField(null=True, blank=True)
last_ip = models.GenericIPAddressField(null=True, blank=True)
description = models.CharField(max_length=500, blank=True)
@property
def start(self):
if self.cidr_prefix_length is not None:
start, end = ipu.cidr_to_range(self.first_ip,
self.cidr_prefix_length)
return start
else:
return ipu.to_number(self.first_ip)
@property
def end(self):
if self.last_ip is not None:
return ipu.to_number(self.last_ip)
if self.cidr_prefix_length is not None:
start, end = ipu.cidr_to_range(self.first_ip,
self.cidr_prefix_length)
return end
return self.start
@property
def ip_type(self):
if not self.first_ip:
return ''
return ipu.get_version(self.first_ip)
def __contains__(self, ip):
ip_nr = ipu.to_number(ip)
return self.start <= ip_nr <= self.end
def __str__(self):
result = str(self.first_ip)
if self.cidr_prefix_length is not None:
result += '/' + str(self.cidr_prefix_length)
elif self.last_ip is not None:
result += '-' + str(self.last_ip)
return result
__unicode__ = __str__
class IPLocation(models.Model):
class Meta:
verbose_name = "IP Location"
ip_group = models.ForeignKey(IPGroup, on_delete=models.CASCADE)
country_codes = models.CharField(max_length=2000, help_text='Comma-separated list of 2 character country codes')
def __contains__(self, country_code):
return country_code in re.split(r'[^A-Z]+', self.country_codes)
def __str__(self):
return self.country_codes
__unicode__ = __str__
class Rule(models.Model):
class Meta:
ordering = ['rank', 'id']
ACTION_CHOICES = (
('A', 'ALLOW'),
('D', 'DENY')
)
url_pattern = models.CharField(max_length=500)
ip_group = models.ForeignKey(IPGroup, default=1, on_delete=models.CASCADE)
reverse_ip_group = models.BooleanField(default=False)
action = models.CharField(max_length=1, choices=ACTION_CHOICES, default='D')
rank = models.IntegerField(blank=True)
def __init__(self, *args, **kwargs):
super(Rule, self).__init__(*args, **kwargs)
self.ip_group = typed_ip_group(self.ip_group)
@property
def regex(self):
if not hasattr(self, '_regex'):
self._regex = re.compile(self.url_pattern)
return self._regex
def matches_url(self, url):
if self.url_pattern == 'ALL':
return True
else:
return self.regex.match(url) is not None
def matches_ip(self, ip):
match = typed_ip_group(self.ip_group).matches(ip)
if self.reverse_ip_group:
return not match
return match
def is_restricted(self):
return self.action != 'A'
def is_allowed(self):
return self.action == 'A'
is_allowed.boolean = True
is_allowed.short_description = 'Is allowed?'
def action_str(self):
return 'Allowed' if self.is_allowed() else 'Denied'
def swap_with_rule(self, other):
other.rank, self.rank = self.rank, other.rank
other.save()
self.save()
def move_up(self):
rules_above = Rule.objects.filter(rank__lt=self.rank).order_by('-rank')
if len(rules_above) == 0:
return
self.swap_with_rule(rules_above[0])
def move_up_url(self):
url = reverse('iprestrict:move_rule_up', args=[self.pk])
return mark_safe('<a href="%s">Move Up</a>' % url)
move_up_url.short_description = 'Move Up'
def move_down_url(self):
url = reverse('iprestrict:move_rule_down', args=[self.pk])
return mark_safe('<a href="%s">Move Down</a>' % url)
move_down_url.short_description = 'Move Down'
def move_down(self):
rules_below = Rule.objects.filter(rank__gt=self.rank)
if len(rules_below) == 0:
return
self.swap_with_rule(rules_below[0])
def save(self, *args, **kwargs):
if self.rank is None:
max_aggr = Rule.objects.filter(rank__lt=65000).aggregate(models.Max('rank'))
max_rank = max_aggr.get('rank__max')
if max_rank is None:
max_rank = 0
self.rank = max_rank + 1
super(Rule, self).save(*args, **kwargs)
class ReloadRulesRequest(models.Model):
at = models.DateTimeField(auto_now_add=True)
@classmethod
def request_reload(cls):
rrs = ReloadRulesRequest.objects.all()
if len(rrs) > 0:
obj = rrs[0]
obj.at = timezone.now()
obj.save()
else:
cls.objects.create()
@staticmethod
def last_request():
result = None
rrs = ReloadRulesRequest.objects.all()
if len(rrs) > 0:
result = rrs[0].at
return result
| en | 0.682419 | # -*- coding: utf-8 -*- # Pre Django 2.x | 2.160205 | 2 |
CommonTools/RecoUtils/test/pfnopu_jets_cfg.py | ckamtsikis/cmssw | 852 | 6614972 | import FWCore.ParameterSet.Config as cms
process = cms.Process("PFJETS")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:/user/geisler/QCD_Pt-15to3000_Tune2C_Flat_8TeV_pythia8_AODSIM.root'),
)
### conditions
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = 'START53_V11::All'
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(100)
)
### standard includes
process.load('Configuration.Geometry.GeometryPilot2_cff')
process.load("Configuration.StandardSequences.Reconstruction_cff")
process.load("Configuration.StandardSequences.MagneticField_cff")
process.selectedPrimaryVertexQuality = cms.EDFilter("VertexSelector",
src = cms.InputTag('offlinePrimaryVertices'),
cut = cms.string("isValid & ndof >= 4 & chi2 > 0 & tracksSize > 0 & abs(z) < 24 & abs(position.Rho) < 2."),
filter = cms.bool(False),
)
### PFCandidate AssociationMap-specific includes
from CommonTools.RecoUtils.pfcand_assomap_cfi import PFCandAssoMap
process.PFCand2VertexAM = PFCandAssoMap.clone(
VertexCollection = cms.InputTag('selectedPrimaryVertexQuality'),
)
### PFCandidateCollection-specific includes
from CommonTools.RecoUtils.pfcand_nopu_witham_cfi import FirstVertexPFCandidates
process.PFCand = FirstVertexPFCandidates.clone(
VertexPFCandAssociationMap = cms.InputTag('PFCand2VertexAM'),
VertexCollection = cms.InputTag('selectedPrimaryVertexQuality'),
)
### JetProducer-specific includes
from RecoJets.JetProducers.ak5PFJets_cfi import ak5PFJets
process.ak5PFJetsNew = ak5PFJets.clone(
src = cms.InputTag("PFCand","P2V")
#src = cms.InputTag("PFCand","V2P")
)
process.load("JetMETCorrections.Configuration.JetCorrectionServices_cff")
process.load("JetMETCorrections.Configuration.JetCorrectionServicesAllAlgos_cff")
# L2L3 Correction Producers
process.ak5PFJetsNewL23 = cms.EDProducer('PFJetCorrectionProducer',
src = cms.InputTag('ak5PFJetsNew'),
correctors = cms.vstring('ak5PFL2L3')
)
# L1L2L3 Correction Producers
process.ak5PFJetsNewL123 = cms.EDProducer('PFJetCorrectionProducer',
src = cms.InputTag('ak5PFJetsNew'),
correctors = cms.vstring('ak5PFL1L2L3')
)
### paths & sequences
##sequence to produce the collection of pfcand's associated to the first vertex
process.pfc = cms.Sequence(
process.selectedPrimaryVertexQuality
* process.PFCand2VertexAM
* process.PFCand
)
##sequence to produce the jet collections
process.pfjet = cms.Sequence(
process.ak5PFJetsNew
* process.ak5PFJetsNewL23
* process.ak5PFJetsNewL123
)
process.p = cms.Path(
process.pfc
* process.pfjet
)
process.myOutput = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('myOutput.root'),
outputCommands = cms.untracked.vstring('drop *',
'keep *_*_*_PFJETS'),
)
process.e = cms.EndPath( process.myOutput )
| import FWCore.ParameterSet.Config as cms
process = cms.Process("PFJETS")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:/user/geisler/QCD_Pt-15to3000_Tune2C_Flat_8TeV_pythia8_AODSIM.root'),
)
### conditions
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = 'START53_V11::All'
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(100)
)
### standard includes
process.load('Configuration.Geometry.GeometryPilot2_cff')
process.load("Configuration.StandardSequences.Reconstruction_cff")
process.load("Configuration.StandardSequences.MagneticField_cff")
process.selectedPrimaryVertexQuality = cms.EDFilter("VertexSelector",
src = cms.InputTag('offlinePrimaryVertices'),
cut = cms.string("isValid & ndof >= 4 & chi2 > 0 & tracksSize > 0 & abs(z) < 24 & abs(position.Rho) < 2."),
filter = cms.bool(False),
)
### PFCandidate AssociationMap-specific includes
from CommonTools.RecoUtils.pfcand_assomap_cfi import PFCandAssoMap
process.PFCand2VertexAM = PFCandAssoMap.clone(
VertexCollection = cms.InputTag('selectedPrimaryVertexQuality'),
)
### PFCandidateCollection-specific includes
from CommonTools.RecoUtils.pfcand_nopu_witham_cfi import FirstVertexPFCandidates
process.PFCand = FirstVertexPFCandidates.clone(
VertexPFCandAssociationMap = cms.InputTag('PFCand2VertexAM'),
VertexCollection = cms.InputTag('selectedPrimaryVertexQuality'),
)
### JetProducer-specific includes
from RecoJets.JetProducers.ak5PFJets_cfi import ak5PFJets
process.ak5PFJetsNew = ak5PFJets.clone(
src = cms.InputTag("PFCand","P2V")
#src = cms.InputTag("PFCand","V2P")
)
process.load("JetMETCorrections.Configuration.JetCorrectionServices_cff")
process.load("JetMETCorrections.Configuration.JetCorrectionServicesAllAlgos_cff")
# L2L3 Correction Producers
process.ak5PFJetsNewL23 = cms.EDProducer('PFJetCorrectionProducer',
src = cms.InputTag('ak5PFJetsNew'),
correctors = cms.vstring('ak5PFL2L3')
)
# L1L2L3 Correction Producers
process.ak5PFJetsNewL123 = cms.EDProducer('PFJetCorrectionProducer',
src = cms.InputTag('ak5PFJetsNew'),
correctors = cms.vstring('ak5PFL1L2L3')
)
### paths & sequences
##sequence to produce the collection of pfcand's associated to the first vertex
process.pfc = cms.Sequence(
process.selectedPrimaryVertexQuality
* process.PFCand2VertexAM
* process.PFCand
)
##sequence to produce the jet collections
process.pfjet = cms.Sequence(
process.ak5PFJetsNew
* process.ak5PFJetsNewL23
* process.ak5PFJetsNewL123
)
process.p = cms.Path(
process.pfc
* process.pfjet
)
process.myOutput = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('myOutput.root'),
outputCommands = cms.untracked.vstring('drop *',
'keep *_*_*_PFJETS'),
)
process.e = cms.EndPath( process.myOutput )
| en | 0.820001 | ### conditions ### standard includes ### PFCandidate AssociationMap-specific includes ### PFCandidateCollection-specific includes ### JetProducer-specific includes #src = cms.InputTag("PFCand","V2P") # L2L3 Correction Producers # L1L2L3 Correction Producers ### paths & sequences ##sequence to produce the collection of pfcand's associated to the first vertex ##sequence to produce the jet collections | 1.515642 | 2 |
tests/test_txt_renderer/test_calendar/test_header/test_month_row.py | chabErch/Linum | 0 | 6614973 | from datetime import date
from unittest import TestCase
from linum.txt_renderer.calendar.header.month.months_row import MonthsRow
class TestMonthRow(TestCase):
def test_render(self):
# Рендер одного месяца
mr = MonthsRow(date(2020, 1, 31), 1)
self.assertEqual('Jan…', mr.render())
# Рендер на границе двух месяцев
mr = MonthsRow(date(2020, 1, 31), 2)
self.assertEqual('Jan…Feb…', mr.render())
# Расширенный рендер на границе месяцев
mr = MonthsRow(date(2020, 1, 30), 4)
self.assertEqual('January…Februar…', mr.render())
# Расширенный рендер на границе месяцев с внутренними границами
mr = MonthsRow(date(2020, 1, 30), 4)
mr.inner_borders = True
self.assertEqual('January …│February…', mr.render())
# Расширенный рендер на границе месяцев с границами между месяцами
mr = MonthsRow(date(2020, 1, 30), 4)
mr.month_inner_borders = True
self.assertEqual('January…│Februar…', mr.render())
# Расширенный рендер на границе месяцев с левой границей
mr = MonthsRow(date(2020, 1, 30), 4)
mr.left_border = True
self.assertEqual('│January…Februar…', mr.render())
# Расширенный рендер на границе месяцев с правой границей
mr = MonthsRow(date(2020, 1, 30), 4)
mr.right_border = True
self.assertEqual('January…Februar…│', mr.render())
| from datetime import date
from unittest import TestCase
from linum.txt_renderer.calendar.header.month.months_row import MonthsRow
class TestMonthRow(TestCase):
def test_render(self):
# Рендер одного месяца
mr = MonthsRow(date(2020, 1, 31), 1)
self.assertEqual('Jan…', mr.render())
# Рендер на границе двух месяцев
mr = MonthsRow(date(2020, 1, 31), 2)
self.assertEqual('Jan…Feb…', mr.render())
# Расширенный рендер на границе месяцев
mr = MonthsRow(date(2020, 1, 30), 4)
self.assertEqual('January…Februar…', mr.render())
# Расширенный рендер на границе месяцев с внутренними границами
mr = MonthsRow(date(2020, 1, 30), 4)
mr.inner_borders = True
self.assertEqual('January …│February…', mr.render())
# Расширенный рендер на границе месяцев с границами между месяцами
mr = MonthsRow(date(2020, 1, 30), 4)
mr.month_inner_borders = True
self.assertEqual('January…│Februar…', mr.render())
# Расширенный рендер на границе месяцев с левой границей
mr = MonthsRow(date(2020, 1, 30), 4)
mr.left_border = True
self.assertEqual('│January…Februar…', mr.render())
# Расширенный рендер на границе месяцев с правой границей
mr = MonthsRow(date(2020, 1, 30), 4)
mr.right_border = True
self.assertEqual('January…Februar…│', mr.render())
| ru | 0.991152 | # Рендер одного месяца # Рендер на границе двух месяцев # Расширенный рендер на границе месяцев # Расширенный рендер на границе месяцев с внутренними границами # Расширенный рендер на границе месяцев с границами между месяцами # Расширенный рендер на границе месяцев с левой границей # Расширенный рендер на границе месяцев с правой границей | 2.977377 | 3 |
postman_repl/postman_repl_test.py | johnnadratowski/postman-repl | 4 | 6614974 | #!/usr/bin/python
"""
Tests for postman repl
"""
import unittest
import urllib
import postman_repl as pmr
import json
class TestO(unittest.TestCase):
def test_init(self):
test = pmr.O(x=1, y=2)
self.assertDictEqual(test.__dict__, {'x': 1, 'y': 2})
def test_get(self):
test = pmr.O(x=1, y=2)
self.assertEqual(test["x"], 1)
self.assertEqual(test.x, 1)
def test_set(self):
test = pmr.O(x=1, y=2)
test.x = 2
self.assertEqual(test["x"], 2)
self.assertEqual(test.x, 2)
def test_del(self):
test = pmr.O(x=1, y=2)
del test.x
self.assertEqual(test.x, None)
self.assertDictEqual(test.__dict__, {'y': 2})
def test_iter(self):
test = pmr.O(x=1, y=2)
for k in test:
self.assertTrue(k == "x" or k == "y")
self.assertTrue(test[k] == 1 or test[k] == 2)
def test_todict(self):
test = pmr.O(x=1, y=2)
self.assertDictEqual(test.__dict__, {'x': 1, 'y': 2})
def test_todict_recursive(self):
test = pmr.O(x=1, y=2, z=pmr.O(x=1, y=2))
self.assertDictEqual(test._to_dict_recursive(),
{'x': 1, 'y': 2, 'z': {'x': 1, 'y': 2}})
def test_tojson(self):
test = pmr.O(x=1, y=2, z=pmr.O(x=1, y=2))
self.assertDictEqual(json.loads(test._to_json()),
{'x': 1, 'y': 2, 'z': {'x': 1, 'y': 2}})
def test_new_recursive(self):
expect = pmr.O(x=1, y=2, z=pmr.O(x=1, y=2))
test = {'x': 1, 'y': 2, 'z': {'x': 1, 'y': 2}}
test = pmr.new_recursive(**test)
self.assertEqual(test._to_dict_recursive(), expect._to_dict_recursive())
def test_new_recursive_list(self):
expect = [
{'x': 1, 'y': 2, 'z': {'x': 1, 'y': 2}},
{'x': 1, 'y': 2, 'z': {'x': 1, 'y': 2}}]
test = [
pmr.O(
x=1, y=2, z=pmr.O(
x=1, y=2)), pmr.O(
x=1, y=2, z=pmr.O(
x=1, y=2))]
test = pmr.new_recursive_list(*test)
self.assertListEqual([x._to_dict_recursive() for x in test], expect)
class TestPostmanRepl(unittest.TestCase):
def setUp(self):
self.coll_file = "../examples/JIRA.json.postman_collection"
self.env_file = "../examples/test.env"
self.mw_file = "../examples/middleware.py"
self.collection = pmr.load_collection(self.coll_file)
self.env = pmr.load_environment(self.env_file)
self.mw = pmr.load_middleware(self.mw_file)
def tearDown(self):
pmr.H.history = []
pmr.R = None
pmr.J = None
pmr.D = None
pmr.MW = pmr.O()
pmr.E = pmr.O()
pmr.P = None
def test_load_collection(self):
self.assertTrue("sprints" in self.collection)
self.assertTrue("META" in self.collection["sprints"])
self.assertTrue("rapidview" in self.collection["sprints"])
self.assertTrue("sprint" in self.collection["sprints"])
self.assertTrue("sprint_issues" in self.collection["sprints"])
self.assertTrue("users" in self.collection)
self.assertTrue("META" in self.collection["users"])
self.assertTrue("search_username" in self.collection["users"])
def test_load_environment(self):
self.assertDictEqual(self.env._to_dict_recursive(), {
"host": "localhost",
"protocol": "http",
"port": "8081",
"username": "user",
"password": "password",
})
def test_middleware(self):
called = [False]
def middleware(run, kwargs, env):
called[0] = True
middlewares = pmr.O(sprints_sprint=middleware)
self.collection["sprints"]["sprint"](env=self.env, middlewares=middlewares)
self.assertTrue(called[0])
def test_get_default_data(self):
expect = {'password': '', 'username': ''}
test = self.collection["sprints"]["sprint"].default_data().__dict__
self.assertDictEqual(expect, test)
def test_call(self):
called = [False, False, False]
i = [0]
def middleware(run, kwargs, env):
called[i[0]] = True
i[0] += 1
middlewares = pmr.O(sprints_sprint=middleware,
sprints_sprint_issues=middleware,
sprints_rapidview=middleware)
self.collection["sprints"]["sprint"](params={'includeHistoricSprints': 'false'}, env=self.env, middlewares=middlewares)
self.collection["sprints"]["sprint_issues"](env=self.env, middlewares=middlewares)
self.collection["sprints"]["rapidview"](env=self.env, middlewares=middlewares)
self.assertTrue(all(called))
url = urllib.parse.urlparse(pmr.H.history[0].url)
self.assertEqual(url.path, "/rest/greenhopper/latest/sprintquery/")
self.assertDictEqual(urllib.parse.parse_qs(url.query), {'includeHistoricSprints': ['false'], 'includeFutureSprints': ['true']})
self.assertEqual(pmr.H.history[1].url, "https://unified.jira.com/rest/greenhopper/latest/rapid/charts/sprintreport")
self.assertEqual(pmr.H.history[2].url, "https://unified.jira.com/rest/greenhopper/latest/rapidviews/list")
def test_history(self):
called = [False, False, False, False]
i = [0]
def middleware(run, kwargs, env):
called[i[0]] = True
i[0] += 1
middlewares = pmr.O(sprints_sprint=middleware,
sprints_sprint_issues=middleware,
sprints_rapidview=middleware)
self.collection["sprints"]["sprint"](env=self.env, middlewares=middlewares)
self.collection["sprints"]["sprint_issues"](env=self.env, middlewares=middlewares)
self.collection["sprints"]["rapidview"](env=self.env, middlewares=middlewares)
self.assertEqual(len(pmr.H.history), 3)
pmr.H(0)
self.assertTrue(all(called))
def test_help(self):
expect = """Sprints / Sprint:
GET https://unified.jira.com/rest/greenhopper/latest/sprintquery/{{rapidViewId}}?includeHistoricSprints=true&includeFutureSprints=true
Get a specific sprint from Jira
Default Headers:
Authorization: Basic am9objphbkdlbDgz
Default Data:
{
"username": "{{username}}",
"password": "{{password}}"
}"""
self.assertEqual(expect, self.collection["sprints"]["sprint"].__doc__)
if __name__ == '__main__':
unittest.main()
| #!/usr/bin/python
"""
Tests for postman repl
"""
import unittest
import urllib
import postman_repl as pmr
import json
class TestO(unittest.TestCase):
def test_init(self):
test = pmr.O(x=1, y=2)
self.assertDictEqual(test.__dict__, {'x': 1, 'y': 2})
def test_get(self):
test = pmr.O(x=1, y=2)
self.assertEqual(test["x"], 1)
self.assertEqual(test.x, 1)
def test_set(self):
test = pmr.O(x=1, y=2)
test.x = 2
self.assertEqual(test["x"], 2)
self.assertEqual(test.x, 2)
def test_del(self):
test = pmr.O(x=1, y=2)
del test.x
self.assertEqual(test.x, None)
self.assertDictEqual(test.__dict__, {'y': 2})
def test_iter(self):
test = pmr.O(x=1, y=2)
for k in test:
self.assertTrue(k == "x" or k == "y")
self.assertTrue(test[k] == 1 or test[k] == 2)
def test_todict(self):
test = pmr.O(x=1, y=2)
self.assertDictEqual(test.__dict__, {'x': 1, 'y': 2})
def test_todict_recursive(self):
test = pmr.O(x=1, y=2, z=pmr.O(x=1, y=2))
self.assertDictEqual(test._to_dict_recursive(),
{'x': 1, 'y': 2, 'z': {'x': 1, 'y': 2}})
def test_tojson(self):
test = pmr.O(x=1, y=2, z=pmr.O(x=1, y=2))
self.assertDictEqual(json.loads(test._to_json()),
{'x': 1, 'y': 2, 'z': {'x': 1, 'y': 2}})
def test_new_recursive(self):
expect = pmr.O(x=1, y=2, z=pmr.O(x=1, y=2))
test = {'x': 1, 'y': 2, 'z': {'x': 1, 'y': 2}}
test = pmr.new_recursive(**test)
self.assertEqual(test._to_dict_recursive(), expect._to_dict_recursive())
def test_new_recursive_list(self):
expect = [
{'x': 1, 'y': 2, 'z': {'x': 1, 'y': 2}},
{'x': 1, 'y': 2, 'z': {'x': 1, 'y': 2}}]
test = [
pmr.O(
x=1, y=2, z=pmr.O(
x=1, y=2)), pmr.O(
x=1, y=2, z=pmr.O(
x=1, y=2))]
test = pmr.new_recursive_list(*test)
self.assertListEqual([x._to_dict_recursive() for x in test], expect)
class TestPostmanRepl(unittest.TestCase):
def setUp(self):
self.coll_file = "../examples/JIRA.json.postman_collection"
self.env_file = "../examples/test.env"
self.mw_file = "../examples/middleware.py"
self.collection = pmr.load_collection(self.coll_file)
self.env = pmr.load_environment(self.env_file)
self.mw = pmr.load_middleware(self.mw_file)
def tearDown(self):
pmr.H.history = []
pmr.R = None
pmr.J = None
pmr.D = None
pmr.MW = pmr.O()
pmr.E = pmr.O()
pmr.P = None
def test_load_collection(self):
self.assertTrue("sprints" in self.collection)
self.assertTrue("META" in self.collection["sprints"])
self.assertTrue("rapidview" in self.collection["sprints"])
self.assertTrue("sprint" in self.collection["sprints"])
self.assertTrue("sprint_issues" in self.collection["sprints"])
self.assertTrue("users" in self.collection)
self.assertTrue("META" in self.collection["users"])
self.assertTrue("search_username" in self.collection["users"])
def test_load_environment(self):
self.assertDictEqual(self.env._to_dict_recursive(), {
"host": "localhost",
"protocol": "http",
"port": "8081",
"username": "user",
"password": "password",
})
def test_middleware(self):
called = [False]
def middleware(run, kwargs, env):
called[0] = True
middlewares = pmr.O(sprints_sprint=middleware)
self.collection["sprints"]["sprint"](env=self.env, middlewares=middlewares)
self.assertTrue(called[0])
def test_get_default_data(self):
expect = {'password': '', 'username': ''}
test = self.collection["sprints"]["sprint"].default_data().__dict__
self.assertDictEqual(expect, test)
def test_call(self):
called = [False, False, False]
i = [0]
def middleware(run, kwargs, env):
called[i[0]] = True
i[0] += 1
middlewares = pmr.O(sprints_sprint=middleware,
sprints_sprint_issues=middleware,
sprints_rapidview=middleware)
self.collection["sprints"]["sprint"](params={'includeHistoricSprints': 'false'}, env=self.env, middlewares=middlewares)
self.collection["sprints"]["sprint_issues"](env=self.env, middlewares=middlewares)
self.collection["sprints"]["rapidview"](env=self.env, middlewares=middlewares)
self.assertTrue(all(called))
url = urllib.parse.urlparse(pmr.H.history[0].url)
self.assertEqual(url.path, "/rest/greenhopper/latest/sprintquery/")
self.assertDictEqual(urllib.parse.parse_qs(url.query), {'includeHistoricSprints': ['false'], 'includeFutureSprints': ['true']})
self.assertEqual(pmr.H.history[1].url, "https://unified.jira.com/rest/greenhopper/latest/rapid/charts/sprintreport")
self.assertEqual(pmr.H.history[2].url, "https://unified.jira.com/rest/greenhopper/latest/rapidviews/list")
def test_history(self):
called = [False, False, False, False]
i = [0]
def middleware(run, kwargs, env):
called[i[0]] = True
i[0] += 1
middlewares = pmr.O(sprints_sprint=middleware,
sprints_sprint_issues=middleware,
sprints_rapidview=middleware)
self.collection["sprints"]["sprint"](env=self.env, middlewares=middlewares)
self.collection["sprints"]["sprint_issues"](env=self.env, middlewares=middlewares)
self.collection["sprints"]["rapidview"](env=self.env, middlewares=middlewares)
self.assertEqual(len(pmr.H.history), 3)
pmr.H(0)
self.assertTrue(all(called))
def test_help(self):
expect = """Sprints / Sprint:
GET https://unified.jira.com/rest/greenhopper/latest/sprintquery/{{rapidViewId}}?includeHistoricSprints=true&includeFutureSprints=true
Get a specific sprint from Jira
Default Headers:
Authorization: Basic am9objphbkdlbDgz
Default Data:
{
"username": "{{username}}",
"password": "{{password}}"
}"""
self.assertEqual(expect, self.collection["sprints"]["sprint"].__doc__)
if __name__ == '__main__':
unittest.main()
| en | 0.634399 | #!/usr/bin/python Tests for postman repl Sprints / Sprint: GET https://unified.jira.com/rest/greenhopper/latest/sprintquery/{{rapidViewId}}?includeHistoricSprints=true&includeFutureSprints=true Get a specific sprint from Jira Default Headers: Authorization: Basic am9objphbkdlbDgz Default Data: { "username": "{{username}}", "password": "{{password}}" } | 3.067402 | 3 |
src/LPB/LPBGenerator.py | CptSpookz/LPB | 1 | 6614975 | <filename>src/LPB/LPBGenerator.py
import json
class ImageGenerator:
def __init__(self, symbols, path):
self.filename = path
self.symbols = json.loads(symbols)
def generate(self):
self.generate_js()
self.generate_html()
def generate_js(self):
path = self.filename + ".js"
andares = f"let andares = {self.symbols['imovel']['symbols']['n_andares']};\n"
blocos = f"let blocos = {self.symbols['imovel']['symbols']['n_blocos']};\n"
content = """function setup() {
width = windowWidth;
height = windowHeight;
canvas = createCanvas(width, height);
canvas.position(0, 0);
canvas.style('z-index', '-1');
}
function draw() {
let y = 100;
let dy = 30;
let larguraBloco = 500;
let alturaBloco = 500;
for(let i = 0; i < andares; i++){
let x = 100;
for(let j = 0; j < blocos; j++){
let x_ = x;
// desenha
rect(x, y, larguraBloco, alturaBloco);
x += larguraBloco;
}
y += alturaBloco + dy;
}
}\n"""
content = f"{andares}{blocos}{content}"
with open(path, "w") as f:
f.write(content)
def generate_html(self):
path = "index.html"
content = """<!DOCTYPE html>
<html>
<head>
<script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/0.7.2/p5.min.js"> </script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/0.7.2/addons/p5.dom.min.js"> </script>
<script src="{}.js"> </script>
</head>
<body>
</body>
</html>""".format(self.filename)
with open(path, "w") as f:
f.write(content)
| <filename>src/LPB/LPBGenerator.py
import json
class ImageGenerator:
def __init__(self, symbols, path):
self.filename = path
self.symbols = json.loads(symbols)
def generate(self):
self.generate_js()
self.generate_html()
def generate_js(self):
path = self.filename + ".js"
andares = f"let andares = {self.symbols['imovel']['symbols']['n_andares']};\n"
blocos = f"let blocos = {self.symbols['imovel']['symbols']['n_blocos']};\n"
content = """function setup() {
width = windowWidth;
height = windowHeight;
canvas = createCanvas(width, height);
canvas.position(0, 0);
canvas.style('z-index', '-1');
}
function draw() {
let y = 100;
let dy = 30;
let larguraBloco = 500;
let alturaBloco = 500;
for(let i = 0; i < andares; i++){
let x = 100;
for(let j = 0; j < blocos; j++){
let x_ = x;
// desenha
rect(x, y, larguraBloco, alturaBloco);
x += larguraBloco;
}
y += alturaBloco + dy;
}
}\n"""
content = f"{andares}{blocos}{content}"
with open(path, "w") as f:
f.write(content)
def generate_html(self):
path = "index.html"
content = """<!DOCTYPE html>
<html>
<head>
<script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/0.7.2/p5.min.js"> </script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/0.7.2/addons/p5.dom.min.js"> </script>
<script src="{}.js"> </script>
</head>
<body>
</body>
</html>""".format(self.filename)
with open(path, "w") as f:
f.write(content)
| en | 0.299449 | function setup() { width = windowWidth; height = windowHeight; canvas = createCanvas(width, height); canvas.position(0, 0); canvas.style('z-index', '-1'); } function draw() { let y = 100; let dy = 30; let larguraBloco = 500; let alturaBloco = 500; for(let i = 0; i < andares; i++){ let x = 100; for(let j = 0; j < blocos; j++){ let x_ = x; // desenha rect(x, y, larguraBloco, alturaBloco); x += larguraBloco; } y += alturaBloco + dy; } }\n <!DOCTYPE html> <html> <head> <script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/0.7.2/p5.min.js"> </script> <script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/0.7.2/addons/p5.dom.min.js"> </script> <script src="{}.js"> </script> </head> <body> </body> </html> | 2.832745 | 3 |
sdk/python/pulumi_exoscale/secondary_ip_address.py | secustor/pulumi-exoscale | 0 | 6614976 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['SecondaryIPAddressArgs', 'SecondaryIPAddress']
@pulumi.input_type
class SecondaryIPAddressArgs:
def __init__(__self__, *,
compute_id: pulumi.Input[str],
ip_address: pulumi.Input[str]):
"""
The set of arguments for constructing a SecondaryIPAddress resource.
:param pulumi.Input[str] compute_id: The ID of the [Compute instance][r-compute].
:param pulumi.Input[str] ip_address: The [Elastic IP][r-ipaddress] address to assign.
"""
pulumi.set(__self__, "compute_id", compute_id)
pulumi.set(__self__, "ip_address", ip_address)
@property
@pulumi.getter(name="computeId")
def compute_id(self) -> pulumi.Input[str]:
"""
The ID of the [Compute instance][r-compute].
"""
return pulumi.get(self, "compute_id")
@compute_id.setter
def compute_id(self, value: pulumi.Input[str]):
pulumi.set(self, "compute_id", value)
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> pulumi.Input[str]:
"""
The [Elastic IP][r-ipaddress] address to assign.
"""
return pulumi.get(self, "ip_address")
@ip_address.setter
def ip_address(self, value: pulumi.Input[str]):
pulumi.set(self, "ip_address", value)
@pulumi.input_type
class _SecondaryIPAddressState:
def __init__(__self__, *,
compute_id: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
network_id: Optional[pulumi.Input[str]] = None,
nic_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering SecondaryIPAddress resources.
:param pulumi.Input[str] compute_id: The ID of the [Compute instance][r-compute].
:param pulumi.Input[str] ip_address: The [Elastic IP][r-ipaddress] address to assign.
:param pulumi.Input[str] network_id: The ID of the Network the Compute instance NIC is attached to.
:param pulumi.Input[str] nic_id: The ID of the NIC.
"""
if compute_id is not None:
pulumi.set(__self__, "compute_id", compute_id)
if ip_address is not None:
pulumi.set(__self__, "ip_address", ip_address)
if network_id is not None:
pulumi.set(__self__, "network_id", network_id)
if nic_id is not None:
pulumi.set(__self__, "nic_id", nic_id)
@property
@pulumi.getter(name="computeId")
def compute_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the [Compute instance][r-compute].
"""
return pulumi.get(self, "compute_id")
@compute_id.setter
def compute_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "compute_id", value)
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> Optional[pulumi.Input[str]]:
"""
The [Elastic IP][r-ipaddress] address to assign.
"""
return pulumi.get(self, "ip_address")
@ip_address.setter
def ip_address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ip_address", value)
@property
@pulumi.getter(name="networkId")
def network_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Network the Compute instance NIC is attached to.
"""
return pulumi.get(self, "network_id")
@network_id.setter
def network_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "network_id", value)
@property
@pulumi.getter(name="nicId")
def nic_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the NIC.
"""
return pulumi.get(self, "nic_id")
@nic_id.setter
def nic_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "nic_id", value)
class SecondaryIPAddress(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
compute_id: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a resource for assigning an existing Exoscale [Elastic IP][r-ipaddress] to a [Compute instance][r-compute].
> **NOTE:** The network interfaces of the Compute instance itself still have to be configured accordingly (unless using a *managed* Elastic IP).
!> **WARNING:** This resource is deprecated and will be removed in the next major version.
## Import
This resource is automatically imported when importing an `exoscale_compute` resource. [r-compute]compute.html [r-ipaddress]ipaddress.html
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] compute_id: The ID of the [Compute instance][r-compute].
:param pulumi.Input[str] ip_address: The [Elastic IP][r-ipaddress] address to assign.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SecondaryIPAddressArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a resource for assigning an existing Exoscale [Elastic IP][r-ipaddress] to a [Compute instance][r-compute].
> **NOTE:** The network interfaces of the Compute instance itself still have to be configured accordingly (unless using a *managed* Elastic IP).
!> **WARNING:** This resource is deprecated and will be removed in the next major version.
## Import
This resource is automatically imported when importing an `exoscale_compute` resource. [r-compute]compute.html [r-ipaddress]ipaddress.html
:param str resource_name: The name of the resource.
:param SecondaryIPAddressArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SecondaryIPAddressArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
compute_id: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SecondaryIPAddressArgs.__new__(SecondaryIPAddressArgs)
if compute_id is None and not opts.urn:
raise TypeError("Missing required property 'compute_id'")
__props__.__dict__["compute_id"] = compute_id
if ip_address is None and not opts.urn:
raise TypeError("Missing required property 'ip_address'")
__props__.__dict__["ip_address"] = ip_address
__props__.__dict__["network_id"] = None
__props__.__dict__["nic_id"] = None
super(SecondaryIPAddress, __self__).__init__(
'exoscale:index/secondaryIPAddress:SecondaryIPAddress',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
compute_id: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
network_id: Optional[pulumi.Input[str]] = None,
nic_id: Optional[pulumi.Input[str]] = None) -> 'SecondaryIPAddress':
"""
Get an existing SecondaryIPAddress resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] compute_id: The ID of the [Compute instance][r-compute].
:param pulumi.Input[str] ip_address: The [Elastic IP][r-ipaddress] address to assign.
:param pulumi.Input[str] network_id: The ID of the Network the Compute instance NIC is attached to.
:param pulumi.Input[str] nic_id: The ID of the NIC.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _SecondaryIPAddressState.__new__(_SecondaryIPAddressState)
__props__.__dict__["compute_id"] = compute_id
__props__.__dict__["ip_address"] = ip_address
__props__.__dict__["network_id"] = network_id
__props__.__dict__["nic_id"] = nic_id
return SecondaryIPAddress(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="computeId")
def compute_id(self) -> pulumi.Output[str]:
"""
The ID of the [Compute instance][r-compute].
"""
return pulumi.get(self, "compute_id")
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> pulumi.Output[str]:
"""
The [Elastic IP][r-ipaddress] address to assign.
"""
return pulumi.get(self, "ip_address")
@property
@pulumi.getter(name="networkId")
def network_id(self) -> pulumi.Output[str]:
"""
The ID of the Network the Compute instance NIC is attached to.
"""
return pulumi.get(self, "network_id")
@property
@pulumi.getter(name="nicId")
def nic_id(self) -> pulumi.Output[str]:
"""
The ID of the NIC.
"""
return pulumi.get(self, "nic_id")
| # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['SecondaryIPAddressArgs', 'SecondaryIPAddress']
@pulumi.input_type
class SecondaryIPAddressArgs:
def __init__(__self__, *,
compute_id: pulumi.Input[str],
ip_address: pulumi.Input[str]):
"""
The set of arguments for constructing a SecondaryIPAddress resource.
:param pulumi.Input[str] compute_id: The ID of the [Compute instance][r-compute].
:param pulumi.Input[str] ip_address: The [Elastic IP][r-ipaddress] address to assign.
"""
pulumi.set(__self__, "compute_id", compute_id)
pulumi.set(__self__, "ip_address", ip_address)
@property
@pulumi.getter(name="computeId")
def compute_id(self) -> pulumi.Input[str]:
"""
The ID of the [Compute instance][r-compute].
"""
return pulumi.get(self, "compute_id")
@compute_id.setter
def compute_id(self, value: pulumi.Input[str]):
pulumi.set(self, "compute_id", value)
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> pulumi.Input[str]:
"""
The [Elastic IP][r-ipaddress] address to assign.
"""
return pulumi.get(self, "ip_address")
@ip_address.setter
def ip_address(self, value: pulumi.Input[str]):
pulumi.set(self, "ip_address", value)
@pulumi.input_type
class _SecondaryIPAddressState:
def __init__(__self__, *,
compute_id: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
network_id: Optional[pulumi.Input[str]] = None,
nic_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering SecondaryIPAddress resources.
:param pulumi.Input[str] compute_id: The ID of the [Compute instance][r-compute].
:param pulumi.Input[str] ip_address: The [Elastic IP][r-ipaddress] address to assign.
:param pulumi.Input[str] network_id: The ID of the Network the Compute instance NIC is attached to.
:param pulumi.Input[str] nic_id: The ID of the NIC.
"""
if compute_id is not None:
pulumi.set(__self__, "compute_id", compute_id)
if ip_address is not None:
pulumi.set(__self__, "ip_address", ip_address)
if network_id is not None:
pulumi.set(__self__, "network_id", network_id)
if nic_id is not None:
pulumi.set(__self__, "nic_id", nic_id)
@property
@pulumi.getter(name="computeId")
def compute_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the [Compute instance][r-compute].
"""
return pulumi.get(self, "compute_id")
@compute_id.setter
def compute_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "compute_id", value)
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> Optional[pulumi.Input[str]]:
"""
The [Elastic IP][r-ipaddress] address to assign.
"""
return pulumi.get(self, "ip_address")
@ip_address.setter
def ip_address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ip_address", value)
@property
@pulumi.getter(name="networkId")
def network_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Network the Compute instance NIC is attached to.
"""
return pulumi.get(self, "network_id")
@network_id.setter
def network_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "network_id", value)
@property
@pulumi.getter(name="nicId")
def nic_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the NIC.
"""
return pulumi.get(self, "nic_id")
@nic_id.setter
def nic_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "nic_id", value)
class SecondaryIPAddress(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
compute_id: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a resource for assigning an existing Exoscale [Elastic IP][r-ipaddress] to a [Compute instance][r-compute].
> **NOTE:** The network interfaces of the Compute instance itself still have to be configured accordingly (unless using a *managed* Elastic IP).
!> **WARNING:** This resource is deprecated and will be removed in the next major version.
## Import
This resource is automatically imported when importing an `exoscale_compute` resource. [r-compute]compute.html [r-ipaddress]ipaddress.html
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] compute_id: The ID of the [Compute instance][r-compute].
:param pulumi.Input[str] ip_address: The [Elastic IP][r-ipaddress] address to assign.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SecondaryIPAddressArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a resource for assigning an existing Exoscale [Elastic IP][r-ipaddress] to a [Compute instance][r-compute].
> **NOTE:** The network interfaces of the Compute instance itself still have to be configured accordingly (unless using a *managed* Elastic IP).
!> **WARNING:** This resource is deprecated and will be removed in the next major version.
## Import
This resource is automatically imported when importing an `exoscale_compute` resource. [r-compute]compute.html [r-ipaddress]ipaddress.html
:param str resource_name: The name of the resource.
:param SecondaryIPAddressArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SecondaryIPAddressArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
compute_id: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SecondaryIPAddressArgs.__new__(SecondaryIPAddressArgs)
if compute_id is None and not opts.urn:
raise TypeError("Missing required property 'compute_id'")
__props__.__dict__["compute_id"] = compute_id
if ip_address is None and not opts.urn:
raise TypeError("Missing required property 'ip_address'")
__props__.__dict__["ip_address"] = ip_address
__props__.__dict__["network_id"] = None
__props__.__dict__["nic_id"] = None
super(SecondaryIPAddress, __self__).__init__(
'exoscale:index/secondaryIPAddress:SecondaryIPAddress',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
compute_id: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
network_id: Optional[pulumi.Input[str]] = None,
nic_id: Optional[pulumi.Input[str]] = None) -> 'SecondaryIPAddress':
"""
Get an existing SecondaryIPAddress resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] compute_id: The ID of the [Compute instance][r-compute].
:param pulumi.Input[str] ip_address: The [Elastic IP][r-ipaddress] address to assign.
:param pulumi.Input[str] network_id: The ID of the Network the Compute instance NIC is attached to.
:param pulumi.Input[str] nic_id: The ID of the NIC.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _SecondaryIPAddressState.__new__(_SecondaryIPAddressState)
__props__.__dict__["compute_id"] = compute_id
__props__.__dict__["ip_address"] = ip_address
__props__.__dict__["network_id"] = network_id
__props__.__dict__["nic_id"] = nic_id
return SecondaryIPAddress(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="computeId")
def compute_id(self) -> pulumi.Output[str]:
"""
The ID of the [Compute instance][r-compute].
"""
return pulumi.get(self, "compute_id")
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> pulumi.Output[str]:
"""
The [Elastic IP][r-ipaddress] address to assign.
"""
return pulumi.get(self, "ip_address")
@property
@pulumi.getter(name="networkId")
def network_id(self) -> pulumi.Output[str]:
"""
The ID of the Network the Compute instance NIC is attached to.
"""
return pulumi.get(self, "network_id")
@property
@pulumi.getter(name="nicId")
def nic_id(self) -> pulumi.Output[str]:
"""
The ID of the NIC.
"""
return pulumi.get(self, "nic_id")
| en | 0.712967 | # coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** The set of arguments for constructing a SecondaryIPAddress resource. :param pulumi.Input[str] compute_id: The ID of the [Compute instance][r-compute]. :param pulumi.Input[str] ip_address: The [Elastic IP][r-ipaddress] address to assign. The ID of the [Compute instance][r-compute]. The [Elastic IP][r-ipaddress] address to assign. Input properties used for looking up and filtering SecondaryIPAddress resources. :param pulumi.Input[str] compute_id: The ID of the [Compute instance][r-compute]. :param pulumi.Input[str] ip_address: The [Elastic IP][r-ipaddress] address to assign. :param pulumi.Input[str] network_id: The ID of the Network the Compute instance NIC is attached to. :param pulumi.Input[str] nic_id: The ID of the NIC. The ID of the [Compute instance][r-compute]. The [Elastic IP][r-ipaddress] address to assign. The ID of the Network the Compute instance NIC is attached to. The ID of the NIC. Provides a resource for assigning an existing Exoscale [Elastic IP][r-ipaddress] to a [Compute instance][r-compute]. > **NOTE:** The network interfaces of the Compute instance itself still have to be configured accordingly (unless using a *managed* Elastic IP). !> **WARNING:** This resource is deprecated and will be removed in the next major version. ## Import This resource is automatically imported when importing an `exoscale_compute` resource. [r-compute]compute.html [r-ipaddress]ipaddress.html :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] compute_id: The ID of the [Compute instance][r-compute]. :param pulumi.Input[str] ip_address: The [Elastic IP][r-ipaddress] address to assign. 
Provides a resource for assigning an existing Exoscale [Elastic IP][r-ipaddress] to a [Compute instance][r-compute]. > **NOTE:** The network interfaces of the Compute instance itself still have to be configured accordingly (unless using a *managed* Elastic IP). !> **WARNING:** This resource is deprecated and will be removed in the next major version. ## Import This resource is automatically imported when importing an `exoscale_compute` resource. [r-compute]compute.html [r-ipaddress]ipaddress.html :param str resource_name: The name of the resource. :param SecondaryIPAddressArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. Get an existing SecondaryIPAddress resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] compute_id: The ID of the [Compute instance][r-compute]. :param pulumi.Input[str] ip_address: The [Elastic IP][r-ipaddress] address to assign. :param pulumi.Input[str] network_id: The ID of the Network the Compute instance NIC is attached to. :param pulumi.Input[str] nic_id: The ID of the NIC. The ID of the [Compute instance][r-compute]. The [Elastic IP][r-ipaddress] address to assign. The ID of the Network the Compute instance NIC is attached to. The ID of the NIC. | 2.096106 | 2 |
plotly/tests/test_optional/test_jupyter/test_jupyter.py | SamLau95/plotly.py | 6 | 6614977 | """
test__jupyter
"""
import nbformat
from nbconvert import HTMLExporter
from nbconvert.preprocessors import ExecutePreprocessor
from ipykernel import kernelspec
from unittest import TestCase
from os import path
import subprocess
PATH_ROOT = path.dirname(__file__)
PATH_NODE_MODULES = path.join(PATH_ROOT, 'node_modules')
PATH_FIXTURES = path.join(PATH_ROOT, 'fixtures')
PATH_JS_TESTS = path.join(PATH_ROOT, 'js_tests')
class PlotlyJupyterTestDeps(TestCase):
def test_node_modules(self):
self.assertTrue(path.isdir(PATH_NODE_MODULES))
class Common(TestCase):
__test__ = False
name = None
def setUp(self):
self.path_test_nb = path.join(PATH_FIXTURES, self.name + '.ipynb')
self.path_test_html = path.join(PATH_FIXTURES, self.name + '.html')
self.path_test_js = path.join(PATH_JS_TESTS, self.name + '.js')
self.kernel_name = kernelspec.KERNEL_NAME
with open(self.path_test_nb, 'r') as f:
self.nb = nbformat.read(f, as_version=4)
self.ep = ExecutePreprocessor(timeout=600,
kernel_name=self.kernel_name)
self.html_exporter = HTMLExporter()
self.ep.preprocess(self.nb, {'metadata': {'path': '.'}})
(self.body, _) = self.html_exporter.from_notebook_node(self.nb)
with open(self.path_test_html, 'w') as f:
f.write(self.body)
def test_js(self):
cmd = ['npm', 'test', '--', self.path_test_html, self.path_test_js]
proc = subprocess.Popen(cmd,
cwd=PATH_ROOT,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(_, stderr) = proc.communicate()
if stderr:
self.fail('One or more javascript test failed')
class PlotlyJupyterConnectedFalseTestCase(Common):
__test__ = True
name = 'connected_false'
class PlotlyJupyterConnectedTrueTestCase(Common):
__test__ = True
name = 'connected_true'
| """
test__jupyter
"""
import nbformat
from nbconvert import HTMLExporter
from nbconvert.preprocessors import ExecutePreprocessor
from ipykernel import kernelspec
from unittest import TestCase
from os import path
import subprocess
PATH_ROOT = path.dirname(__file__)
PATH_NODE_MODULES = path.join(PATH_ROOT, 'node_modules')
PATH_FIXTURES = path.join(PATH_ROOT, 'fixtures')
PATH_JS_TESTS = path.join(PATH_ROOT, 'js_tests')
class PlotlyJupyterTestDeps(TestCase):
def test_node_modules(self):
self.assertTrue(path.isdir(PATH_NODE_MODULES))
class Common(TestCase):
__test__ = False
name = None
def setUp(self):
self.path_test_nb = path.join(PATH_FIXTURES, self.name + '.ipynb')
self.path_test_html = path.join(PATH_FIXTURES, self.name + '.html')
self.path_test_js = path.join(PATH_JS_TESTS, self.name + '.js')
self.kernel_name = kernelspec.KERNEL_NAME
with open(self.path_test_nb, 'r') as f:
self.nb = nbformat.read(f, as_version=4)
self.ep = ExecutePreprocessor(timeout=600,
kernel_name=self.kernel_name)
self.html_exporter = HTMLExporter()
self.ep.preprocess(self.nb, {'metadata': {'path': '.'}})
(self.body, _) = self.html_exporter.from_notebook_node(self.nb)
with open(self.path_test_html, 'w') as f:
f.write(self.body)
def test_js(self):
cmd = ['npm', 'test', '--', self.path_test_html, self.path_test_js]
proc = subprocess.Popen(cmd,
cwd=PATH_ROOT,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(_, stderr) = proc.communicate()
if stderr:
self.fail('One or more javascript test failed')
class PlotlyJupyterConnectedFalseTestCase(Common):
__test__ = True
name = 'connected_false'
class PlotlyJupyterConnectedTrueTestCase(Common):
__test__ = True
name = 'connected_true'
| en | 0.354582 | test__jupyter | 2.123684 | 2 |
python.d/filebeat.chart.py | srozb/netdata-plugins | 0 | 6614978 | <reponame>srozb/netdata-plugins
# -*- coding: utf-8 -*-
# Description: Filebeat statistics
# Author: srozb
# SPDX-License-Identifier: MIT License
import os
import json
from bases.FrameworkServices.LogService import LogService
# default module values
# update_every = 4
priority = 90000
retries = 60
ORDER = ['bytes', 'events']
CHARTS = {
'bytes': {
'options': [None, 'bytes', 'bytes', 'output', 'output', 'line'],
'lines': [
['read', 'Bytes read', 'absolute', 1, 1],
['write', 'Bytes write', 'absolute', 1, 1],
]
},
'events': {
'options': [None, 'events', 'events', 'output', 'output', 'line'],
'lines': [
['acked', 'events', 'absolute', 1, 1]
]
}
}
class Service(LogService):
def __init__(self, configuration=None, name=None):
LogService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = CHARTS
self.log_path = self.configuration.get('log_path', '/var/log/filebeat/filebeat')
self.data = dict()
def check(self):
if not os.access(self.log_path, os.R_OK):
self.error('{0} is not readable'.format(self.log_path))
return False
return True
def get_data(self):
raw = self._get_raw_data()
if not raw:
return None if raw is None else self.data
for row in raw:
try:
match = json.loads('{'+"{".join(row.split('{')[1:]))
self.data['read'] = match['monitoring']['metrics']['libbeat']['output']['read']['bytes']
self.data['write'] = match['monitoring']['metrics']['libbeat']['output']['write']['bytes']
self.data['acked'] = match['monitoring']['metrics']['libbeat']['output']['events']['acked']
except:
pass
return self.data
| # -*- coding: utf-8 -*-
# Description: Filebeat statistics
# Author: srozb
# SPDX-License-Identifier: MIT License
import os
import json
from bases.FrameworkServices.LogService import LogService
# default module values
# update_every = 4
priority = 90000
retries = 60
ORDER = ['bytes', 'events']
CHARTS = {
'bytes': {
'options': [None, 'bytes', 'bytes', 'output', 'output', 'line'],
'lines': [
['read', 'Bytes read', 'absolute', 1, 1],
['write', 'Bytes write', 'absolute', 1, 1],
]
},
'events': {
'options': [None, 'events', 'events', 'output', 'output', 'line'],
'lines': [
['acked', 'events', 'absolute', 1, 1]
]
}
}
class Service(LogService):
def __init__(self, configuration=None, name=None):
LogService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = CHARTS
self.log_path = self.configuration.get('log_path', '/var/log/filebeat/filebeat')
self.data = dict()
def check(self):
if not os.access(self.log_path, os.R_OK):
self.error('{0} is not readable'.format(self.log_path))
return False
return True
def get_data(self):
raw = self._get_raw_data()
if not raw:
return None if raw is None else self.data
for row in raw:
try:
match = json.loads('{'+"{".join(row.split('{')[1:]))
self.data['read'] = match['monitoring']['metrics']['libbeat']['output']['read']['bytes']
self.data['write'] = match['monitoring']['metrics']['libbeat']['output']['write']['bytes']
self.data['acked'] = match['monitoring']['metrics']['libbeat']['output']['events']['acked']
except:
pass
return self.data | en | 0.31237 | # -*- coding: utf-8 -*- # Description: Filebeat statistics # Author: srozb # SPDX-License-Identifier: MIT License # default module values # update_every = 4 | 2.078454 | 2 |
src/zope/testrunner/tests/testrunner-ex/usecompiled/package/__init__.py | jamesjer/zope.testrunner | 1 | 6614979 | <filename>src/zope/testrunner/tests/testrunner-ex/usecompiled/package/__init__.py
# Makes this a package.
| <filename>src/zope/testrunner/tests/testrunner-ex/usecompiled/package/__init__.py
# Makes this a package.
| en | 0.70121 | # Makes this a package. | 1.084877 | 1 |
file_1.py | dina-deifallah/git_colab | 0 | 6614980 | <gh_stars>0
var1 = 10
var2 = 20
print("the result is = {}".format(var1+var2)
| var1 = 10
var2 = 20
print("the result is = {}".format(var1+var2) | none | 1 | 3.530192 | 4 | |
hummingbird/pipeline/pipeline.py | richardycao/hummingbird_python | 0 | 6614981 | import os
from pathlib import Path
class Pipeline(object):
def __init__(self, id, modules=[]):
"""
modules: list of PipelineNode
"""
self.id = id
self.modules = modules
self.tab_size = 2
def __tabs(self, count):
tab = ''.join([' ' for _ in range(self.tab_size)])
return ''.join([tab for _ in range(count)])
def parse_params(self, io, params, path=''):
for key, value in params.items():
if isinstance(value, dict):
self.parse_params(io, value, path=key+'/')
else:
io.write(" --" + path + str(key) + " " + value)
def build(self):
"""
Create the docker files.
- This is called from anywhere, as long as the path to each of the containers
is provided.
- The 'run' command in each Dockerfile should set its arguments based on arguments passed
into this Pipeline object.
What info is needed about the paths?
- the relative path from the root directory is need for docker-compose.yml
- the main python file to run
- the name of the container - should be the directory name, which is included
in the path
"""
queue_count = 0
for module in self.modules:
path = Path(module.module_path)
with open(str(path.parent) + "/Dockerfile", 'w') as f:
f.write("FROM ubuntu:latest\n")
f.write("\n")
f.write("RUN apt-get update\n")
f.write("RUN apt-get install -y python3.7 python3-pip python3-dev\n")
f.write("RUN apt-get install -y git\n")
f.write("RUN pip3 install --upgrade pip\n")
f.write("\n")
f.write("WORKDIR /usr/src/app\n")
f.write("COPY . .\n")
f.write("RUN pip3 install -r requirements.txt\n")
# I'm not sure why it isn't upgrading. uninstall -> reinstall is the temporary fix
f.write("RUN pip3 uninstall hummingbird\n")
f.write("RUN pip3 install --upgrade git+https://github.com/richardycao/hummingbird_python.git#egg=hummingbird\n")
f.write("\n")
f.write("CMD python3 " + path.name)
# Generating kafka I/O params and writing them to the python command
f.write(" --topics-in " + str(self.id) + "-" + str(queue_count))
f.write(" --topics-out " + str(self.id) + "-" + str(queue_count + 1))
# Writing other params to the python command
params = module.params
for key, value in params.items():
f.write(" --" + key + " " + value)
f.write("\n")
#self.parse_params(f, params)
queue_count += 1
"""
Create the docker-compose file for the pipeline
"""
with open('./docker-compose-' + str(self.id) + '.yml', 'w') as f:
f.write("version: '3.7'\n")
f.write("\n")
f.write("services:\n")
dependencies = []
for module in reversed(self.modules):
path = Path(module.module_path)
label = path.parent.name
f.write(self.__tabs(1) + label + ":\n")
f.write(self.__tabs(2) + "build: " + str(path.parent) + "\n")
f.write(self.__tabs(2) + "container_name: " + label + "\n")
f.write(self.__tabs(2) + "environment:\n")
f.write(self.__tabs(3) + "- \"PYTHONUNBUFFERED=1\"\n")
if len(dependencies) > 0:
f.write(self.__tabs(2) + "depends_on:\n")
# Very temporary way to set dependencies
for dep in dependencies:
f.write(self.__tabs(3) + "- " + dep + "\n")
dependencies.append(label)
"""
Build the docker-compose files
"""
# Build the docker compose for Kafka
os.system('docker-compose -f docker-compose-kafka.yml build')
# Build the docker compose for the pipeline
os.system('docker-compose -f docker-compose-' + str(self.id) + '.yml build')
def run(self):
# Maybe this part should be done manually. Leave it blank for now.
# Run Kafka docker
# Wait
# Run pipeline docker
pass
| import os
from pathlib import Path
class Pipeline(object):
def __init__(self, id, modules=[]):
"""
modules: list of PipelineNode
"""
self.id = id
self.modules = modules
self.tab_size = 2
def __tabs(self, count):
tab = ''.join([' ' for _ in range(self.tab_size)])
return ''.join([tab for _ in range(count)])
def parse_params(self, io, params, path=''):
for key, value in params.items():
if isinstance(value, dict):
self.parse_params(io, value, path=key+'/')
else:
io.write(" --" + path + str(key) + " " + value)
def build(self):
"""
Create the docker files.
- This is called from anywhere, as long as the path to each of the containers
is provided.
- The 'run' command in each Dockerfile should set its arguments based on arguments passed
into this Pipeline object.
What info is needed about the paths?
- the relative path from the root directory is need for docker-compose.yml
- the main python file to run
- the name of the container - should be the directory name, which is included
in the path
"""
queue_count = 0
for module in self.modules:
path = Path(module.module_path)
with open(str(path.parent) + "/Dockerfile", 'w') as f:
f.write("FROM ubuntu:latest\n")
f.write("\n")
f.write("RUN apt-get update\n")
f.write("RUN apt-get install -y python3.7 python3-pip python3-dev\n")
f.write("RUN apt-get install -y git\n")
f.write("RUN pip3 install --upgrade pip\n")
f.write("\n")
f.write("WORKDIR /usr/src/app\n")
f.write("COPY . .\n")
f.write("RUN pip3 install -r requirements.txt\n")
# I'm not sure why it isn't upgrading. uninstall -> reinstall is the temporary fix
f.write("RUN pip3 uninstall hummingbird\n")
f.write("RUN pip3 install --upgrade git+https://github.com/richardycao/hummingbird_python.git#egg=hummingbird\n")
f.write("\n")
f.write("CMD python3 " + path.name)
# Generating kafka I/O params and writing them to the python command
f.write(" --topics-in " + str(self.id) + "-" + str(queue_count))
f.write(" --topics-out " + str(self.id) + "-" + str(queue_count + 1))
# Writing other params to the python command
params = module.params
for key, value in params.items():
f.write(" --" + key + " " + value)
f.write("\n")
#self.parse_params(f, params)
queue_count += 1
"""
Create the docker-compose file for the pipeline
"""
with open('./docker-compose-' + str(self.id) + '.yml', 'w') as f:
f.write("version: '3.7'\n")
f.write("\n")
f.write("services:\n")
dependencies = []
for module in reversed(self.modules):
path = Path(module.module_path)
label = path.parent.name
f.write(self.__tabs(1) + label + ":\n")
f.write(self.__tabs(2) + "build: " + str(path.parent) + "\n")
f.write(self.__tabs(2) + "container_name: " + label + "\n")
f.write(self.__tabs(2) + "environment:\n")
f.write(self.__tabs(3) + "- \"PYTHONUNBUFFERED=1\"\n")
if len(dependencies) > 0:
f.write(self.__tabs(2) + "depends_on:\n")
# Very temporary way to set dependencies
for dep in dependencies:
f.write(self.__tabs(3) + "- " + dep + "\n")
dependencies.append(label)
"""
Build the docker-compose files
"""
# Build the docker compose for Kafka
os.system('docker-compose -f docker-compose-kafka.yml build')
# Build the docker compose for the pipeline
os.system('docker-compose -f docker-compose-' + str(self.id) + '.yml build')
def run(self):
# Maybe this part should be done manually. Leave it blank for now.
# Run Kafka docker
# Wait
# Run pipeline docker
pass
| en | 0.889428 | modules: list of PipelineNode Create the docker files. - This is called from anywhere, as long as the path to each of the containers is provided. - The 'run' command in each Dockerfile should set its arguments based on arguments passed into this Pipeline object. What info is needed about the paths? - the relative path from the root directory is need for docker-compose.yml - the main python file to run - the name of the container - should be the directory name, which is included in the path # I'm not sure why it isn't upgrading. uninstall -> reinstall is the temporary fix #egg=hummingbird\n") # Generating kafka I/O params and writing them to the python command # Writing other params to the python command #self.parse_params(f, params) Create the docker-compose file for the pipeline # Very temporary way to set dependencies Build the docker-compose files # Build the docker compose for Kafka # Build the docker compose for the pipeline # Maybe this part should be done manually. Leave it blank for now. # Run Kafka docker # Wait # Run pipeline docker | 2.781803 | 3 |
alembic/versions/f163a00a02aa_remove_filters.py | KasperHelsted/pajbot | 0 | 6614982 | """remove_filters
Revision ID: f163a00a02aa
Revises: 46d07701a90b
Create Date: 2019-05-30 23:20:40.098349
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '4<PASSWORD>'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('tb_filters')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('tb_filters',
sa.Column('id', mysql.INTEGER(display_width=11), autoincrement=True, nullable=False),
sa.Column('name', mysql.VARCHAR(length=128), nullable=True),
sa.Column('type', mysql.VARCHAR(length=64), nullable=True),
sa.Column('action', mysql.TEXT(), nullable=True),
sa.Column('extra_args', mysql.TEXT(), nullable=True),
sa.Column('filter', mysql.TEXT(), nullable=True),
sa.Column('source', mysql.TEXT(), nullable=True),
sa.Column('enabled', mysql.TINYINT(display_width=1), autoincrement=False, nullable=True),
sa.Column('num_uses', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id'),
mysql_default_charset='utf8mb4',
mysql_engine='InnoDB'
)
# ### end Alembic commands ###
| """remove_filters
Revision ID: f163a00a02aa
Revises: 46d07701a90b
Create Date: 2019-05-30 23:20:40.098349
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '4<PASSWORD>'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('tb_filters')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('tb_filters',
sa.Column('id', mysql.INTEGER(display_width=11), autoincrement=True, nullable=False),
sa.Column('name', mysql.VARCHAR(length=128), nullable=True),
sa.Column('type', mysql.VARCHAR(length=64), nullable=True),
sa.Column('action', mysql.TEXT(), nullable=True),
sa.Column('extra_args', mysql.TEXT(), nullable=True),
sa.Column('filter', mysql.TEXT(), nullable=True),
sa.Column('source', mysql.TEXT(), nullable=True),
sa.Column('enabled', mysql.TINYINT(display_width=1), autoincrement=False, nullable=True),
sa.Column('num_uses', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id'),
mysql_default_charset='utf8mb4',
mysql_engine='InnoDB'
)
# ### end Alembic commands ###
| en | 0.521247 | remove_filters Revision ID: f163a00a02aa Revises: 46d07701a90b Create Date: 2019-05-30 23:20:40.098349 # revision identifiers, used by Alembic. # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### | 1.472709 | 1 |
library/f5bigip_cm_device_group.py | erjac77/ansible-role-f5 | 1 | 6614983 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2016 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: f5bigip_cm_device_group
short_description: BIG-IP cm device-group module
description:
- Configures device groups.
version_added: "1.0.0" # of erjac77.f5 role
author:
- "<NAME> (@erjac77)"
options:
asm_sync:
description:
- Specifies whether to synchronize ASM configurations of device group members.
default: disabled
choices: ['enabled', 'disabled']
auto_sync:
description:
- Specifies whether the device group automatically synchronizes configuration data to its members.
default: disabled
choices: ['enabled', 'disabled']
devices:
description:
- Manages the set of devices that are associated with a device group.
full_load_on_sync:
description:
- Specifies that the entire configuration for a device group is sent when configuration synchronization is
performed.
default: false
type: bool
incremental_config_sync_size_max:
description:
- Specifies the maximum size (in KB) to devote to incremental config sync cached transactions.
default: 1024
network_failover:
description:
- When the device group type is failover, specifies whether network failover is used.
default: enabled
choices: ['enabled', 'disabled']
save_on_auto_sync:
description:
- Specifies whether to save the configuration on the remote devices following an automatic configuration
synchronization.
default: false
type: bool
type:
description:
- Specifies the type of device group.
default: sync-only
choices: ['sync-only', 'sync-failover']
extends_documentation_fragment:
- f5_common
- f5_description
- f5_name
- f5_state
"""
EXAMPLES = """
- name: Create CM Device Group
f5bigip_cm_device_group:
provider:
server: "{{ ansible_host }}"
server_port: "{{ http_port | default(443) }}"
user: "{{ http_user }}"
password: "{{ <PASSWORD> }}"
validate_certs: false
name: my_device_group
devices:
- bigip01.localhost
- bigip02.localhost
network_failover: enabled
type: sync-failover
state: present
delegate_to: localhost
"""
RETURN = """ # """
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.erjac77.network.f5.common import F5_ACTIVATION_CHOICES
from ansible.module_utils.erjac77.network.f5.common import F5_NAMED_OBJ_ARGS
from ansible.module_utils.erjac77.network.f5.common import F5_PROVIDER_ARGS
from ansible.module_utils.erjac77.network.f5.bigip import F5BigIpNamedObject
class ModuleParams(object):
@property
def argument_spec(self):
argument_spec = dict(
asm_sync=dict(type="str", choices=F5_ACTIVATION_CHOICES),
auto_sync=dict(type="str", choices=F5_ACTIVATION_CHOICES),
description=dict(type="str"),
devices=dict(type=list),
full_load_on_sync=dict(type="bool"),
incremental_config_sync_size_max=dict(type="int"),
network_failover=dict(type="str", choices=F5_ACTIVATION_CHOICES),
save_on_auto_sync=dict(type="bool"),
type=dict(type="str", choices=["sync-only", "sync-failover"]),
)
argument_spec.update(F5_PROVIDER_ARGS)
argument_spec.update(F5_NAMED_OBJ_ARGS)
del argument_spec["partition"]
return argument_spec
@property
def supports_check_mode(self):
return True
class F5BigIpCmDeviceGroup(F5BigIpNamedObject):
def _set_crud_methods(self):
self._methods = {
"create": self._api.tm.cm.device_groups.device_group.create,
"read": self._api.tm.cm.device_groups.device_group.load,
"update": self._api.tm.cm.device_groups.device_group.update,
"delete": self._api.tm.cm.device_groups.device_group.delete,
"exists": self._api.tm.cm.device_groups.device_group.exists,
}
def main():
params = ModuleParams()
module = AnsibleModule(
argument_spec=params.argument_spec,
supports_check_mode=params.supports_check_mode,
)
try:
obj = F5BigIpCmDeviceGroup(check_mode=module.check_mode, **module.params)
result = obj.flush()
module.exit_json(**result)
except Exception as exc:
module.fail_json(msg=str(exc))
if __name__ == "__main__":
main()
| #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2016 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: f5bigip_cm_device_group
short_description: BIG-IP cm device-group module
description:
- Configures device groups.
version_added: "1.0.0" # of erjac77.f5 role
author:
- "<NAME> (@erjac77)"
options:
asm_sync:
description:
- Specifies whether to synchronize ASM configurations of device group members.
default: disabled
choices: ['enabled', 'disabled']
auto_sync:
description:
- Specifies whether the device group automatically synchronizes configuration data to its members.
default: disabled
choices: ['enabled', 'disabled']
devices:
description:
- Manages the set of devices that are associated with a device group.
full_load_on_sync:
description:
- Specifies that the entire configuration for a device group is sent when configuration synchronization is
performed.
default: false
type: bool
incremental_config_sync_size_max:
description:
- Specifies the maximum size (in KB) to devote to incremental config sync cached transactions.
default: 1024
network_failover:
description:
- When the device group type is failover, specifies whether network failover is used.
default: enabled
choices: ['enabled', 'disabled']
save_on_auto_sync:
description:
- Specifies whether to save the configuration on the remote devices following an automatic configuration
synchronization.
default: false
type: bool
type:
description:
- Specifies the type of device group.
default: sync-only
choices: ['sync-only', 'sync-failover']
extends_documentation_fragment:
- f5_common
- f5_description
- f5_name
- f5_state
"""
EXAMPLES = """
- name: Create CM Device Group
f5bigip_cm_device_group:
provider:
server: "{{ ansible_host }}"
server_port: "{{ http_port | default(443) }}"
user: "{{ http_user }}"
password: "{{ <PASSWORD> }}"
validate_certs: false
name: my_device_group
devices:
- bigip01.localhost
- bigip02.localhost
network_failover: enabled
type: sync-failover
state: present
delegate_to: localhost
"""
RETURN = """ # """
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.erjac77.network.f5.common import F5_ACTIVATION_CHOICES
from ansible.module_utils.erjac77.network.f5.common import F5_NAMED_OBJ_ARGS
from ansible.module_utils.erjac77.network.f5.common import F5_PROVIDER_ARGS
from ansible.module_utils.erjac77.network.f5.bigip import F5BigIpNamedObject
class ModuleParams(object):
@property
def argument_spec(self):
argument_spec = dict(
asm_sync=dict(type="str", choices=F5_ACTIVATION_CHOICES),
auto_sync=dict(type="str", choices=F5_ACTIVATION_CHOICES),
description=dict(type="str"),
devices=dict(type=list),
full_load_on_sync=dict(type="bool"),
incremental_config_sync_size_max=dict(type="int"),
network_failover=dict(type="str", choices=F5_ACTIVATION_CHOICES),
save_on_auto_sync=dict(type="bool"),
type=dict(type="str", choices=["sync-only", "sync-failover"]),
)
argument_spec.update(F5_PROVIDER_ARGS)
argument_spec.update(F5_NAMED_OBJ_ARGS)
del argument_spec["partition"]
return argument_spec
@property
def supports_check_mode(self):
return True
class F5BigIpCmDeviceGroup(F5BigIpNamedObject):
def _set_crud_methods(self):
self._methods = {
"create": self._api.tm.cm.device_groups.device_group.create,
"read": self._api.tm.cm.device_groups.device_group.load,
"update": self._api.tm.cm.device_groups.device_group.update,
"delete": self._api.tm.cm.device_groups.device_group.delete,
"exists": self._api.tm.cm.device_groups.device_group.exists,
}
def main():
params = ModuleParams()
module = AnsibleModule(
argument_spec=params.argument_spec,
supports_check_mode=params.supports_check_mode,
)
try:
obj = F5BigIpCmDeviceGroup(check_mode=module.check_mode, **module.params)
result = obj.flush()
module.exit_json(**result)
except Exception as exc:
module.fail_json(msg=str(exc))
if __name__ == "__main__":
main()
| en | 0.667697 | #!/usr/bin/python # -*- coding: utf-8 -*- # Copyright 2016 <NAME> <<EMAIL>> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. --- module: f5bigip_cm_device_group short_description: BIG-IP cm device-group module description: - Configures device groups. version_added: "1.0.0" # of erjac77.f5 role author: - "<NAME> (@erjac77)" options: asm_sync: description: - Specifies whether to synchronize ASM configurations of device group members. default: disabled choices: ['enabled', 'disabled'] auto_sync: description: - Specifies whether the device group automatically synchronizes configuration data to its members. default: disabled choices: ['enabled', 'disabled'] devices: description: - Manages the set of devices that are associated with a device group. full_load_on_sync: description: - Specifies that the entire configuration for a device group is sent when configuration synchronization is performed. default: false type: bool incremental_config_sync_size_max: description: - Specifies the maximum size (in KB) to devote to incremental config sync cached transactions. default: 1024 network_failover: description: - When the device group type is failover, specifies whether network failover is used. default: enabled choices: ['enabled', 'disabled'] save_on_auto_sync: description: - Specifies whether to save the configuration on the remote devices following an automatic configuration synchronization. default: false type: bool type: description: - Specifies the type of device group. 
default: sync-only choices: ['sync-only', 'sync-failover'] extends_documentation_fragment: - f5_common - f5_description - f5_name - f5_state - name: Create CM Device Group f5bigip_cm_device_group: provider: server: "{{ ansible_host }}" server_port: "{{ http_port | default(443) }}" user: "{{ http_user }}" password: "{{ <PASSWORD> }}" validate_certs: false name: my_device_group devices: - bigip01.localhost - bigip02.localhost network_failover: enabled type: sync-failover state: present delegate_to: localhost # | 1.283115 | 1 |
cloud/contabilidad/views/cuenta_gastos.py | vallemrv/tpvB3 | 3 | 6614984 | # -*- coding: utf-8 -*-
# @Author: <NAME> <valle>
# @Date: 01-Jan-2018
# @Email: <EMAIL>
# @Last modified by: valle
# @Last modified time: 23-Mar-2018
# @License: Apache license version 2.0
from django.db.models import Q
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required, permission_required
from contabilidad.models import CuentasGastos
from contabilidad.forms import CuentasGastosForm
from contabilidad.utils import vaciar_sesison_subcuentas, vaciar_sesison_cuentas
from tokenapi.http import JsonResponse
# Create your views here.
@login_required(login_url='login_tk')
def cuentas(request, id=-1):
    """Create or edit a ``CuentasGastos`` record.

    GET with ``id == -1`` renders a blank creation form; GET with
    ``id > 0`` renders the form pre-filled with the existing account.
    POST saves either a new record (``id == -1``) or the edited one
    (``id > 0``) and redirects to the account list.

    Args:
        request: incoming ``HttpRequest``.
        id: primary key of the account to edit, or -1 to create a new one.
    """
    if not request.method == "POST" and id == -1:
        # Blank form for creating a new account.
        f = CuentasGastosForm()
        return render(request, 'contabilidad/cuentas/cuentas.html',
                      {"form": f,
                       "titulo": "Cuenta nueva"})
    elif not request.method == "POST" and id > 0:
        # Pre-filled form for editing; fall back to a blank form if the
        # requested account no longer exists.
        f = CuentasGastosForm()
        try:
            obj = CuentasGastos.objects.get(pk=id)
            f = CuentasGastosForm(instance=obj)
        except CuentasGastos.DoesNotExist:
            pass
        return render(request, 'contabilidad/cuentas/cuentas.html',
                      {"form": f,
                       "titulo": "Editar cuenta"})
    elif id > 0:
        # POST: update an existing account.
        f = CuentasGastosForm()
        try:
            reg = CuentasGastos.objects.get(pk=id)
            f = CuentasGastosForm(request.POST, instance=reg)
        except CuentasGastos.DoesNotExist:
            pass
        if f.is_valid():
            reg = f.save()
            # BUG FIX: the model field is ``activo`` (see rm_cuentas and the
            # ``activo=False`` filters); the original set a nonexistent
            # ``active`` attribute, so edited accounts were never reactivated.
            reg.activo = True
            reg.save()
            return redirect("conta:lista_cuentas")
        # Re-render with validation errors instead of falling through and
        # returning None (which makes Django raise a 500).
        return render(request, 'contabilidad/cuentas/cuentas.html',
                      {"form": f,
                       "titulo": "Editar cuenta"})
    else:
        # POST: create a new account.
        f = CuentasGastosForm(request.POST)
        if f.is_valid():
            f.save()
            return redirect("conta:lista_cuentas")
        # Re-render with validation errors instead of returning None.
        return render(request, 'contabilidad/cuentas/cuentas.html',
                      {"form": f,
                       "titulo": "Cuenta nueva"})
@login_required(login_url='login_tk')
def rm_cuentas(request, id):
    """Soft-delete an account by flagging it inactive.

    The row is kept in the database with ``activo = False`` so the list
    views (which exclude ``activo=False``) stop showing it.
    """
    try:
        cuenta = CuentasGastos.objects.get(pk=id)
        cuenta.activo = False
        cuenta.save()
    except CuentasGastos.DoesNotExist as e:
        # Narrowed from a broad ``except Exception``: a missing account is
        # the only expected failure and is treated as best-effort.
        print("[ERROR ] %s" % e)
    return redirect("conta:lista_cuentas")
@login_required(login_url='login_tk')
def lista_cuentas(request):
    """Render the full-page list of active accounts.

    On POST the list is narrowed by the ``filter`` term (case-insensitive
    substring match on the account name); otherwise every active account
    is shown.
    """
    # Single base queryset instead of two duplicated branches.
    query = CuentasGastos.objects.exclude(activo=False)
    if request.method == "POST":
        # ``.get`` avoids a KeyError (500) when the field is missing;
        # ``term`` also avoids shadowing the ``filter`` builtin.
        term = request.POST.get("filter", "")
        if term:
            query = query.filter(Q(nombre__icontains=term))
    return render(request, "contabilidad/cuentas/lista.html",
                  {'query': query,
                   "opcion": "lista"})
@login_required(login_url='login_tk')
def find_cuentas(request):
    """AJAX variant of lista_cuentas: same filtering, partial template.

    A POSTed "filter" value narrows active rows by name
    (case-insensitive); GET returns all active rows.
    """
    # Soft-deleted rows (activo == False) are always hidden.
    filter_query = CuentasGastos.objects.exclude(activo=False)
    if request.method == "POST":
        # Renamed local from `filter` to avoid shadowing the builtin.
        search = request.POST["filter"]
        filter_query = filter_query.filter(Q(nombre__icontains=search))
    return render(request, "contabilidad/cuentas/lista_ajax.html",
                  {'query': filter_query,
                   "opcion": 'find'})
@login_required(login_url='login_tk')
def set_cuenta(request, id):
    """Remember account `id` in the session as the currently selected one."""
    cuenta = CuentasGastos.objects.get(pk=id)
    # Any previously selected sub-account is no longer meaningful.
    vaciar_sesison_subcuentas(request)
    request.session["accion_pk_cuenta"] = cuenta.pk
    return JsonResponse("perfect")
@login_required(login_url='login_tk')
def salir_cuentas(request):
    """Clear both the sub-account and account selections from the
    session, then go back to the account list."""
    for limpiar in (vaciar_sesison_subcuentas, vaciar_sesison_cuentas):
        limpiar(request)
    return redirect("conta:lista_cuentas")
| # -*- coding: utf-8 -*-
# @Author: <NAME> <valle>
# @Date: 01-Jan-2018
# @Email: <EMAIL>
# @Last modified by: valle
# @Last modified time: 23-Mar-2018
# @License: Apache license vesion 2.0
from django.db.models import Q
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required, permission_required
from contabilidad.models import CuentasGastos
from contabilidad.forms import CuentasGastosForm
from contabilidad.utils import vaciar_sesison_subcuentas, vaciar_sesison_cuentas
from tokenapi.http import JsonResponse
# Create your views here.
@login_required(login_url='login_tk')
def cuentas(request, id=-1):
if not request.method == "POST" and id == -1:
f = CuentasGastosForm()
return render(request, 'contabilidad/cuentas/cuentas.html',
{"form": f,
"titulo": "Cuenta nueva" })
elif not request.method == "POST" and id > 0:
f = CuentasGastosForm()
try:
obj = CuentasGastos.objects.get(pk=id)
f = CuentasGastosForm(instance=obj)
except:
pass
return render(request, 'contabilidad/cuentas/cuentas.html',
{"form": f,
"titulo": "Editar cuenta" })
elif id > 0:
f = CuentasGastosForm()
try:
reg = CuentasGastos.objects.get(pk=id)
f = CuentasGastosForm(request.POST, instance=reg)
except:
pass
if f.is_valid():
reg = f.save()
reg.active = True
reg.save()
return redirect("conta:lista_cuentas")
else:
f = CuentasGastosForm(request.POST)
if f.is_valid():
obj = f.save()
return redirect("conta:lista_cuentas")
@login_required(login_url='login_tk')
def rm_cuentas(request, id):
try:
p = CuentasGastos.objects.get(pk=id)
p.activo = False
p.save()
except Exception as e:
print("[ERROR ] %s" % e)
return redirect("conta:lista_cuentas")
@login_required(login_url='login_tk')
def lista_cuentas(request):
if request.method == "POST":
filter = request.POST["filter"]
filter_query = CuentasGastos.objects.filter(Q(nombre__icontains=filter)).exclude(activo=False)
return render(request, "contabilidad/cuentas/lista.html",
{'query': filter_query,
"opcion": "lista"})
else:
filter_query = CuentasGastos.objects.all().exclude(activo=False)
return render(request, "contabilidad/cuentas/lista.html",
{'query': filter_query,
"opcion": "lista"})
@login_required(login_url='login_tk')
def find_cuentas(request):
if request.method == "POST":
filter = request.POST["filter"]
filter_query = CuentasGastos.objects.filter(Q(nombre__icontains=filter)).exclude(activo=False)
return render(request, "contabilidad/cuentas/lista_ajax.html",
{'query': filter_query,
"opcion": 'find'})
else:
filter_query = CuentasGastos.objects.all().exclude(activo=False)
return render(request, "contabilidad/cuentas/lista_ajax.html",
{'query': filter_query,
"opcion": 'find'})
@login_required(login_url='login_tk')
def set_cuenta(request, id):
obj = CuentasGastos.objects.get(pk=id)
vaciar_sesison_subcuentas(request)
request.session["accion_pk_cuenta"] = obj.pk
return JsonResponse("perfect")
@login_required(login_url='login_tk')
def salir_cuentas(request):
vaciar_sesison_subcuentas(request)
vaciar_sesison_cuentas(request)
return redirect("conta:lista_cuentas")
| en | 0.581657 | # -*- coding: utf-8 -*- # @Author: <NAME> <valle> # @Date: 01-Jan-2018 # @Email: <EMAIL> # @Last modified by: valle # @Last modified time: 23-Mar-2018 # @License: Apache license vesion 2.0 # Create your views here. | 2.20939 | 2 |
metric/calculator.py | andreamad8/FSB | 53 | 6614985 | from itertools import zip_longest
from metric.tree import Tree
from typing import Counter, Dict, Optional
import argparse
class Calculator:
    """Accumulates labeled-bracketing counts over (gold, predicted) tree
    pairs and reports precision / recall / F1."""

    def __init__(self, strict: bool = False) -> None:
        # Running totals of non-terminal nodes seen so far.
        self.num_gold_nt: int = 0
        self.num_pred_nt: int = 0
        self.num_matching_nt: int = 0
        # strict=True keys nodes on flat string spans instead of token spans.
        self.strict: bool = strict

    def get_metrics(self):
        """Return {"precision", "recall", "f1"} for everything added so far
        (each metric is 0 when its denominator is 0)."""
        if self.num_pred_nt:
            precision: float = self.num_matching_nt / self.num_pred_nt
        else:
            precision = 0
        if self.num_gold_nt:
            recall: float = self.num_matching_nt / self.num_gold_nt
        else:
            recall = 0
        f1: float = (2.0 * precision * recall / (precision + recall)
                     if precision + recall else 0)
        return {"precision": precision, "recall": recall, "f1": f1}

    def add_instance(self, gold_tree: Tree,
                     pred_tree: Optional[Tree] = None) -> None:
        """Fold one (gold, predicted) pair into the running totals.

        Passing no prediction only grows the gold denominator, which is
        how callers penalize unparseable predictions.
        """
        gold_nodes: Counter = self._get_node_info(gold_tree)
        self.num_gold_nt += sum(gold_nodes.values())
        if pred_tree:
            pred_nodes: Counter = self._get_node_info(pred_tree)
            self.num_pred_nt += sum(pred_nodes.values())
            # Multiset intersection = nodes agreeing on (label, span).
            self.num_matching_nt += sum((gold_nodes & pred_nodes).values())

    def _get_node_info(self, tree) -> Counter:
        """Count the tree's non-terminals, keyed by (label, span)."""
        counts: Counter = Counter()
        for node in tree.root.list_nonterminals():
            counts[(node.label, self._get_span(node))] += 1
        return counts

    def _get_span(self, node):
        # Span identity depends on the configured strictness.
        if self.strict:
            return node.get_flat_str_spans()
        return node.get_token_span()
def evaluate_predictions(gold_list: list, pred_list: list) -> Dict:
    """Score predicted bracketed-tree strings against gold ones, pairwise.

    Lines are paired positionally; because zip_longest pads the shorter
    list with None, a length mismatch surfaces as an AttributeError on
    .strip() and aborts the run. Returns instance count, exact-match
    rates, labeled-bracketing P/R/F1 (token spans and strict string
    spans), and the fraction of syntactically valid predictions.
    """
    instance_count: int = 0
    exact_matches: int = 0
    # NOTE(review): really an int counter of unparseable predictions;
    # annotated float, presumably so the ratio below reads as float math.
    invalid_preds: float = 0
    exact_match_brutal: int = 0  # whitespace-insensitive string matches
    labeled_bracketing_scores = Calculator(strict=False)
    tree_labeled_bracketing_scores = Calculator(strict=True)
    for gold_line, pred_line in zip_longest(gold_list, pred_list):
        try:
            gold_line = gold_line.strip()
            pred_line = pred_line.strip()
        except AttributeError:
            # One list ran out: zip_longest padded with None.
            print("WARNING: check format and length of files")
            quit()
        if gold_line.replace(" ","") == pred_line.replace(" ",""):
            exact_match_brutal += 1
        try:
            gold_tree = Tree(gold_line)
            instance_count += 1
        except ValueError:
            # A malformed gold tree means the reference data is broken;
            # quit() exits the interpreter immediately.
            print("FATAL: found invalid line in gold file:", gold_line)
            quit()
        try:
            pred_tree = Tree(pred_line)
            labeled_bracketing_scores.add_instance(gold_tree, pred_tree)
            tree_labeled_bracketing_scores.add_instance(
                gold_tree, pred_tree)
        except ValueError:
            # Unparseable prediction: count it and only grow the gold
            # denominators (no pred_tree passed), which lowers recall.
            # print("WARNING: found invalid line in pred file:", pred_line)
            invalid_preds += 1
            labeled_bracketing_scores.add_instance(gold_tree)
            tree_labeled_bracketing_scores.add_instance(gold_tree)
            continue
        if str(gold_tree) == str(pred_tree):
            exact_matches += 1
    exact_match_fraction: float = (
        exact_matches / instance_count) if instance_count else 0
    tree_validity_fraction: float = (
        1 - (invalid_preds / instance_count)) if instance_count else 0
    exact_match_fraction_brutal: float = (
        exact_match_brutal / instance_count) if instance_count else 0
    return {
        "instance_count":
        instance_count,
        "exact_match":
        exact_match_fraction,
        "labeled_bracketing_scores":
        labeled_bracketing_scores.get_metrics(),
        "tree_labeled_bracketing_scores":
        tree_labeled_bracketing_scores.get_metrics(),
        "tree_validity":
        tree_validity_fraction,
        "exact_match_brutal": exact_match_fraction_brutal
    }
| from itertools import zip_longest
from metric.tree import Tree
from typing import Counter, Dict, Optional
import argparse
class Calculator:
def __init__(self, strict: bool = False) -> None:
self.num_gold_nt: int = 0
self.num_pred_nt: int = 0
self.num_matching_nt: int = 0
self.strict: bool = strict
def get_metrics(self):
precision: float = (
self.num_matching_nt / self.num_pred_nt) if self.num_pred_nt else 0
recall: float = (
self.num_matching_nt / self.num_gold_nt) if self.num_gold_nt else 0
f1: float = (2.0 * precision * recall /
(precision + recall)) if precision + recall else 0
return {
"precision": precision,
"recall": recall,
"f1": f1,
}
def add_instance(self, gold_tree: Tree,
pred_tree: Optional[Tree] = None) -> None:
node_info_gold: Counter = self._get_node_info(gold_tree)
self.num_gold_nt += sum(node_info_gold.values())
if pred_tree:
node_info_pred: Counter = self._get_node_info(pred_tree)
self.num_pred_nt += sum(node_info_pred.values())
self.num_matching_nt += sum(
(node_info_gold & node_info_pred).values())
def _get_node_info(self, tree) -> Counter:
nodes = tree.root.list_nonterminals()
node_info: Counter = Counter()
for node in nodes:
node_info[(node.label, self._get_span(node))] += 1
return node_info
def _get_span(self, node):
return node.get_flat_str_spans(
) if self.strict else node.get_token_span()
def evaluate_predictions(gold_list: list, pred_list: list) -> Dict:
instance_count: int = 0
exact_matches: int = 0
invalid_preds: float = 0
exact_match_brutal: int = 0
labeled_bracketing_scores = Calculator(strict=False)
tree_labeled_bracketing_scores = Calculator(strict=True)
for gold_line, pred_line in zip_longest(gold_list, pred_list):
try:
gold_line = gold_line.strip()
pred_line = pred_line.strip()
except AttributeError:
print("WARNING: check format and length of files")
quit()
if gold_line.replace(" ","") == pred_line.replace(" ",""):
exact_match_brutal += 1
try:
gold_tree = Tree(gold_line)
instance_count += 1
except ValueError:
print("FATAL: found invalid line in gold file:", gold_line)
quit()
try:
pred_tree = Tree(pred_line)
labeled_bracketing_scores.add_instance(gold_tree, pred_tree)
tree_labeled_bracketing_scores.add_instance(
gold_tree, pred_tree)
except ValueError:
# print("WARNING: found invalid line in pred file:", pred_line)
invalid_preds += 1
labeled_bracketing_scores.add_instance(gold_tree)
tree_labeled_bracketing_scores.add_instance(gold_tree)
continue
if str(gold_tree) == str(pred_tree):
exact_matches += 1
exact_match_fraction: float = (
exact_matches / instance_count) if instance_count else 0
tree_validity_fraction: float = (
1 - (invalid_preds / instance_count)) if instance_count else 0
exact_match_fraction_brutal: float = (
exact_match_brutal / instance_count) if instance_count else 0
return {
"instance_count":
instance_count,
"exact_match":
exact_match_fraction,
"labeled_bracketing_scores":
labeled_bracketing_scores.get_metrics(),
"tree_labeled_bracketing_scores":
tree_labeled_bracketing_scores.get_metrics(),
"tree_validity":
tree_validity_fraction,
"exact_match_brutal": exact_match_fraction_brutal
}
| en | 0.761443 | # print("WARNING: found invalid line in pred file:", pred_line) | 2.673143 | 3 |
backend/apps/csyllabusapi/urls.py | CSyllabus/webapp | 3 | 6614986 | from django.conf.urls import url
from django.contrib import admin
from . import views
# NOTE: these url() regexes are not end-anchored ($), so ordering is
# routing-critical — a shorter prefix placed first shadows every longer
# pattern that starts with it.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^countries/(?P<country_id>[^/]+)/universities', views.UniversitiesViewCountry.as_view(), name='universities'),
    url(r'^countries/(?P<country_id>[^/]+)/cities', views.CitiesView.as_view(), name='cities'),
    url(r'^countries', views.CountriesView.as_view(), name='countries'),
    url(r'^country', views.CountryView.as_view(), name='country'),
    # BUG FIX: must precede r'^city' — r'^city' also matches "cities/..."
    # and previously made this route unreachable.
    url(r'^cities/(?P<city_id>[^/]+)/universities', views.CityUniversitiesView.as_view(), name='universities'),
    url(r'^city', views.CityView.as_view(), name='city'),
    url(r'^universities/(?P<university_id>[^/]+)/faculties', views.FacultyView.as_view(), name='faculties'),
    url(r'^universities/(?P<university_id>[^/]+)/courses', views.CourseByUniversityView.as_view(), name='courses'),
    url(r'^universities/(?P<university_id>[^/]+)/programs', views.ProgramUnivView.as_view(), name='programs'),
    url(r'^universities/(?P<university_id>[^/]+)', views.UniversitiesView.as_view(), name='universities'),
    url(r'^universities', views.UniversitiesView.as_view(), name='universities'),
    # BUG FIX: moved below the 'universities' patterns — r'^university'
    # matched "universities/..." first and shadowed all of them.
    url(r'^university', views.UniversityView.as_view(), name='university'),
    url(r'^faculties/(?P<faculty_id>[^/]+)/courses', views.CourseByFacultyView.as_view(), name='courses'),
    url(r'^faculties/(?P<faculty_id>[^/]+)/programs', views.ProgramView.as_view(), name='programs'),
    url(r'^faculties/', views.FacultyViewAll.as_view(), name='faculties'),
    url(r'^programs/(?P<program_id>[^/]+)/courses', views.CourseByProgramView.as_view(), name='courses'),
    url(r'^courses/(?P<course_id>[^/]+)/comments', views.CommentsByCourseView.as_view(), name='comments'),
    url(r'^courses/(?P<course_id>[^/]+)', views.CourseView.as_view(), name='courses'),
    url(r'^courses', views.CourseView.as_view(), name='courses'),
    url(r'^simplecourses', views.CoursesAllSimpleView.as_view(), name='courses'),
    url(r'^comments/(?P<comment_id>[^/]+)', views.CommentsView.as_view(), name='comments'),
    url(r'^comments', views.CommentsView.as_view(), name='comments'),
    url(r'^explorer', views.explorer, name='explorer'),
    url(r'^comparator_text_input', views.comparator_text_input, name='comparator'),
    url(r'^comparator', views.comparator, name='comparator'),
    url(r'^users/self', views.UserViewSelf.as_view(), name='user'),
    url(r'^users/courses', views.UserViewCourse.as_view(), name='user'),
    url(r'^users/check_email', views.check_email, name='check_email'),
    url(r'^users/check_username', views.check_username, name='check_username'),
    url(r'^users/check/(?P<course_id>[^/]+)', views.UserCheckCourseView.as_view(), name='user'),
    url(r'^users/check', views.UserCheckView.as_view(), name='user'),
    url(r'^users/(?P<user_id>[^/]+)', views.UserView.as_view(), name='user'),
    url(r'^users/', views.UserView.as_view(), name='user'),
    url(r'^event_log/', views.EventLogView.as_view(), name='event_log'),
]
# /universitiesall != /universities
| from django.conf.urls import url
from django.contrib import admin
from . import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^countries/(?P<country_id>[^/]+)/universities', views.UniversitiesViewCountry.as_view(), name='universities'),
url(r'^countries/(?P<country_id>[^/]+)/cities', views.CitiesView.as_view(), name='cities'),
url(r'^countries', views.CountriesView.as_view(), name='countries'),
url(r'^country', views.CountryView.as_view(), name='country'),
url(r'^city', views.CityView.as_view(), name='city'),
url(r'^cities/(?P<city_id>[^/]+)/universities', views.CityUniversitiesView.as_view(), name='universities'),
url(r'^university', views.UniversityView.as_view(), name='university'),
url(r'^universities/(?P<university_id>[^/]+)/faculties', views.FacultyView.as_view(), name='faculties'),
url(r'^universities/(?P<university_id>[^/]+)/courses', views.CourseByUniversityView.as_view(), name='courses'),
url(r'^universities/(?P<university_id>[^/]+)/programs', views.ProgramUnivView.as_view(), name='programs'),
url(r'^universities/(?P<university_id>[^/]+)', views.UniversitiesView.as_view(), name='universities'),
url(r'^universities', views.UniversitiesView.as_view(), name='universities'),
url(r'^faculties/(?P<faculty_id>[^/]+)/courses', views.CourseByFacultyView.as_view(), name='courses'),
url(r'^faculties/(?P<faculty_id>[^/]+)/programs', views.ProgramView.as_view(), name='programs'),
url(r'^faculties/', views.FacultyViewAll.as_view(), name='faculties'),
url(r'^programs/(?P<program_id>[^/]+)/courses', views.CourseByProgramView.as_view(), name='courses'),
url(r'^courses/(?P<course_id>[^/]+)/comments', views.CommentsByCourseView.as_view(), name='comments'),
url(r'^courses/(?P<course_id>[^/]+)', views.CourseView.as_view(), name='courses'),
url(r'^courses', views.CourseView.as_view(), name='courses'),
url(r'^simplecourses', views.CoursesAllSimpleView.as_view(), name='courses'),
url(r'^comments/(?P<comment_id>[^/]+)', views.CommentsView.as_view(), name='comments'),
url(r'^comments', views.CommentsView.as_view(), name='comments'),
url(r'^explorer', views.explorer, name='explorer'),
url(r'^comparator_text_input', views.comparator_text_input, name='comparator'),
url(r'^comparator', views.comparator, name='comparator'),
url(r'^users/self', views.UserViewSelf.as_view(), name='user'),
url(r'^users/courses', views.UserViewCourse.as_view(), name='user'),
url(r'^users/check_email', views.check_email, name='check_email'),
url(r'^users/check_username', views.check_username, name='check_username'),
url(r'^users/check/(?P<course_id>[^/]+)', views.UserCheckCourseView.as_view(), name='user'),
url(r'^users/check', views.UserCheckView.as_view(), name='user'),
url(r'^users/(?P<user_id>[^/]+)', views.UserView.as_view(), name='user'),
url(r'^users/', views.UserView.as_view(), name='user'),
url(r'^event_log/', views.EventLogView.as_view(), name='event_log'),
]
# /universitiesall != /universities
| en | 0.577419 | # /universitiesall != /universities | 1.910623 | 2 |
tests/inspector/create_regression.py | Giskard-AI/ai-inspector | 12 | 6614987 | import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.linear_model import ElasticNet
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, RobustScaler
# Train/test splits of the house-prices sample data used by this fixture.
# NOTE(review): df_test is loaded but unused here — presumably consumed by
# the test that imports this module; confirm before removing.
df_train = pd.read_csv("./sample_data/regression/house-prices/train.csv")
df_test = pd.read_csv("./sample_data/regression/house-prices/test.csv")
# Feature subset fed to the demo model, split by preprocessing kind.
numeric_columns = [
    "LotFrontage",
    "LotArea",
    "TotalBsmtSF",
    "1stFlrSF",
    "2ndFlrSF",
    "FullBath",
    "HalfBath",
    "BedroomAbvGr",
    "KitchenAbvGr",
    "TotRmsAbvGrd",
    "Fireplaces",
    "GarageYrBlt",
    "GarageCars",
]
categorical_columns = [
    "MSSubClass",
    "MSZoning",
    "Street",
    "Alley",
    "LotShape",
    "LandContour",
    "Utilities",
    "LotConfig",
    "LandSlope",
    "Neighborhood",
    "FireplaceQu",
]
selected_columns = numeric_columns + categorical_columns
# Regression target: the sale price column of the training split.
target = df_train["SalePrice"]
# Categorical features: fill missing values with "unknown", then one-hot
# encode (unseen categories at predict time are ignored, not errors).
categorical_encoder = Pipeline(
    steps=[
        ("imputation", SimpleImputer(strategy="constant", fill_value="unknown")),
        ("one_hot", OneHotEncoder(handle_unknown="ignore")),
    ]
)
# Numeric features: median imputation followed by outlier-robust scaling.
numeric_encoder = Pipeline(
    steps=[("imputation", SimpleImputer(strategy="median")), ("rescaling", RobustScaler())]
)
preprocessor = ColumnTransformer(
    transformers=[
        ("category", categorical_encoder, categorical_columns),
        ("numeric", numeric_encoder, numeric_columns),
    ]
)
# Full model: preprocessing + ElasticNet, fitted at import time.
prediction_pipeline = Pipeline(steps=[("preprocessor", preprocessor), ("classifier", ElasticNet())])
prediction_pipeline.fit(df_train[selected_columns], target)
| import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.linear_model import ElasticNet
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, RobustScaler
df_train = pd.read_csv("./sample_data/regression/house-prices/train.csv")
df_test = pd.read_csv("./sample_data/regression/house-prices/test.csv")
numeric_columns = [
"LotFrontage",
"LotArea",
"TotalBsmtSF",
"1stFlrSF",
"2ndFlrSF",
"FullBath",
"HalfBath",
"BedroomAbvGr",
"KitchenAbvGr",
"TotRmsAbvGrd",
"Fireplaces",
"GarageYrBlt",
"GarageCars",
]
categorical_columns = [
"MSSubClass",
"MSZoning",
"Street",
"Alley",
"LotShape",
"LandContour",
"Utilities",
"LotConfig",
"LandSlope",
"Neighborhood",
"FireplaceQu",
]
selected_columns = numeric_columns + categorical_columns
target = df_train["SalePrice"]
categorical_encoder = Pipeline(
steps=[
("imputation", SimpleImputer(strategy="constant", fill_value="unknown")),
("one_hot", OneHotEncoder(handle_unknown="ignore")),
]
)
numeric_encoder = Pipeline(
steps=[("imputation", SimpleImputer(strategy="median")), ("rescaling", RobustScaler())]
)
preprocessor = ColumnTransformer(
transformers=[
("category", categorical_encoder, categorical_columns),
("numeric", numeric_encoder, numeric_columns),
]
)
prediction_pipeline = Pipeline(steps=[("preprocessor", preprocessor), ("classifier", ElasticNet())])
prediction_pipeline.fit(df_train[selected_columns], target)
| none | 1 | 2.859393 | 3 | |
game.py | allannad/rockpaperscissors | 0 | 6614988 | <filename>game.py
#Rock Paper Scissors — play one round against the computer.
print("Rock, Paper, Scissors, Shoot!")
import random
import sys
print("---------------")
options = ['Rock', 'Paper', 'Scissors']
# Normalize the input's case ("rock" / "ROCK" -> "Rock") so it compares
# against the canonical option names above.
user_choice = input("Rock, Paper, or Scissors?").title()
if user_choice in options:
    print("Your selection is: ", user_choice)
else:
    print("Your input is not valid. Please select Rock, Paper or Scissors.")
    sys.exit()
comp_choice = random.choice(options)
print("Computer selection is: ", comp_choice)
#determine who wins
if user_choice == comp_choice:
    print("It's a tie!")
elif user_choice == 'Scissors' and comp_choice == 'Rock':
    print("You lose!")
elif user_choice == 'Rock' and comp_choice == 'Paper':
    print("You lose!")
elif user_choice == 'Paper' and comp_choice == 'Scissors':
    print("You lose!")
else:
    # Every remaining combination is a player win: Paper/Rock,
    # Rock/Scissors, Scissors/Paper. BUG FIX: the old final branch held a
    # dangling comparison expression with no effect; it was removed and
    # the three identical "You win!" branches collapsed into this one.
    print("You win!")
| <filename>game.py
#Rock Paper Scissors
print("Rock, Paper, Scissors, Shoot!")
import random
import sys
print("---------------")
options = ['Rock', 'Paper', 'Scissors']
#if user inputs word in wrong case, convert to lower
user_choice = input("Rock, Paper, or Scissors?").title()
if user_choice in options:
print("Your selection is: ", user_choice)
else:
print("Your input is not valid. Please select Rock, Paper or Scissors.")
sys.exit()
comp_choice = random.choice(options)
print("Computer selection is: ", comp_choice)
#determine who wins
if user_choice == comp_choice:
print("It's a tie!")
elif user_choice == 'Scissors' and comp_choice == 'Rock':
print("You lose!")
elif user_choice =='Rock' and comp_choice=='Paper':
print("You lose!")
elif user_choice == 'Paper' and comp_choice == 'Scissors':
print("You lose!")
elif user_choice == 'Paper' and comp_choice == 'Rock':
print("You win!")
elif user_choice == 'Rock' and comp_choice == 'Scissors':
print("You win!")
else:
user_choice == 'Scissors' and comp_choice == 'Paper'
print("You win!")
| en | 0.797466 | #Rock Paper Scissors #if user inputs word in wrong case, convert to lower #determine who wins | 4.214995 | 4 |
zhaquirks/keenhome/__init__.py | WolfRevo/zha-device-handlers | 213 | 6614989 | <reponame>WolfRevo/zha-device-handlers<gh_stars>100-1000
"""Module for keen home vents and sensors."""
| """Module for keen home vents and sensors.""" | en | 0.526756 | Module for keen home vents and sensors. | 0.985899 | 1 |
votrfront/front.py | LordLoles/votr | 8 | 6614990 |
import base64
import os
import json
import time
import traceback
from werkzeug.routing import Rule
from werkzeug.wrappers import Response
from . import sessions
# HTML shell served for every Votr page. The %(...)s slots (css, analytics,
# nonce, init_json, scripts) are filled via %-formatting in app_response().
template = '''
<!DOCTYPE html>
<html lang="sk">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Votr</title>
<link rel="stylesheet" type="text/css" href="%(css)s">
<link href="https://fonts.googleapis.com/css?family=Open+Sans:300,400,700&subset=latin-ext" rel="stylesheet">
<meta name="description" content="Votr ponúka študentom jednoduchší a \
pohodlnejší spôsob, ako robiť najčastejšie činnosti zo systému AIS. Zapíšte \
sa na skúšky, prezrite si vaše hodnotenia a skontrolujte si počet kreditov \
bez zbytočného klikania.">
%(analytics)s
</head>
<body>
<span id="votr"></span>
<noscript>
<div class="login-page">
<div class="login-content">
<h1>Votr</h1>
<p><strong>Votr</strong> ponúka študentom jednoduchší a pohodlnejší spôsob, ako
robiť najčastejšie činnosti zo systému AIS. Zapíšte sa na skúšky, prezrite si
vaše hodnotenia a skontrolujte si počet kreditov bez zbytočného klikania.</p>
<p><strong>Na používanie Votr musí byť zapnutý JavaScript.</strong></p>
</div>
</div>
</noscript>
<script nonce="%(nonce)s">Votr = %(init_json)s</script>
%(scripts)s
</body>
</html>
'''.lstrip()
# Google Analytics loader snippet; only emitted when settings.ua_code is
# set, with the per-response CSP nonce interpolated by app_response().
analytics_template = '''
<script nonce="%(nonce)s">
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','//www.google-analytics.com/analytics.js','ga');
ga('create', '%(ua_code)s', 'auto');
</script>
'''.strip()
# Absolute path of the bundled static assets, next to this module.
static_path = os.path.join(os.path.dirname(__file__), 'static/')
def static_url(filename):
    """Return a cache-busting relative URL for a static asset.

    The file's mtime (whole seconds) is appended as a ?v= query string,
    so browsers refetch the asset whenever it is rebuilt.
    """
    version = int(os.path.getmtime(static_path + filename))
    return f'static/{filename}?v={version}'
def app_response(request, **my_data):
    """Render the single-page-app HTML shell with `my_data` as Votr.settings.

    Waits for the webpack build status file, picks dev or prod bundles,
    and attaches cache and CSP-report headers. Returns a 500 Response if
    the static build is missing, still busy, or failed.
    """
    url_root = request.url_root
    instance_name = request.app.settings.instance_name
    my_data['url_root'] = url_root
    my_data['instance_name'] = instance_name
    my_data['anketa_cookie_name'] = request.app.settings.anketa_cookie_name
    my_data['anketa_cookie_hide_date'] = request.app.settings.anketa_cookie_hide_date
    # No csrf_token means the login page: expose the server list to it.
    if 'csrf_token' not in my_data:
        my_data['servers'] = request.app.settings.servers
    # Poll the webpack status file for up to ~6 seconds while it's "busy";
    # the for/else runs only if we never break out (still busy).
    for i in range(60):
        try:
            with open(static_path + 'status') as f:
                status = f.read().strip()
        except FileNotFoundError:
            return Response('Missing static files.', status=500)
        if status != 'busy':
            break
        time.sleep(0.1)
    else:
        return Response('Timed out waiting for webpack.', status=500)
    # The votr_debug cookie selects dev bundles when both builds exist.
    debug = request.cookies.get('votr_debug')
    if status == 'failed':
        return Response('Webpack build failed.', status=500)
    elif status == 'ok_dev' or (status == 'ok_both' and debug):
        scripts = ['prologue.dev.js', 'votr.dev.js', 'vendors_votr.dev.js']
    elif status == 'ok_prod' or (status == 'ok_both' and not debug):
        scripts = ['prologue.min.js', 'votr.min.js', 'vendors_votr.min.js']
    else:
        return Response('Unexpected webpack status.', status=500)
    # Fresh random CSP nonce per response, shared by all inline scripts.
    nonce = base64.b64encode(os.urandom(18)).decode('ascii')
    content = template % dict(
        nonce=nonce,
        # Escape '</' so the JSON can't close the surrounding <script> tag.
        init_json=json.dumps({ 'settings': my_data }).replace('</', '<\\/'),
        css=static_url('style.css'),
        scripts='\n'.join(
            '<script nonce="{}" src="{}"></script>'.format(
                nonce, static_url(script))
            for script in scripts),
        analytics=(
            '' if not request.app.settings.ua_code else
            analytics_template %
            dict(nonce=nonce, ua_code=request.app.settings.ua_code)),
    )
    return Response(content,
        content_type='text/html; charset=UTF-8',
        headers={
            # no-store == force refresh even after pressing the back button.
            # http://blog.55minutes.com/2011/10/how-to-defeat-the-browser-back-button-cache/
            'Cache-Control': 'no-cache, max-age=0, must-revalidate, no-store',
            # based on https://csp.withgoogle.com/docs/strict-csp.html
            # object-src 'self' - they say 'none' may block Chrome's PDF reader.
            # TODO: Revisit object-src if http://crbug.com/271452 gets fixed.
            # TODO: Remove "-Report-Only" if results look good.
            'Content-Security-Policy-Report-Only':
                "object-src 'self'; " +
                "script-src 'nonce-%s' 'strict-dynamic' " % nonce +
                "'unsafe-inline' https: http: 'report-sample'; " +
                "base-uri 'none'; " +
                "report-uri %sreport?type=csp; " % request.url_root +
                "report-to csp_%s" % instance_name,
            'Report-To': json.dumps({
                'group': 'csp_%s' % instance_name,
                'max_age': 24 * 60 * 60,
                'endpoints': [{ 'url': '%sreport?type=csp-rt' % url_root }]
            }),
        })
def front(request):
    """Main entry point: serve either the login page or the app shell.

    Verifies the user's AIS session inside a logged transaction; a
    missing cookie shows the login form, an unreadable session shows it
    with an "invalid session" flag, and a failed connection check shows
    the error alongside the app.
    """
    csrf_token = None
    connection_error = None
    # If the user has no session cookie, just show the login form.
    if not sessions.get_session_cookie(request):
        return app_response(request)
    try:
        with sessions.logged_transaction(request) as session:
            log = session['client'].context.log
            try:
                log('front', 'Front session check started',
                    request.full_path)
                csrf_token = session['csrf_token']
                session['client'].check_connection()
            except Exception as e:
                # Log the failure inside the transaction, then re-raise so
                # the outer handler records it as a connection error.
                log('front',
                    'Front session check failed with {}'.format(
                        type(e).__name__),
                    traceback.format_exc())
                raise
            log('front', 'Front session check finished')
    except Exception:
        connection_error = traceback.format_exc()
    # If we can't open the session at all, show the login form, but complain.
    if not csrf_token:
        return app_response(request, invalid_session=True)
    # If the session is real but check_connection() failed, complain.
    if connection_error:
        return app_response(request,
            csrf_token=csrf_token, error=connection_error)
    # Otherwise, everything works and we can open the app.
    return app_response(request, csrf_token=csrf_token)
def die(request):
    """Endpoint that always raises, mounted at /500.

    Visiting it triggers an unhandled exception on purpose.
    """
    # allows easy access to debugger on /500 if it's enabled.
    raise Exception()
def get_routes():
    """Yield the werkzeug URL rules this module serves."""
    yield Rule('/', methods=['GET'], endpoint=front)
    yield Rule('/500', endpoint=die)
|
import base64
import os
import json
import time
import traceback
from werkzeug.routing import Rule
from werkzeug.wrappers import Response
from . import sessions
template = '''
<!DOCTYPE html>
<html lang="sk">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Votr</title>
<link rel="stylesheet" type="text/css" href="%(css)s">
<link href="https://fonts.googleapis.com/css?family=Open+Sans:300,400,700&subset=latin-ext" rel="stylesheet">
<meta name="description" content="Votr ponúka študentom jednoduchší a \
pohodlnejší spôsob, ako robiť najčastejšie činnosti zo systému AIS. Zapíšte \
sa na skúšky, prezrite si vaše hodnotenia a skontrolujte si počet kreditov \
bez zbytočného klikania.">
%(analytics)s
</head>
<body>
<span id="votr"></span>
<noscript>
<div class="login-page">
<div class="login-content">
<h1>Votr</h1>
<p><strong>Votr</strong> ponúka študentom jednoduchší a pohodlnejší spôsob, ako
robiť najčastejšie činnosti zo systému AIS. Zapíšte sa na skúšky, prezrite si
vaše hodnotenia a skontrolujte si počet kreditov bez zbytočného klikania.</p>
<p><strong>Na používanie Votr musí byť zapnutý JavaScript.</strong></p>
</div>
</div>
</noscript>
<script nonce="%(nonce)s">Votr = %(init_json)s</script>
%(scripts)s
</body>
</html>
'''.lstrip()
analytics_template = '''
<script nonce="%(nonce)s">
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','//www.google-analytics.com/analytics.js','ga');
ga('create', '%(ua_code)s', 'auto');
</script>
'''.strip()
static_path = os.path.join(os.path.dirname(__file__), 'static/')
def static_url(filename):
mtime = int(os.path.getmtime(static_path + filename))
return 'static/{}?v={}'.format(filename, mtime)
def app_response(request, **my_data):
url_root = request.url_root
instance_name = request.app.settings.instance_name
my_data['url_root'] = url_root
my_data['instance_name'] = instance_name
my_data['anketa_cookie_name'] = request.app.settings.anketa_cookie_name
my_data['anketa_cookie_hide_date'] = request.app.settings.anketa_cookie_hide_date
if 'csrf_token' not in my_data:
my_data['servers'] = request.app.settings.servers
for i in range(60):
try:
with open(static_path + 'status') as f:
status = f.read().strip()
except FileNotFoundError:
return Response('Missing static files.', status=500)
if status != 'busy':
break
time.sleep(0.1)
else:
return Response('Timed out waiting for webpack.', status=500)
debug = request.cookies.get('votr_debug')
if status == 'failed':
return Response('Webpack build failed.', status=500)
elif status == 'ok_dev' or (status == 'ok_both' and debug):
scripts = ['prologue.dev.js', 'votr.dev.js', 'vendors_votr.dev.js']
elif status == 'ok_prod' or (status == 'ok_both' and not debug):
scripts = ['prologue.min.js', 'votr.min.js', 'vendors_votr.min.js']
else:
return Response('Unexpected webpack status.', status=500)
nonce = base64.b64encode(os.urandom(18)).decode('ascii')
content = template % dict(
nonce=nonce,
init_json=json.dumps({ 'settings': my_data }).replace('</', '<\\/'),
css=static_url('style.css'),
scripts='\n'.join(
'<script nonce="{}" src="{}"></script>'.format(
nonce, static_url(script))
for script in scripts),
analytics=(
'' if not request.app.settings.ua_code else
analytics_template %
dict(nonce=nonce, ua_code=request.app.settings.ua_code)),
)
return Response(content,
content_type='text/html; charset=UTF-8',
headers={
# no-store == force refresh even after pressing the back button.
# http://blog.55minutes.com/2011/10/how-to-defeat-the-browser-back-button-cache/
'Cache-Control': 'no-cache, max-age=0, must-revalidate, no-store',
# based on https://csp.withgoogle.com/docs/strict-csp.html
# object-src 'self' - they say 'none' may block Chrome's PDF reader.
# TODO: Revisit object-src if http://crbug.com/271452 gets fixed.
# TODO: Remove "-Report-Only" if results look good.
'Content-Security-Policy-Report-Only':
"object-src 'self'; " +
"script-src 'nonce-%s' 'strict-dynamic' " % nonce +
"'unsafe-inline' https: http: 'report-sample'; " +
"base-uri 'none'; " +
"report-uri %sreport?type=csp; " % request.url_root +
"report-to csp_%s" % instance_name,
'Report-To': json.dumps({
'group': 'csp_%s' % instance_name,
'max_age': 24 * 60 * 60,
'endpoints': [{ 'url': '%sreport?type=csp-rt' % url_root }]
}),
})
def front(request):
csrf_token = None
connection_error = None
# If the user has no session cookie, just show the login form.
if not sessions.get_session_cookie(request):
return app_response(request)
try:
with sessions.logged_transaction(request) as session:
log = session['client'].context.log
try:
log('front', 'Front session check started',
request.full_path)
csrf_token = session['csrf_token']
session['client'].check_connection()
except Exception as e:
log('front',
'Front session check failed with {}'.format(
type(e).__name__),
traceback.format_exc())
raise
log('front', 'Front session check finished')
except Exception:
connection_error = traceback.format_exc()
# If we can't open the session at all, show the login form, but complain.
if not csrf_token:
return app_response(request, invalid_session=True)
# If the session is real but check_connection() failed, complain.
if connection_error:
return app_response(request,
csrf_token=csrf_token, error=connection_error)
# Otherwise, everything works and we can open the app.
return app_response(request, csrf_token=csrf_token)
def die(request):
# allows easy access to debugger on /500 if it's enabled.
raise Exception()
def get_routes():
yield Rule('/', methods=['GET'], endpoint=front)
yield Rule('/500', endpoint=die)
| en | 0.1749 | <!DOCTYPE html> <html lang="sk"> <head> <meta charset="UTF-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta name="viewport" content="width=device-width, initial-scale=1"> <title>Votr</title> <link rel="stylesheet" type="text/css" href="%(css)s"> <link href="https://fonts.googleapis.com/css?family=Open+Sans:300,400,700&subset=latin-ext" rel="stylesheet"> <meta name="description" content="Votr ponúka študentom jednoduchší a \ pohodlnejší spôsob, ako robiť najčastejšie činnosti zo systému AIS. Zapíšte \ sa na skúšky, prezrite si vaše hodnotenia a skontrolujte si počet kreditov \ bez zbytočného klikania."> %(analytics)s </head> <body> <span id="votr"></span> <noscript> <div class="login-page"> <div class="login-content"> <h1>Votr</h1> <p><strong>Votr</strong> ponúka študentom jednoduchší a pohodlnejší spôsob, ako robiť najčastejšie činnosti zo systému AIS. Zapíšte sa na skúšky, prezrite si vaše hodnotenia a skontrolujte si počet kreditov bez zbytočného klikania.</p> <p><strong>Na používanie Votr musí byť zapnutý JavaScript.</strong></p> </div> </div> </noscript> <script nonce="%(nonce)s">Votr = %(init_json)s</script> %(scripts)s </body> </html> <script nonce="%(nonce)s"> (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){ (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o), m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m) })(window,document,'script','//www.google-analytics.com/analytics.js','ga'); ga('create', '%(ua_code)s', 'auto'); </script> # no-store == force refresh even after pressing the back button. # http://blog.55minutes.com/2011/10/how-to-defeat-the-browser-back-button-cache/ # based on https://csp.withgoogle.com/docs/strict-csp.html # object-src 'self' - they say 'none' may block Chrome's PDF reader. # TODO: Revisit object-src if http://crbug.com/271452 gets fixed. # TODO: Remove "-Report-Only" if results look good. 
# If the user has no session cookie, just show the login form. # If we can't open the session at all, show the login form, but complain. # If the session is real but check_connection() failed, complain. # Otherwise, everything works and we can open the app. # allows easy access to debugger on /500 if it's enabled. | 2.11887 | 2 |
cloud99/main.py | cisco-oss-eng/Cloud99 | 28 | 6614991 | # Copyright 2016 Cisco Systems, Inc.
# All Rights R6served.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import pykka
import warnings
import yaml
from cloud99 import ACTOR_CLASSES
from cloud99.logging_setup import LOGGER_NAME
import logging
LOGGER = logging.getLogger(LOGGER_NAME)
warnings.filterwarnings("ignore")
class Cloud99(pykka.ThreadingActor):
def __init__(self, config_file):
super(Cloud99, self).__init__()
with open(config_file) as config_yam:
self.config = dict(yaml.safe_load(config_yam))
self.monitors_to_start = len(self.config.get("monitors"))
self.monitors = []
self.loaders_to_start = len(self.config.get("loaders"))
self.loaders_to_wait = 0
self.loaders = []
self.disruptors = []
self.disruptors_to_wait = len(self.config.get("disruptors"))
self.clean_run = False
def start_tests(self):
self.create_all("monitors")
self.create_all("loaders")
self.create_all("disruptors")
self.actors_perform_command("monitors", "start")
def on_receive(self, message):
LOGGER.debug("On receive: {}".format(message))
msg = message.get("msg")
if msg == "monitor_started":
self.monitors_to_start -= 1
LOGGER.debug("{count} monitor(s) left to start".format(
count=self.monitors_to_start))
if self.monitors_to_start <= 0:
self.actors_perform_command("loaders", "start",
{"times": 10000}) # TODO
elif msg == "loader_started":
# TODO better check
self.loaders_to_start -= 1
self.loaders_to_wait += 1
LOGGER.debug("{count} Loader(s) left to start".format(
count=self.loaders_to_start))
if self.loaders_to_start <= 0 and not self.clean_run:
LOGGER.debug("Starting all disruptors.")
self.actors_perform_command("disruptors", "start")
elif msg == "loader_finished":
self.loaders_to_wait -= 1
LOGGER.debug("Loader finished. Loaders to wait {count}".format(
count=self.loaders_to_wait
))
if self.loaders_to_wait <= 0:
if not self.clean_run:
self.clean_run = True
self.loaders_to_start = len(self.config.get("loaders"))
self.loaders_to_wait = 0
# This is tmp. TODO (dratushnyy) better way for params
times = message.get("times") or 10
LOGGER.debug("Starting clean run: no disruption. "
"Times is {times}".format(times=times))
self.actors_perform_command("loaders", "start",
{"times": times})
else:
self.stop()
elif msg == "disruption_finished":
self.disruptors_to_wait -= 1
LOGGER.debug("Disruptors to wait {0}".format(
self.disruptors_to_wait))
if self.disruptors_to_wait <= 0:
LOGGER.debug("Disruption is finished")
self.actors_perform_command("loaders", "stop_task")
def create_all(self, actor_type):
LOGGER.debug("Creating all {actors}.".format(actors=actor_type))
openrc = copy.deepcopy(self.config["openrc"])
inventory = copy.deepcopy(self.config["inventory"])
for actor in self.config[actor_type]:
# TODO (dratushnyy) support actor without params (list)
for actor_class, actor_params in actor.items():
try:
actor_class = ACTOR_CLASSES.get(actor_class)
actor_ref = actor_class.start(self.actor_ref, openrc,
inventory, **actor_params)
actor_collection = getattr(self, actor_type)
actor_collection.append(actor_ref)
msg = "Actor started, actor class {actor_class}, " \
"actor params {params}" \
.format(class_name=actor_class,
actor_class=actor_class, params=actor_params)
LOGGER.debug(msg)
except Exception as e:
LOGGER.exception(e)
self.stop()
LOGGER.debug("All {actors} are created.".format(actors=actor_type))
def stop(self):
self.stop_all("disruptors")
self.stop_all("loaders")
self.stop_all("monitors")
super(Cloud99, self).stop()
def stop_all(self, actor_type):
actor_collection = getattr(self, actor_type)
LOGGER.debug("Stopping all {actors}.".format(actors=actor_type))
for actor in actor_collection:
if actor.is_alive():
LOGGER.debug("Stopping {actor}".format(actor=actor))
actor.ask({"msg": "stop"})
LOGGER.debug("Stopped {actor}".format(actor=actor))
for actor in actor_collection:
if actor.is_alive():
LOGGER.error("Actor {actor} is still alive"
.format(actor=actor))
LOGGER.debug("All {actors} stopped.".format(actors=actor_type))
setattr(self, actor_type, [])
def alive(self, actor_type):
actor_collection = getattr(self, actor_type)
alive = False
for actor_ref in actor_collection:
alive = actor_ref.is_alive()
return alive
def actors_perform_command(self, actor_type, command, params=None):
LOGGER.debug("Sending {command} to all {actors}".format(
command=command, actors=actor_type))
actor_collection = getattr(self, actor_type)
for actor_ref in actor_collection:
actor_ref.tell({"msg": command, "params": params})
LOGGER.debug("Sent {command} to {actor}".format(
command=command, actor=actor_ref))
def actors_perform_command_seq(self, actor_type, command):
actor_collection = getattr(self, actor_type)
for actor_ref in actor_collection:
if actor_ref.is_alive():
actor_ref.ask({"msg": command})
| # Copyright 2016 Cisco Systems, Inc.
# All Rights R6served.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import pykka
import warnings
import yaml
from cloud99 import ACTOR_CLASSES
from cloud99.logging_setup import LOGGER_NAME
import logging
LOGGER = logging.getLogger(LOGGER_NAME)
warnings.filterwarnings("ignore")
class Cloud99(pykka.ThreadingActor):
def __init__(self, config_file):
super(Cloud99, self).__init__()
with open(config_file) as config_yam:
self.config = dict(yaml.safe_load(config_yam))
self.monitors_to_start = len(self.config.get("monitors"))
self.monitors = []
self.loaders_to_start = len(self.config.get("loaders"))
self.loaders_to_wait = 0
self.loaders = []
self.disruptors = []
self.disruptors_to_wait = len(self.config.get("disruptors"))
self.clean_run = False
def start_tests(self):
self.create_all("monitors")
self.create_all("loaders")
self.create_all("disruptors")
self.actors_perform_command("monitors", "start")
def on_receive(self, message):
LOGGER.debug("On receive: {}".format(message))
msg = message.get("msg")
if msg == "monitor_started":
self.monitors_to_start -= 1
LOGGER.debug("{count} monitor(s) left to start".format(
count=self.monitors_to_start))
if self.monitors_to_start <= 0:
self.actors_perform_command("loaders", "start",
{"times": 10000}) # TODO
elif msg == "loader_started":
# TODO better check
self.loaders_to_start -= 1
self.loaders_to_wait += 1
LOGGER.debug("{count} Loader(s) left to start".format(
count=self.loaders_to_start))
if self.loaders_to_start <= 0 and not self.clean_run:
LOGGER.debug("Starting all disruptors.")
self.actors_perform_command("disruptors", "start")
elif msg == "loader_finished":
self.loaders_to_wait -= 1
LOGGER.debug("Loader finished. Loaders to wait {count}".format(
count=self.loaders_to_wait
))
if self.loaders_to_wait <= 0:
if not self.clean_run:
self.clean_run = True
self.loaders_to_start = len(self.config.get("loaders"))
self.loaders_to_wait = 0
# This is tmp. TODO (dratushnyy) better way for params
times = message.get("times") or 10
LOGGER.debug("Starting clean run: no disruption. "
"Times is {times}".format(times=times))
self.actors_perform_command("loaders", "start",
{"times": times})
else:
self.stop()
elif msg == "disruption_finished":
self.disruptors_to_wait -= 1
LOGGER.debug("Disruptors to wait {0}".format(
self.disruptors_to_wait))
if self.disruptors_to_wait <= 0:
LOGGER.debug("Disruption is finished")
self.actors_perform_command("loaders", "stop_task")
def create_all(self, actor_type):
LOGGER.debug("Creating all {actors}.".format(actors=actor_type))
openrc = copy.deepcopy(self.config["openrc"])
inventory = copy.deepcopy(self.config["inventory"])
for actor in self.config[actor_type]:
# TODO (dratushnyy) support actor without params (list)
for actor_class, actor_params in actor.items():
try:
actor_class = ACTOR_CLASSES.get(actor_class)
actor_ref = actor_class.start(self.actor_ref, openrc,
inventory, **actor_params)
actor_collection = getattr(self, actor_type)
actor_collection.append(actor_ref)
msg = "Actor started, actor class {actor_class}, " \
"actor params {params}" \
.format(class_name=actor_class,
actor_class=actor_class, params=actor_params)
LOGGER.debug(msg)
except Exception as e:
LOGGER.exception(e)
self.stop()
LOGGER.debug("All {actors} are created.".format(actors=actor_type))
def stop(self):
self.stop_all("disruptors")
self.stop_all("loaders")
self.stop_all("monitors")
super(Cloud99, self).stop()
def stop_all(self, actor_type):
actor_collection = getattr(self, actor_type)
LOGGER.debug("Stopping all {actors}.".format(actors=actor_type))
for actor in actor_collection:
if actor.is_alive():
LOGGER.debug("Stopping {actor}".format(actor=actor))
actor.ask({"msg": "stop"})
LOGGER.debug("Stopped {actor}".format(actor=actor))
for actor in actor_collection:
if actor.is_alive():
LOGGER.error("Actor {actor} is still alive"
.format(actor=actor))
LOGGER.debug("All {actors} stopped.".format(actors=actor_type))
setattr(self, actor_type, [])
def alive(self, actor_type):
actor_collection = getattr(self, actor_type)
alive = False
for actor_ref in actor_collection:
alive = actor_ref.is_alive()
return alive
def actors_perform_command(self, actor_type, command, params=None):
LOGGER.debug("Sending {command} to all {actors}".format(
command=command, actors=actor_type))
actor_collection = getattr(self, actor_type)
for actor_ref in actor_collection:
actor_ref.tell({"msg": command, "params": params})
LOGGER.debug("Sent {command} to {actor}".format(
command=command, actor=actor_ref))
def actors_perform_command_seq(self, actor_type, command):
actor_collection = getattr(self, actor_type)
for actor_ref in actor_collection:
if actor_ref.is_alive():
actor_ref.ask({"msg": command})
| en | 0.825912 | # Copyright 2016 Cisco Systems, Inc. # All Rights R6served. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # TODO # TODO better check # This is tmp. TODO (dratushnyy) better way for params # TODO (dratushnyy) support actor without params (list) | 1.901963 | 2 |
plywood/plugins/_break.py | colinta/plywood | 1 | 6614992 | '''
Any plugin that wants to support the ``break`` and ``continue`` clauses can
import them from here and add ``try/catch`` blocks that catch ``BreakException``
and ``ContinueException`` and use ``break`` and ``continue`` in their python
code.
'''
from plywood.env import PlywoodEnv
from plywood.exceptions import BreakException, ContinueException
@PlywoodEnv.register_fn('break')
def _break():
raise BreakException()
@PlywoodEnv.register_fn('continue')
def _continue():
raise ContinueException()
| '''
Any plugin that wants to support the ``break`` and ``continue`` clauses can
import them from here and add ``try/catch`` blocks that catch ``BreakException``
and ``ContinueException`` and use ``break`` and ``continue`` in their python
code.
'''
from plywood.env import PlywoodEnv
from plywood.exceptions import BreakException, ContinueException
@PlywoodEnv.register_fn('break')
def _break():
raise BreakException()
@PlywoodEnv.register_fn('continue')
def _continue():
raise ContinueException()
| en | 0.905817 | Any plugin that wants to support the ``break`` and ``continue`` clauses can import them from here and add ``try/catch`` blocks that catch ``BreakException`` and ``ContinueException`` and use ``break`` and ``continue`` in their python code. | 1.828375 | 2 |
src/command_modules/azure-cli-sf/azure/cli/command_modules/sf/tests/test_sf_select.py | viananth/azure-cli | 0 | 6614993 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
import azure.cli.command_modules.sf.custom as sf_c
from azure.cli.core.util import CLIError
class SfSelectTests(unittest.TestCase):
def assert_cli_error(self, endpoint="http://derp", cert=None, key=None, pem=None, ca=None,
no_verify=False):
with self.assertRaises(CLIError):
sf_c.sf_select_verify(endpoint, cert, key, pem, ca, no_verify)
def select_nohttp_raises_cli_error_test(self):
self.assert_cli_error(endpoint="xrp://derp")
def multiple_auth_raises_cli_error_test(self):
self.assert_cli_error(cert="path_a", key="path_b", pem="path_c")
def ca_no_cert_raises_cli_error_test(self):
self.assert_cli_error(ca="path_a")
def no_verify_and_no_cert_raises_cli_error_test(self):
self.assert_cli_error(no_verify=True)
def missing_key_raises_cli_error_test(self):
self.assert_cli_error(cert="path_a")
| # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
import azure.cli.command_modules.sf.custom as sf_c
from azure.cli.core.util import CLIError
class SfSelectTests(unittest.TestCase):
def assert_cli_error(self, endpoint="http://derp", cert=None, key=None, pem=None, ca=None,
no_verify=False):
with self.assertRaises(CLIError):
sf_c.sf_select_verify(endpoint, cert, key, pem, ca, no_verify)
def select_nohttp_raises_cli_error_test(self):
self.assert_cli_error(endpoint="xrp://derp")
def multiple_auth_raises_cli_error_test(self):
self.assert_cli_error(cert="path_a", key="path_b", pem="path_c")
def ca_no_cert_raises_cli_error_test(self):
self.assert_cli_error(ca="path_a")
def no_verify_and_no_cert_raises_cli_error_test(self):
self.assert_cli_error(no_verify=True)
def missing_key_raises_cli_error_test(self):
self.assert_cli_error(cert="path_a")
| en | 0.42147 | # -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- | 2.220491 | 2 |
speakeasy/convert.py | ayankashyap/speakeasy | 0 | 6614994 | <reponame>ayankashyap/speakeasy
from snowballstemmer import stemmer
from speakeasy.conversion_rules import (
UNIT_NUMBERS,
TENS_NUMBERS,
SCALE_NUMBERS,
CURRENCY_SYMBOLS,
QUANTIFIERS,
stem_dict,
)
UNIT_NUMBERS = stem_dict(UNIT_NUMBERS)
TENS_NUMBERS = stem_dict(TENS_NUMBERS)
SCALE_NUMBERS = stem_dict(SCALE_NUMBERS)
CURRENCY_SYMBOLS = stem_dict(CURRENCY_SYMBOLS)
QUANTIFIERS = stem_dict(QUANTIFIERS)
class SpeakEasyConvertor(object):
def __init__(self):
self.__stemmer = stemmer("english")
@staticmethod
def __get_family(wrd):
if wrd in dict(**UNIT_NUMBERS, **TENS_NUMBERS, **SCALE_NUMBERS).keys():
return 0
elif wrd in QUANTIFIERS.keys():
return 1
elif wrd in CURRENCY_SYMBOLS.keys():
return 2
elif len(wrd) == 1 and wrd.isalpha():
return 3
else:
return -1
@staticmethod
def __words_to_numbers(buffer):
reduced_buffer = []
n = len(buffer)
i = n - 1
while i >= 0:
if buffer[i][1] == "s":
cur = 1
for j in range(i, -1, -1):
cur = buffer[j][0] * cur
if buffer[j][1] != "s":
i -= i - j
break
reduced_buffer.append((cur, "s"))
else:
reduced_buffer.append(buffer[i])
i -= 1
reduced_buffer.reverse()
final_buffer = []
for i in range(len(reduced_buffer)):
cur = reduced_buffer[i]
if i > 0:
if final_buffer[0][1] == "s":
final_buffer[0] = (final_buffer[0][0] + cur[0], cur[1])
elif final_buffer[0][1] == "t":
if cur[1] == "t":
final_buffer[0] = (int(f"{final_buffer[0][0]}{cur[0]}"), cur[1])
else:
final_buffer[0] = (final_buffer[0][0] + cur[0], cur[1])
else:
final_buffer[0] = (int(f"{final_buffer[0][0]}{cur[0]}"), cur[1])
else:
final_buffer.append(cur)
return int(final_buffer[0][0])
def convert(self, text):
sentence = []
buffer = []
i = 0
text = text.split()
prev = None
while i < len(text):
wrd = self.__stemmer.stemWord(text[i].lower())
family = self.__get_family(wrd)
if family == 3 and prev not in (-1, 3) and i != 0:
family = -1
if family == -1:
if prev == 1:
if len(wrd) == 1:
sentence.append(QUANTIFIERS[buffer[0]] * text[i])
else:
sentence.extend([buffer[0][0], text[i]])
buffer.clear()
elif prev == 0:
if wrd != "and":
sentence.extend([self.__words_to_numbers(buffer), text[i]])
buffer.clear()
elif prev == 3:
sentence.append(buffer[0])
sentence.append(text[i])
buffer.clear()
else:
sentence.append(text[i])
elif family == 0:
if prev == 3:
sentence.append("".join(buffer))
buffer.clear()
if wrd in UNIT_NUMBERS:
n = UNIT_NUMBERS[wrd]
buffer.append((n, "u"))
elif wrd in TENS_NUMBERS:
n = TENS_NUMBERS[wrd]
buffer.append((n, "t"))
else:
n = SCALE_NUMBERS[wrd]
buffer.append((n, "s"))
elif family == 1:
if prev == 3:
sentence.append("".join(buffer))
buffer.clear()
if prev == 0:
sentence.append(self.__words_to_numbers(buffer))
elif prev == 1:
sentence.append(buffer[0])
buffer.clear()
buffer.append(wrd)
elif family == 2:
if prev == 3:
sentence.append("".join(buffer))
buffer.clear()
if prev == 0:
if wrd == "cent":
sentence.append(
f"{self.__words_to_numbers(buffer)}{CURRENCY_SYMBOLS[wrd]}"
)
else:
sentence.append(
f"{CURRENCY_SYMBOLS[wrd]}{self.__words_to_numbers(buffer)}"
)
buffer.clear()
else:
sentence.append(text[i])
elif family == 3:
buffer.append(text[i])
prev = family
i += 1
if len(buffer) > 0:
if prev == 0:
sentence.append(self.__words_to_numbers(buffer))
elif prev == 1:
sentence.append(text[i - 1])
elif prev == 3:
sentence.append("".join(buffer))
return " ".join(map(str, sentence))
| from snowballstemmer import stemmer
from speakeasy.conversion_rules import (
UNIT_NUMBERS,
TENS_NUMBERS,
SCALE_NUMBERS,
CURRENCY_SYMBOLS,
QUANTIFIERS,
stem_dict,
)
UNIT_NUMBERS = stem_dict(UNIT_NUMBERS)
TENS_NUMBERS = stem_dict(TENS_NUMBERS)
SCALE_NUMBERS = stem_dict(SCALE_NUMBERS)
CURRENCY_SYMBOLS = stem_dict(CURRENCY_SYMBOLS)
QUANTIFIERS = stem_dict(QUANTIFIERS)
class SpeakEasyConvertor(object):
def __init__(self):
self.__stemmer = stemmer("english")
@staticmethod
def __get_family(wrd):
if wrd in dict(**UNIT_NUMBERS, **TENS_NUMBERS, **SCALE_NUMBERS).keys():
return 0
elif wrd in QUANTIFIERS.keys():
return 1
elif wrd in CURRENCY_SYMBOLS.keys():
return 2
elif len(wrd) == 1 and wrd.isalpha():
return 3
else:
return -1
@staticmethod
def __words_to_numbers(buffer):
reduced_buffer = []
n = len(buffer)
i = n - 1
while i >= 0:
if buffer[i][1] == "s":
cur = 1
for j in range(i, -1, -1):
cur = buffer[j][0] * cur
if buffer[j][1] != "s":
i -= i - j
break
reduced_buffer.append((cur, "s"))
else:
reduced_buffer.append(buffer[i])
i -= 1
reduced_buffer.reverse()
final_buffer = []
for i in range(len(reduced_buffer)):
cur = reduced_buffer[i]
if i > 0:
if final_buffer[0][1] == "s":
final_buffer[0] = (final_buffer[0][0] + cur[0], cur[1])
elif final_buffer[0][1] == "t":
if cur[1] == "t":
final_buffer[0] = (int(f"{final_buffer[0][0]}{cur[0]}"), cur[1])
else:
final_buffer[0] = (final_buffer[0][0] + cur[0], cur[1])
else:
final_buffer[0] = (int(f"{final_buffer[0][0]}{cur[0]}"), cur[1])
else:
final_buffer.append(cur)
return int(final_buffer[0][0])
def convert(self, text):
sentence = []
buffer = []
i = 0
text = text.split()
prev = None
while i < len(text):
wrd = self.__stemmer.stemWord(text[i].lower())
family = self.__get_family(wrd)
if family == 3 and prev not in (-1, 3) and i != 0:
family = -1
if family == -1:
if prev == 1:
if len(wrd) == 1:
sentence.append(QUANTIFIERS[buffer[0]] * text[i])
else:
sentence.extend([buffer[0][0], text[i]])
buffer.clear()
elif prev == 0:
if wrd != "and":
sentence.extend([self.__words_to_numbers(buffer), text[i]])
buffer.clear()
elif prev == 3:
sentence.append(buffer[0])
sentence.append(text[i])
buffer.clear()
else:
sentence.append(text[i])
elif family == 0:
if prev == 3:
sentence.append("".join(buffer))
buffer.clear()
if wrd in UNIT_NUMBERS:
n = UNIT_NUMBERS[wrd]
buffer.append((n, "u"))
elif wrd in TENS_NUMBERS:
n = TENS_NUMBERS[wrd]
buffer.append((n, "t"))
else:
n = SCALE_NUMBERS[wrd]
buffer.append((n, "s"))
elif family == 1:
if prev == 3:
sentence.append("".join(buffer))
buffer.clear()
if prev == 0:
sentence.append(self.__words_to_numbers(buffer))
elif prev == 1:
sentence.append(buffer[0])
buffer.clear()
buffer.append(wrd)
elif family == 2:
if prev == 3:
sentence.append("".join(buffer))
buffer.clear()
if prev == 0:
if wrd == "cent":
sentence.append(
f"{self.__words_to_numbers(buffer)}{CURRENCY_SYMBOLS[wrd]}"
)
else:
sentence.append(
f"{CURRENCY_SYMBOLS[wrd]}{self.__words_to_numbers(buffer)}"
)
buffer.clear()
else:
sentence.append(text[i])
elif family == 3:
buffer.append(text[i])
prev = family
i += 1
if len(buffer) > 0:
if prev == 0:
sentence.append(self.__words_to_numbers(buffer))
elif prev == 1:
sentence.append(text[i - 1])
elif prev == 3:
sentence.append("".join(buffer))
return " ".join(map(str, sentence)) | none | 1 | 2.547375 | 3 | |
dupfinder/__main__.py | joseph-iussa/dupfinder | 0 | 6614995 | <reponame>joseph-iussa/dupfinder
from dupfinder.commandline import run
run() | from dupfinder.commandline import run
run() | none | 1 | 1.173482 | 1 | |
pygempick/modeling.py | jmarsil/pygempick | 1 | 6614996 | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 9 17:01:04 2018
@author: joseph
"""
import numpy as np
import cv2
import random
import pandas as pd
import scipy.optimize as opt
import matplotlib.pyplot as plt
#import pygempick module(s)
import pygempick.core as core
import pygempick.spatialstats as spa
def draw(n, test_number, noise, images):
'''
function to draws test micrograph sets that will be used in subsequent
efficiency or separation tests.
1. Test number 1 is draw only circles, 2 is draw both circles and ellipses.
2. Noise if == 'yes' then, randomly distibuted gaussian noise will be drawn
according to mu1, sig1.
3. images are the number of images in the set - used with n which is number of
particles detected in the actual set to calulate the particle density of model
set.
'''
row = 776 #image height
col = 1018 #image width
radrange = np.arange(4,8,1)
mu = n/images #mean particle number across your images
sigma = np.sqrt(mu) #standard deviation of the mean from your data
##creates a new normal distribution based on your data (particles,images)
pick = np.random.normal(mu,sigma)
#height = np.arange(26,750) ##array of possible particle heights
#width = np.arange(26,992) ##array of possible particle widths
height = 750
width = 990
count = 0
circles = 0
elipses = 0
#mu1 = .05
#sig1 = .02
image = 255*np.ones((row,col), np.float32)
##convert to BGR
image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
if noise == 'yes':
mu1 = input('Input mean of Gaussian Distributed Noise')
sig1 = input('Input std of Gaussian Distributed Noise')
##adding random gaussian distributed noise to image...
for q in range(row):
for w in range(col):
image[q][w] = np.float32(np.int(255*np.random.normal(mu1,sig1)))
##change this value for high variability in background conditions..
if test_number == 1:
for j in range(np.int(pick)):
count+=1
##picks a random particle radius between 4 and 8 pixels
r = random.choice(radrange)
##chooses a random center position for the circle
#h = random.choice(height)
#w = random.choice(width)
w = np.random.uniform(20,width)
h = np.random.uniform(20,height)
#w = np.int(col*np.random.rand()) #first method used to choose random width/height...
##ensure that no particles are drawn on the edges of the image
##figure out how to void borders...
##draw a black circle
cv2.circle(image,(h,w), np.int(r), (0,0,0), -1)
image = (image).astype('uint8')
print('Complete')
return image, count
elif test_number == 2:
q = np.int(pick)
count = 0
while count <= q:
##picks a random particle radius between 4 and 8 pixels
axis = random.choice(radrange)
#N = width * height / 4
##chooses a random center position for the circle
w = np.int(np.random.uniform(20,width))
h = np.int(np.random.uniform(20,height))
##bernouli trial to draw either circle or elippse...
flip = np.random.rand()
if flip < 0.5:
#draw a circle
cv2.circle(image,(h,w), np.int(axis), (0,0,0), -1)
circles +=1
else:
#draw an elippse...
elipses += 1
cv2.ellipse(image,(h,w),(int(axis)*2,int(axis)),0,0,360,(0,0,0),-1)
count += 1
count = circles + elipses
image = (image).astype('uint8')
return image, int(circles), int(elipses)
def imgclass(inv_img):
'''
Uses a compressed grayscale image from cvt_color(RGB2GRAY) and returns
the intensity histogram and related bins position w/ im_class.
Can optimize this function to a greater extent.
Recieves following input from:
gray_img = cv2.cvtColor(orig_img, cv2.COLOR_RGB2GRAY)
'''
##can edit to make a histogram from of the pixle image intensities of the image...
hist, bins = np.histogram(inv_img.flatten(),256,[0,256])
#bincenters = 0.5*(bins[1:]+bins[:-1])
##apending max histogram intensities into a list
histx = np.argmax(hist)
if histx < 110:
im_class = 1
elif 110 <= histx < 120:
im_class = 2
elif 120 <= histx < 125:
im_class = 3
elif 125 <= histx < 130:
im_class= 4
elif 130 <= histx < 135:
im_class= 5
elif 135 <= histx < 140:
im_class= 6
elif 140 <= histx < 145:
im_class= 7
elif 145 <= histx < 150:
im_class= 8
elif 150 <= histx < 160:
im_class= 9
elif histx >= 160:
im_class= 10
return im_class, histx
def septest(p,image):
'''
let p be a range of integers ranging from [1, x], for the publication x
is set to 31
let image be a grayscale image produced after original image compression and
conversion to grayscale using OpenCv's function
image = gray_img = cv2.cvtColor(orig_img, cv2.COLOR_RGB2GRAY)
'''
detected_bin = np.zeros(len(p))
detected_lap = np.zeros(len(p))
detected_dog = np.zeros(len(p))
detected_log = np.zeros(len(p))
#the background conditions of various image sets will varry -
#go back and plot
for i in range(len(p)):
#same scaling factor as used by SIFT on the simple scale
output_bin, _ = core.bin_filt(p[i], image)
output_lap = core.hclap_filt(p[i],image, 'no')
output_dog = core.dog_filt(p[i],image)
output_log = core.hlog_filt(p[i], image, 'no')
keypoints_bin = core.pick(output_bin, 31, .83, .61 , .61, 0)
keypoints_lap = core.pick(output_lap, 31, .83, .61 , .61, 0)
keypoints_dog = core.pick(output_dog, 31, .83, .61 , .61, 0)
keypoints_log = core.pick(output_log, 31, .83, .61 , .61, 0)
if len(keypoints_lap) > 0:
detected_lap[i] = len(keypoints_lap)
else:
detected_lap[i] = 0
if len(keypoints_dog) > 0:
detected_dog[i] = len(keypoints_dog)
else:
detected_dog[i] = 0
if len(keypoints_bin)>0:
detected_bin[i] = len(keypoints_bin)
else:
detected_bin[i] = 0
if len(keypoints_log)>0:
detected_log[i] = len(keypoints_log)
else:
detected_log[i] = 0
#returns an array of the number of particles detected per filtering method...
#took out detected_dog for a more in depth test...
return detected_bin, detected_lap, detected_dog, detected_log
def septest2(p, image, hlogkey):
    """Count combined HCLAP + HLOG detections for each filter size.

    Parameters
    ----------
    p : sequence of int
        Filter-size parameters (the publication uses [1, 31]).
    image : ndarray
        Grayscale image, e.g. cv2.cvtColor(orig_img, cv2.COLOR_RGB2GRAY).
    hlogkey : sequence
        Keypoints already detected with the HLOG filter; passing them in
        avoids re-running that filtering step for every value of p.

    Returns
    -------
    tuple of ndarray
        (count, duplicates): total unique detections and the number of
        duplicate keypoints removed, per entry of ``p``.
    """
    count = np.zeros(len(p))
    duplicates = np.zeros(len(p))
    keypoints2 = hlogkey
    for i in range(len(p)):
        output1 = core.hclap_filt(p[i], image, 'no')
        keypoints1 = core.pick(output1, 31, .83, .5, .5, 0)
        # Drop HCLAP keypoints that duplicate the HLOG ones.
        keypoints1, dup = core.key_filt(keypoints1, keypoints2)
        # Fix: the original four-way if/elif chain computed exactly this
        # sum in every branch (the empty cases contribute 0).
        count[i] = len(keypoints1) + len(keypoints2)
        duplicates[i] = dup
    return count, duplicates
def fitpcfs(data):
    """Fit and plot CSR pair-correlation curves for two positive controls.

    Expects the layout produced by keypoints2pcf(): columns 0-2 hold
    (P(r), error, r) for the first control (CD1) and columns 3-5 hold the
    same for the second (V30M), e.g.
    data = pd.read_csv('pcf-dr5-error.csv', header=None, skiprows=1).

    The model spa.pcf is the probability distribution for Complete
    Spatial Randomness of IGEM particles across EM micrographs.

    Returns
    -------
    tuple
        (popt1, perr1, popt2, perr2): rounded fit parameters and their
        one-sigma errors for the CD1 and V30M curves.
    """
    data = pd.DataFrame(data).fillna(0)
    # Initial guesses (N, L, scale) for each curve fit.
    pcfp1 = np.array([100., 1., 1.])
    pcfp2 = np.array([10., 1., 1.])
    # CD1 columns: P(r) in 0, error in 1, radius in 2.
    x, y, dy = data[2].values, data[0].values, data[1].values
    # V30M columns: P(r) in 3, error in 4, radius in 5.
    x1, y1, dy1 = data[5].values, data[3].values, data[4].values
    popt1, pcov1 = opt.curve_fit(spa.pcf, x, y, p0=pcfp1)
    popt2, pcov2 = opt.curve_fit(spa.pcf, x1, y1, p0=pcfp2)
    popt1 = np.around(popt1, decimals=2)
    popt2 = np.around(popt2, decimals=2)
    # One-sigma parameter errors from the covariance diagonals.
    perr1 = np.around(np.sqrt(np.diag(pcov1)), decimals=3)
    perr2 = np.around(np.sqrt(np.diag(pcov2)), decimals=3)
    r = np.arange(0, 110, 1)
    plt.figure()
    plt.title('Probability of Gold Particle Colocolization on TTR micrographs')
    plt.plot(x, y, 'xr')  # keypoints of CD1 micrographs
    plt.plot(r, spa.pcf(r, popt1[0], popt1[1], popt1[2]), 'r-',
             label='CD1 CSR, N = {} +/- {}, L = {} +/- {}'.format(
                 popt1[0], perr1[0], popt1[1], perr1[1]))
    plt.errorbar(x, y, yerr=dy, fmt='xr')
    plt.plot(x1, y1, 'og')  # keypoints of V30M micrographs
    plt.plot(r, spa.pcf(r, popt2[0], popt2[1], popt2[2]), 'g-',
             label='V30M CSR, N = {} +/- {}, L = {} +/- {}'.format(
                 popt2[0], perr2[0], popt2[1], perr2[1]))
    plt.errorbar(x1, y1, yerr=dy1, fmt='og')
    plt.legend(loc='best')
    plt.grid(True)
    plt.xlabel('Radius (r)')
    plt.ylabel('P(r)')  # probability of the Nth point at distance r
    # Fix: unlike fitpcf(), the original never returned the fit results;
    # returning them is backward-compatible for callers that ignored None.
    return popt1, perr1, popt2, perr2
def fitpcf(data, N, p0, p1):
    """Fit a single CSR pair-correlation curve and plot the result.

    ``data`` follows the keypoints2pcf() layout: column 0 is P(r),
    column 1 its error, column 2 the radius r. ``N``, ``p0`` and ``p1``
    seed the fit of spa.pcf (Complete Spatial Randomness model).

    Returns the optimal parameters rounded to 2 decimals and the
    element-wise square root of the covariance matrix rounded to 3.
    """
    frame = pd.DataFrame(data).fillna(0)
    # Initial guess for the (N, L, scale) fit parameters.
    guess = np.array([N, p0, p1])
    radii = frame[2].values
    prob = frame[0].values
    prob_err = frame[1].values
    best, cov = opt.curve_fit(spa.pcf, radii, prob, p0=guess)
    best = np.around(best, decimals=2)
    cov_err = np.around(np.sqrt(cov), decimals=3)
    plt.figure()
    plt.title('Probability of Gold Particle Colocolization on TTR micrographs')
    # Raw keypoints first, then the fitted curve over a fixed radius range.
    plt.plot(radii, prob, 'xr')
    curve_r = np.arange(0, 210, 1)
    plt.plot(curve_r, spa.pcf(curve_r, best[0], best[1], best[2]),
             'g-', label='V30M CSR, N = {} +/- {}, L = {} +/- {}'.format(
                 best[0], np.around(np.sqrt(cov[0, 0]), decimals=3),
                 best[1], np.around(np.sqrt(cov[1, 1]), decimals=3)))
    plt.errorbar(radii, prob, yerr=prob_err, fmt='og')
    plt.legend(loc='best')
    plt.grid(True)
    plt.xlabel('Radius (r)')
    plt.ylabel('P(r)')  # probability of the Nth point at distance r
    plt.show()
    return best, cov_err
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 9 17:01:04 2018
@author: joseph
"""
import numpy as np
import cv2
import random
import pandas as pd
import scipy.optimize as opt
import matplotlib.pyplot as plt
#import pygempick module(s)
import pygempick.core as core
import pygempick.spatialstats as spa
def draw(n, test_number, noise, images):
    """Draw a model micrograph with randomly placed gold-particle shapes.

    Used to generate test micrograph sets for the efficiency and
    separation tests.

    Parameters
    ----------
    n : int
        Number of particles detected in the real image set.
    test_number : int
        1 draws only circles; 2 draws a random mix of circles and ellipses.
    noise : str
        'yes' adds Gaussian-distributed background noise; its mean and
        std are requested interactively via input().
    images : int
        Number of images in the real set; n / images gives the mean
        particle density the model image is sampled from.

    Returns
    -------
    tuple or None
        (image, count) for test_number == 1,
        (image, circles, elipses) for test_number == 2,
        None for any other test_number.
    """
    row = 776    # image height
    col = 1018   # image width
    radrange = np.arange(4, 8, 1)
    mu = n / images        # mean particle count per image in the real data
    sigma = np.sqrt(mu)    # spread of that mean
    # Sample this image's particle count from the fitted normal distribution.
    pick = np.random.normal(mu, sigma)
    # Upper bounds keeping particle centres away from the image borders.
    height = 750
    width = 990
    count = 0
    circles = 0
    elipses = 0
    image = 255 * np.ones((row, col), np.float32)
    # Convert to BGR so the drawing calls below take a 3-channel colour.
    image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    if noise == 'yes':
        # Fix: input() returns strings; cast so np.random.normal gets floats
        # (the original crashed here with a TypeError).
        mu1 = float(input('Input mean of Gaussian Distributed Noise'))
        sig1 = float(input('Input std of Gaussian Distributed Noise'))
        # Add random Gaussian-distributed noise pixel by pixel.
        for q in range(row):
            for w in range(col):
                image[q][w] = np.float32(int(255 * np.random.normal(mu1, sig1)))
    if test_number == 1:
        for _ in range(int(pick)):
            count += 1
            # Random particle radius between 4 and 8 pixels.
            r = random.choice(radrange)
            w = np.random.uniform(20, width)
            h = np.random.uniform(20, height)
            # Fix: OpenCV requires integer centre coordinates (branch 2
            # already cast them); floats raise on recent OpenCV versions.
            # NOTE(review): (h, w) ordering is kept from the original even
            # though cv2 centres are (x, y) - confirm intent before changing.
            cv2.circle(image, (int(h), int(w)), int(r), (0, 0, 0), -1)
        image = image.astype('uint8')
        print('Complete')
        return image, count
    elif test_number == 2:
        q = int(pick)
        count = 0
        # Fix: the original 'while count <= q' drew q + 1 particles.
        while count < q:
            # Random semi-axis between 4 and 8 pixels.
            axis = random.choice(radrange)
            w = int(np.random.uniform(20, width))
            h = int(np.random.uniform(20, height))
            # Bernoulli trial: draw either a circle or an ellipse.
            if np.random.rand() < 0.5:
                cv2.circle(image, (h, w), int(axis), (0, 0, 0), -1)
                circles += 1
            else:
                cv2.ellipse(image, (h, w), (int(axis) * 2, int(axis)),
                            0, 0, 360, (0, 0, 0), -1)
                elipses += 1
            count += 1
        count = circles + elipses
        image = image.astype('uint8')
        return image, int(circles), int(elipses)
def imgclass(inv_img):
    """Classify a grayscale image by its dominant pixel intensity.

    Builds a 256-bin intensity histogram of ``inv_img`` (a grayscale
    image such as cv2.cvtColor(orig_img, cv2.COLOR_RGB2GRAY)) and maps
    the most common intensity to one of 10 background classes.

    Returns
    -------
    tuple
        (im_class, histx): the class in [1, 10] and the modal intensity.
    """
    from bisect import bisect_right
    hist, bins = np.histogram(inv_img.flatten(), 256, [0, 256])
    # Modal pixel intensity: index of the fullest histogram bin.
    histx = np.argmax(hist)
    # Upper bounds of classes 1..9; anything >= 160 falls into class 10.
    # bisect replaces the original ten-branch elif ladder with the same
    # half-open intervals (e.g. 110 <= histx < 120 -> class 2).
    bounds = [110, 120, 125, 130, 135, 140, 145, 150, 160]
    im_class = bisect_right(bounds, histx) + 1
    return im_class, histx
def septest(p, image):
    """Count particles detected by four filtering methods per filter size.

    Parameters
    ----------
    p : sequence of int
        Filter-size parameters, e.g. range(1, 32); the publication uses
        values in [1, 31].
    image : ndarray
        Grayscale image produced after compression, e.g.
        cv2.cvtColor(orig_img, cv2.COLOR_RGB2GRAY).

    Returns
    -------
    tuple of ndarray
        (detected_bin, detected_lap, detected_dog, detected_log): one
        detection count per entry of ``p`` for each filtering method.
    """
    detected_bin = np.zeros(len(p))
    detected_lap = np.zeros(len(p))
    detected_dog = np.zeros(len(p))
    detected_log = np.zeros(len(p))
    # NOTE(review): background conditions vary between image sets - worth
    # plotting before trusting a single picking threshold.
    for i in range(len(p)):
        # Same scaling factor as used by SIFT on the simple scale.
        output_bin, _ = core.bin_filt(p[i], image)
        output_lap = core.hclap_filt(p[i], image, 'no')
        output_dog = core.dog_filt(p[i], image)
        output_log = core.hlog_filt(p[i], image, 'no')
        # Fix: len() is never negative, so the original
        # "if len(...) > 0 ... else 0" fallbacks were redundant;
        # store the counts directly.
        detected_bin[i] = len(core.pick(output_bin, 31, .83, .61, .61, 0))
        detected_lap[i] = len(core.pick(output_lap, 31, .83, .61, .61, 0))
        detected_dog[i] = len(core.pick(output_dog, 31, .83, .61, .61, 0))
        detected_log[i] = len(core.pick(output_log, 31, .83, .61, .61, 0))
    # One array of per-size particle counts for each filtering method.
    return detected_bin, detected_lap, detected_dog, detected_log
def septest2(p, image, hlogkey):
    """Count combined HCLAP + HLOG detections for each filter size.

    Parameters
    ----------
    p : sequence of int
        Filter-size parameters (the publication uses [1, 31]).
    image : ndarray
        Grayscale image, e.g. cv2.cvtColor(orig_img, cv2.COLOR_RGB2GRAY).
    hlogkey : sequence
        Keypoints already detected with the HLOG filter; passing them in
        avoids re-running that filtering step for every value of p.

    Returns
    -------
    tuple of ndarray
        (count, duplicates): total unique detections and the number of
        duplicate keypoints removed, per entry of ``p``.
    """
    count = np.zeros(len(p))
    duplicates = np.zeros(len(p))
    keypoints2 = hlogkey
    for i in range(len(p)):
        output1 = core.hclap_filt(p[i], image, 'no')
        keypoints1 = core.pick(output1, 31, .83, .5, .5, 0)
        # Drop HCLAP keypoints that duplicate the HLOG ones.
        keypoints1, dup = core.key_filt(keypoints1, keypoints2)
        # Fix: the original four-way if/elif chain computed exactly this
        # sum in every branch (the empty cases contribute 0).
        count[i] = len(keypoints1) + len(keypoints2)
        duplicates[i] = dup
    return count, duplicates
def fitpcfs(data):
    """Fit and plot CSR pair-correlation curves for two positive controls.

    Expects the layout produced by keypoints2pcf(): columns 0-2 hold
    (P(r), error, r) for the first control (CD1) and columns 3-5 hold the
    same for the second (V30M), e.g.
    data = pd.read_csv('pcf-dr5-error.csv', header=None, skiprows=1).

    The model spa.pcf is the probability distribution for Complete
    Spatial Randomness of IGEM particles across EM micrographs.

    Returns
    -------
    tuple
        (popt1, perr1, popt2, perr2): rounded fit parameters and their
        one-sigma errors for the CD1 and V30M curves.
    """
    data = pd.DataFrame(data).fillna(0)
    # Initial guesses (N, L, scale) for each curve fit.
    pcfp1 = np.array([100., 1., 1.])
    pcfp2 = np.array([10., 1., 1.])
    # CD1 columns: P(r) in 0, error in 1, radius in 2.
    x, y, dy = data[2].values, data[0].values, data[1].values
    # V30M columns: P(r) in 3, error in 4, radius in 5.
    x1, y1, dy1 = data[5].values, data[3].values, data[4].values
    popt1, pcov1 = opt.curve_fit(spa.pcf, x, y, p0=pcfp1)
    popt2, pcov2 = opt.curve_fit(spa.pcf, x1, y1, p0=pcfp2)
    popt1 = np.around(popt1, decimals=2)
    popt2 = np.around(popt2, decimals=2)
    # One-sigma parameter errors from the covariance diagonals.
    perr1 = np.around(np.sqrt(np.diag(pcov1)), decimals=3)
    perr2 = np.around(np.sqrt(np.diag(pcov2)), decimals=3)
    r = np.arange(0, 110, 1)
    plt.figure()
    plt.title('Probability of Gold Particle Colocolization on TTR micrographs')
    plt.plot(x, y, 'xr')  # keypoints of CD1 micrographs
    plt.plot(r, spa.pcf(r, popt1[0], popt1[1], popt1[2]), 'r-',
             label='CD1 CSR, N = {} +/- {}, L = {} +/- {}'.format(
                 popt1[0], perr1[0], popt1[1], perr1[1]))
    plt.errorbar(x, y, yerr=dy, fmt='xr')
    plt.plot(x1, y1, 'og')  # keypoints of V30M micrographs
    plt.plot(r, spa.pcf(r, popt2[0], popt2[1], popt2[2]), 'g-',
             label='V30M CSR, N = {} +/- {}, L = {} +/- {}'.format(
                 popt2[0], perr2[0], popt2[1], perr2[1]))
    plt.errorbar(x1, y1, yerr=dy1, fmt='og')
    plt.legend(loc='best')
    plt.grid(True)
    plt.xlabel('Radius (r)')
    plt.ylabel('P(r)')  # probability of the Nth point at distance r
    # Fix: unlike fitpcf(), the original never returned the fit results;
    # returning them is backward-compatible for callers that ignored None.
    return popt1, perr1, popt2, perr2
def fitpcf(data, N, p0, p1):
data = pd.DataFrame(data)
data = data.fillna(0)
#determine guess filtering parameters
pcfp1 = np.array([N,p0,p1])
x = data[2].values
y = data[0].values
dy = data[1].values
popt1, pcov1 = opt.curve_fit(spa.pcf , x, y, p0 = pcfp1)
popt1 = np.around(popt1, decimals=2)
plt.figure()
plt.title('Probability of Gold Particle Colocolization on TTR micrographs' )
#CSR of CD1 Micgrgrap set
plt.plot(x,y,'xr') #keypoints of CD1 micrographs
plt.plot(np.arange(0,210,1), spa.pcf(np.arange(0,210,1), popt1[0], popt1[1], popt1[2]),
'g-', label='V30M CSR, N = {} +/- {}, L = {} +/- {}'.format(popt1[0],
np.around(np.sqrt(pcov1[0,0]), decimals=3),
popt1[1], np.around(np.sqrt(pcov1[1,1]), decimals=3)))
plt.errorbar(x, y, yerr=dy, fmt='og')
plt.legend(loc='best')
plt.grid(True)
plt.xlabel('Radius (r)')
#Probability Nth point at distance r
plt.ylabel('P(r)')
plt.show()
return popt1, np.around(np.sqrt(pcov1), decimals=3) | en | 0.76627 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Wed May 9 17:01:04 2018 @author: joseph #import pygempick module(s) function to draws test micrograph sets that will be used in subsequent efficiency or separation tests. 1. Test number 1 is draw only circles, 2 is draw both circles and ellipses. 2. Noise if == 'yes' then, randomly distibuted gaussian noise will be drawn according to mu1, sig1. 3. images are the number of images in the set - used with n which is number of particles detected in the actual set to calulate the particle density of model set. #image height #image width #mean particle number across your images #standard deviation of the mean from your data ##creates a new normal distribution based on your data (particles,images) #height = np.arange(26,750) ##array of possible particle heights #width = np.arange(26,992) ##array of possible particle widths #mu1 = .05 #sig1 = .02 ##convert to BGR ##adding random gaussian distributed noise to image... ##change this value for high variability in background conditions.. ##picks a random particle radius between 4 and 8 pixels ##chooses a random center position for the circle #h = random.choice(height) #w = random.choice(width) #w = np.int(col*np.random.rand()) #first method used to choose random width/height... ##ensure that no particles are drawn on the edges of the image ##figure out how to void borders... ##draw a black circle ##picks a random particle radius between 4 and 8 pixels #N = width * height / 4 ##chooses a random center position for the circle ##bernouli trial to draw either circle or elippse... #draw a circle #draw an elippse... Uses a compressed grayscale image from cvt_color(RGB2GRAY) and returns the intensity histogram and related bins position w/ im_class. Can optimize this function to a greater extent. 
Recieves following input from: gray_img = cv2.cvtColor(orig_img, cv2.COLOR_RGB2GRAY) ##can edit to make a histogram from of the pixle image intensities of the image... #bincenters = 0.5*(bins[1:]+bins[:-1]) ##apending max histogram intensities into a list let p be a range of integers ranging from [1, x], for the publication x is set to 31 let image be a grayscale image produced after original image compression and conversion to grayscale using OpenCv's function image = gray_img = cv2.cvtColor(orig_img, cv2.COLOR_RGB2GRAY) #the background conditions of various image sets will varry - #go back and plot #same scaling factor as used by SIFT on the simple scale #returns an array of the number of particles detected per filtering method... #took out detected_dog for a more in depth test... let p be a range of integers ranging from [1, x], for the publication x is set to 31 let image be a grayscale image produced after original image compression and conversion to grayscale using OpenCv's function hlogkey the keypoints of detected image fitered with HLOG filter - this ensures faster particle detection since we aren't running the same filtering step more than once! data1 = pd.read_csv('/home/joseph/Documents/PHY479/pcf-dr5-error.csv', header=None, skiprows=1) Function initially created to plot graphs from V30M and CD1 positve controls () please add modifications and change to suit your needs. **Note: pcf-dr5-error.csv is a file outputted from keypoints2pcf() look to that function to see how that output is formatted. Output : built to produce one graph, with fitted curve for positive control(s). Equation fitted to probability distribution for Complete Spatial Randomness of the distribution of IGEM particles across EM micrographs. 
#determine guess filtering parameters #The probability of locating the N t h {\displaystyle N^{\mathrm {th} }} #N^{{{\mathrm {th}}}} neighbor of any given point, at some radial distance r #{\displaystyle r} r is: #CSR of CD1 Micgrgrap set #keypoints of CD1 micrographs ##keypoints of V30M micrographs #Probability Nth point at distance r #determine guess filtering parameters #CSR of CD1 Micgrgrap set #keypoints of CD1 micrographs #Probability Nth point at distance r | 3.274576 | 3 |
ctree/cilk/dotgen.py | zanellia/ctree | 16 | 6614997 | """
DOT labeller for Cilk.
"""
from ctree.dotgen import DotGenLabeller
# ---------------------------------------------------------------------------
# DOT labeller
class CilkDotLabeller(DotGenLabeller):
    """
    Visitor to label DOT nodes.

    No Cilk AST node currently needs a custom label, so this class only
    inherits the default DotGenLabeller behaviour; it exists as the hook
    point for future Cilk-specific labelling.
    """
    pass
| """
DOT labeller for Cilk.
"""
from ctree.dotgen import DotGenLabeller
# ---------------------------------------------------------------------------
# DOT labeller
class CilkDotLabeller(DotGenLabeller):
    """
    Visitor to label DOT nodes.

    No Cilk AST node currently needs a custom label, so this class only
    inherits the default DotGenLabeller behaviour; it exists as the hook
    point for future Cilk-specific labelling.
    """
    pass
| en | 0.302977 | DOT labeller for Cilk. # --------------------------------------------------------------------------- # DOT labeller Visitor to label DOT nodes. | 1.504252 | 2 |
Pages/checkout_page.py | tienn321/saucedemo | 0 | 6614998 | from Locators.checkout_locators import CheckoutLocators
from Objects.product import Product
from Pages.base_page import BasePage
from Utils.utilities import Utilities
class CheckoutPage(BasePage):
    """Page object for the checkout overview screen."""

    def __init__(self, driver):
        super().__init__(driver)

    def get_product_info(self, index):
        """Read name, description and price of the product at ``index``."""
        name = self.get_text(CheckoutLocators.LABEL_PRODUCT_NAME(index))
        desc = self.get_text(CheckoutLocators.LABEL_PRODUCT_DESC(index))
        price = self.get_text(CheckoutLocators.LABEL_PRODUCT_PRICE(index))
        return Product(name, desc, price)

    def click_cancel_button(self):
        self.click(CheckoutLocators.BUTTON_CANCEL)

    def click_finish_button(self):
        self.click(CheckoutLocators.BUTTON_FINISH)

    def get_item_price(self):
        """Return the displayed item subtotal as a float."""
        item_total = Utilities.get_number(self.get_text(CheckoutLocators.LABEL_ITEM_TOTAL))
        return float(item_total)

    def get_tax(self):
        """Return the displayed tax amount as a float."""
        tax = Utilities.get_number(self.get_text(CheckoutLocators.LABEL_TAX))
        return float(tax)

    def get_total(self):
        """Return the displayed grand total as a float."""
        total = Utilities.get_number(self.get_text(CheckoutLocators.LABEL_TOTAL))
        return float(total)

    @staticmethod
    def calculate_the_price(item_price, tax, total):
        """Check that tax and total are consistent with an 8% tax rate.

        Fix: the original definition sat at method level without ``self``
        and without @staticmethod, so calling it on a CheckoutPage
        instance raised a TypeError.
        """
        correct = False
        tax_rate = 0.08
        calculated_tax = round(item_price * tax_rate, 2)
        # NOTE(review): exact float equality is brittle; it works when the
        # inputs come from rounded currency strings, but math.isclose would
        # be safer - confirm callers before changing behaviour.
        if (tax == calculated_tax) and (total == calculated_tax + item_price):
            correct = True
        return correct
| from Locators.checkout_locators import CheckoutLocators
from Objects.product import Product
from Pages.base_page import BasePage
from Utils.utilities import Utilities
class CheckoutPage(BasePage):
    """Page object for the checkout overview screen."""

    def __init__(self, driver):
        super().__init__(driver)

    def get_product_info(self, index):
        """Read name, description and price of the product at ``index``."""
        name = self.get_text(CheckoutLocators.LABEL_PRODUCT_NAME(index))
        desc = self.get_text(CheckoutLocators.LABEL_PRODUCT_DESC(index))
        price = self.get_text(CheckoutLocators.LABEL_PRODUCT_PRICE(index))
        return Product(name, desc, price)

    def click_cancel_button(self):
        self.click(CheckoutLocators.BUTTON_CANCEL)

    def click_finish_button(self):
        self.click(CheckoutLocators.BUTTON_FINISH)

    def get_item_price(self):
        """Return the displayed item subtotal as a float."""
        item_total = Utilities.get_number(self.get_text(CheckoutLocators.LABEL_ITEM_TOTAL))
        return float(item_total)

    def get_tax(self):
        """Return the displayed tax amount as a float."""
        tax = Utilities.get_number(self.get_text(CheckoutLocators.LABEL_TAX))
        return float(tax)

    def get_total(self):
        """Return the displayed grand total as a float."""
        total = Utilities.get_number(self.get_text(CheckoutLocators.LABEL_TOTAL))
        return float(total)

    @staticmethod
    def calculate_the_price(item_price, tax, total):
        """Check that tax and total are consistent with an 8% tax rate.

        Fix: the original definition sat at method level without ``self``
        and without @staticmethod, so calling it on a CheckoutPage
        instance raised a TypeError.
        """
        correct = False
        tax_rate = 0.08
        calculated_tax = round(item_price * tax_rate, 2)
        # NOTE(review): exact float equality is brittle; it works when the
        # inputs come from rounded currency strings, but math.isclose would
        # be safer - confirm callers before changing behaviour.
        if (tax == calculated_tax) and (total == calculated_tax + item_price):
            correct = True
        return correct
| en | 0.398645 | # def calculate_total_price(self): | 2.560436 | 3 |
raspi/robot.py | CalPolyUROV/UROV2019 | 4 | 6614999 | # """ Code that runs on the Raspberry Pi inside the robot
# This is the python program meant to run on the Raspberry Pi 3B+ located on
# the robot. This program acts as a intermediary between the Raspberry Pi on
# the surface unit and the Arduino/Teensy on the robot. The scheduling module
# used in this program manages the serial and sockets connections to the
# Arduino/Teensy and topside raspberry Pi respectively.
# """
# import settings
# from internal_temp import IntTempMon
# from robot_controls import ControlsProcessor
# from snr.comms.serial.serial_coms import SerialConnection
# from snr.comms.sockets.client import SocketsClient
# from snr.comms.sockets.server import SocketsServer
# from snr.node import Node
# from snr.task import SomeTasks, Task, TaskPriority, TaskType
# from snr.utils.utils import debug_delay
# class Robot(Node):
# def __init__(self, mode: str):
# super().__init__(self.handle_task, self.get_new_tasks)
# self.mode = mode
# self.controls_processor = ControlsProcessor(self.profiler)
# if settings.USE_CONTROLS_SOCKETS:
# debug("sockets", "Using sockets as enabled in settings")
# if self.mode.__eq__("debug"):
# debug("robot", "Running in debug mode: server IP is localhost")
# settings.CONTROLS_SOCKETS_CONFIG.ip = "localhost"
# # Make sockets client object using our implementation
# self.socket_connection = SocketsClient(
# settings.CONTROLS_SOCKETS_CONFIG,
# self.schedule_task)
# if settings.USE_TELEMETRY_SOCKETS:
# # Start sockets server endpoint
# if mode.__eq__("debug"):
# settings.TELEMETRY_SOCKETS_CONFIG.ip = "localhost"
# self.telemetry_server = SocketsServer(
# settings.TELEMETRY_SOCKETS_CONFIG,
# self.serve_telemetry_data,
# self.profiler)
# if settings.USE_ROBOT_PI_TEMP_MON:
# self.temp_mon = IntTempMon(
# settings.ROBOT_INT_TEMP_NAME,
# self.store_int_temp_data,
# self.profiler)
# def handle_task(self, t: Task) -> SomeTasks:
# debug("execute_task_verbose", "Executing task: {}", [t])
# sched_list = []
# # Read sensor data
# if t.task_type == TaskType.get_telemetry:
# debug("execute_task", "Executing task: {}", [t.val_list])
# # TODO: Read sensor values from serial and store in datastore
# data = {}
# data["throttle_data"] = self.controls_processor.throttle
# data["motor_data"] = self.controls_processor.motor_control.\
# motor_values
# data["current_camera"] = self.controls_processor.cameras.\
# current_camera
# data["int_temp_data"] = self.get_data(settings.ROBOT_INT_TEMP_NAME)
# self.store_data(settings.TELEMETRY_DATA_NAME, data)
# # Send serial data
# # Debug string command
# elif t.task_type == TaskType.debug_str:
# debug("execute_task", "Executing task: {}", t.val_list)
# # Terminate robot
# elif t.task_type == TaskType.terminate_robot:
# debug("robot_control",
# "Robot {} program terminated by command",
# settings.ROBOT_NAME)
# self.terminate()
# else: # Catch all
# debug("execute_task", "Unable to handle TaskType: {}", t.task_type)
# if self.mode.__eq__("debug"):
# debug_delay()
# return sched_list
# def get_new_tasks(self) -> SomeTasks:
# """Task source function passed to Schedule constructor
# """
# sched_list = []
# if settings.USE_CONTROLS_SOCKETS:
# t = Task(TaskType.get_controls, TaskPriority.high, [])
# sched_list.append(t)
# else:
# debug("robot", "Sockets disabled, queuing blink task")
# t = Task(TaskType.blink_test, TaskPriority.high, [1, 1])
# sched_list.append(t)
# t = Task(TaskType.get_telemetry, TaskPriority.normal, [])
# sched_list.append(t)
# return sched_list
# def terminate(self) -> None:
# """Close the sockets connection
# """
# if settings.USE_CONTROLS_SOCKETS:
# self.socket_connection.terminate()
# if settings.USE_TELEMETRY_SOCKETS:
# self.telemetry_server.terminate()
# self.serial_connection.terminate()
# self.set_terminate_flag()
# def store_throttle_data(self, throttle_data: dict):
# self.store_data(settings.THROTTLE_DATA_NAME, throttle_data)
# def serve_throttle_data(self):
# return self.get_data(settings.THROTTLE_DATA_NAME)
# def serve_telemetry_data(self) -> dict:
# return self.get_data(settings.TELEMETRY_DATA_NAME)
# def store_int_temp_data(self, int_temp: float):
# self.store_data(settings.ROBOT_INT_TEMP_NAME, int_temp)
| # """ Code that runs on the Raspberry Pi inside the robot
# This is the python program meant to run on the Raspberry Pi 3B+ located on
# the robot. This program acts as a intermediary between the Raspberry Pi on
# the surface unit and the Arduino/Teensy on the robot. The scheduling module
# used in this program manages the serial and sockets connections to the
# Arduino/Teensy and topside raspberry Pi respectively.
# """
# import settings
# from internal_temp import IntTempMon
# from robot_controls import ControlsProcessor
# from snr.comms.serial.serial_coms import SerialConnection
# from snr.comms.sockets.client import SocketsClient
# from snr.comms.sockets.server import SocketsServer
# from snr.node import Node
# from snr.task import SomeTasks, Task, TaskPriority, TaskType
# from snr.utils.utils import debug_delay
# class Robot(Node):
# def __init__(self, mode: str):
# super().__init__(self.handle_task, self.get_new_tasks)
# self.mode = mode
# self.controls_processor = ControlsProcessor(self.profiler)
# if settings.USE_CONTROLS_SOCKETS:
# debug("sockets", "Using sockets as enabled in settings")
# if self.mode.__eq__("debug"):
# debug("robot", "Running in debug mode: server IP is localhost")
# settings.CONTROLS_SOCKETS_CONFIG.ip = "localhost"
# # Make sockets client object using our implementation
# self.socket_connection = SocketsClient(
# settings.CONTROLS_SOCKETS_CONFIG,
# self.schedule_task)
# if settings.USE_TELEMETRY_SOCKETS:
# # Start sockets server endpoint
# if mode.__eq__("debug"):
# settings.TELEMETRY_SOCKETS_CONFIG.ip = "localhost"
# self.telemetry_server = SocketsServer(
# settings.TELEMETRY_SOCKETS_CONFIG,
# self.serve_telemetry_data,
# self.profiler)
# if settings.USE_ROBOT_PI_TEMP_MON:
# self.temp_mon = IntTempMon(
# settings.ROBOT_INT_TEMP_NAME,
# self.store_int_temp_data,
# self.profiler)
# def handle_task(self, t: Task) -> SomeTasks:
# debug("execute_task_verbose", "Executing task: {}", [t])
# sched_list = []
# # Read sensor data
# if t.task_type == TaskType.get_telemetry:
# debug("execute_task", "Executing task: {}", [t.val_list])
# # TODO: Read sensor values from serial and store in datastore
# data = {}
# data["throttle_data"] = self.controls_processor.throttle
# data["motor_data"] = self.controls_processor.motor_control.\
# motor_values
# data["current_camera"] = self.controls_processor.cameras.\
# current_camera
# data["int_temp_data"] = self.get_data(settings.ROBOT_INT_TEMP_NAME)
# self.store_data(settings.TELEMETRY_DATA_NAME, data)
# # Send serial data
# # Debug string command
# elif t.task_type == TaskType.debug_str:
# debug("execute_task", "Executing task: {}", t.val_list)
# # Terminate robot
# elif t.task_type == TaskType.terminate_robot:
# debug("robot_control",
# "Robot {} program terminated by command",
# settings.ROBOT_NAME)
# self.terminate()
# else: # Catch all
# debug("execute_task", "Unable to handle TaskType: {}", t.task_type)
# if self.mode.__eq__("debug"):
# debug_delay()
# return sched_list
# def get_new_tasks(self) -> SomeTasks:
# """Task source function passed to Schedule constructor
# """
# sched_list = []
# if settings.USE_CONTROLS_SOCKETS:
# t = Task(TaskType.get_controls, TaskPriority.high, [])
# sched_list.append(t)
# else:
# debug("robot", "Sockets disabled, queuing blink task")
# t = Task(TaskType.blink_test, TaskPriority.high, [1, 1])
# sched_list.append(t)
# t = Task(TaskType.get_telemetry, TaskPriority.normal, [])
# sched_list.append(t)
# return sched_list
# def terminate(self) -> None:
# """Close the sockets connection
# """
# if settings.USE_CONTROLS_SOCKETS:
# self.socket_connection.terminate()
# if settings.USE_TELEMETRY_SOCKETS:
# self.telemetry_server.terminate()
# self.serial_connection.terminate()
# self.set_terminate_flag()
# def store_throttle_data(self, throttle_data: dict):
# self.store_data(settings.THROTTLE_DATA_NAME, throttle_data)
# def serve_throttle_data(self):
# return self.get_data(settings.THROTTLE_DATA_NAME)
# def serve_telemetry_data(self) -> dict:
# return self.get_data(settings.TELEMETRY_DATA_NAME)
# def store_int_temp_data(self, int_temp: float):
# self.store_data(settings.ROBOT_INT_TEMP_NAME, int_temp)
| en | 0.366376 | # """ Code that runs on the Raspberry Pi inside the robot # This is the python program meant to run on the Raspberry Pi 3B+ located on # the robot. This program acts as a intermediary between the Raspberry Pi on # the surface unit and the Arduino/Teensy on the robot. The scheduling module # used in this program manages the serial and sockets connections to the # Arduino/Teensy and topside raspberry Pi respectively. # """ # import settings # from internal_temp import IntTempMon # from robot_controls import ControlsProcessor # from snr.comms.serial.serial_coms import SerialConnection # from snr.comms.sockets.client import SocketsClient # from snr.comms.sockets.server import SocketsServer # from snr.node import Node # from snr.task import SomeTasks, Task, TaskPriority, TaskType # from snr.utils.utils import debug_delay # class Robot(Node): # def __init__(self, mode: str): # super().__init__(self.handle_task, self.get_new_tasks) # self.mode = mode # self.controls_processor = ControlsProcessor(self.profiler) # if settings.USE_CONTROLS_SOCKETS: # debug("sockets", "Using sockets as enabled in settings") # if self.mode.__eq__("debug"): # debug("robot", "Running in debug mode: server IP is localhost") # settings.CONTROLS_SOCKETS_CONFIG.ip = "localhost" # # Make sockets client object using our implementation # self.socket_connection = SocketsClient( # settings.CONTROLS_SOCKETS_CONFIG, # self.schedule_task) # if settings.USE_TELEMETRY_SOCKETS: # # Start sockets server endpoint # if mode.__eq__("debug"): # settings.TELEMETRY_SOCKETS_CONFIG.ip = "localhost" # self.telemetry_server = SocketsServer( # settings.TELEMETRY_SOCKETS_CONFIG, # self.serve_telemetry_data, # self.profiler) # if settings.USE_ROBOT_PI_TEMP_MON: # self.temp_mon = IntTempMon( # settings.ROBOT_INT_TEMP_NAME, # self.store_int_temp_data, # self.profiler) # def handle_task(self, t: Task) -> SomeTasks: # debug("execute_task_verbose", "Executing task: {}", [t]) # sched_list = [] # # Read sensor 
data # if t.task_type == TaskType.get_telemetry: # debug("execute_task", "Executing task: {}", [t.val_list]) # # TODO: Read sensor values from serial and store in datastore # data = {} # data["throttle_data"] = self.controls_processor.throttle # data["motor_data"] = self.controls_processor.motor_control.\ # motor_values # data["current_camera"] = self.controls_processor.cameras.\ # current_camera # data["int_temp_data"] = self.get_data(settings.ROBOT_INT_TEMP_NAME) # self.store_data(settings.TELEMETRY_DATA_NAME, data) # # Send serial data # # Debug string command # elif t.task_type == TaskType.debug_str: # debug("execute_task", "Executing task: {}", t.val_list) # # Terminate robot # elif t.task_type == TaskType.terminate_robot: # debug("robot_control", # "Robot {} program terminated by command", # settings.ROBOT_NAME) # self.terminate() # else: # Catch all # debug("execute_task", "Unable to handle TaskType: {}", t.task_type) # if self.mode.__eq__("debug"): # debug_delay() # return sched_list # def get_new_tasks(self) -> SomeTasks: # """Task source function passed to Schedule constructor # """ # sched_list = [] # if settings.USE_CONTROLS_SOCKETS: # t = Task(TaskType.get_controls, TaskPriority.high, []) # sched_list.append(t) # else: # debug("robot", "Sockets disabled, queuing blink task") # t = Task(TaskType.blink_test, TaskPriority.high, [1, 1]) # sched_list.append(t) # t = Task(TaskType.get_telemetry, TaskPriority.normal, []) # sched_list.append(t) # return sched_list # def terminate(self) -> None: # """Close the sockets connection # """ # if settings.USE_CONTROLS_SOCKETS: # self.socket_connection.terminate() # if settings.USE_TELEMETRY_SOCKETS: # self.telemetry_server.terminate() # self.serial_connection.terminate() # self.set_terminate_flag() # def store_throttle_data(self, throttle_data: dict): # self.store_data(settings.THROTTLE_DATA_NAME, throttle_data) # def serve_throttle_data(self): # return self.get_data(settings.THROTTLE_DATA_NAME) # def 
serve_telemetry_data(self) -> dict: # return self.get_data(settings.TELEMETRY_DATA_NAME) # def store_int_temp_data(self, int_temp: float): # self.store_data(settings.ROBOT_INT_TEMP_NAME, int_temp) | 2.860369 | 3 |
lab01/app/views.py | EstefaniDavila/TECSUP-DAE-2021-2 | 0 | 6615000 | <gh_stars>0
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
return HttpResponse("Saludos desde la vista app")
def suma(request,num1,num2):
op1 = int(num1)
op2 = int(num2)
html = "<html><body>La suma de los numeros es: %s </body></html>" % (op1+op2)
return HttpResponse(html)
def resta(request,num1,num2):
op1 = int(num1)
op2 = int(num2)
html = "<html><body>La resta de los numeros es: %s </body></html>" % (op1-op2)
return HttpResponse(html)
def multiplicacion(request,num1,num2):
op1 = int(num1)
op2 = int(num2)
html = "<html><body>El resultado de la multliplicación es: %s </body></html>" % (op1*op2)
return HttpResponse(html) | from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
return HttpResponse("Saludos desde la vista app")
def suma(request,num1,num2):
op1 = int(num1)
op2 = int(num2)
html = "<html><body>La suma de los numeros es: %s </body></html>" % (op1+op2)
return HttpResponse(html)
def resta(request,num1,num2):
op1 = int(num1)
op2 = int(num2)
html = "<html><body>La resta de los numeros es: %s </body></html>" % (op1-op2)
return HttpResponse(html)
def multiplicacion(request,num1,num2):
op1 = int(num1)
op2 = int(num2)
html = "<html><body>El resultado de la multliplicación es: %s </body></html>" % (op1*op2)
return HttpResponse(html) | en | 0.968116 | # Create your views here. | 2.840882 | 3 |
main.py | vulkomilev/ARBITER | 0 | 6615001 | <reponame>vulkomilev/ARBITER
from arbiter import Arbiter
from utils.utils import DataUnit
from utils.utils import REGRESSION, CATEGORY
from utils.utils import image_loader
print('Loading images ...')
# TODO:add different types of target like linear ,classifier ,timeseries etc
'''
data_schema = [ DataUnit('str', (), None, 'image_name'),
DataUnit('int', (), None, 'bar'),
DataUnit('int', (), None, 'buble'),
DataUnit('int', (), None, 'fill'),
DataUnit('int', (), None, 'rotate'),
DataUnit('2D_F',(64,64), None, 'Image')]
data_schema = [DataUnit('str', (), None, 'timestamp'),
DataUnit('int', (1,), None, 'Asset_ID'),
DataUnit('int', (1,), None, 'Count'),
DataUnit('float', (1,), None, 'Open'),
DataUnit('float', (1,), None, 'High'),
DataUnit('float', (1,), None, 'Low'),
DataUnit('float', (1,), None, 'Close'),
DataUnit('float', (1,), None, 'Volume'),
DataUnit('float', (1,), None, 'VWAP')]
data_schema = [DataUnit('str', (), None, 'Id'),
DataUnit('int', (), None, 'Subject Focus'),
DataUnit('int', (), None, 'Eyes'),
DataUnit('int', (), None, 'Face'),
DataUnit('int', (), None, 'Near'),
DataUnit('int', (), None, 'Action'),
DataUnit('int', (), None, 'Accessory'),
DataUnit('int', (), None, 'Group'),
DataUnit('int', (), None, 'Collage'),
DataUnit('int', (), None, 'Human'),
DataUnit('int', (), None, 'Occlusion'),
DataUnit('int', (), None, 'Info'),
DataUnit('int', (), None, 'Blur'),
DataUnit('float', (), None, 'Pawpularity'),
DataUnit('2D_F',(64,64,3), None, 'Image')]
agent_router = [{'SwinTransformer':{'inputs':['Image'],
'outputs':[{'name':'Pawpularity',
'type':REGRESSION}]}}]
agent_router = [{'LSTM':{'inputs':['Count','Open','High','Low','Close','Volume'],
'outputs':[{'name':'VWAP','type':REGRESSION}]}}]
agent_router = [{'FunctionalAutoencoder':{'inputs':['Image'],
'outputs':[{'name':'Image','type':REGRESSION}]}}]
agent_router = [{'ConvMultihead':{'inputs':['Image'],
'outputs':[{'name':'Image','type':REGRESSION}]}}]
data_schema = [ DataUnit('str', (), None, 'image_name'),
DataUnit('int', (), None, 'bar'),
DataUnit('int', (), None, 'buble'),
DataUnit('int', (), None, 'fill'),
DataUnit('int', (), None, 'rotate'),
DataUnit('2D_F',(64,64), None, 'Image')]
agent_router = [{'ConvMultihead':{'inputs':['Image'],
'outputs':[{'name':'Image','type':REGRESSION}]}}]
data_schema = [ DataUnit('str', (), None, 'Image')]
agent_router = [{'MarkSpaces':{'inputs':['Image'],
'outputs':[{'name':'Image','type':CATEGORY}]}}]
'''
# data_schema = [
# DataUnit('str', (), None, 'name'),
# DataUnit('int', (), None, 'letter'),
# DataUnit('2D_F', (64,64), None, 'Image')]
# agent_router = [{'MyResNet50':{'inputs':['Image'],
# 'outputs':[{'name':'letter','type':CATEGORY}]}}]
# KerasOcrFineTune
# MyResNet50
'''
data_schema = [
DataUnit('int', (), None, 'id'),
DataUnit('int', (), None, 'case_num'),
DataUnit('int', (), None, 'pn_num'),
DataUnit('int', (), None, 'feature_num'),
DataUnit('str', (), None, 'annotation'),
DataUnit('str', (), None, 'location'),
]
agent_router = [{'DDASINDy':{'inputs':['annotation'],
'outputs':[{'name':'location','type':CATEGORY}]}}]
'''
'''
data_schema = [DataUnit('str', (), None, 'timestamp'),
DataUnit('int', (1,), None, 'Asset_ID'),
DataUnit('int', (1,), None, 'Count'),
DataUnit('float', (1,), None, 'Open'),
DataUnit('float', (1,), None, 'High'),
DataUnit('float', (1,), None, 'Low'),
DataUnit('float', (1,), None, 'Close'),
DataUnit('float', (1,), None, 'Volume'),
DataUnit('float', (1,), None, 'VWAP')]
agent_router = [{'LSTM':{'inputs':['Count','Open','High','Low','Close','Volume'],
'outputs':[{'name':'VWAP','type':REGRESSION}]}}]
'''
'''
data_schema = [ DataUnit('str', (), None, 'image_name'),
DataUnit('int', (), None, 'bar'),
DataUnit('int', (), None, 'buble'),
DataUnit('int', (), None, 'fill'),
DataUnit('int', (), None, 'rotate'),
DataUnit('2D_F',(64,64), None, 'Image')]
agent_router = [{'FunctionalAutoencoder':{'inputs':['Image'],
'outputs':[{'name':'Image','type':REGRESSION}]}}]
'''
'''
data_schema = [ DataUnit('str', (), None, 'image_name'),
DataUnit('int', (), None, 'bar'),
DataUnit('int', (), None, 'buble'),
DataUnit('int', (), None, 'fill'),
DataUnit('int', (), None, 'rotate'),
DataUnit('2D_F',(64,64), None, 'Image')]
agent_router = [{'GraphSearch':{'inputs':['Image'],
'outputs':[{'name':'Image','type':REGRESSION}]}}]
'''
'''
data_schema = [
DataUnit('str', (), None, 'name'),
DataUnit('int', (), None, 'number'),
DataUnit('2D_F', (64,64), None, 'Image')]
agent_router = [{'ConstructNetwork':{'inputs':['Image'],
'outputs':[{'name':'letter','type':CATEGORY}]}}]
'''
'''
data_schema = [DataUnit('str', (), None, 'timestamp',is_id=True),
DataUnit('int', (1,), None, 'Asset_ID',is_id=True),
DataUnit('int', (1,), None, 'Count'),
DataUnit('float', (1,), None, 'Open'),
DataUnit('float', (1,), None, 'High'),
DataUnit('float', (1,), None, 'Low'),
DataUnit('float', (1,), None, 'Close'),
DataUnit('float', (1,), None, 'Volume'),
DataUnit('float', (1,), None, 'VWAP')]
agent_router = [{'LSTM':{'inputs':['Count','Open','High','Low','Close','Volume'],
'outputs':[{'name':'VWAP','type':REGRESSION}]}}]
'''
data_schema_input = [DataUnit('str', (), None, 'timestamp',is_id=True),
DataUnit('int', (1,), None, 'Asset_ID',is_id=True),
DataUnit('int', (1,), None, 'Count'),
DataUnit('float', (1,), None, 'Open'),
DataUnit('float', (1,), None, 'High'),
DataUnit('float', (1,), None, 'Low'),
DataUnit('float', (1,), None, 'Close'),
DataUnit('float', (1,), None, 'Volume'),
DataUnit('float', (1,), None, 'VWAP'),
DataUnit('float', (1,), None, 'Target'),
DataUnit('int', (1,), None, 'group_num',is_id=True),
DataUnit('int', (1,), None, 'row_id',is_id=True)]
data_schema_output = [DataUnit('int', (), None, 'group_num',is_id=True),
DataUnit('int', (1,), None, 'row_id',is_id=True),
DataUnit('float', (1,), None, 'Target')]
agent_router = [{'LSTM':{'inputs':['Count','Open','High','Low','Close','Volume'],
'outputs':[{'name':'Target','type':REGRESSION}]}}]
target_type = CATEGORY
# ./data_sets/solvedCaptchas/
#./data_sets/g-research-crypto-forecasting/
# MAKE A ARCH SEARCH OR SOMETHING OTHER SEARCH BASED ON GENETIC ALGORITHM SO THE PC WILL EXPLORE WHILE YOU ARE GONE
def runner(dataset_path, train_name='train', restrict=True, \
size=10, target_name='letter', no_ids=False,
data_schema_input= data_schema_input,
data_schema_output = data_schema_output,
submit_file = 'test',
train_file = 'train',
split=True,THREAD_COUNT = 32):
#image_collection_submit = image_loader(dataset_path
# , train_name='test', restrict=restrict, \
# size=800, target_name='letter', no_ids=False,
# data_schema=data_schema, split=False,THREAD_COUNT_V = THREAD_COUNT)
image_collection_train, image_collection_test = image_loader(dataset_path
, train_name=train_file, restrict=restrict, \
size=200, target_name='letter', no_ids=False,
data_schema=data_schema_input, split=True,THREAD_COUNT_V = THREAD_COUNT)
arbiter = Arbiter(data_schema_input=data_schema_input,
data_schema_output=data_schema_output, target_type=target_type, class_num=image_collection_train['num_classes'],
router_agent=agent_router, skip_arbiter=True)
for i in range(10):
arbiter.train(image_collection_train['image_arr'], train_target='letter', force_train=True, train_arbiter=False)
#arbiter.evaluate(image_collection_train['image_arr'])
image_collection_submit = image_loader(dataset_path
, train_name=submit_file, restrict=False, \
size=20000, target_name='letter', no_ids=False,
data_schema=data_schema_input, split=False,THREAD_COUNT_V = THREAD_COUNT)
arbiter.submit(image_collection_submit['image_arr'])
| from arbiter import Arbiter
from utils.utils import DataUnit
from utils.utils import REGRESSION, CATEGORY
from utils.utils import image_loader
print('Loading images ...')
# TODO:add different types of target like linear ,classifier ,timeseries etc
'''
data_schema = [ DataUnit('str', (), None, 'image_name'),
DataUnit('int', (), None, 'bar'),
DataUnit('int', (), None, 'buble'),
DataUnit('int', (), None, 'fill'),
DataUnit('int', (), None, 'rotate'),
DataUnit('2D_F',(64,64), None, 'Image')]
data_schema = [DataUnit('str', (), None, 'timestamp'),
DataUnit('int', (1,), None, 'Asset_ID'),
DataUnit('int', (1,), None, 'Count'),
DataUnit('float', (1,), None, 'Open'),
DataUnit('float', (1,), None, 'High'),
DataUnit('float', (1,), None, 'Low'),
DataUnit('float', (1,), None, 'Close'),
DataUnit('float', (1,), None, 'Volume'),
DataUnit('float', (1,), None, 'VWAP')]
data_schema = [DataUnit('str', (), None, 'Id'),
DataUnit('int', (), None, 'Subject Focus'),
DataUnit('int', (), None, 'Eyes'),
DataUnit('int', (), None, 'Face'),
DataUnit('int', (), None, 'Near'),
DataUnit('int', (), None, 'Action'),
DataUnit('int', (), None, 'Accessory'),
DataUnit('int', (), None, 'Group'),
DataUnit('int', (), None, 'Collage'),
DataUnit('int', (), None, 'Human'),
DataUnit('int', (), None, 'Occlusion'),
DataUnit('int', (), None, 'Info'),
DataUnit('int', (), None, 'Blur'),
DataUnit('float', (), None, 'Pawpularity'),
DataUnit('2D_F',(64,64,3), None, 'Image')]
agent_router = [{'SwinTransformer':{'inputs':['Image'],
'outputs':[{'name':'Pawpularity',
'type':REGRESSION}]}}]
agent_router = [{'LSTM':{'inputs':['Count','Open','High','Low','Close','Volume'],
'outputs':[{'name':'VWAP','type':REGRESSION}]}}]
agent_router = [{'FunctionalAutoencoder':{'inputs':['Image'],
'outputs':[{'name':'Image','type':REGRESSION}]}}]
agent_router = [{'ConvMultihead':{'inputs':['Image'],
'outputs':[{'name':'Image','type':REGRESSION}]}}]
data_schema = [ DataUnit('str', (), None, 'image_name'),
DataUnit('int', (), None, 'bar'),
DataUnit('int', (), None, 'buble'),
DataUnit('int', (), None, 'fill'),
DataUnit('int', (), None, 'rotate'),
DataUnit('2D_F',(64,64), None, 'Image')]
agent_router = [{'ConvMultihead':{'inputs':['Image'],
'outputs':[{'name':'Image','type':REGRESSION}]}}]
data_schema = [ DataUnit('str', (), None, 'Image')]
agent_router = [{'MarkSpaces':{'inputs':['Image'],
'outputs':[{'name':'Image','type':CATEGORY}]}}]
'''
# data_schema = [
# DataUnit('str', (), None, 'name'),
# DataUnit('int', (), None, 'letter'),
# DataUnit('2D_F', (64,64), None, 'Image')]
# agent_router = [{'MyResNet50':{'inputs':['Image'],
# 'outputs':[{'name':'letter','type':CATEGORY}]}}]
# KerasOcrFineTune
# MyResNet50
'''
data_schema = [
DataUnit('int', (), None, 'id'),
DataUnit('int', (), None, 'case_num'),
DataUnit('int', (), None, 'pn_num'),
DataUnit('int', (), None, 'feature_num'),
DataUnit('str', (), None, 'annotation'),
DataUnit('str', (), None, 'location'),
]
agent_router = [{'DDASINDy':{'inputs':['annotation'],
'outputs':[{'name':'location','type':CATEGORY}]}}]
'''
'''
data_schema = [DataUnit('str', (), None, 'timestamp'),
DataUnit('int', (1,), None, 'Asset_ID'),
DataUnit('int', (1,), None, 'Count'),
DataUnit('float', (1,), None, 'Open'),
DataUnit('float', (1,), None, 'High'),
DataUnit('float', (1,), None, 'Low'),
DataUnit('float', (1,), None, 'Close'),
DataUnit('float', (1,), None, 'Volume'),
DataUnit('float', (1,), None, 'VWAP')]
agent_router = [{'LSTM':{'inputs':['Count','Open','High','Low','Close','Volume'],
'outputs':[{'name':'VWAP','type':REGRESSION}]}}]
'''
'''
data_schema = [ DataUnit('str', (), None, 'image_name'),
DataUnit('int', (), None, 'bar'),
DataUnit('int', (), None, 'buble'),
DataUnit('int', (), None, 'fill'),
DataUnit('int', (), None, 'rotate'),
DataUnit('2D_F',(64,64), None, 'Image')]
agent_router = [{'FunctionalAutoencoder':{'inputs':['Image'],
'outputs':[{'name':'Image','type':REGRESSION}]}}]
'''
'''
data_schema = [ DataUnit('str', (), None, 'image_name'),
DataUnit('int', (), None, 'bar'),
DataUnit('int', (), None, 'buble'),
DataUnit('int', (), None, 'fill'),
DataUnit('int', (), None, 'rotate'),
DataUnit('2D_F',(64,64), None, 'Image')]
agent_router = [{'GraphSearch':{'inputs':['Image'],
'outputs':[{'name':'Image','type':REGRESSION}]}}]
'''
'''
data_schema = [
DataUnit('str', (), None, 'name'),
DataUnit('int', (), None, 'number'),
DataUnit('2D_F', (64,64), None, 'Image')]
agent_router = [{'ConstructNetwork':{'inputs':['Image'],
'outputs':[{'name':'letter','type':CATEGORY}]}}]
'''
'''
data_schema = [DataUnit('str', (), None, 'timestamp',is_id=True),
DataUnit('int', (1,), None, 'Asset_ID',is_id=True),
DataUnit('int', (1,), None, 'Count'),
DataUnit('float', (1,), None, 'Open'),
DataUnit('float', (1,), None, 'High'),
DataUnit('float', (1,), None, 'Low'),
DataUnit('float', (1,), None, 'Close'),
DataUnit('float', (1,), None, 'Volume'),
DataUnit('float', (1,), None, 'VWAP')]
agent_router = [{'LSTM':{'inputs':['Count','Open','High','Low','Close','Volume'],
'outputs':[{'name':'VWAP','type':REGRESSION}]}}]
'''
data_schema_input = [DataUnit('str', (), None, 'timestamp',is_id=True),
DataUnit('int', (1,), None, 'Asset_ID',is_id=True),
DataUnit('int', (1,), None, 'Count'),
DataUnit('float', (1,), None, 'Open'),
DataUnit('float', (1,), None, 'High'),
DataUnit('float', (1,), None, 'Low'),
DataUnit('float', (1,), None, 'Close'),
DataUnit('float', (1,), None, 'Volume'),
DataUnit('float', (1,), None, 'VWAP'),
DataUnit('float', (1,), None, 'Target'),
DataUnit('int', (1,), None, 'group_num',is_id=True),
DataUnit('int', (1,), None, 'row_id',is_id=True)]
data_schema_output = [DataUnit('int', (), None, 'group_num',is_id=True),
DataUnit('int', (1,), None, 'row_id',is_id=True),
DataUnit('float', (1,), None, 'Target')]
agent_router = [{'LSTM':{'inputs':['Count','Open','High','Low','Close','Volume'],
'outputs':[{'name':'Target','type':REGRESSION}]}}]
target_type = CATEGORY
# ./data_sets/solvedCaptchas/
#./data_sets/g-research-crypto-forecasting/
# MAKE A ARCH SEARCH OR SOMETHING OTHER SEARCH BASED ON GENETIC ALGORITHM SO THE PC WILL EXPLORE WHILE YOU ARE GONE
def runner(dataset_path, train_name='train', restrict=True, \
size=10, target_name='letter', no_ids=False,
data_schema_input= data_schema_input,
data_schema_output = data_schema_output,
submit_file = 'test',
train_file = 'train',
split=True,THREAD_COUNT = 32):
#image_collection_submit = image_loader(dataset_path
# , train_name='test', restrict=restrict, \
# size=800, target_name='letter', no_ids=False,
# data_schema=data_schema, split=False,THREAD_COUNT_V = THREAD_COUNT)
image_collection_train, image_collection_test = image_loader(dataset_path
, train_name=train_file, restrict=restrict, \
size=200, target_name='letter', no_ids=False,
data_schema=data_schema_input, split=True,THREAD_COUNT_V = THREAD_COUNT)
arbiter = Arbiter(data_schema_input=data_schema_input,
data_schema_output=data_schema_output, target_type=target_type, class_num=image_collection_train['num_classes'],
router_agent=agent_router, skip_arbiter=True)
for i in range(10):
arbiter.train(image_collection_train['image_arr'], train_target='letter', force_train=True, train_arbiter=False)
#arbiter.evaluate(image_collection_train['image_arr'])
image_collection_submit = image_loader(dataset_path
, train_name=submit_file, restrict=False, \
size=20000, target_name='letter', no_ids=False,
data_schema=data_schema_input, split=False,THREAD_COUNT_V = THREAD_COUNT)
arbiter.submit(image_collection_submit['image_arr']) | en | 0.139282 | # TODO:add different types of target like linear ,classifier ,timeseries etc data_schema = [ DataUnit('str', (), None, 'image_name'), DataUnit('int', (), None, 'bar'), DataUnit('int', (), None, 'buble'), DataUnit('int', (), None, 'fill'), DataUnit('int', (), None, 'rotate'), DataUnit('2D_F',(64,64), None, 'Image')] data_schema = [DataUnit('str', (), None, 'timestamp'), DataUnit('int', (1,), None, 'Asset_ID'), DataUnit('int', (1,), None, 'Count'), DataUnit('float', (1,), None, 'Open'), DataUnit('float', (1,), None, 'High'), DataUnit('float', (1,), None, 'Low'), DataUnit('float', (1,), None, 'Close'), DataUnit('float', (1,), None, 'Volume'), DataUnit('float', (1,), None, 'VWAP')] data_schema = [DataUnit('str', (), None, 'Id'), DataUnit('int', (), None, 'Subject Focus'), DataUnit('int', (), None, 'Eyes'), DataUnit('int', (), None, 'Face'), DataUnit('int', (), None, 'Near'), DataUnit('int', (), None, 'Action'), DataUnit('int', (), None, 'Accessory'), DataUnit('int', (), None, 'Group'), DataUnit('int', (), None, 'Collage'), DataUnit('int', (), None, 'Human'), DataUnit('int', (), None, 'Occlusion'), DataUnit('int', (), None, 'Info'), DataUnit('int', (), None, 'Blur'), DataUnit('float', (), None, 'Pawpularity'), DataUnit('2D_F',(64,64,3), None, 'Image')] agent_router = [{'SwinTransformer':{'inputs':['Image'], 'outputs':[{'name':'Pawpularity', 'type':REGRESSION}]}}] agent_router = [{'LSTM':{'inputs':['Count','Open','High','Low','Close','Volume'], 'outputs':[{'name':'VWAP','type':REGRESSION}]}}] agent_router = [{'FunctionalAutoencoder':{'inputs':['Image'], 'outputs':[{'name':'Image','type':REGRESSION}]}}] agent_router = [{'ConvMultihead':{'inputs':['Image'], 'outputs':[{'name':'Image','type':REGRESSION}]}}] data_schema = [ DataUnit('str', (), None, 'image_name'), DataUnit('int', (), None, 'bar'), DataUnit('int', (), None, 'buble'), DataUnit('int', (), None, 'fill'), DataUnit('int', (), None, 'rotate'), 
DataUnit('2D_F',(64,64), None, 'Image')] agent_router = [{'ConvMultihead':{'inputs':['Image'], 'outputs':[{'name':'Image','type':REGRESSION}]}}] data_schema = [ DataUnit('str', (), None, 'Image')] agent_router = [{'MarkSpaces':{'inputs':['Image'], 'outputs':[{'name':'Image','type':CATEGORY}]}}] # data_schema = [ # DataUnit('str', (), None, 'name'), # DataUnit('int', (), None, 'letter'), # DataUnit('2D_F', (64,64), None, 'Image')] # agent_router = [{'MyResNet50':{'inputs':['Image'], # 'outputs':[{'name':'letter','type':CATEGORY}]}}] # KerasOcrFineTune # MyResNet50 data_schema = [ DataUnit('int', (), None, 'id'), DataUnit('int', (), None, 'case_num'), DataUnit('int', (), None, 'pn_num'), DataUnit('int', (), None, 'feature_num'), DataUnit('str', (), None, 'annotation'), DataUnit('str', (), None, 'location'), ] agent_router = [{'DDASINDy':{'inputs':['annotation'], 'outputs':[{'name':'location','type':CATEGORY}]}}] data_schema = [DataUnit('str', (), None, 'timestamp'), DataUnit('int', (1,), None, 'Asset_ID'), DataUnit('int', (1,), None, 'Count'), DataUnit('float', (1,), None, 'Open'), DataUnit('float', (1,), None, 'High'), DataUnit('float', (1,), None, 'Low'), DataUnit('float', (1,), None, 'Close'), DataUnit('float', (1,), None, 'Volume'), DataUnit('float', (1,), None, 'VWAP')] agent_router = [{'LSTM':{'inputs':['Count','Open','High','Low','Close','Volume'], 'outputs':[{'name':'VWAP','type':REGRESSION}]}}] data_schema = [ DataUnit('str', (), None, 'image_name'), DataUnit('int', (), None, 'bar'), DataUnit('int', (), None, 'buble'), DataUnit('int', (), None, 'fill'), DataUnit('int', (), None, 'rotate'), DataUnit('2D_F',(64,64), None, 'Image')] agent_router = [{'FunctionalAutoencoder':{'inputs':['Image'], 'outputs':[{'name':'Image','type':REGRESSION}]}}] data_schema = [ DataUnit('str', (), None, 'image_name'), DataUnit('int', (), None, 'bar'), DataUnit('int', (), None, 'buble'), DataUnit('int', (), None, 'fill'), DataUnit('int', (), None, 'rotate'), 
DataUnit('2D_F',(64,64), None, 'Image')] agent_router = [{'GraphSearch':{'inputs':['Image'], 'outputs':[{'name':'Image','type':REGRESSION}]}}] data_schema = [ DataUnit('str', (), None, 'name'), DataUnit('int', (), None, 'number'), DataUnit('2D_F', (64,64), None, 'Image')] agent_router = [{'ConstructNetwork':{'inputs':['Image'], 'outputs':[{'name':'letter','type':CATEGORY}]}}] data_schema = [DataUnit('str', (), None, 'timestamp',is_id=True), DataUnit('int', (1,), None, 'Asset_ID',is_id=True), DataUnit('int', (1,), None, 'Count'), DataUnit('float', (1,), None, 'Open'), DataUnit('float', (1,), None, 'High'), DataUnit('float', (1,), None, 'Low'), DataUnit('float', (1,), None, 'Close'), DataUnit('float', (1,), None, 'Volume'), DataUnit('float', (1,), None, 'VWAP')] agent_router = [{'LSTM':{'inputs':['Count','Open','High','Low','Close','Volume'], 'outputs':[{'name':'VWAP','type':REGRESSION}]}}] # ./data_sets/solvedCaptchas/ #./data_sets/g-research-crypto-forecasting/ # MAKE A ARCH SEARCH OR SOMETHING OTHER SEARCH BASED ON GENETIC ALGORITHM SO THE PC WILL EXPLORE WHILE YOU ARE GONE #image_collection_submit = image_loader(dataset_path # , train_name='test', restrict=restrict, \ # size=800, target_name='letter', no_ids=False, # data_schema=data_schema, split=False,THREAD_COUNT_V = THREAD_COUNT) #arbiter.evaluate(image_collection_train['image_arr']) | 2.200573 | 2 |
sectionlessconfigparser/sectionlessconfigparser.py | thecaffiend/sectionlessconfigparser | 0 | 6615002 | <reponame>thecaffiend/sectionlessconfigparser<filename>sectionlessconfigparser/sectionlessconfigparser.py
# -*- coding: utf-8 -*-
"""
Config parser that handles key/value config files with no section headers.
"""
from configparser import ConfigParser
from io import StringIO
class SectionlessConfigParser(ConfigParser, object):
# fake section header for the config file. needed so we can use the config
# parser class python provides
DUMMY_SECTION = "DUMMY_SECTION"
def read_config(self, fname):
"""
Read/parse the config file.
"""
with open(fname) as cfg_stream:
# some trickery.
# want to use the built in config parser, but it wants the file to
# be separated in '[sections]'. So, add a fake section to the whole
# thing when read, but before sending the content stream to the
# parser...
# Concept found in many places such as:
# http://stackoverflow.com/a/10746467
hdr = "[%s]\n" % (self.DUMMY_SECTION)
cfg_stream = StringIO(hdr + cfg_stream.read())
self.readfp(cfg_stream)
# TODO: rather than make a new method, change this to have same interface as
# 'get' for configparser objects, but fill in the section
# automatically
def get_val(self, option, default):
"""
Get the value for a specified option key. Option is case-insensitive.
There's only one section, so fill that in automatically.
"""
return self.get(self.DUMMY_SECTION, option, fallback=default)
def items(self):
"""
Get a list of all (key, value) tuples in the config file
"""
return super(SectionlessConfigParser, self).items(self.DUMMY_SECTION)
| # -*- coding: utf-8 -*-
"""
Config parser that handles key/value config files with no section headers.
"""
from configparser import ConfigParser
from io import StringIO
class SectionlessConfigParser(ConfigParser, object):
# fake section header for the config file. needed so we can use the config
# parser class python provides
DUMMY_SECTION = "DUMMY_SECTION"
def read_config(self, fname):
"""
Read/parse the config file.
"""
with open(fname) as cfg_stream:
# some trickery.
# want to use the built in config parser, but it wants the file to
# be separated in '[sections]'. So, add a fake section to the whole
# thing when read, but before sending the content stream to the
# parser...
# Concept found in many places such as:
# http://stackoverflow.com/a/10746467
hdr = "[%s]\n" % (self.DUMMY_SECTION)
cfg_stream = StringIO(hdr + cfg_stream.read())
self.readfp(cfg_stream)
# TODO: rather than make a new method, change this to have same interface as
# 'get' for configparser objects, but fill in the section
# automatically
def get_val(self, option, default):
"""
Get the value for a specified option key. Option is case-insensitive.
There's only one section, so fill that in automatically.
"""
return self.get(self.DUMMY_SECTION, option, fallback=default)
def items(self):
"""
Get a list of all (key, value) tuples in the config file
"""
return super(SectionlessConfigParser, self).items(self.DUMMY_SECTION) | en | 0.770715 | # -*- coding: utf-8 -*- Config parser that handles key/value config files with no section headers. # fake section header for the config file. needed so we can use the config # parser class python provides Read/parse the config file. # some trickery. # want to use the built in config parser, but it wants the file to # be separated in '[sections]'. So, add a fake section to the whole # thing when read, but before sending the content stream to the # parser... # Concept found in many places such as: # http://stackoverflow.com/a/10746467 # TODO: rather than make a new method, change this to have same interface as # 'get' for configparser objects, but fill in the section # automatically Get the value for a specified option key. Option is case-insensitive. There's only one section, so fill that in automatically. Get a list of all (key, value) tuples in the config file | 3.500718 | 4 |
Python3/5.longest-palindromic-substring.py | canhetingsky/LeetCode | 1 | 6615003 | <filename>Python3/5.longest-palindromic-substring.py<gh_stars>1-10
#!/usr/bin/python3
# encoding: utf-8
#
# @lc app=leetcode id=5 lang=python3
#
# [5] Longest Palindromic Substring
#
class Solution:
def longestPalindrome(self, s: str) -> str:
length = len(s)
palindrome = ''
for i in range(length):
# for even, like "abba"
temp = self.getPalindrome(s, i, i + 1)
if len(temp) > len(palindrome):
palindrome = temp
# for odd , like "aba"
temp = self.getPalindrome(s, i, i)
if len(temp) > len(palindrome):
palindrome = temp
return palindrome
def getPalindrome(self, s: str, l: int, r: int) -> str:
while l >= 0 and r < len(s) and s[l] == s[r]:
l -= 1
r += 1
return s[l + 1:r]
| <filename>Python3/5.longest-palindromic-substring.py<gh_stars>1-10
#!/usr/bin/python3
# encoding: utf-8
#
# @lc app=leetcode id=5 lang=python3
#
# [5] Longest Palindromic Substring
#
class Solution:
def longestPalindrome(self, s: str) -> str:
length = len(s)
palindrome = ''
for i in range(length):
# for even, like "abba"
temp = self.getPalindrome(s, i, i + 1)
if len(temp) > len(palindrome):
palindrome = temp
# for odd , like "aba"
temp = self.getPalindrome(s, i, i)
if len(temp) > len(palindrome):
palindrome = temp
return palindrome
def getPalindrome(self, s: str, l: int, r: int) -> str:
while l >= 0 and r < len(s) and s[l] == s[r]:
l -= 1
r += 1
return s[l + 1:r]
| en | 0.664244 | #!/usr/bin/python3 # encoding: utf-8 # # @lc app=leetcode id=5 lang=python3 # # [5] Longest Palindromic Substring # # for even, like "abba" # for odd , like "aba" | 3.725477 | 4 |
QuadTreeManager.py | EvanYangAB/FIQTI | 0 | 6615004 | import os
from hilbertcurve.hilbertcurve import HilbertCurve
from pyqtree import Index
import pickle
import sys
import math
import json
import pandas
from epivizfileserver.parser import BigWig
import struct
class QuadTreeManager(object):
def __init__(self, genome, max_items = 128, base_path = os.getcwd()):
self.file_mapping = {}
self.file_objects = {}
# self.file_chrids = {}
self.genome = genome
self.max_items = max_items
self.base_path = base_path
self.file_counter = 0
def hcoords(self, x, chromLength, dims = 2):
hlevel = math.ceil(math.log2(chromLength)/dims)
# print("hlevel, ", hlevel)
hilbert_curve = HilbertCurve(hlevel, dims)
[x,y] = hilbert_curve.coordinates_from_distance(x)
return x, y, hlevel
def get_file_btree(self, file, zoomlvl):
bw = BigWig(file)
bw.getZoomHeader()
tree = bw.getTree(-2)
return tree, bw
def read_node(self, tree, offset, endian="="):
data = tree[offset:offset + 4]
(rIsLeaf, rReserved, rCount) = struct.unpack(endian + "BBH", data)
return {"rIsLeaf": rIsLeaf, "rCount": rCount, "rOffset": offset + 4}
def traverse_nodes(self, node, zoomlvl = -2, tree = None, result = [], fullIndexOffset = None, endian="="):
offset = node.get("rOffset")
if node.get("rIsLeaf"):
for i in range(0, node.get("rCount")):
data = tree[offset + (i * 32) : offset + ( (i+1) * 32 )]
(rStartChromIx, rStartBase, rEndChromIx, rEndBase, rdataOffset, rDataSize) = struct.unpack(endian + "IIIIQQ", data)
result.append((rStartChromIx, rStartBase, rEndChromIx, rEndBase, rdataOffset, rDataSize))
else:
for i in range(0, node.get("rCount")):
data = tree[offset + (i * 24) : offset + ( (i+1) * 24 )]
(rStartChromIx, rStartBase, rEndChromIx, rEndBase, rdataOffset) = struct.unpack(endian + "IIIIQ", data)
# remove index offset since the stored binary starts from 0
diffOffset = fullIndexOffset
childNode = self.read_node(tree, rdataOffset - diffOffset, endian)
self.traverse_nodes(childNode, zoomlvl, result=result, tree = tree,
fullIndexOffset = fullIndexOffset, endian = endian)
return result
def get_leaf_nodes(self, tree, bw, zoomlvl):
findexOffset = bw.header.get("fullIndexOffset")
offset = 48
root = self.read_node(tree, offset, endian = bw.endian)
records = self.traverse_nodes(root, zoomlvl, tree = tree, fullIndexOffset = findexOffset, endian = bw.endian)
df = pandas.DataFrame(records, columns=["rStartChromIx", "rStartBase", "rEndChromIx", "rEndBase",
"rdataOffset", "rDataSize"])
return df
def get_file_chr(self, bw):
bw.getId("chr1")
return bw.chrmIds
def add_to_index(self, file, zoomlvl = -2):
tree, bw = self.get_file_btree(file, zoomlvl)
df = self.get_leaf_nodes(tree, bw, zoomlvl)
chrmTree = self.get_file_chr(bw)
self.file_mapping[file] = self.file_counter
self.file_counter += 1
self.file_objects[file] = bw
for chrm in chrmTree.keys():
chromLength = self.genome[chrm]
dims = 2
hlevel = math.ceil(math.log2(chromLength)/dims)
# print("hlevel", hlevel)
x_y_dim = math.ceil(math.pow(2, hlevel))
# print("max x|y =", x_y_dim)
tree = Index(bbox=(0, 0, x_y_dim, x_y_dim), disk = base_path + "quadtree." + chrm + ".index")
chrmId = chrmTree[chrm]
df = df[df["rStartChromIx"] == chrmId]
# print("\t df shape - ", df.shape)
for i, row in df.iterrows():
x, y, _ = hcoords(row["rStartBase"], chromLength)
tree.insert((row["rStartBase"], row["rEndBase"], row["rdataOffset"], row["rDataSize"], fileIds[file]), (x, y, x+1, y+1))
def query(self, file, chr, start, end, zoomlvl = -2):
chromLength = self.genome[chr]
dims = 2
hlevel = math.ceil(math.log2(chromLength)/dims)
# print("hlevel", hlevel)
x_y_dim = math.ceil(math.pow(2, hlevel))
# print("max x|y =", x_y_dim)
tree = Index(bbox=(0, 0, x_y_dim, x_y_dim), disk = base_path + "quadtree." + chr + ".index")
xstart, ystart, _ = hcoords(start, chromLength)
xend, yend, _ = hcoords(end, chromLength)
overlapbbox = (start - 1, start - 1, end + 1, end + 1)
matches = tree.intersect(overlapbbox)
df = pandas.DataFrame(matches, columns=["start", "end", "offset", "size", "fileid"])
df = df[df["fileid"] == self.file_mapping[file]]
bw = self.file_objects[file]
chrmId = bw.chrmIds[chr]
result = []
for i, row in df.iterrows():
result += bw.parseLeafDataNode(chrmId, start, end, zoomlvl, chrmId, row["start"], chrmId, row["end"], row["offset"], row["size"])
result = toDataFrame(values, bw.columns)
result["chr"] = chr
return result
| import os
from hilbertcurve.hilbertcurve import HilbertCurve
from pyqtree import Index
import pickle
import sys
import math
import json
import pandas
from epivizfileserver.parser import BigWig
import struct
class QuadTreeManager(object):
    """Builds and queries per-chromosome quadtree indexes over BigWig files.

    Genomic intervals are mapped onto a 2-D Hilbert curve so that ranges
    close on the genome stay close in the plane, then stored in an on-disk
    quadtree (pyqtree ``Index``) per chromosome.
    """

    def __init__(self, genome, max_items = 128, base_path = os.getcwd()):
        """
        Args:
            genome: mapping of chromosome name -> chromosome length in bases.
            max_items: maximum items per quadtree node (kept for callers;
                currently not forwarded to the pyqtree index).
            base_path: directory prefix for the per-chromosome index files.
        """
        self.file_mapping = {}   # file path -> small integer file id
        self.file_objects = {}   # file path -> open BigWig parser
        # self.file_chrids = {}
        self.genome = genome
        self.max_items = max_items
        self.base_path = base_path
        self.file_counter = 0    # next file id to hand out

    def hcoords(self, x, chromLength, dims = 2):
        """Map a base position to (x, y) on a Hilbert curve covering the chromosome.

        Returns:
            (x, y, hlevel): planar coordinates plus the curve order used.
        """
        hlevel = math.ceil(math.log2(chromLength) / dims)
        hilbert_curve = HilbertCurve(hlevel, dims)
        x, y = hilbert_curve.coordinates_from_distance(x)
        return x, y, hlevel

    def get_file_btree(self, file, zoomlvl):
        """Open a BigWig file and return (raw R-tree bytes, parser object)."""
        bw = BigWig(file)
        bw.getZoomHeader()
        tree = bw.getTree(-2)   # -2 selects the full-resolution index
        return tree, bw

    def read_node(self, tree, offset, endian="="):
        """Decode an R-tree node header at `offset` in the raw tree bytes."""
        data = tree[offset:offset + 4]
        (rIsLeaf, rReserved, rCount) = struct.unpack(endian + "BBH", data)
        return {"rIsLeaf": rIsLeaf, "rCount": rCount, "rOffset": offset + 4}

    def traverse_nodes(self, node, zoomlvl = -2, tree = None, result = None,
                       fullIndexOffset = None, endian="="):
        """Recursively collect all leaf records under `node` into `result`.

        Fixed: `result` previously defaulted to a shared mutable list, so
        records silently accumulated across calls; it now defaults to a
        fresh list per call (passing a list explicitly still works).
        """
        if result is None:
            result = []
        offset = node.get("rOffset")
        if node.get("rIsLeaf"):
            for i in range(0, node.get("rCount")):
                # Leaf entries are 32 bytes: 4 uint32 + 2 uint64.
                data = tree[offset + (i * 32): offset + ((i + 1) * 32)]
                (rStartChromIx, rStartBase, rEndChromIx, rEndBase,
                 rdataOffset, rDataSize) = struct.unpack(endian + "IIIIQQ", data)
                result.append((rStartChromIx, rStartBase, rEndChromIx, rEndBase,
                               rdataOffset, rDataSize))
        else:
            for i in range(0, node.get("rCount")):
                # Internal entries are 24 bytes: 4 uint32 + 1 uint64 child offset.
                data = tree[offset + (i * 24): offset + ((i + 1) * 24)]
                (rStartChromIx, rStartBase, rEndChromIx, rEndBase,
                 rdataOffset) = struct.unpack(endian + "IIIIQ", data)
                # Child offsets are absolute file offsets; shift by the index
                # start since `tree` was read beginning at the index.
                childNode = self.read_node(tree, rdataOffset - fullIndexOffset, endian)
                self.traverse_nodes(childNode, zoomlvl, result=result, tree=tree,
                                    fullIndexOffset=fullIndexOffset, endian=endian)
        return result

    def get_leaf_nodes(self, tree, bw, zoomlvl):
        """Return every leaf record of the file's R-tree as a DataFrame."""
        findexOffset = bw.header.get("fullIndexOffset")
        offset = 48   # skip the fixed-size R-tree header
        root = self.read_node(tree, offset, endian=bw.endian)
        records = self.traverse_nodes(root, zoomlvl, tree=tree,
                                      fullIndexOffset=findexOffset, endian=bw.endian)
        return pandas.DataFrame(records, columns=["rStartChromIx", "rStartBase",
                                                  "rEndChromIx", "rEndBase",
                                                  "rdataOffset", "rDataSize"])

    def get_file_chr(self, bw):
        """Return the file's chromosome-name -> id mapping (forcing it to load)."""
        bw.getId("chr1")
        return bw.chrmIds

    def add_to_index(self, file, zoomlvl = -2):
        """Register `file` and insert its leaf records into per-chromosome quadtrees.

        Fixed: `base_path`, `hcoords` and the file-id lookup are now accessed
        through `self` (they were unqualified NameErrors / the undefined
        `fileIds`), and the leaf-record frame is filtered into a new variable
        per chromosome instead of destructively shrinking `df` for every
        subsequent chromosome.
        """
        tree, bw = self.get_file_btree(file, zoomlvl)
        df = self.get_leaf_nodes(tree, bw, zoomlvl)
        chrmTree = self.get_file_chr(bw)
        self.file_mapping[file] = self.file_counter
        self.file_counter += 1
        self.file_objects[file] = bw
        for chrm in chrmTree.keys():
            chromLength = self.genome[chrm]
            dims = 2
            hlevel = math.ceil(math.log2(chromLength) / dims)
            x_y_dim = math.ceil(math.pow(2, hlevel))
            index = Index(bbox=(0, 0, x_y_dim, x_y_dim),
                          disk=self.base_path + "quadtree." + chrm + ".index")
            chrmId = chrmTree[chrm]
            chrm_df = df[df["rStartChromIx"] == chrmId]
            for i, row in chrm_df.iterrows():
                x, y, _ = self.hcoords(row["rStartBase"], chromLength)
                index.insert((row["rStartBase"], row["rEndBase"], row["rdataOffset"],
                              row["rDataSize"], self.file_mapping[file]),
                             (x, y, x + 1, y + 1))

    def query(self, file, chr, start, end, zoomlvl = -2):
        """Return intervals of `file` overlapping [start, end] on chromosome `chr`.

        Fixed: `base_path`/`hcoords` are accessed through `self`, and the
        decoded records (previously built from the undefined name `values`)
        are assembled into the returned DataFrame.
        """
        chromLength = self.genome[chr]
        dims = 2
        hlevel = math.ceil(math.log2(chromLength) / dims)
        x_y_dim = math.ceil(math.pow(2, hlevel))
        tree = Index(bbox=(0, 0, x_y_dim, x_y_dim),
                     disk=self.base_path + "quadtree." + chr + ".index")
        # NOTE(review): the index is populated with Hilbert coordinates but the
        # intersect box below uses raw base positions, matching the original
        # behaviour -- confirm which coordinate space is intended.
        xstart, ystart, _ = self.hcoords(start, chromLength)
        xend, yend, _ = self.hcoords(end, chromLength)
        overlapbbox = (start - 1, start - 1, end + 1, end + 1)
        matches = tree.intersect(overlapbbox)
        df = pandas.DataFrame(matches, columns=["start", "end", "offset", "size", "fileid"])
        df = df[df["fileid"] == self.file_mapping[file]]
        bw = self.file_objects[file]
        chrmId = bw.chrmIds[chr]
        records = []
        for i, row in df.iterrows():
            records += bw.parseLeafDataNode(chrmId, start, end, zoomlvl,
                                            chrmId, row["start"], chrmId, row["end"],
                                            row["offset"], row["size"])
        result = pandas.DataFrame(records, columns=bw.columns)
        result["chr"] = chr
        return result
| en | 0.44882 | # self.file_chrids = {} # print("hlevel, ", hlevel) # remove index offset since the stored binary starts from 0 # print("hlevel", hlevel) # print("max x|y =", x_y_dim) # print("\t df shape - ", df.shape) # print("hlevel", hlevel) # print("max x|y =", x_y_dim) | 2.51547 | 3 |
uwhpsc/codes/python/script2.py | philipwangdk/HPC | 0 | 6615005 | <filename>uwhpsc/codes/python/script2.py
"""
$UWHPSC/codes/python/script2.py
Sample script to print values of a function at a few points.
The printing is only done if the file is executed as a script, not if it is
imported as a module.
"""
import numpy as np
def f(x):
    """Evaluate the quadratic x**2 + 1 at ``x``."""
    return x * x + 1.0
def print_table():
    # Print a small x / f(x) table for three evenly spaced points in [0, 4].
    # NOTE: Python 2 print statements; this file is not Python 3 compatible.
    print " x f(x)"
    for x in np.linspace(0,4,3):
        print "%8.3f %8.3f" % (x, f(x))

# Only print the table when executed as a script, not when imported.
if __name__ == "__main__":
    print_table()
| <filename>uwhpsc/codes/python/script2.py
"""
$UWHPSC/codes/python/script2.py
Sample script to print values of a function at a few points.
The printing is only done if the file is executed as a script, not if it is
imported as a module.
"""
import numpy as np
def f(x):
    """Evaluate the quadratic x**2 + 1 at ``x``."""
    return x * x + 1.0
def print_table():
    # Print a small x / f(x) table for three evenly spaced points in [0, 4].
    # NOTE: Python 2 print statements; this file is not Python 3 compatible.
    print " x f(x)"
    for x in np.linspace(0,4,3):
        print "%8.3f %8.3f" % (x, f(x))

# Only print the table when executed as a script, not when imported.
if __name__ == "__main__":
    print_table()
| en | 0.925016 | $UWHPSC/codes/python/script2.py Sample script to print values of a function at a few points. The printing is only done if the file is executed as a script, not if it is imported as a module. A quadratic function. | 3.227774 | 3 |
bixpy/bix.py | robotfinance/BIX | 8 | 6615006 | <gh_stars>1-10
import time
import redis
import json
import threading
from pubnub import Pubnub
r = redis.StrictRedis(host='localhost', port=6379, db=0)
pubnub = Pubnub(publish_key='YOUR-PUB-KEY', subscribe_key='YOUR-SUB-KEY')
api_path = '/var/www/yourdomain.org/htdocs/api/v1/bix/yourfile.json'
bix_old = {}
bix_old['ask'] = 0
bix_old['bid'] = 0
bfx_old = ''
okc_old = ''
bst_old = ''
rest_bfx_old = ''
rest_okc_old = ''
rest_bst_old = ''
bfx_new = r.get('bfx')
okc_new = r.get('okc')
bst_new = r.get('bst')
rest_bfx_new = r.get('bfx-rest')
rest_okc_new = r.get('okc-rest')
rest_bst_new = r.get('bst-rest')
bmx_new = r.get('bmx-rest')
def PubNubcallback(message):
    # Shared PubNub success/error callback: just log the payload to stdout.
    print(message)
def calculateBIX(bfx_new, okc_new, bst_new, rest_bfx_new, rest_okc_new, rest_bst_new, bmx_new):
    """Compute the Bitcoin Index (BIX) from the latest exchange tickers.

    Blends Bitfinex (bfx), OKCoin (okc) and Bitstamp (bst) quotes with fixed
    base weights, falls back from websocket to REST data when a feed looks
    stale, de-weights an exchange whose mid price deviates strongly from the
    others, then persists and publishes the result when it changed.

    Each argument is a JSON string decoding to a dict with at least 'ask',
    'bid', 'mid' and an epoch timestamp 'ts' (BitMEX is used for 'mid' only).
    Python 2 syntax (print statements).
    """
    global api_path   # NOTE(review): only read below, so this `global` is redundant
    global bix_old
    # Base blend weights (sum to 1.0).
    bfx_weight = 0.5
    bst_weight = 0.22
    okc_weight = 0.28
    bfx_ticker = json.loads(bfx_new)
    okc_ticker = json.loads(okc_new)
    bst_ticker = json.loads(bst_new)
    rest_bfx_ticker = json.loads(rest_bfx_new)
    rest_okc_ticker = json.loads(rest_okc_new)
    rest_bst_ticker = json.loads(rest_bst_new)
    bmx_ticker = json.loads(bmx_new)
    # Age of each feed in milliseconds, used for the staleness failover below.
    time_now = time.time()
    bfx_ticker_age = round((time_now - bfx_ticker['ts'])*1000, 2)
    bfx_rest_ticker_age = round((time_now - rest_bfx_ticker['ts'])*1000, 2)
    okc_ticker_age = round((time_now - okc_ticker['ts'])*1000, 2)
    okc_rest_ticker_age = round((time_now - rest_okc_ticker['ts'])*1000, 2)
    bst_ticker_age = round((time_now - bst_ticker['ts'])*1000, 2)
    bst_rest_ticker_age = round((time_now - rest_bst_ticker['ts'])*1000, 2)
    #print "Age in ms - BFX: ", bfx_ticker_age, "BFX REST: ", bfx_rest_ticker_age, "OKC: ", okc_ticker_age, "OKC REST: ", okc_rest_ticker_age, "BST: ", bst_ticker_age, "BST REST: ", bst_rest_ticker_age
    # Failover: if a websocket ticker is older than 10 s AND older than its
    # REST counterpart, use the REST snapshot instead.
    if bfx_ticker_age > 10000 and bfx_ticker_age > bfx_rest_ticker_age:
        print "BFX websocket data possibly outdated. Failover to REST."
        bfx_ticker = rest_bfx_ticker
    if okc_ticker_age > 10000 and okc_ticker_age > okc_rest_ticker_age:
        print "OKC websocket data possibly outdated. Failover to REST."
        okc_ticker = rest_okc_ticker
    if bst_ticker_age > 10000 and bst_ticker_age > bst_rest_ticker_age:
        print "BST websocket data possibly outdated. Failover to REST."
        bst_ticker = rest_bst_ticker
    # Adjusting Weightings / Dynamic De-Weighting Mechanism
    # Reference price: mean of the four mids after dropping the highest and
    # the lowest (i.e. the average of the two middle quotes).
    av_price = (bmx_ticker['mid']+bfx_ticker['mid']+okc_ticker['mid']+bst_ticker['mid']-max([bfx_ticker['mid'],okc_ticker['mid'],bst_ticker['mid'], bmx_ticker['mid']])-min([bfx_ticker['mid'],okc_ticker['mid'],bst_ticker['mid'],bmx_ticker['mid']]))/2
    # Absolute deviation of each exchange's mid from the reference price.
    bfx_dev = max([av_price, bfx_ticker['mid']]) - min([av_price, bfx_ticker['mid']])
    okc_dev = max([av_price, okc_ticker['mid']]) - min([av_price, okc_ticker['mid']])
    bst_dev = max([av_price, bst_ticker['mid']]) - min([av_price, bst_ticker['mid']])
    dev_sum = bfx_dev + okc_dev + bst_dev
    # Share of the total deviation attributable to each exchange.
    bfx_dev_rel = round(bfx_dev/dev_sum, 2)
    okc_dev_rel = round(okc_dev/dev_sum, 2)
    bst_dev_rel = round(bst_dev/dev_sum, 2)
    # De-weight factor: 1.0 (no change) until an exchange carries more than
    # ~66% of the total deviation, then ramps linearly down to 0 at ~94%.
    bfx_weight_adj = 1-min([1, max([0, (bfx_dev_rel-0.66)*3.5])])
    okc_weight_adj = 1-min([1, max([0, (okc_dev_rel-0.66)*3.5])])
    bst_weight_adj = 1-min([1, max([0, (bst_dev_rel-0.66)*3.5])])
    # At most one exchange is de-weighted; its lost weight is redistributed to
    # the other two proportionally to their base weights.
    if bfx_weight_adj < 1:
        adj_bfx_weight = bfx_weight*bfx_weight_adj
        adj_okc_weight = (1-adj_bfx_weight)*okc_weight/(okc_weight+bst_weight)
        adj_bst_weight = (1-adj_bfx_weight)*bst_weight/(okc_weight+bst_weight)
    elif okc_weight_adj < 1:
        adj_okc_weight = okc_weight*okc_weight_adj
        adj_bfx_weight = (1-adj_okc_weight)*bfx_weight/(bfx_weight+bst_weight)
        adj_bst_weight = (1-adj_okc_weight)*bst_weight/(bfx_weight+bst_weight)
    elif bst_weight_adj < 1:
        adj_bst_weight = bst_weight*bst_weight_adj
        adj_bfx_weight = (1-adj_bst_weight)*bfx_weight/(bfx_weight+okc_weight)
        adj_okc_weight = (1-adj_bst_weight)*okc_weight/(bfx_weight+okc_weight)
    else:
        adj_okc_weight = okc_weight
        adj_bfx_weight = bfx_weight
        adj_bst_weight = bst_weight
    print "BFX DEV: ", bfx_dev, bfx_dev_rel, bfx_weight_adj, bfx_weight, adj_bfx_weight
    print "OKC DEV: ", okc_dev, okc_dev_rel, okc_weight_adj, okc_weight, adj_okc_weight
    print "BST DEV: ", bst_dev, bst_dev_rel, bst_weight_adj, bst_weight, adj_bst_weight
    # Weighted index quote.
    bix_ticker = {}
    bix_ticker['ask'] = round((bfx_ticker['ask']*adj_bfx_weight + okc_ticker['ask']*adj_okc_weight + bst_ticker['ask']*adj_bst_weight), 2)
    bix_ticker['bid'] = round((bfx_ticker['bid']*adj_bfx_weight + okc_ticker['bid']*adj_okc_weight + bst_ticker['bid']*adj_bst_weight), 2)
    bix_ticker['mid'] = round((bix_ticker['ask']+ bix_ticker['bid'])/2, 2)
    # Only persist/publish when the quote actually changed.
    if bix_old['ask'] != bix_ticker['ask'] or bix_old['bid'] != bix_ticker['bid']:
        bix_ticker['ts'] = time.time()
        bix = json.dumps(bix_ticker, ensure_ascii=False)
        bix_old['ask'] = bix_ticker['ask']
        bix_old['bid'] = bix_ticker['bid']
        print "BIX: ", bix
        # Cache the quote in Redis and write the JSON API file.
        print r.set('bix', bix)
        with open(api_path, 'w') as outfile:
            json.dump(bix_ticker, outfile)
        # Publish all mids to the live PubNub chart channel.
        message = {}
        message['eon'] = {}
        message['eon']['bix'] = bix_ticker['mid']
        message['eon']['bfx'] = bfx_ticker['mid']
        message['eon']['okc'] = okc_ticker['mid']
        message['eon']['bst'] = bst_ticker['mid']
        message['eon']['bmx'] = bmx_ticker['mid']
        pubnub.publish('bix-chart', message, callback=PubNubcallback, error=PubNubcallback)
def main():
    """Listen on the 'bix-usd' Redis pub/sub channel and recompute the BIX.

    Each message is a JSON ticker tagged with its 'source' exchange; a fresh
    ticker from bfx/okc/bst triggers a recompute, while 'bmx-rest' only
    refreshes the cached BitMEX reference price.
    """
    global r, bfx_new, okc_new, bst_new, rest_bfx_new, rest_okc_new, rest_bst_new, bmx_new
    sub = r.pubsub()
    sub.subscribe('bix-usd')
    while True:
        # sub.listen() blocks and yields messages indefinitely; the outer
        # while True restarts iteration if the generator ever ends.
        for m in sub.listen():
            # Redis delivers the integer 1 as the subscribe confirmation;
            # everything else is a ticker payload.
            if m['data'] != 1:
                print m['data']
                ticker = json.loads(m['data'])
                # NOTE(review): independent ifs, not elif -- harmless here
                # because 'source' matches at most one branch.
                if ticker['source'] == 'bfx':
                    bfx_new = m['data']
                    calculateBIX(bfx_new, okc_new, bst_new, rest_bfx_new, rest_okc_new, rest_bst_new, bmx_new)
                if ticker['source'] == 'okc':
                    okc_new = m['data']
                    calculateBIX(bfx_new, okc_new, bst_new, rest_bfx_new, rest_okc_new, rest_bst_new, bmx_new)
                if ticker['source'] == 'bst':
                    bst_new = m['data']
                    calculateBIX(bfx_new, okc_new, bst_new, rest_bfx_new, rest_okc_new, rest_bst_new, bmx_new)
                if ticker['source'] == 'bmx-rest':
                    bmx_new = m['data']
            else:
                print m

if __name__ == '__main__':
    main()
| import time
import redis
import json
import threading
from pubnub import Pubnub
r = redis.StrictRedis(host='localhost', port=6379, db=0)
pubnub = Pubnub(publish_key='YOUR-PUB-KEY', subscribe_key='YOUR-SUB-KEY')
api_path = '/var/www/yourdomain.org/htdocs/api/v1/bix/yourfile.json'
bix_old = {}
bix_old['ask'] = 0
bix_old['bid'] = 0
bfx_old = ''
okc_old = ''
bst_old = ''
rest_bfx_old = ''
rest_okc_old = ''
rest_bst_old = ''
bfx_new = r.get('bfx')
okc_new = r.get('okc')
bst_new = r.get('bst')
rest_bfx_new = r.get('bfx-rest')
rest_okc_new = r.get('okc-rest')
rest_bst_new = r.get('bst-rest')
bmx_new = r.get('bmx-rest')
def PubNubcallback(message):
print(message)
def calculateBIX(bfx_new, okc_new, bst_new, rest_bfx_new, rest_okc_new, rest_bst_new, bmx_new):
global api_path
global bix_old
bfx_weight = 0.5
bst_weight = 0.22
okc_weight = 0.28
bfx_ticker = json.loads(bfx_new)
okc_ticker = json.loads(okc_new)
bst_ticker = json.loads(bst_new)
rest_bfx_ticker = json.loads(rest_bfx_new)
rest_okc_ticker = json.loads(rest_okc_new)
rest_bst_ticker = json.loads(rest_bst_new)
bmx_ticker = json.loads(bmx_new)
time_now = time.time()
bfx_ticker_age = round((time_now - bfx_ticker['ts'])*1000, 2)
bfx_rest_ticker_age = round((time_now - rest_bfx_ticker['ts'])*1000, 2)
okc_ticker_age = round((time_now - okc_ticker['ts'])*1000, 2)
okc_rest_ticker_age = round((time_now - rest_okc_ticker['ts'])*1000, 2)
bst_ticker_age = round((time_now - bst_ticker['ts'])*1000, 2)
bst_rest_ticker_age = round((time_now - rest_bst_ticker['ts'])*1000, 2)
#print "Age in ms - BFX: ", bfx_ticker_age, "BFX REST: ", bfx_rest_ticker_age, "OKC: ", okc_ticker_age, "OKC REST: ", okc_rest_ticker_age, "BST: ", bst_ticker_age, "BST REST: ", bst_rest_ticker_age
if bfx_ticker_age > 10000 and bfx_ticker_age > bfx_rest_ticker_age:
print "BFX websocket data possibly outdated. Failover to REST."
bfx_ticker = rest_bfx_ticker
if okc_ticker_age > 10000 and okc_ticker_age > okc_rest_ticker_age:
print "OKC websocket data possibly outdated. Failover to REST."
okc_ticker = rest_okc_ticker
if bst_ticker_age > 10000 and bst_ticker_age > bst_rest_ticker_age:
print "BST websocket data possibly outdated. Failover to REST."
bst_ticker = rest_bst_ticker
# Adjusting Weightings / Dynamic De-Weighting Mechanism
av_price = (bmx_ticker['mid']+bfx_ticker['mid']+okc_ticker['mid']+bst_ticker['mid']-max([bfx_ticker['mid'],okc_ticker['mid'],bst_ticker['mid'], bmx_ticker['mid']])-min([bfx_ticker['mid'],okc_ticker['mid'],bst_ticker['mid'],bmx_ticker['mid']]))/2
bfx_dev = max([av_price, bfx_ticker['mid']]) - min([av_price, bfx_ticker['mid']])
okc_dev = max([av_price, okc_ticker['mid']]) - min([av_price, okc_ticker['mid']])
bst_dev = max([av_price, bst_ticker['mid']]) - min([av_price, bst_ticker['mid']])
dev_sum = bfx_dev + okc_dev + bst_dev
bfx_dev_rel = round(bfx_dev/dev_sum, 2)
okc_dev_rel = round(okc_dev/dev_sum, 2)
bst_dev_rel = round(bst_dev/dev_sum, 2)
bfx_weight_adj = 1-min([1, max([0, (bfx_dev_rel-0.66)*3.5])])
okc_weight_adj = 1-min([1, max([0, (okc_dev_rel-0.66)*3.5])])
bst_weight_adj = 1-min([1, max([0, (bst_dev_rel-0.66)*3.5])])
if bfx_weight_adj < 1:
adj_bfx_weight = bfx_weight*bfx_weight_adj
adj_okc_weight = (1-adj_bfx_weight)*okc_weight/(okc_weight+bst_weight)
adj_bst_weight = (1-adj_bfx_weight)*bst_weight/(okc_weight+bst_weight)
elif okc_weight_adj < 1:
adj_okc_weight = okc_weight*okc_weight_adj
adj_bfx_weight = (1-adj_okc_weight)*bfx_weight/(bfx_weight+bst_weight)
adj_bst_weight = (1-adj_okc_weight)*bst_weight/(bfx_weight+bst_weight)
elif bst_weight_adj < 1:
adj_bst_weight = bst_weight*bst_weight_adj
adj_bfx_weight = (1-adj_bst_weight)*bfx_weight/(bfx_weight+okc_weight)
adj_okc_weight = (1-adj_bst_weight)*okc_weight/(bfx_weight+okc_weight)
else:
adj_okc_weight = okc_weight
adj_bfx_weight = bfx_weight
adj_bst_weight = bst_weight
print "BFX DEV: ", bfx_dev, bfx_dev_rel, bfx_weight_adj, bfx_weight, adj_bfx_weight
print "OKC DEV: ", okc_dev, okc_dev_rel, okc_weight_adj, okc_weight, adj_okc_weight
print "BST DEV: ", bst_dev, bst_dev_rel, bst_weight_adj, bst_weight, adj_bst_weight
bix_ticker = {}
bix_ticker['ask'] = round((bfx_ticker['ask']*adj_bfx_weight + okc_ticker['ask']*adj_okc_weight + bst_ticker['ask']*adj_bst_weight), 2)
bix_ticker['bid'] = round((bfx_ticker['bid']*adj_bfx_weight + okc_ticker['bid']*adj_okc_weight + bst_ticker['bid']*adj_bst_weight), 2)
bix_ticker['mid'] = round((bix_ticker['ask']+ bix_ticker['bid'])/2, 2)
if bix_old['ask'] != bix_ticker['ask'] or bix_old['bid'] != bix_ticker['bid']:
bix_ticker['ts'] = time.time()
bix = json.dumps(bix_ticker, ensure_ascii=False)
bix_old['ask'] = bix_ticker['ask']
bix_old['bid'] = bix_ticker['bid']
print "BIX: ", bix
print r.set('bix', bix)
with open(api_path, 'w') as outfile:
json.dump(bix_ticker, outfile)
message = {}
message['eon'] = {}
message['eon']['bix'] = bix_ticker['mid']
message['eon']['bfx'] = bfx_ticker['mid']
message['eon']['okc'] = okc_ticker['mid']
message['eon']['bst'] = bst_ticker['mid']
message['eon']['bmx'] = bmx_ticker['mid']
pubnub.publish('bix-chart', message, callback=PubNubcallback, error=PubNubcallback)
def main():
global r, bfx_new, okc_new, bst_new, rest_bfx_new, rest_okc_new, rest_bst_new, bmx_new
sub = r.pubsub()
sub.subscribe('bix-usd')
while True:
for m in sub.listen():
if m['data'] != 1:
print m['data']
ticker = json.loads(m['data'])
if ticker['source'] == 'bfx':
bfx_new = m['data']
calculateBIX(bfx_new, okc_new, bst_new, rest_bfx_new, rest_okc_new, rest_bst_new, bmx_new)
if ticker['source'] == 'okc':
okc_new = m['data']
calculateBIX(bfx_new, okc_new, bst_new, rest_bfx_new, rest_okc_new, rest_bst_new, bmx_new)
if ticker['source'] == 'bst':
bst_new = m['data']
calculateBIX(bfx_new, okc_new, bst_new, rest_bfx_new, rest_okc_new, rest_bst_new, bmx_new)
if ticker['source'] == 'bmx-rest':
bmx_new = m['data']
else:
print m
if __name__ == '__main__':
main() | en | 0.550795 | #print "Age in ms - BFX: ", bfx_ticker_age, "BFX REST: ", bfx_rest_ticker_age, "OKC: ", okc_ticker_age, "OKC REST: ", okc_rest_ticker_age, "BST: ", bst_ticker_age, "BST REST: ", bst_rest_ticker_age # Adjusting Weightings / Dynamic De-Weighting Mechanism | 2.283329 | 2 |
CatalystCodingContest2017/level_2/2prueba.py | DVRodri8/Competitive-programs | 1 | 6615007 | <filename>CatalystCodingContest2017/level_2/2prueba.py<gh_stars>1-10
def distancia(ciu_A, ciu_B):
    """Euclidean distance between two cities given as [name, x, y] records
    (coordinates may be strings)."""
    dx = int(ciu_A[1]) - int(ciu_B[1])
    dy = int(ciu_A[2]) - int(ciu_B[2])
    return (dx * dx + dy * dy) ** 0.5
def tiempo_en_coche(distancia):
    """Travel time by car at a speed of 15 distance units per time unit."""
    velocidad_coche = 15.0
    return distancia / velocidad_coche
def tiempo_en_hyperloop(distancia):
    """Travel time by hyperloop: speed 250, plus a fixed 200 boarding overhead."""
    return 200 + distancia / 250.0
def locate_ciudad(lista_ciudades, ciudad):
    """Return the [name, x, y] record whose name equals `ciudad`
    (implicitly None when absent)."""
    for registro in lista_ciudades:
        if registro[0] == ciudad:
            return registro
# --- Script body: read the city file and time the mixed car/hyperloop trip.
# Expected input (level2-4.txt): a header line, one "name x y" line per city,
# then the overall trip endpoints and finally the hyperloop terminal pair.
fichero = open('level2-4.txt', 'r')   # NOTE(review): never closed; a `with` block would be safer
lectura = fichero.read()
lectura = lectura.split('\n')
if '' in lectura:
    lectura.remove('')
lectura.pop(0)   # drop the header line
# The last two lines are the hyperloop pair and the trip pair.
viaje_hyperloop = lectura.pop()
viaje_total = lectura.pop()
viaje_total = viaje_total.split(' ')
viaje_hyperloop = viaje_hyperloop.split(' ')
# Parse the remaining "name x y" city lines into [name, x, y] records.
lista_ciudades = []
for ciudad in lectura:
    coordenadas = []
    c = ciudad.split(' ')
    coordenadas.append(c[0])
    coordenadas.append(c[1])
    coordenadas.append(c[2])
    lista_ciudades.append(coordenadas)
# Resolve the trip endpoints (1, 2) and hyperloop terminals (3, 4).
ciudad_1 = locate_ciudad(lista_ciudades , viaje_total[0])
ciudad_2 = locate_ciudad(lista_ciudades , viaje_total[1])
ciudad_3 = locate_ciudad(lista_ciudades , viaje_hyperloop[0])
ciudad_4 = locate_ciudad(lista_ciudades , viaje_hyperloop[1])
# Drive to whichever hyperloop terminal is closer to the origin; the car
# ride after the hyperloop leaves from the other terminal.
distancia_1 = distancia(ciudad_1, ciudad_3)
distancia_2 = distancia(ciudad_1, ciudad_4)
minimo = min(distancia_1, distancia_2)
maximo = max(distancia_1, distancia_2)   # NOTE(review): unused
if minimo ==distancia_1:
    ciudad_coche_2=ciudad_4
else:
    ciudad_coche_2=ciudad_3
del distancia_1
del distancia_2
# Total time = car to the near terminal + hyperloop between terminals +
# car from the far terminal to the destination.
t1 = tiempo_en_coche(minimo)
t2 = tiempo_en_hyperloop(distancia(ciudad_3, ciudad_4))
t3 = tiempo_en_coche(distancia(ciudad_coche_2, ciudad_2))
print(round(t1+t2+t3))
| <filename>CatalystCodingContest2017/level_2/2prueba.py<gh_stars>1-10
def distancia(ciu_A, ciu_B):
restaX = int(ciu_A[1]) - int(ciu_B[1])
restaY = int(ciu_A[2]) - int(ciu_B[2])
dis = (restaX*restaX + restaY*restaY)**0.5
return dis
def tiempo_en_coche(distancia):
return distancia/15.0
def tiempo_en_hyperloop(distancia):
return distancia/250.0 +200
def locate_ciudad(lista_ciudades, ciudad):
for c in lista_ciudades:
if ciudad == c[0]:
return c
fichero = open('level2-4.txt', 'r')
lectura = fichero.read()
lectura = lectura.split('\n')
if '' in lectura:
lectura.remove('')
lectura.pop(0)
viaje_hyperloop = lectura.pop()
viaje_total = lectura.pop()
viaje_total = viaje_total.split(' ')
viaje_hyperloop = viaje_hyperloop.split(' ')
lista_ciudades = []
for ciudad in lectura:
coordenadas = []
c = ciudad.split(' ')
coordenadas.append(c[0])
coordenadas.append(c[1])
coordenadas.append(c[2])
lista_ciudades.append(coordenadas)
ciudad_1 = locate_ciudad(lista_ciudades , viaje_total[0])
ciudad_2 = locate_ciudad(lista_ciudades , viaje_total[1])
ciudad_3 = locate_ciudad(lista_ciudades , viaje_hyperloop[0])
ciudad_4 = locate_ciudad(lista_ciudades , viaje_hyperloop[1])
distancia_1 = distancia(ciudad_1, ciudad_3)
distancia_2 = distancia(ciudad_1, ciudad_4)
minimo = min(distancia_1, distancia_2)
maximo = max(distancia_1, distancia_2)
if minimo ==distancia_1:
ciudad_coche_2=ciudad_4
else:
ciudad_coche_2=ciudad_3
del distancia_1
del distancia_2
t1 = tiempo_en_coche(minimo)
t2 = tiempo_en_hyperloop(distancia(ciudad_3, ciudad_4))
t3 = tiempo_en_coche(distancia(ciudad_coche_2, ciudad_2))
print(round(t1+t2+t3))
| none | 1 | 2.916135 | 3 | |
test/vso_tools/install_nni.py | dutxubo/nni | 9,680 | 6615008 | <reponame>dutxubo/nni
import sys
from _common import build_wheel, run_command
# Optional extras spec: a second CLI argument like "SMAC,BOHB" becomes the
# pip extras suffix "[SMAC,BOHB]"; otherwise install the bare wheel.
# NOTE(review): sys.argv[1] is not consumed here -- presumably used by the
# _common helpers; confirm against the build scripts.
if len(sys.argv) <= 2:
    extra_dep = ''
else:
    extra_dep = f'[{sys.argv[2]}]'
# Build the NNI wheel, then install it (with extras) into this interpreter.
wheel = build_wheel()
run_command(f'{sys.executable} -m pip install {wheel}{extra_dep}')
| import sys
from _common import build_wheel, run_command
if len(sys.argv) <= 2:
extra_dep = ''
else:
extra_dep = f'[{sys.argv[2]}]'
wheel = build_wheel()
run_command(f'{sys.executable} -m pip install {wheel}{extra_dep}') | none | 1 | 2.064873 | 2 | |
src/GimelStudio/utils/text.py | iwoithe/Gimel-Studio | 47 | 6615009 | # ----------------------------------------------------------------------------
# Gimel Studio Copyright 2019-2021 by <NAME> and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# FILE: text.py
# AUTHOR(S): <NAME>
# PURPOSE: Provides utility text manipulation functions
# ----------------------------------------------------------------------------
def TruncateText(text_string, str_length=18):
    """ Truncate the text string after a certain
    number of characters.

    Strings longer than ``str_length`` are cut to ``str_length - 1``
    characters and suffixed with '...'; shorter or equal strings are
    returned unchanged.
    """
    # Slice directly instead of copying the string char-by-char into a list.
    if len(text_string) > str_length:
        return '{}...'.format(text_string[:str_length - 1])
    return text_string
| # ----------------------------------------------------------------------------
# Gimel Studio Copyright 2019-2021 by <NAME> and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# FILE: text.py
# AUTHOR(S): <NAME>
# PURPOSE: Provides utility text manipulation functions
# ----------------------------------------------------------------------------
def TruncateText(text_string, str_length=18):
    """ Truncate the text string after a certain
    number of characters.

    Strings longer than ``str_length`` are cut to ``str_length - 1``
    characters and suffixed with '...'; shorter or equal strings are
    returned unchanged.
    """
    # Slice directly instead of copying the string char-by-char into a list.
    if len(text_string) > str_length:
        return '{}...'.format(text_string[:str_length - 1])
    return text_string
| en | 0.714443 | # ---------------------------------------------------------------------------- # Gimel Studio Copyright 2019-2021 by <NAME> and contributors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # FILE: text.py # AUTHOR(S): <NAME> # PURPOSE: Provides utility text manipulation functions # ---------------------------------------------------------------------------- Truncate the text string after a certain number of characters. | 3.049463 | 3 |
python/democli/commands/cmd_workbook.py | smtp4kumar/tableau_python_rest_api_samples | 0 | 6615010 | import click
from democli.cli import pass_context
from democli.utils.click_util import common_options
from democli.utils.log_util import create_logger
from democli.auth.session_mgr import SessionMgr
from democli.workbook.workbook_mgr import WorkbookMgr
# Module-level logger named after this module.
logger = create_logger(__name__)

# common options for sub commands
# Shared click options (server + credentials) applied to every subcommand
# through the @common_options decorator.
_common_options = [
    click.option(
        '-s', '--server', required=True, help='The specified server address'
    ),
    click.option(
        '-u', '--username', required=True, help='The username(not ID) of the user to sign in as'
    ),
    click.option(
        '-p', '--password', required=True, help='The password of the user to sign in as'
    )
]
@click.group('workbook', short_help='Root command to manage workbook')
@pass_context
def cli(ctx):
    """Root command to manage workbook"""
    # Group container only; all behaviour lives in the registered subcommands.
    pass
@cli.command('move_to_project', short_help='Move workbook to destination project')
@common_options(_common_options)
@click.option(
    '-w', '--workbook_name', required=True, help='The name of workbook to move'
)
@click.option(
    '-d', '--dest_project', required=True, help='The destination project'
)
@pass_context
def move_to_project(ctx, server, username, password, workbook_name, dest_project):
    """Move workbook to destination project

    Signs in, resolves the destination project id and the workbook id, moves
    the workbook on the same site, then signs out.

    NOTE(review): `UserDefinedFieldError` is never imported or defined in this
    module, so the already-in-project branch raises NameError instead --
    confirm the intended exception type.
    """
    logger.info("\n*Moving '{0}' workbook to '{1}' project as {2}*".format(workbook_name, dest_project, username))
    ##### STEP 1: Sign in #####
    logger.info("\n1. Signing in as " + username)
    session_mgr = SessionMgr(server, username, password)
    auth_token, site_id, user_id = session_mgr.sign_in()
    ##### STEP 2: Find new project id #####
    logger.info("\n2. Finding project id of '{0}'".format(dest_project))
    workbook_mgr = WorkbookMgr(server, auth_token, site_id)
    dest_project_id = workbook_mgr.get_project_id(dest_project)
    ##### STEP 3: Find workbook id #####
    logger.info("\n3. Finding workbook id of '{0}'".format(workbook_name))
    source_project_id, workbook_id = workbook_mgr.get_workbook_id(user_id, workbook_name)
    # Check if the workbook is already in the desired project
    if source_project_id == dest_project_id:
        error = "Workbook already in destination project"
        raise UserDefinedFieldError(error)
    ##### STEP 4: Move workbook #####
    logger.info("\n4. Moving workbook to '{0}'".format(dest_project))
    workbook_mgr.move_workbook(workbook_id, dest_project_id)
    ##### STEP 5: Sign out #####
    logger.info("\n5. Signing out and invalidating the authentication token")
    session_mgr.sign_out(auth_token)
@cli.command('move_to_server', short_help='Move workbook to destination server')
@common_options(_common_options)
@click.option(
    '-w', '--workbook_name', required=True, help='The name of workbook to move'
)
@click.option(
    '--dest_server', required=True, help='The destination server'
)
@click.option(
    '--dest_username', required=True, help='The destination username'
)
@click.option(
    '--dest_password', required=True, help='The destination user password'
)
@click.option(
    '--dest_site_id', required=True, help='The destination site id'
)
@pass_context
def move_to_server(ctx, server, username, password, workbook_name, dest_server,
                   dest_username, dest_password, dest_site_id):
    """Move workbook to destination server

    Downloads the named workbook from the source server, publishes it to the
    'default' project on the destination server, then deletes the original.

    Fixed: the --dest_site_id option was declared but missing from the
    function signature, so click raised TypeError on every invocation. It is
    now accepted and passed as the destination site, mirroring move_to_site.
    """
    logger.info("\n*Moving '{0}' workbook to the 'default' project in {1}*".format(workbook_name, dest_server))
    ##### STEP 1: Sign in #####
    logger.info("\n1. Signing in to both sites to obtain authentication tokens")
    # Source server
    source_session_mgr = SessionMgr(server, username, password)
    source_auth_token, source_site_id, source_user_id = source_session_mgr.sign_in()
    # Destination server (sign in to the requested site; sign_in returns the
    # authoritative site id, which replaces the CLI value below).
    dest_session_mgr = SessionMgr(dest_server, dest_username, dest_password, site=dest_site_id)
    dest_auth_token, dest_site_id, dest_user_id = dest_session_mgr.sign_in()
    ##### STEP 2: Find workbook id #####
    logger.info("\n2. Finding workbook id of '{0}'".format(workbook_name))
    source_workbook_mgr = WorkbookMgr(server, source_auth_token, source_site_id)
    # NOTE(review): move_to_project unpacks get_workbook_id() into two values;
    # confirm which return shape WorkbookMgr actually provides.
    workbook_id = source_workbook_mgr.get_workbook_id(source_user_id, workbook_name)
    ##### STEP 3: Find 'default' project id for destination server #####
    logger.info("\n3. Finding 'default' project id for {0}".format(dest_server))
    dest_workbook_mgr = WorkbookMgr(dest_server, dest_auth_token, dest_site_id)
    dest_project_id = dest_workbook_mgr.get_default_project_id()
    ##### STEP 4: Download workbook #####
    logger.info("\n4. Downloading the workbook to move")
    workbook_filename = source_workbook_mgr.download(workbook_id)
    ##### STEP 5: Publish to new site #####
    logger.info("\n5. Publishing workbook to {0}".format(dest_server))
    dest_workbook_mgr.publish_workbook(workbook_filename, dest_project_id)
    ##### STEP 6: Deleting workbook from the source site #####
    logger.info("\n6. Deleting workbook from the original site and temp file")
    source_workbook_mgr.delete_workbook(workbook_id, workbook_filename)
    ##### STEP 7: Sign out #####
    logger.info("\n7. Signing out and invalidating the authentication token")
    source_session_mgr.sign_out(source_auth_token)
    dest_session_mgr.sign_out(dest_auth_token)
@cli.command('move_to_site', short_help='Move workbook to destination site')
@common_options(_common_options)
@click.option(
    '-w', '--workbook_name', required=True, help='The name of workbook to move'
)
@click.option(
    '--dest_site', required=True, help='The destination site id'
)
@pass_context
def move_to_site(ctx, server, username, password, workbook_name, dest_site):
    """Move workbook to destination site

    On a single server: downloads the named workbook from the default site,
    publishes it to the 'default' project on `dest_site`, then deletes the
    original.

    NOTE(review): this command's helper calls disagree with move_to_server --
    get_default_project_id is called with three args here but zero there,
    download/delete_workbook use different arities, and the destination
    WorkbookMgr is built with source_auth_token rather than dest_auth_token.
    Confirm the WorkbookMgr API before relying on either command.
    """
    logger.info("\n*Moving '{0}' workbook to the 'default' project in {1}*".format(workbook_name, dest_site))
    ##### STEP 1: Sign in #####
    logger.info("\n1. Signing in to both sites to obtain authentication tokens")
    # Default site
    source_session_mgr = SessionMgr(server, username, password)
    source_auth_token, source_site_id, source_user_id = source_session_mgr.sign_in()
    # Specified site
    dest_session_mgr = SessionMgr(server, username, password, site=dest_site)
    dest_auth_token, dest_site_id, dest_user_id = dest_session_mgr.sign_in()
    ##### STEP 2: Find workbook id #####
    logger.info("\n2. Finding workbook id of '{0}' from source site".format(workbook_name))
    source_workbook_mgr = WorkbookMgr(server, source_auth_token, source_site_id)
    workbook_id = source_workbook_mgr.get_workbook_id(source_user_id, workbook_name)
    ##### STEP 3: Find 'default' project id for destination site #####
    logger.info("\n3. Finding 'default' project id for destination site")
    dest_workbook_mgr = WorkbookMgr(server, source_auth_token, dest_site_id)
    dest_project_id = dest_workbook_mgr.get_default_project_id(server, dest_auth_token, dest_site_id)
    ##### STEP 4: Download workbook #####
    logger.info("\n4. Downloading the workbook to move from source site")
    workbook_filename, workbook_content = source_workbook_mgr.download(workbook_id)
    ##### STEP 5: Publish to new site #####
    logger.info("\n5. Publishing workbook to destination site")
    dest_workbook_mgr.publish_workbook(workbook_filename, workbook_content, dest_project_id)
    ##### STEP 6: Deleting workbook from the source site #####
    logger.info("\n6. Deleting workbook from the source site")
    source_workbook_mgr.delete_workbook(workbook_id)
    ##### STEP 7: Sign out #####
    logger.info("\n7. Signing out and invalidating the authentication token")
    source_session_mgr.sign_out(source_auth_token)
    dest_session_mgr.sign_out(dest_auth_token)
| import click
from democli.cli import pass_context
from democli.utils.click_util import common_options
from democli.utils.log_util import create_logger
from democli.auth.session_mgr import SessionMgr
from democli.workbook.workbook_mgr import WorkbookMgr
# Module-level logger for all workbook subcommands.
logger = create_logger(__name__)
# common options for sub commands
# Applied to every subcommand via ``@common_options(_common_options)``:
# the server endpoint plus the credentials used for the source-site sign-in.
_common_options = [
    click.option(
        '-s', '--server', required=True, help='The specified server address'
    ),
    click.option(
        '-u', '--username', required=True, help='The username(not ID) of the user to sign in as'
    ),
    click.option(
        '-p', '--password', required=True, help='The password of the user to sign in as'
    )
]
@click.group('workbook', short_help='Root command to manage workbook')
@pass_context
def cli(ctx):
    """Root command to manage workbook"""
    # Container group only: the actual behaviour lives in the subcommands
    # (move_to_project / move_to_server / move_to_site) registered below.
    pass
@cli.command('move_to_project', short_help='Move workbook to destination project')
@common_options(_common_options)
@click.option(
    '-w', '--workbook_name', required=True, help='The name of workbook to move'
)
@click.option(
    '-d', '--dest_project', required=True, help='The destination project'
)
@pass_context
def move_to_project(ctx, server, username, password, workbook_name, dest_project):
    """Move workbook to destination project"""
    # Flow: sign in once, resolve destination project id and the workbook's
    # current project + id, move the workbook server-side, sign out.
    logger.info("\n*Moving '{0}' workbook to '{1}' project as {2}*".format(workbook_name, dest_project, username))
    ##### STEP 1: Sign in #####
    logger.info("\n1. Signing in as " + username)
    session_mgr = SessionMgr(server, username, password)
    auth_token, site_id, user_id = session_mgr.sign_in()
    ##### STEP 2: Find new project id #####
    logger.info("\n2. Finding project id of '{0}'".format(dest_project))
    workbook_mgr = WorkbookMgr(server, auth_token, site_id)
    dest_project_id = workbook_mgr.get_project_id(dest_project)
    ##### STEP 3: Find workbook id #####
    logger.info("\n3. Finding workbook id of '{0}'".format(workbook_name))
    source_project_id, workbook_id = workbook_mgr.get_workbook_id(user_id, workbook_name)
    # Moving a workbook onto itself is a no-op; fail fast with a clean CLI
    # error.  Bug fix: the original raised ``UserDefinedFieldError``, a name
    # that is never defined or imported in this module, so this branch
    # crashed with a NameError instead of reporting the problem.
    if source_project_id == dest_project_id:
        raise click.ClickException("Workbook already in destination project")
    ##### STEP 4: Move workbook #####
    logger.info("\n4. Moving workbook to '{0}'".format(dest_project))
    workbook_mgr.move_workbook(workbook_id, dest_project_id)
    ##### STEP 5: Sign out #####
    logger.info("\n5. Signing out and invalidating the authentication token")
    session_mgr.sign_out(auth_token)
@cli.command('move_to_server', short_help='Move workbook to destination server')
@common_options(_common_options)
@click.option(
    '-w', '--workbook_name', required=True, help='The name of workbook to move'
)
@click.option(
    '--dest_server', required=True, help='The destination server'
)
@click.option(
    '--dest_username', required=True, help='The destination username'
)
@click.option(
    '--dest_password', required=True, help='The destination user password'
)
@click.option(
    '--dest_site_id', required=True, help='The destination site id'
)
@pass_context
def move_to_server(ctx, server, username, password, workbook_name, dest_server,
                   dest_username, dest_password, dest_site_id=None):
    """Move workbook to destination server"""
    # Bug fix: the ``--dest_site_id`` option is declared above, and click
    # passes every declared option to the callback as a keyword argument.
    # Without this parameter in the signature the command always crashed
    # with ``TypeError: unexpected keyword argument 'dest_site_id'``.
    # NOTE(review): the supplied value is currently superseded by the site
    # id returned from the destination sign-in below — confirm intent.
    logger.info("\n*Moving '{0}' workbook to the 'default' project in {1}*".format(workbook_name, dest_server))
    ##### STEP 1: Sign in #####
    logger.info("\n1. Signing in to both sites to obtain authentication tokens")
    # Source server
    source_session_mgr = SessionMgr(server, username, password)
    source_auth_token, source_site_id, source_user_id = source_session_mgr.sign_in()
    # Destination server
    dest_session_mgr = SessionMgr(dest_server, dest_username, dest_password)
    dest_auth_token, dest_site_id, dest_user_id = dest_session_mgr.sign_in()
    ##### STEP 2: Find workbook id #####
    logger.info("\n2. Finding workbook id of '{0}'".format(workbook_name))
    source_workbook_mgr = WorkbookMgr(server, source_auth_token, source_site_id)
    workbook_id = source_workbook_mgr.get_workbook_id(source_user_id, workbook_name)
    ##### STEP 3: Find 'default' project id for destination server #####
    logger.info("\n3. Finding 'default' project id for {0}".format(dest_server))
    dest_workbook_mgr = WorkbookMgr(dest_server, dest_auth_token, dest_site_id)
    dest_project_id = dest_workbook_mgr.get_default_project_id()
    ##### STEP 4: Download workbook #####
    logger.info("\n4. Downloading the workbook to move")
    workbook_filename = source_workbook_mgr.download(workbook_id)
    ##### STEP 5: Publish to new site #####
    logger.info("\n5. Publishing workbook to {0}".format(dest_server))
    dest_workbook_mgr.publish_workbook(workbook_filename, dest_project_id)
    ##### STEP 6: Deleting workbook from the source site #####
    logger.info("\n6. Deleting workbook from the original site and temp file")
    source_workbook_mgr.delete_workbook(workbook_id, workbook_filename)
    ##### STEP 7: Sign out #####
    logger.info("\n7. Signing out and invalidating the authentication token")
    source_session_mgr.sign_out(source_auth_token)
    dest_session_mgr.sign_out(dest_auth_token)
@cli.command('move_to_site', short_help='Move workbook to destination site')
@common_options(_common_options)
@click.option(
    '-w', '--workbook_name', required=True, help='The name of workbook to move'
)
@click.option(
    '--dest_site', required=True, help='The destination site id'
)
@pass_context
def move_to_site(ctx, server, username, password, workbook_name, dest_site):
    """Move workbook to destination site"""
    # Flow mirrors ``move_to_server`` but both sessions run against the same
    # server: source = default site, destination = the site named by
    # ``--dest_site``.
    logger.info("\n*Moving '{0}' workbook to the 'default' project in {1}*".format(workbook_name, dest_site))
    ##### STEP 1: Sign in #####
    logger.info("\n1. Signing in to both sites to obtain authentication tokens")
    # Default site
    source_session_mgr = SessionMgr(server, username, password)
    source_auth_token, source_site_id, source_user_id = source_session_mgr.sign_in()
    # Specified site
    dest_session_mgr = SessionMgr(server, username, password, site=dest_site)
    dest_auth_token, dest_site_id, dest_user_id = dest_session_mgr.sign_in()
    ##### STEP 2: Find workbook id #####
    logger.info("\n2. Finding workbook id of '{0}' from source site".format(workbook_name))
    source_workbook_mgr = WorkbookMgr(server, source_auth_token, source_site_id)
    workbook_id = source_workbook_mgr.get_workbook_id(source_user_id, workbook_name)
    ##### STEP 3: Find 'default' project id for destination site #####
    logger.info("\n3. Finding 'default' project id for destination site")
    # Bug fix: operations on the destination site must authenticate with the
    # destination token (the original passed ``source_auth_token`` here,
    # which is not valid for the destination site).
    dest_workbook_mgr = WorkbookMgr(server, dest_auth_token, dest_site_id)
    # Consistency fix: call shape now matches ``move_to_server`` — the
    # manager already holds server, token and site id.
    dest_project_id = dest_workbook_mgr.get_default_project_id()
    ##### STEP 4: Download workbook #####
    logger.info("\n4. Downloading the workbook to move from source site")
    workbook_filename, workbook_content = source_workbook_mgr.download(workbook_id)
    ##### STEP 5: Publish to new site #####
    logger.info("\n5. Publishing workbook to destination site")
    dest_workbook_mgr.publish_workbook(workbook_filename, workbook_content, dest_project_id)
    ##### STEP 6: Deleting workbook from the source site #####
    logger.info("\n6. Deleting workbook from the source site")
    source_workbook_mgr.delete_workbook(workbook_id)
    ##### STEP 7: Sign out #####
    logger.info("\n7. Signing out and invalidating the authentication token")
    source_session_mgr.sign_out(source_auth_token)
    dest_session_mgr.sign_out(dest_auth_token)
| en | 0.544536 | # common options for sub commands Root command to manage workbook Move workbook to destination project ##### STEP 1: Sign in ##### ##### STEP 2: Find new project id ##### ##### STEP 3: Find workbook id ##### # Check if the workbook is already in the desired project ##### STEP 4: Move workbook ##### ##### STEP 5: Sign out ##### Move workbook to destination server ##### STEP 1: Sign in ##### # Source server # Destination server ##### STEP 2: Find workbook id ##### ##### STEP 3: Find 'default' project id for destination server ##### ##### STEP 4: Download workbook ##### ##### STEP 5: Publish to new site ##### ##### STEP 6: Deleting workbook from the source site ##### ##### STEP 7: Sign out ##### Move workbook to destination site ##### STEP 1: Sign in ##### # Default site # Specified site ##### STEP 2: Find workbook id ##### ##### STEP 3: Find 'default' project id for destination site ##### ##### STEP 4: Download workbook ##### ##### STEP 5: Publish to new site ##### ##### STEP 6: Deleting workbook from the source site ##### ##### STEP 7: Sign out ##### | 2.065771 | 2 |
HW8/smonets/HW3.py | kolyasalubov/Lv-677.PythonCore | 0 | 6615011 | <gh_stars>0
class Employee():
    """An employee with a name and a salary.

    Every instantiation is tallied in the class-level ``counter``
    attribute, which :meth:`total_emp` reports.
    """

    # Number of Employee instances created so far (class-wide tally).
    counter = 0

    def __init__(self, name, salary):
        self.name = str(name)
        self.salary = int(salary)

    def __new__(cls, *args, **kwargs):
        # Idiom fix: ``__new__`` receives the *class* as its first argument,
        # so it is named ``cls`` (the original misleadingly called it
        # ``self``).  The augmented assignment reads the class attribute and
        # rebinds it on the class where the lookup resolves.
        cls.counter += 1
        return super().__new__(cls)

    @classmethod
    def total_emp(cls):
        """Print how many employees have been created."""
        print(f"We have {cls.counter} employees in total")

    def emp_info(self):
        """Print this employee's name and salary."""
        print(f"This is {self.name}, his salary is {self.salary} $")
# Demo: create three employees; Employee.counter ends at 3.
s = Employee("Sviat", 9999)
o = Employee("Oleg", 99999)
g = Employee("Anna", 3333)
# Print two per-employee summaries and the running head count.
o.emp_info()
g.emp_info()
o.total_emp()
# Introspection examples, left disabled for reference:
# print(Employee.__doc__)
# print(Employee.__base__)
# print(Employee.__dict__)
# print(Employee.__name__)
# print(Employee.__module__)
class Employee():
    """Company employee record.

    Keeps a running count of created Employee objects in the class-level
    ``counter`` attribute.
    """

    counter = 0

    def __new__(cls, *args, **kwargs):
        # Tally every instantiation before the instance is initialised.
        cls.counter += 1
        return object.__new__(cls)

    def __init__(self, name, salary):
        self.name = str(name)
        self.salary = int(salary)

    def emp_info(self):
        """Print a one-line summary of this employee."""
        print(f"This is {self.name}, his salary is {self.salary} $")

    @classmethod
    def total_emp(cls):
        """Print the running count of created employees."""
        print(f"We have {cls.counter} employees in total")
# Demo: create three employees; Employee.counter ends at 3.
s = Employee("Sviat", 9999)
o = Employee("Oleg", 99999)
g = Employee("Anna", 3333)
# Print two per-employee summaries and the running head count.
o.emp_info()
g.emp_info()
o.total_emp()
# Introspection examples, left disabled for reference:
# print(Employee.__doc__)
# print(Employee.__base__)
# print(Employee.__dict__)
# print(Employee.__name__)
# print(Employee.__module__) | en | 0.658897 | Creates Employee You must provide Employee name (str) and salary (num). # print(Employee.__doc__) # print(Employee.__base__) # print(Employee.__dict__) # print(Employee.__name__) # print(Employee.__module__) | 3.841748 | 4 |
webapp/webapp/run.py | gabrielbazan/pyflix | 1 | 6615012 | <reponame>gabrielbazan/pyflix<gh_stars>1-10
import logging
from settings import HOST, PORT
from app import app
# Module-level logger for this service entry point.
LOGGER = logging.getLogger(__name__)
if __name__ == "__main__":
    # Configure root logging before emitting anything.
    logging.basicConfig(level=logging.DEBUG)
    LOGGER.info("Starting service")
    # Start the web application on the host/port taken from settings.
    app.run(host=HOST, port=PORT)
| import logging
from settings import HOST, PORT
from app import app
LOGGER = logging.getLogger(__name__)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
LOGGER.info("Starting service")
app.run(host=HOST, port=PORT) | none | 1 | 2.082354 | 2 | |
freehackquest_libclient_py/freehackquest_api_quests_files.py | freehackquest/libfhqcli-py | 0 | 6615013 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020 FreeHackQuest Team <<EMAIL>>
"""This file was automatically generated by fhq-server
Version: v0.2.47
Date: 2022-01-01 07:15:35
"""
class FreeHackQuestApiQuestsfiles:
    """API Group quests_files.

    Thin client-side wrapper that validates request parameters and forwards
    ``quests_files.*`` commands through the shared client connection.
    """

    # Client used to build, validate and send commands (set in __init__).
    __client = None

    def __init__(self, client):
        self.__client = client

    def _send(self, command, req, params):
        """Validate *req* against *params* and send *command*.

        Every command in this group takes an identical set of allowed and
        required parameters and forwards them verbatim, so the shared
        plumbing of :meth:`delete` and :meth:`upload` lives here.

        Args:
            command: Fully-qualified command name (e.g. ``'quests_files.delete'``).
            req: Caller-supplied parameter dict.
            params: Parameter names that are both allowed and required.

        Returns:
            The server response, or ``None`` when the client is not connected.
        """
        if not self.__client.has_connection():
            return None
        request_json = self.__client.generate_base_command(command)
        self.__client.check_on_excess_params(req, command, params)
        self.__client.check_on_required_params(req, command, params)
        # Defensive re-check in case the client-side validator does not raise.
        for param_name in params:
            if param_name not in req:
                raise Exception('Parameter "' + param_name + '" expected (lib)')
        for param_name in params:
            if param_name in req:
                request_json[param_name] = req[param_name]
        return self.__client.send_command(request_json)

    def delete(self, req):
        """Delete file from quest.

        Permissions:
            Denied access for unauthorized users, users and admins.

        Args:
            quest_uuid (string, required): Quest UUID
            file_id (integer, required): File ID
        """
        return self._send('quests_files.delete', req, ['quest_uuid', 'file_id'])

    def upload(self, req):
        """Upload a file to the quest.

        Permissions:
            Denied access for unauthorized users, users and admins.

        Args:
            quest_uuid (string, required): Quest UUID
            file_base64 (string, required): Byte-array encoded in base64
            file_name (string, required): File name
        """
        return self._send('quests_files.upload', req,
                          ['quest_uuid', 'file_base64', 'file_name'])
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020 FreeHackQuest Team <<EMAIL>>
"""This file was automatically generated by fhq-server
Version: v0.2.47
Date: 2022-01-01 07:15:35
"""
class FreeHackQuestApiQuestsfiles:
    """Client-side wrapper for the ``quests_files`` API group."""

    __client = None

    def __init__(self, client):
        self.__client = client

    def delete(self, req):
        """Remove a file attached to a quest.

        Permissions: denied for unauthorized users, users and admins.

        Args:
            quest_uuid (string, required): Quest UUID
            file_id (integer, required): File ID
        """
        if not self.__client.has_connection():
            return None
        payload = self.__client.generate_base_command('quests_files.delete')
        expected = ['quest_uuid', 'file_id']
        self.__client.check_on_excess_params(req, 'quests_files.delete', expected)
        self.__client.check_on_required_params(req, 'quests_files.delete', expected)
        for key in expected:
            if key not in req:
                raise Exception(f'Parameter "{key}" expected (lib)')
        for key in expected:
            if key in req:
                payload[key] = req[key]
        return self.__client.send_command(payload)

    def upload(self, req):
        """Attach a file to a quest.

        Permissions: denied for unauthorized users, users and admins.

        Args:
            quest_uuid (string, required): Quest UUID
            file_base64 (string, required): Byte-array encoded in base64
            file_name (string, required): File name
        """
        if not self.__client.has_connection():
            return None
        payload = self.__client.generate_base_command('quests_files.upload')
        expected = ['quest_uuid', 'file_base64', 'file_name']
        self.__client.check_on_excess_params(req, 'quests_files.upload', expected)
        self.__client.check_on_required_params(req, 'quests_files.upload', expected)
        for key in expected:
            if key not in req:
                raise Exception(f'Parameter "{key}" expected (lib)')
        for key in expected:
            if key in req:
                payload[key] = req[key]
        return self.__client.send_command(payload)
GreyNsights/host.py | kamathhrishi/GreyNSights | 19 | 6615014 | # python depedencies
import socket
# third-paty dependencies
import dill
from .analyst import Command, Pointer
from .generic import Message
from .graph import Node
from .handler import QueryHandler
from .mpc import gen_shares
# lib dependencies
from .utils import log_message
class DataOwner:
    """Holds the identity and connection details of a data owner (host).

    The owner also keeps a registry of named objects in ``objects``.

    Args:
        name: Name of the data owner.
        port: Port through which the data owner hosts the dataset.
        host: The hostname.
        object_type: Whether this reference is the "original" owner object
            or a derived reference (e.g. "copy"); can back permission
            checks later.
    """

    def __init__(self, name: str, port: int, host: str, object_type: str = "original"):
        self.name = name
        self.host = host
        self.port = port
        # Reference kind ("original" vs derived copies); kept public for now.
        self.type = object_type
        self.objects = {}

    def register_obj(self, name, obj):
        """Register *obj* under *name* with this owner."""
        self.objects[name] = obj
class Dataset:
    """Hosts a dataset and executes analyst commands against it.

    A ``Dataset`` wraps the raw data together with its :class:`DataOwner`,
    a privacy ``config`` and the bookkeeping needed to serve remote
    queries: a pointer registry (``objects``), computation-graph buffers
    (``buffer``/``temp_buffer``/``temp_graph``) and MPC share storage.

    Args:
        owner: The owner of the dataset.
        name: Name of the dataset.
        data: The dataset (assumes a pandas-like object — ``drop(...,
            axis=1)`` is used below; TODO confirm).
        config: Privacy configuration (budget, private columns, expected
            dataset/owner names).
        whitelist: Optional whitelist — presumably of permitted analysts or
            operations; verify against callers.
        permission: Permission mode string; defaults to "AGGREGATE-ONLY".
    """
    def __init__(
        self,
        owner: DataOwner,
        name: str,
        data,
        config,
        whitelist: dict = None,
        permission="AGGREGATE-ONLY",
    ):
        # The dataset name must be a plain string.
        if type(name) == str:
            self.name = name
        else:
            raise TypeError
        # Keep a detached, "copy"-typed owner reference so downstream code
        # can distinguish it from the original owner object.
        if isinstance(owner, DataOwner):
            self.owner = DataOwner(owner.name, owner.port, owner.host, "copy")
        else:
            self.owner = owner
        self.shares = {}
        self.config = config
        self.host = owner.host
        self.port = owner.port
        self.data = data
        self.whitelist = whitelist
        self.buffer = {}  # pointer id -> graph Node of the producing op
        self.temp_graph = []  # per-request trace of (pointer id, query)
        self.graph = {}
        self.temp_buffer = []  # parent Nodes collected for the next op
        self.objects = {}  # pointer id -> materialised result
        self.permission = permission
        # Differential-privacy reporting is only enabled when a budget is
        # configured.  NOTE: the sentinel is the *string* "None".
        if config.privacy_budget != "None":
            from .reporter import DPReporter
            self.dp_reporter = DPReporter(config.privacy_budget, 0.7)
        else:
            self.dp_reporter = None
        self.mpc_shares = {}  # dataset/worker name -> secret share
        # Sanity-check the config against this dataset.  The mismatch is
        # only reported; construction continues regardless.
        if config.dataset_name != self.name and config.owner_name != owner.name:
            print("Config Rejected")
            print(self.name)
            print(owner.name)
        # Drop columns marked private before anything is served.
        if self.config.private_columns != "None":
            print("Private COlumns Exist")
            print(config.private_columns)
            self.data = self.data.drop(config.private_columns, axis=1)
        owner.register_obj(name, self)
    def register_obj(self, name, obj):
        """Register object on the dataset.
        Args:
            name[str]: Name of the object to be registered
            obj: The object to be registered"""
        self.objects[name] = obj
    def operate(self, query, result):
        """Register the result of an operation and return it as a pointer.

        Stores *result* under a fresh :class:`Pointer` id, records a graph
        :class:`Node` (parents taken from ``temp_buffer``) and wraps the
        pointer in a :class:`Message` for the analyst.

        Args:
            query: Name of the operation that produced *result*.
            result: The result of the arithmetic/logical operation.
        Returns:
            sent_msg[Message]: Message carrying the registered pointer.
        """
        Pt = Pointer(
            self.owner,
            self.name,
            self.host,
            self.port,
            data=result,
            additional_data={"name": self.name},
            data_type=type(result),
        )
        n = Node(query, parents=self.temp_buffer)
        self.objects[Pt.id] = result
        self.buffer[Pt.id] = n
        sent_msg = Message(
            self.owner.name,
            "",
            "resultant_pointer",
            "pointer",
            data=Pt,
            extra={"name": self.owner.name, "id": Pt.id},
        )
        return sent_msg
    def __getitem__(self, recieved_msg: Message, query=None):
        # NOTE(review): unconventional __getitem__ — it is dispatched with a
        # Message (not a key) from handle_request, indexes the referenced
        # object with ``key_attr["idx"]`` and returns a pointer message.
        try:
            data = self.objects[recieved_msg.id]
        except KeyError:
            # Fall back to objects registered directly on the owner.
            data = self.owner.objects[recieved_msg.id]
        data = data[recieved_msg.key_attr["idx"]]
        return self.operate("getitem", data)
    def __setitem__(self, recieved_msg, query=None):
        # Mutates the referenced object in place: obj[key] = newvalue.
        try:
            data = self.objects[recieved_msg.id]
        except KeyError:
            data = self.owner.objects[recieved_msg.id]
        data[recieved_msg.key_attr["key"]] = recieved_msg.key_attr["newvalue"]
        return self.operate("setitem", data)
    def register_share(self, recieved_msg, query=None):
        # Store an MPC share sent by a peer worker, keyed by its name, and
        # acknowledge with an empty pointer message.
        self.mpc_shares[recieved_msg.name] = recieved_msg.mpc_share
        sent_msg = Message(
            self.owner.name,
            "",
            "resultant_pointer",
            "pointer",
            data=None,
            extra={"name": self.owner.name},
        )
        return sent_msg
    def create_shares(self, recieved_msg, query=None):
        # Split the referenced object into one secret share per worker and
        # push each remote worker its share via a register_share command;
        # this worker keeps its own share locally.
        try:
            data = self.objects[recieved_msg.id]
        except KeyError:
            data = self.owner.objects[recieved_msg.id]
        workers = recieved_msg.key_attr["distributed_workers"]
        generated_shares = gen_shares(data, len(workers))
        idx = 0
        for worker in workers.keys():
            if workers[worker]["port"] != self.port:
                additional_data = {
                    "name": self.name,
                    "mpc_share": generated_shares[idx],
                }
                cmd = Command(
                    workers[worker]["host"],
                    workers[worker]["port"],
                    "register_share",
                    additional_data=additional_data,
                )
                cmd.execute("register_share")
            else:
                # Our own share is kept in-process instead of sent.
                self.mpc_shares[self.name] = generated_shares[idx]
            idx += 1
        sent_msg = Message(
            self.owner.name,
            "",
            "resultant_pointer",
            "pointer",
            data=None,
            extra={"name": self.owner.name},
        )
        return sent_msg
    def replace_pt_with_data(self, recieved_msg):
        """Replace pointer Messages in the request arguments with their data.

        Positional (``attr``) and keyword (``key_attr``) arguments that are
        pointer-type :class:`Message` objects are swapped for the locally
        registered result they refer to; their producing graph Nodes are
        appended to ``temp_buffer`` so the next op can record its parents.

        Args:
            recieved_msg: The received message which might hold pointers.
        Returns:
            recieved_msg: The received message with original data."""
        new_args = []
        new_kwargs = {}
        for i in recieved_msg.attr:
            if type(i) == Message and i.msg_type == "Pointer":
                new_args.append(self.objects[i.data])
                self.temp_buffer.append(self.buffer[i.data])
                # elf.temp_graph.append(i.data)
            else:
                new_args.append(i)
        for j in recieved_msg.key_attr.keys():
            if (
                type(recieved_msg.key_attr[j]) == Message
                and recieved_msg.key_attr[j].msg_type == "Pointer"
            ):
                new_kwargs[j] = self.objects[recieved_msg.key_attr[j].data]
                self.temp_buffer.append(self.buffer[recieved_msg.key_attr[j].data])
                # self.temp_graph.append(j.data)
            else:
                new_kwargs[j] = recieved_msg.key_attr[j]
        recieved_msg.attr = new_args
        recieved_msg.key_attr = new_kwargs
        return recieved_msg
    def get_shares(self, recieved_msg, query=None):
        # Hand all locally held MPC shares to the requester, then clear the
        # local store (shares are released once collected).
        sent_msg = Message(
            self.owner.name,
            "",
            "mpc_shares",
            "mpc_shares",
            data=self.mpc_shares,
            extra={"name": self.owner.name},
        )
        self.mpc_shares = {}
        return sent_msg
    def get_config(self, recieved_msg, query=None):
        # Return this dataset's privacy config to the requester.
        sent_msg = Message(
            self.owner.name,
            "",
            "mpc_shares",
            "mpc_shares",
            data=self.config,
            extra={"name": self.owner.name},
        )
        return sent_msg
    def exec_operation(self, recieved_msg, query=None):
        # Dispatch a binary operator by its dunder name via the stdlib
        # operator module.  NOTE(review): operator exposes __add__, __gt__,
        # __lt__ etc., but *not* __gte__/__lte__ (the real names are
        # __ge__/__le__) — the "__gte__"/"__lte__" entries in
        # handle_request would raise AttributeError here if ever sent;
        # confirm what the analyst side emits.
        import operator
        op_str = query.replace("_", "")
        op = getattr(operator, query)
        if hasattr(recieved_msg, "x"):
            # Pointer-with-constant form: second operand travels inline.
            result = op(self.objects[recieved_msg.pt_id1], recieved_msg.x)
        else:
            # Pointer-with-pointer form: both operands are registered results.
            result = op(
                self.objects[recieved_msg.pt_id1], self.objects[recieved_msg.pt_id2]
            )
        return self.operate(op_str, result)
    def handle_request(self, recieved_msg, query_handler):
        """Route one analyst Message to an internal handler or the QueryHandler.

        Resolves the target object (pointer id or the base dataset),
        substitutes pointer arguments with real data, then dispatches by
        query name.  Returns the Message to send back.
        """
        self.temp_graph = []
        if type(recieved_msg) != Message:
            raise TypeError
        # Checks if the dataset is the intended dataset. In future some sort of authentication should be present in this phase.
        # NOTE(review): the triple-quoted block below is a no-op string
        # literal — the dataset-name check is effectively disabled.
        """if recieved_msg.name != self.name:
            raise NameError("Dataset " + recieved_msg.name + " not found")
        recieved_msg = recieved_msg"""
        # Substitute for type of message , should be replaced by type of message in furture
        if hasattr(recieved_msg, "id"):
            # Request targets a previously registered pointer.
            try:
                data = self.objects[recieved_msg.id]
            except KeyError:
                data = self.owner.objects[recieved_msg.id]
            query = recieved_msg.data
            self.temp_graph.append([recieved_msg.id, recieved_msg.data])
        else:
            # Request targets the base dataset itself.
            data = self.data
            query = recieved_msg.data
        recieved_msg = self.replace_pt_with_data(recieved_msg)
        # Dispatch table for queries served in-process; anything else goes
        # through the QueryHandler.
        internal_queries = {
            "__getitem__": self.__getitem__,
            "__setitem__": self.__setitem__,
            "__add__": self.exec_operation,
            "__sub__": self.exec_operation,
            "__mul__": self.exec_operation,
            "__truediv__": self.exec_operation,
            "__and__": self.exec_operation,
            "__or__": self.exec_operation,
            "__gte__": self.exec_operation,
            "__gt__": self.exec_operation,
            "__lt__": self.exec_operation,
            "__lte__": self.exec_operation,
            "register_share": self.register_share,
            "create_shares": self.create_shares,
            "get_config": self.get_config,
            "get_shares": self.get_shares,
        }
        if type(query) == str and (query in internal_queries.keys()):
            sent_msg = internal_queries[query](recieved_msg=recieved_msg, query=query)
        else:
            sent_msg = query_handler.handle(
                self.temp_buffer, recieved_msg=recieved_msg, data=data, query=query
            )
        self.temp_buffer = []
        return sent_msg
    def listen(self):
        """Serve analyst requests over a blocking TCP socket, forever.

        Each accepted connection delivers one dill-pickled Message, which is
        dispatched via :meth:`handle_request`; the reply Message is pickled
        and sent back.

        NOTE(review): ``dill.loads`` on raw socket bytes will execute
        arbitrary pickled payloads — only safe on a trusted network.  A
        single ``recv(1200000)`` may also truncate large messages, and
        neither ``conn`` nor ``s`` is ever closed.
        """
        # REFRACTOR REQUIRED: abstract and write seperate functions for various functionalities and cleaner if else statements
        # Establishes a socket connection and begins to listen requests
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind((self.host, self.port))
        s.listen()
        log_message(
            "Connection",
            self.name
            + " Listening to requests from port "
            + str(self.host)
            + " "
            + str(self.port),
        )
        # Continiously listens until terminated
        while True:
            # Waits until a request is present
            conn, address = s.accept()
            # Fresh handler per connection.
            query_handler = QueryHandler(self, self.name, self.host, self.port)
            log_message(
                "Connection",
                "Connection from " + str(address) + " has been established",
            )
            log_message("Dataset Name", self.name)
            recieved_msg = conn.recv(1200000)
            recieved_msg = dill.loads(recieved_msg)
            sent_msg = self.handle_request(recieved_msg, query_handler)
            # Sends pickled message
            if type(sent_msg) == Message:
                # send_msg(conn,dill.dumps(sent_msg))
                conn.sendall(dill.dumps(sent_msg))
            else:
                raise TypeError(f"{type(sent_msg)} is not valid type for message")
| # python depedencies
import socket
# third-paty dependencies
import dill
from .analyst import Command, Pointer
from .generic import Message
from .graph import Node
from .handler import QueryHandler
from .mpc import gen_shares
# lib dependencies
from .utils import log_message
class DataOwner:
    """Identity of a dataset host.

    Args:
        name: Owner name.
        port: Port the owner serves its dataset on.
        host: Hostname of the owner.
        object_type: "original" for the canonical owner object; other
            markers (e.g. "copy") tag derived references and can back
            permission checks later.
    """

    def __init__(self, name: str, port: int, host: str, object_type: str = "original"):
        self.objects = {}
        self.name = name
        self.host = host
        self.port = port
        # Kept public for now; callers read ``owner.type`` directly.
        self.type = object_type

    def register_obj(self, name, obj):
        """Store *obj* in this owner's registry under *name*."""
        self.objects[name] = obj
class Dataset:
"""Dataset handles all the functionalities relating to hosting the dataset and executing the commands on the host.
Args:
owner[DataOwner]: The owner of the dataset
name[str]: Name of the dataset
data: The dataset
"""
def __init__(
self,
owner: DataOwner,
name: str,
data,
config,
whitelist: dict = None,
permission="AGGREGATE-ONLY",
):
if type(name) == str:
self.name = name
else:
raise TypeError
if isinstance(owner, DataOwner):
self.owner = DataOwner(owner.name, owner.port, owner.host, "copy")
else:
self.owner = owner
self.shares = {}
self.config = config
self.host = owner.host
self.port = owner.port
self.data = data
self.whitelist = whitelist
self.buffer = {}
self.temp_graph = []
self.graph = {}
self.temp_buffer = []
self.objects = {}
self.permission = permission
if config.privacy_budget != "None":
from .reporter import DPReporter
self.dp_reporter = DPReporter(config.privacy_budget, 0.7)
else:
self.dp_reporter = None
self.mpc_shares = {}
if config.dataset_name != self.name and config.owner_name != owner.name:
print("Config Rejected")
print(self.name)
print(owner.name)
if self.config.private_columns != "None":
print("Private COlumns Exist")
print(config.private_columns)
self.data = self.data.drop(config.private_columns, axis=1)
owner.register_obj(name, self)
def register_obj(self, name, obj):
"""Register object on the dataset.
Args:
name[str]: Name of the object to be registered
obj: The object to be registered"""
self.objects[name] = obj
def operate(self, query, result):
"""Register result of arithmetic/logical operator and send it across as a pointer
Args:
result: The result of arithmetic/logical operation
Returns:
sent_msg[Message]: The pointer sent across after registering result of operation
"""
Pt = Pointer(
self.owner,
self.name,
self.host,
self.port,
data=result,
additional_data={"name": self.name},
data_type=type(result),
)
n = Node(query, parents=self.temp_buffer)
self.objects[Pt.id] = result
self.buffer[Pt.id] = n
sent_msg = Message(
self.owner.name,
"",
"resultant_pointer",
"pointer",
data=Pt,
extra={"name": self.owner.name, "id": Pt.id},
)
return sent_msg
def __getitem__(self, recieved_msg: Message, query=None):
try:
data = self.objects[recieved_msg.id]
except KeyError:
data = self.owner.objects[recieved_msg.id]
data = data[recieved_msg.key_attr["idx"]]
return self.operate("getitem", data)
def __setitem__(self, recieved_msg, query=None):
try:
data = self.objects[recieved_msg.id]
except KeyError:
data = self.owner.objects[recieved_msg.id]
data[recieved_msg.key_attr["key"]] = recieved_msg.key_attr["newvalue"]
return self.operate("setitem", data)
def register_share(self, recieved_msg, query=None):
self.mpc_shares[recieved_msg.name] = recieved_msg.mpc_share
sent_msg = Message(
self.owner.name,
"",
"resultant_pointer",
"pointer",
data=None,
extra={"name": self.owner.name},
)
return sent_msg
def create_shares(self, recieved_msg, query=None):
try:
data = self.objects[recieved_msg.id]
except KeyError:
data = self.owner.objects[recieved_msg.id]
workers = recieved_msg.key_attr["distributed_workers"]
generated_shares = gen_shares(data, len(workers))
idx = 0
for worker in workers.keys():
if workers[worker]["port"] != self.port:
additional_data = {
"name": self.name,
"mpc_share": generated_shares[idx],
}
cmd = Command(
workers[worker]["host"],
workers[worker]["port"],
"register_share",
additional_data=additional_data,
)
cmd.execute("register_share")
else:
self.mpc_shares[self.name] = generated_shares[idx]
idx += 1
sent_msg = Message(
self.owner.name,
"",
"resultant_pointer",
"pointer",
data=None,
extra={"name": self.owner.name},
)
return sent_msg
def replace_pt_with_data(self, recieved_msg):
"""Given the arguments passed as pointers with Message pointers , the message pointers will be
replaced with the original pointer data
Args:
recieved_msg: The recieved messsage which might have pointers
Returns:
recieved_msg: The recieved message with original data"""
new_args = []
new_kwargs = {}
for i in recieved_msg.attr:
if type(i) == Message and i.msg_type == "Pointer":
new_args.append(self.objects[i.data])
self.temp_buffer.append(self.buffer[i.data])
# elf.temp_graph.append(i.data)
else:
new_args.append(i)
for j in recieved_msg.key_attr.keys():
if (
type(recieved_msg.key_attr[j]) == Message
and recieved_msg.key_attr[j].msg_type == "Pointer"
):
new_kwargs[j] = self.objects[recieved_msg.key_attr[j].data]
self.temp_buffer.append(self.buffer[recieved_msg.key_attr[j].data])
# self.temp_graph.append(j.data)
else:
new_kwargs[j] = recieved_msg.key_attr[j]
recieved_msg.attr = new_args
recieved_msg.key_attr = new_kwargs
return recieved_msg
def get_shares(self, recieved_msg, query=None):
sent_msg = Message(
self.owner.name,
"",
"mpc_shares",
"mpc_shares",
data=self.mpc_shares,
extra={"name": self.owner.name},
)
self.mpc_shares = {}
return sent_msg
def get_config(self, recieved_msg, query=None):
sent_msg = Message(
self.owner.name,
"",
"mpc_shares",
"mpc_shares",
data=self.config,
extra={"name": self.owner.name},
)
return sent_msg
def exec_operation(self, recieved_msg, query=None):
import operator
op_str = query.replace("_", "")
op = getattr(operator, query)
if hasattr(recieved_msg, "x"):
result = op(self.objects[recieved_msg.pt_id1], recieved_msg.x)
else:
result = op(
self.objects[recieved_msg.pt_id1], self.objects[recieved_msg.pt_id2]
)
return self.operate(op_str, result)
    def handle_request(self, recieved_msg, query_handler):
        """Validate, resolve, and dispatch one incoming request.

        Looks up the target data (by pointer id when the message carries one,
        otherwise the whole dataset), resolves any pointer arguments, and
        routes the query either to a built-in handler or to *query_handler*.

        Args:
            recieved_msg: Incoming ``Message``; its ``data`` field holds the
                query name, and it may carry an ``id`` naming a stored object.
            query_handler: Fallback handler for non-built-in queries.

        Returns:
            The reply message produced by the selected handler.

        Raises:
            TypeError: If *recieved_msg* is not a ``Message``.
        """
        self.temp_graph = []
        if type(recieved_msg) != Message:
            raise TypeError
        # Checks if the dataset is the intended dataset. In future some sort of authentication should be present in this phase.
        """if recieved_msg.name != self.name:
            raise NameError("Dataset " + recieved_msg.name + " not found")
        recieved_msg = recieved_msg"""
        # Substitute for type of message; should be replaced by a proper message-type field in future.
        if hasattr(recieved_msg, "id"):
            # Pointer-addressed request: fetch the referenced object, falling
            # back to the owner's registry when it is not stored locally.
            try:
                data = self.objects[recieved_msg.id]
            except KeyError:
                data = self.owner.objects[recieved_msg.id]
            query = recieved_msg.data
            self.temp_graph.append([recieved_msg.id, recieved_msg.data])
        else:
            # No pointer: the query operates on the whole dataset.
            data = self.data
            query = recieved_msg.data
        # Swap pointer placeholders in the arguments for the real objects.
        recieved_msg = self.replace_pt_with_data(recieved_msg)
        # Dispatch table for queries served directly by this dataset.
        internal_queries = {
            "__getitem__": self.__getitem__,
            "__setitem__": self.__setitem__,
            "__add__": self.exec_operation,
            "__sub__": self.exec_operation,
            "__mul__": self.exec_operation,
            "__truediv__": self.exec_operation,
            "__and__": self.exec_operation,
            "__or__": self.exec_operation,
            "__gte__": self.exec_operation,
            "__gt__": self.exec_operation,
            "__lt__": self.exec_operation,
            "__lte__": self.exec_operation,
            "register_share": self.register_share,
            "create_shares": self.create_shares,
            "get_config": self.get_config,
            "get_shares": self.get_shares,
        }
        if type(query) == str and (query in internal_queries.keys()):
            sent_msg = internal_queries[query](recieved_msg=recieved_msg, query=query)
        else:
            # Anything else (e.g. arbitrary callables) goes to the external
            # query handler along with the resolved buffer entries.
            sent_msg = query_handler.handle(
                self.temp_buffer, recieved_msg=recieved_msg, data=data, query=query
            )
        # Buffer entries are only valid for a single request.
        self.temp_buffer = []
        return sent_msg
def listen(self):
"""Listens for queries and requests from analyst and executes the queries. The queries are either directly sent or processed by QueryEngine."""
# REFRACTOR REQUIRED: abstract and write seperate functions for various functionalities and cleaner if else statements
# Establishes a socket connection and begins to listen requests
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((self.host, self.port))
s.listen()
log_message(
"Connection",
self.name
+ " Listening to requests from port "
+ str(self.host)
+ " "
+ str(self.port),
)
# Continiously listens until terminated
while True:
# Waits until a request is present
conn, address = s.accept()
query_handler = QueryHandler(self, self.name, self.host, self.port)
log_message(
"Connection",
"Connection from " + str(address) + " has been established",
)
log_message("Dataset Name", self.name)
recieved_msg = conn.recv(1200000)
recieved_msg = dill.loads(recieved_msg)
sent_msg = self.handle_request(recieved_msg, query_handler)
# Sends pickled message
if type(sent_msg) == Message:
# send_msg(conn,dill.dumps(sent_msg))
conn.sendall(dill.dumps(sent_msg))
else:
raise TypeError(f"{type(sent_msg)} is not valid type for message")
| en | 0.740285 | # python depedencies # third-paty dependencies # lib dependencies DataOwner class handles all the functionalities relating to the Data owner (the host or owner of the data). It also stores the relevent details. Args: name[str]: Name of the Data Owner port[int]: Port through which data owner hosts dataset host[str]: The name of host object_type[str]: If the reference of dataowner is original or reference. Can be used to set permissions later. # Make this private type Dataset handles all the functionalities relating to hosting the dataset and executing the commands on the host. Args: owner[DataOwner]: The owner of the dataset name[str]: Name of the dataset data: The dataset Register object on the dataset. Args: name[str]: Name of the object to be registered obj: The object to be registered Register result of arithmetic/logical operator and send it across as a pointer Args: result: The result of arithmetic/logical operation Returns: sent_msg[Message]: The pointer sent across after registering result of operation Given the arguments passed as pointers with Message pointers , the message pointers will be replaced with the original pointer data Args: recieved_msg: The recieved messsage which might have pointers Returns: recieved_msg: The recieved message with original data # elf.temp_graph.append(i.data) # self.temp_graph.append(j.data) # Checks if the dataset is the intended dataset. In future some sort of authentication should be present in this phase. if recieved_msg.name != self.name: raise NameError("Dataset " + recieved_msg.name + " not found") recieved_msg = recieved_msg # Substitute for type of message , should be replaced by type of message in furture Listens for queries and requests from analyst and executes the queries. The queries are either directly sent or processed by QueryEngine. 
# REFRACTOR REQUIRED: abstract and write seperate functions for various functionalities and cleaner if else statements # Establishes a socket connection and begins to listen requests # Continiously listens until terminated # Waits until a request is present # Sends pickled message # send_msg(conn,dill.dumps(sent_msg)) | 2.327532 | 2 |
src/events/migrations/0002_rename_img_event_image.py | sophia-ts/ieeesb-app | 7 | 6615015 | <reponame>sophia-ts/ieeesb-app
# Generated by Django 4.0.1 on 2022-01-08 22:03
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the ``img`` field on the ``Event`` model to ``image``."""

    # Must run after the app's initial schema migration.
    dependencies = [
        ("events", "0001_initial"),
    ]

    operations = [
        # Pure rename: no column-type change, existing data is preserved.
        migrations.RenameField(
            model_name="event",
            old_name="img",
            new_name="image",
        ),
    ]
| # Generated by Django 4.0.1 on 2022-01-08 22:03
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("events", "0001_initial"),
]
operations = [
migrations.RenameField(
model_name="event",
old_name="img",
new_name="image",
),
] | en | 0.876187 | # Generated by Django 4.0.1 on 2022-01-08 22:03 | 1.714368 | 2 |
src/main/java/finished/others/finished/no1056/1056.py | realxwx/leetcode-solve | 0 | 6615016 | # Copyright (c) 2020
# Author: xiaoweixiang
class Solution:
    def confusingNumber(self, N: int) -> bool:
        """Return True if rotating N by 180 degrees yields a valid number
        that differs from N.

        Only 0, 1, 6, 8 and 9 remain valid digits after rotation
        (6 <-> 9); any other digit makes the number non-confusing.
        """
        rotation = {'0': '0', '1': '1', '6': '9', '8': '8', '9': '6'}
        digits = str(N)
        rotated = []
        for ch in digits:
            mapped = rotation.get(ch)
            if mapped is None:
                # Digit becomes invalid when rotated.
                return False
            rotated.append(mapped)
        # Reading the rotated digits upside-down reverses their order.
        return ''.join(reversed(rotated)) != digits
| # Copyright (c) 2020
# Author: xiaoweixiang
class Solution:
def confusingNumber(self, N: int) -> bool:
'''
参考其他人的Python写法
:param N:
:return:
'''
d = {'0': '0', '1': '1', '6': '9', '8': '8', '9': '6'}
ans = ''
s = str(N)
for i in range(len(s)):
if s[i] in d:
ans += d[s[i]]
else:
return False
return ans[::-1] != s
| zh | 0.280356 | # Copyright (c) 2020 # Author: xiaoweixiang 参考其他人的Python写法 :param N: :return: | 3.340873 | 3 |
tests/views.py | hodossy/django-nlf | 0 | 6615017 | from rest_framework import serializers, viewsets
from .models import Article, Publication
class ArticleSerializer(serializers.ModelSerializer):
    """Serialize every field of the Article model for the REST API."""

    class Meta:
        model = Article
        fields = "__all__"
class PublicationSerializer(serializers.ModelSerializer):
    """Serialize every field of the Publication model for the REST API."""

    class Meta:
        model = Publication
        fields = "__all__"
class ArticleViewSet(viewsets.ModelViewSet):
    """CRUD endpoints (list/retrieve/create/update/destroy) for Articles."""

    queryset = Article.objects.all()
    serializer_class = ArticleSerializer
class PublicationViewSet(viewsets.ModelViewSet):
    """CRUD endpoints (list/retrieve/create/update/destroy) for Publications."""

    queryset = Publication.objects.all()
    serializer_class = PublicationSerializer
| from rest_framework import serializers, viewsets
from .models import Article, Publication
class ArticleSerializer(serializers.ModelSerializer):
class Meta:
model = Article
fields = "__all__"
class PublicationSerializer(serializers.ModelSerializer):
class Meta:
model = Publication
fields = "__all__"
class ArticleViewSet(viewsets.ModelViewSet):
queryset = Article.objects.all()
serializer_class = ArticleSerializer
class PublicationViewSet(viewsets.ModelViewSet):
queryset = Publication.objects.all()
serializer_class = PublicationSerializer
| none | 1 | 2.072987 | 2 | |
1535. Find the winner of an Array game M.py | bogdan824/LeetCode-Problems | 0 | 6615018 | def findWinner(arr,k):
if k>len(arr):
return max(arr)
first_val = arr[0]
win_count = 0
while win_count!=k:
if arr[0] > arr[1]:
arr.append(arr[1])
arr.remove(arr[1])
else:
arr.append(arr[0])
arr.remove(arr[0])
win_count=0
first_val = arr[0]
win_count+=1
return arr[0]
# Demo driver: the first sample input is kept commented for reference.
#arr = [2,1,3,5,4,6,7]
#k = 2
arr = [1,25,35,42,68,70]
k = 1
print(findWinner(arr,k))
if k>len(arr):
return max(arr)
first_val = arr[0]
win_count = 0
while win_count!=k:
if arr[0] > arr[1]:
arr.append(arr[1])
arr.remove(arr[1])
else:
arr.append(arr[0])
arr.remove(arr[0])
win_count=0
first_val = arr[0]
win_count+=1
return arr[0]
#arr = [2,1,3,5,4,6,7]
#k = 2
arr = [1,25,35,42,68,70]
k = 1
print(findWinner(arr,k)) | en | 0.846284 | #arr = [2,1,3,5,4,6,7] #k = 2 | 3.33587 | 3 |
benchmark/models/ct.py | 0h-n0/DL_benchmarks | 0 | 6615019 | """
CNTK trainers and models
"""
import os
import numpy as np
import cntk as C
from cntk.device import try_set_default_device, gpu, all_devices
from ctmodel import cnn
from cntk.io import MinibatchSource, CTFDeserializer, StreamDef, StreamDefs, INFINITELY_REPEAT
from cntk.initializer import he_normal
from cntk.layers import AveragePooling, BatchNormalization, Convolution, Dense
from cntk.ops import element_times, relu
class Trainer(object):
    """Benchmark harness that trains a CNTK model and times each minibatch."""

    def __init__(self, model, ngpu, options=None):
        """Store the model factory and GPU configuration.

        Args:
            model: Callable building the network graph from an input variable.
            ngpu: Number of GPUs; >= 1 enables GPU mode.
            options: Optional dict; only ``options['progressbar']`` is read.
        """
        self.model = model
        self.ngpu = ngpu
        self.gpu_mode = True if ngpu >= 1 else False
        if self.gpu_mode:
            # NOTE(review): `gpus` is computed but never used or stored.
            gpus = [i for i in range(self.ngpu)]
        self.is_parallel = False
        if options:
            self.progressbar = options['progressbar']

    def set_optimizer(self, opt_type, opt_conf):
        """Configure per-minibatch learning-rate and momentum schedules.

        Args:
            opt_type: Only ``'SGD'`` is supported.
            opt_conf: Dict with ``'lr'`` and ``'momentum'`` entries.

        Raises:
            NotImplementedError: For any optimizer other than SGD.
        """
        if opt_type == 'SGD':
            self.lr_schedule = C.learning_rate_schedule(
                opt_conf['lr'], C.UnitType.minibatch)
            self.m_schedule = C.momentum_schedule(
                opt_conf['momentum'], C.UnitType.minibatch)
        else:
            raise NotImplementedError

    def run(self, iterator, mode='train'):
        """Train over *iterator* and return per-minibatch wall-clock timings.

        Returns:
            dict: minibatch index -> {'forward', 'backward', 'total'} times.
        """
        # Fix: `time` was used below but never imported anywhere in this
        # module, so run() crashed with NameError on the first minibatch.
        import time

        report = dict()
        # NOTE(review): `iamge_shape` looks like a typo for `image_shape`;
        # kept as-is because the iterator may expose the misspelled name.
        input_var = C.ops.input_variable(np.prod(iterator.iamge_shape),
                                         np.float32)
        label_var = C.ops.input_variable(iterator.batch_size, np.float32)
        model = self.model(input_var,)
        ce = C.losses.cross_entropy_with_softmax(model, label_var)
        pe = C.metrics.classification_error(model, label_var)
        # NOTE(review): the module-level `def cnn` (below) shadows
        # `ctmodel.cnn` and requires channel/xdim/ydim/output_num, so this
        # one-argument call is suspicious; also `model` above is built but
        # training runs on `z` — confirm which network is intended.
        z = cnn(input_var)
        learner = C.learners.momentum_sgd(z.parameters, self.lr_schedule, self.m_schedule)
        if self.is_parallel:
            distributed_learner = \
                C.data_parallel_distributed_learner(learner=learner,
                                                    distributed_after=0)
        progress_printer = \
            C.logging.ProgressPrinter(tag='Training',
                                      num_epochs=iterator.niteration)
        if self.is_parallel:
            trainer = C.Trainer(z, (ce, pe), distributed_learner,
                                progress_printer)
        else:
            trainer = C.Trainer(z, (ce, pe), learner, progress_printer)
        for idx, (x, t) in enumerate(iterator):
            total_s = time.perf_counter()
            trainer.train_minibatch({input_var : x, label_var : t})
            # NOTE(review): forward and backward both happen inside
            # train_minibatch above, so the deltas below are ~0; only
            # `total` is a meaningful measurement.
            forward_s = time.perf_counter()
            forward_e = time.perf_counter()
            backward_s = time.perf_counter()
            backward_e = time.perf_counter()
            total_e = time.perf_counter()
            report[idx] = dict(
                forward=forward_e - forward_s,
                backward=backward_e - backward_s,
                total=total_e - total_s
            )
        return report
class CNN(object):
    """Thin factory binding fixed architecture parameters to :func:`cnn`."""

    def __init__(self, channel, xdim, ydim, output_num):
        """Bind the network hyper-parameters.

        Args:
            channel: Number of input channels.
            xdim: Input height.
            ydim: Input width.
            output_num: Number of output classes.
        """
        # Fix: `partial` was used without being imported anywhere in this
        # module, so constructing a CNN raised NameError.
        from functools import partial
        self.cnn = partial(cnn,
                           channel=channel,
                           xdim=xdim,
                           ydim=ydim,
                           output_num=output_num)

    def get_func(self):
        """Return the bound network-builder callable."""
        return self.cnn

    def __call__(self, x):
        """Build the network graph on input *x*."""
        return self.cnn(x)
def cnn(x, channel, xdim, ydim, output_num):
    """Build a convolutional classifier graph on input *x*.

    Two conv/conv/pool stages followed by two 1-wide convolutions and two
    2048-unit dense layers, ending in a linear layer of *output_num* units
    (no activation: logits for a downstream softmax loss).

    NOTE(review): `channel` and `ydim` are accepted but never used; the
    first convolution hard-codes its kernel to (xdim, 3) instead.
    """
    net = C.layers.Convolution2D((xdim, 3), 180, activation=C.ops.relu, pad=False, strides=1)(x)
    net = C.layers.Convolution2D((1, 3), 180, activation=C.ops.relu, pad=False)(net)
    net = C.layers.MaxPooling((1, 2), strides=2)(net)
    net = C.layers.Convolution2D((1, 3), 180, activation=C.ops.relu, pad=False)(net)
    net = C.layers.Convolution2D((1, 3), 180, activation=C.ops.relu, pad=False)(net)
    net = C.layers.MaxPooling((1, 2), strides=2)(net)
    net = C.layers.Convolution2D((1, 2), 180, activation=C.ops.relu, pad=False)(net)
    net = C.layers.Convolution2D((1, 1), 180, activation=C.ops.relu, pad=False)(net)
    net = C.layers.Dense(2048)(net)
    net = C.layers.Dense(2048)(net)
    # Final linear layer: raw logits.
    net = C.layers.Dense(output_num, activation=None)(net)
    return net
| """
CNTK trainers and models
"""
import os
import numpy as np
import cntk as C
from cntk.device import try_set_default_device, gpu, all_devices
from ctmodel import cnn
from cntk.io import MinibatchSource, CTFDeserializer, StreamDef, StreamDefs, INFINITELY_REPEAT
from cntk.initializer import he_normal
from cntk.layers import AveragePooling, BatchNormalization, Convolution, Dense
from cntk.ops import element_times, relu
class Trainer(object):
def __init__(self, model, ngpu, options=None):
self.model = model
self.ngpu = ngpu
self.gpu_mode = True if ngpu >= 1 else False
if self.gpu_mode:
gpus = [i for i in range(self.ngpu)]
self.is_parallel = False
if options:
self.progressbar = options['progressbar']
def set_optimizer(self, opt_type, opt_conf):
if opt_type == 'SGD':
self.lr_schedule = C.learning_rate_schedule(
opt_conf['lr'], C.UnitType.minibatch)
self.m_schedule = C.momentum_schedule(
opt_conf['momentum'], C.UnitType.minibatch)
else:
raise NotImplementedError
def run(self, iterator, mode='train'):
report = dict()
input_var = C.ops.input_variable(np.prod(iterator.iamge_shape),
np.float32)
label_var = C.ops.input_variable(iterator.batch_size, np.float32)
model = self.model(input_var,)
ce = C.losses.cross_entropy_with_softmax(model, label_var)
pe = C.metrics.classification_error(model, label_var)
z = cnn(input_var)
learner = C.learners.momentum_sgd(z.parameters, self.lr_schedule, self.m_schedule)
if self.is_parallel:
distributed_learner = \
C.data_parallel_distributed_learner(learner=learner,
distributed_after=0)
progress_printer = \
C.logging.ProgressPrinter(tag='Training',
num_epochs=iterator.niteration)
if self.is_parallel:
trainer = C.Trainer(z, (ce, pe), distributed_learner,
progress_printer)
else:
trainer = C.Trainer(z, (ce, pe), learner, progress_printer)
for idx, (x, t) in enumerate(iterator):
total_s = time.perf_counter()
trainer.train_minibatch({input_var : x, label_var : t})
forward_s = time.perf_counter()
forward_e = time.perf_counter()
backward_s = time.perf_counter()
backward_e = time.perf_counter()
total_e = time.perf_counter()
report[idx] = dict(
forward=forward_e - forward_s,
backward=backward_e - backward_s,
total=total_e - total_s
)
return report
class CNN(object):
def __init__(self, channel, xdim, ydim, output_num):
self.cnn = partial(cnn,
channel=channel,
xdim=xdim,
ydim=ydim,
output_num=output_num)
def get_func(self):
return self.cnn
def __call__(self, x):
return self.cnn(x)
def cnn(x, channel, xdim, ydim, output_num):
net = C.layers.Convolution2D((xdim, 3), 180, activation=C.ops.relu, pad=False, strides=1)(x)
net = C.layers.Convolution2D((1, 3), 180, activation=C.ops.relu, pad=False)(net)
net = C.layers.MaxPooling((1, 2), strides=2)(net)
net = C.layers.Convolution2D((1, 3), 180, activation=C.ops.relu, pad=False)(net)
net = C.layers.Convolution2D((1, 3), 180, activation=C.ops.relu, pad=False)(net)
net = C.layers.MaxPooling((1, 2), strides=2)(net)
net = C.layers.Convolution2D((1, 2), 180, activation=C.ops.relu, pad=False)(net)
net = C.layers.Convolution2D((1, 1), 180, activation=C.ops.relu, pad=False)(net)
net = C.layers.Dense(2048)(net)
net = C.layers.Dense(2048)(net)
net = C.layers.Dense(output_num, activation=None)(net)
return net
| en | 0.898295 | CNTK trainers and models | 2.007917 | 2 |
resources/cpp_from_json.py | gmjosack/Spelunky2X64DbgPlugin | 2 | 6615020 | import re
import json
import urllib.request
print("Preparing ...")
# # Download Spelunky2.json from the x64dbg github repo
# #url = "https://gitcdn.link/repo/spelunky-fyi/Spelunky2X64DbgPlugin/master/resources/Spelunky2.json"
# url = "https://raw.githubusercontent.com/spelunky-fyi/Spelunky2X64DbgPlugin/master/resources/Spelunky2.json"
# response = urllib.request.urlopen(url)
# spelunky2json = response.read().decode('utf-8')
# NOTE(review): this file handle is never closed; a `with open(...)` block
# would be cleaner even in a one-shot script.
spelunky2json = open("Spelunky2.json", "r").read()
# The JSON contains //-style comments; strip them before parsing.
j = json.loads(re.sub("//.*", "", spelunky2json, flags=re.MULTILINE))
#default_entity_types = j["default_entity_types"]
entity_class_hierarchy = j["entity_class_hierarchy"]
pointer_types = j["pointer_types"]
inline_struct_types = j["inline_struct_types"]
all_types = j["fields"]
entity_class_hierarchy["Entity"]="Entity" #add missing 'Entity' type for convenience
# Remove the standard types (they map onto STL containers below instead).
inline_struct_types.remove("Map")
inline_struct_types.remove("UnorderedMap")
#inline_struct_types.remove("StdVector")
inline_struct_types.remove("StdList")
pointer_types.remove("StdListIteratorPointer")
pointer_types.remove("UnorderedMapBucketPointer")
# ent_base_types = [ "FLOOR_",
#                    "FLOORSTYLED_",
#                    "DECORATION_",
#                    "EMBED_"
#                    "CHAR_"
#                    "MONS_",
#                    "ITEM_",
#                    "ACTIVEFLOOR_",
#                    "FX_",
#                    "BG_",
#                    "MIDBG"
#                    "LOGICAL_",
#                    "MOUNT_",
#                    "LIQUID_"]
# Mapping from the JSON's abstract field types to C++ type names.
# NOTE(review): "TextureDBID" appears twice below; the second entry
# ("int64_t") silently overrides the first ("TEXTURE") — confirm which
# one is intended and delete the other.
cpp_types = {
    "Bool": "bool",
    "Byte": "int8_t",
    "UnsignedByte": "uint8_t",
    "Word": "int16_t",
    "UnsignedWord": "uint16_t",
    "Dword": "int32_t",
    "UnsignedDword": "uint32_t",
    "Qword": "int64_t",
    "UnsignedQword": "uint64_t",
    "Float": "float",
    "Flags8": "uint8_t",
    "Flags16": "uint16_t",
    "Flags32": "uint32_t",
    "State8": "int8_t",
    "State16": "int16_t",
    "State32": "int32_t",
    "CodePointer": "size_t",
    "DataPointer": "size_t",
    "EntityPointer": "Entity*",
    "EntityUID": "int32_t",
    "EntityDBID": "ENT_TYPE",
    "TextureDBID": "TEXTURE",
    "EntityUIDPointer": "size_t",
    "EntityDBPointer": "EntityDB*",
    "ParticleDBPointer": "ParticleDB*",
    "ParticleDBID": "uint32_t",
    "TextureDBPointer": "Texture*",
    "TextureDBID": "int64_t",
    "Vector": "Vector",
    "ConstCharPointer": "const char*",
    "ConstCharPointerPointer": "const char**",
    "UTF16Char": "char16_t",
    "StringsTableID": "STRINGID",
    "CharacterDBID": "uint8_t",
    "Map": "std::map<?,?>",
    "UnorderedMap": "std::unordered_map<?,?>",
    "StdVector": "std::vector<?>",
    "StdList": "std::list<?>",
    "StdListIteratorPointer": "std::list<?>::const_iterator",
    #"UnorderedMapBucketPointer": "",
}
# Top-level game structs emitted into main_structs.txt.
main_structs = [
    "GameManager",
    "State",
    "SaveGame",
    "LevelGen",
    "EntityDB",
    "ParticleDB",
    "TextureDB",
    "CharacterDB",
    "Online"]
def format_type(type):
    """Render one JSON field entry as a C++ member declaration string."""
    kind = type["type"]
    if kind == "Skip":
        # Unnamed padding: emit a byte array covering the skipped size.
        return "{} skip[{}];".format(cpp_types["Byte"], type["offset"])
    if kind == "UTF16StringFixedSize":
        # The offset is in bytes; char16_t elements are two bytes each.
        return "char16_t {}[{}];".format(type["field"], int(type["offset"] / 2))
    if kind == "UTF8StringFixedSize":
        return "char {}[{}];".format(type["field"], type["offset"])  # unsure if correct
    if kind in inline_struct_types:
        declared = kind
    elif kind in cpp_types:
        declared = cpp_types[kind]
    else:
        # Unknown types are assumed to be pointers to other game structs.
        declared = kind + "*"
    return declared + " " + type["field"] + ";"
def write_vars(ent_type, file, remove_item):
    """Write every field of *ent_type* as C++ member lines, then close the struct.

    Args:
        ent_type: Key into the module-level ``all_types`` table.
        file: Open text file the declarations are appended to.
        remove_item: When truthy, pop *ent_type* from ``all_types`` so the
            final dump only lists types that were not emitted elsewhere.
    """
    for var in all_types[ent_type]:
        # Movable carries a pseudo-entry describing its vtable functions;
        # it is not a real data field, so skip it.
        if "vftablefunctions" in var:
            continue
        pieces = [" "]
        if var["type"] == "VirtualFunctionTable" or var["field"] == "__vftable":
            # Comment out vtable pointers: not plain data members.
            pieces.append("//")
        pieces.append(format_type(var))
        if "comment" in var:
            pieces.append(" //" + var["comment"])
        pieces.append("\n")
        file.write("".join(pieces))
    file.write("};\n\n")
    if remove_item:
        all_types.pop(ent_type)
# Interactive driver: each section writes one .txt file of generated C++.
user_input = input("Generate game structs? (Y/N)")
if user_input == 'Y' or user_input == 'y':
    # Inline (embedded) struct types.
    with open("inline_structs.txt", 'w') as inline_file:
        inline_file.write("\n")
        for struct in inline_struct_types:
            inline_file.write("struct " + struct + "\n{\n")
            write_vars(struct, inline_file, 1)
    # Types referenced through pointers.
    with open("pointers.txt", 'w') as pointers_file:
        pointers_file.write("\n")
        for struct in pointer_types:
            pointers_file.write("struct " + struct + "\n{\n")
            write_vars(struct, pointers_file, 1)
    # Top-level game state structs.
    with open("main_structs.txt", 'w') as main_structs_file:
        main_structs_file.write("\n")
        for struct in main_structs:
            main_structs_file.write("struct " + struct + "\n{\n")
            write_vars(struct, main_structs_file, 1)
    print("DONE")
    print()
user_input2 = input("Generate entities? (Y/N)")
if user_input2 == 'Y' or user_input2 == 'y':
    # Entity classes follow the inheritance tree from the JSON.
    with open("entities.txt", 'w') as entities_file:
        for ent_class in entity_class_hierarchy:
            entities_file.write("class " + ent_class + " : public " + entity_class_hierarchy[ent_class] + "\n{\n")
            entities_file.write(" public:\n")
            write_vars(ent_class, entities_file, 1)
    print("DONE")
    print()
# Whatever write_vars() did not pop above is dumped for manual review.
if (user_input == 'Y' or user_input == 'y') and (user_input2 == 'Y' or user_input2 == 'y'):
    print("dumping the 'unused' types into dump.txt")
    with open("dump.txt", 'w') as dump_file:
        for struct in all_types:
            dump_file.write("struct " + struct + "\n{\n")
            write_vars(struct, dump_file, 0)
| import re
import json
import urllib.request
print("Preparing ...")
# # Download Spelunky2.json from the x64dbg github repo
# #url = "https://gitcdn.link/repo/spelunky-fyi/Spelunky2X64DbgPlugin/master/resources/Spelunky2.json"
# url = "https://raw.githubusercontent.com/spelunky-fyi/Spelunky2X64DbgPlugin/master/resources/Spelunky2.json"
# response = urllib.request.urlopen(url)
# spelunky2json = response.read().decode('utf-8')
spelunky2json = open("Spelunky2.json", "r").read()
j = json.loads(re.sub("//.*", "", spelunky2json, flags=re.MULTILINE))
#default_entity_types = j["default_entity_types"]
entity_class_hierarchy = j["entity_class_hierarchy"]
pointer_types = j["pointer_types"]
inline_struct_types = j["inline_struct_types"]
all_types = j["fields"]
entity_class_hierarchy["Entity"]="Entity" #add missing 'Entity' type for convenience
# Remove the standard types
inline_struct_types.remove("Map")
inline_struct_types.remove("UnorderedMap")
#inline_struct_types.remove("StdVector")
inline_struct_types.remove("StdList")
pointer_types.remove("StdListIteratorPointer")
pointer_types.remove("UnorderedMapBucketPointer")
# ent_base_types = [ "FLOOR_",
# "FLOORSTYLED_",
# "DECORATION_",
# "EMBED_"
# "CHAR_"
# "MONS_",
# "ITEM_",
# "ACTIVEFLOOR_",
# "FX_",
# "BG_",
# "MIDBG"
# "LOGICAL_",
# "MOUNT_",
# "LIQUID_"]
cpp_types = {
"Bool": "bool",
"Byte": "int8_t",
"UnsignedByte": "uint8_t",
"Word": "int16_t",
"UnsignedWord": "uint16_t",
"Dword": "int32_t",
"UnsignedDword": "uint32_t",
"Qword": "int64_t",
"UnsignedQword": "uint64_t",
"Float": "float",
"Flags8": "uint8_t",
"Flags16": "uint16_t",
"Flags32": "uint32_t",
"State8": "int8_t",
"State16": "int16_t",
"State32": "int32_t",
"CodePointer": "size_t",
"DataPointer": "size_t",
"EntityPointer": "Entity*",
"EntityUID": "int32_t",
"EntityDBID": "ENT_TYPE",
"TextureDBID": "TEXTURE",
"EntityUIDPointer": "size_t",
"EntityDBPointer": "EntityDB*",
"ParticleDBPointer": "ParticleDB*",
"ParticleDBID": "uint32_t",
"TextureDBPointer": "Texture*",
"TextureDBID": "int64_t",
"Vector": "Vector",
"ConstCharPointer": "const char*",
"ConstCharPointerPointer": "const char**",
"UTF16Char": "char16_t",
"StringsTableID": "STRINGID",
"CharacterDBID": "uint8_t",
"Map": "std::map<?,?>",
"UnorderedMap": "std::unordered_map<?,?>",
"StdVector": "std::vector<?>",
"StdList": "std::list<?>",
"StdListIteratorPointer": "std::list<?>::const_iterator",
#"UnorderedMapBucketPointer": "",
}
main_structs = [
"GameManager",
"State",
"SaveGame",
"LevelGen",
"EntityDB",
"ParticleDB",
"TextureDB",
"CharacterDB",
"Online"]
def format_type(type):
return_type = ""
if type["type"] == "Skip":
return cpp_types["Byte"] + " skip[" + str(type["offset"]) + "];"
elif type["type"] == "UTF16StringFixedSize":
return "char16_t " + type["field"] + "[" + str(int(type["offset"]/2)) + "];"
elif type["type"] == "UTF8StringFixedSize":
return "char " + type["field"] + "[" + str(type["offset"]) + "];" #unsure if correct
elif type["type"] in inline_struct_types:
return_type = type["type"]
elif type["type"] not in cpp_types:
return_type = type["type"] + "*"
else:
return_type = cpp_types[type["type"]]
return return_type + " " + type["field"] + ";"
def write_vars(ent_type, file, remove_item):
for var in all_types[ent_type]:
if "vftablefunctions" in var: #exception for Movable
continue
file.write(" ")
if var["type"] == "VirtualFunctionTable" or var["field"] == "__vftable":
file.write("//")
file.write(format_type(var))
if "comment" in var:
file.write(" //" + var["comment"])
file.write("\n")
file.write("};\n\n")
if remove_item:
all_types.pop(ent_type)
user_input = input("Generate game structs? (Y/N)")
if user_input == 'Y' or user_input == 'y':
with open("inline_structs.txt", 'w') as inline_file:
inline_file.write("\n")
for struct in inline_struct_types:
inline_file.write("struct " + struct + "\n{\n")
write_vars(struct, inline_file, 1)
with open("pointers.txt", 'w') as pointers_file:
pointers_file.write("\n")
for struct in pointer_types:
pointers_file.write("struct " + struct + "\n{\n")
write_vars(struct, pointers_file, 1)
with open("main_structs.txt", 'w') as main_structs_file:
main_structs_file.write("\n")
for struct in main_structs:
main_structs_file.write("struct " + struct + "\n{\n")
write_vars(struct, main_structs_file, 1)
print("DONE")
print()
user_input2 = input("Generate entities? (Y/N)")
if user_input2 == 'Y' or user_input2 == 'y':
with open("entities.txt", 'w') as entities_file:
for ent_class in entity_class_hierarchy:
entities_file.write("class " + ent_class + " : public " + entity_class_hierarchy[ent_class] + "\n{\n")
entities_file.write(" public:\n")
write_vars(ent_class, entities_file, 1)
print("DONE")
print()
if (user_input == 'Y' or user_input == 'y') and (user_input2 == 'Y' or user_input2 == 'y'):
print("dumping the 'unused' types into dump.txt")
with open("dump.txt", 'w') as dump_file:
for struct in all_types:
dump_file.write("struct " + struct + "\n{\n")
write_vars(struct, dump_file, 0)
| en | 0.48106 | # # Download Spelunky2.json from the x64dbg github repo # #url = "https://gitcdn.link/repo/spelunky-fyi/Spelunky2X64DbgPlugin/master/resources/Spelunky2.json" # url = "https://raw.githubusercontent.com/spelunky-fyi/Spelunky2X64DbgPlugin/master/resources/Spelunky2.json" # response = urllib.request.urlopen(url) # spelunky2json = response.read().decode('utf-8') #default_entity_types = j["default_entity_types"] #add missing 'Entity' type for convenience # Remove the standard types #inline_struct_types.remove("StdVector") # ent_base_types = [ "FLOOR_", # "FLOORSTYLED_", # "DECORATION_", # "EMBED_" # "CHAR_" # "MONS_", # "ITEM_", # "ACTIVEFLOOR_", # "FX_", # "BG_", # "MIDBG" # "LOGICAL_", # "MOUNT_", # "LIQUID_"] #"UnorderedMapBucketPointer": "", #unsure if correct #exception for Movable | 2.84496 | 3 |
scripts/python/0728_self_dividing_numbers.py | weirdcoder247/leetcode_solutions | 1 | 6615021 | class Solution(object):
def selfDividingNumbers(self, left, right):
"""
:type left: int
:type right: int
:rtype: List[int]
"""
sdn_list = []
for i in range(left, right + 1):
digits = [int(x) for x in list(str(i))]
for j in digits:
flag = True
if j is 0 or i % j != 0:
flag = False
break
if flag:
sdn_list.append(i)
return sdn_list
def main():
    """Demo driver: return the self-dividing numbers in [47, 85]."""
    # Fix: the original assigned left/right twice; the first pair (1, 22)
    # was dead code immediately overwritten by these values.
    left = 47
    right = 85
    obj = Solution()
    return obj.selfDividingNumbers(left, right)
# Script entry point: print the demo result.
if __name__ == "__main__":
    print(main())
| class Solution(object):
def selfDividingNumbers(self, left, right):
"""
:type left: int
:type right: int
:rtype: List[int]
"""
sdn_list = []
for i in range(left, right + 1):
digits = [int(x) for x in list(str(i))]
for j in digits:
flag = True
if j is 0 or i % j != 0:
flag = False
break
if flag:
sdn_list.append(i)
return sdn_list
def main():
left = 1
right = 22
left = 47
right = 85
obj = Solution()
return obj.selfDividingNumbers(left, right)
if __name__ == "__main__":
print(main())
| en | 0.346502 | :type left: int :type right: int :rtype: List[int] | 3.686388 | 4 |
tools_dyy/backbone/efficientnet_classifier.py | hukefei/chongqing_contest | 1 | 6615022 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
import time
import copy
from collections import OrderedDict
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Train *model* and return it loaded with the best-validation weights.

    Relies on the module-level ``dataloaders`` and ``dataset_sizes`` dicts
    (keys ``'train'``/``'val'``) defined in the ``__main__`` section —
    NOTE(review): passing them as parameters would remove this hidden
    global coupling.

    Args:
        model: Network to optimize (already moved to the GPU).
        criterion: Loss function.
        optimizer: Optimizer over ``model.parameters()``.
        scheduler: Epoch-based LR scheduler (e.g. MultiStepLR).
        num_epochs: Number of full passes over the training set.

    Returns:
        The model with the best validation-accuracy weights loaded.
    """
    since = time.time()

    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch+1, num_epochs))
        print('-' * 10)

        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode

            running_loss = 0.0
            running_corrects = 0

            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = Variable(inputs.cuda())
                labels = Variable(labels.cuda())

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)

                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # statistics (loss is batch-averaged, so re-weight by size)
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)

            if phase == 'train':
                # Fix: scheduler.step() was called once per *batch*, but the
                # MultiStepLR milestones ([14, 20]) are epoch counts, so the
                # learning rate decayed after just 14 batches. Stepping once
                # per training epoch restores the intended schedule.
                scheduler.step()

            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))

            # deep copy the model when validation accuracy improves
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())

        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))

    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
if __name__ == '__main__':
    from efficientnet_pytorch import EfficientNet
    # Pretrained EfficientNet-B4 with a fresh 7-class head.
    model = EfficientNet.from_pretrained('efficientnet-b4', num_classes=7)
    print(model)
    # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # model = model.to(device)
    model.cuda()
    # Two-GPU data parallelism; saved state_dict keys gain a "module." prefix.
    model = nn.DataParallel(model, device_ids=[0,1])
    # Data augmentation and normalization for training
    # Just normalization for validation
    data_transforms = {
        'train': transforms.Compose([
            transforms.RandomResizedCrop(380),
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            transforms.ColorJitter(0.05, 0.05, 0.05, 0.05),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'val': transforms.Compose([
            transforms.Resize(400),
            # NOTE(review): RandomCrop at validation makes eval metrics
            # non-deterministic; CenterCrop is the usual choice.
            transforms.RandomCrop(380),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
    }
    # Expects data_dir/train/<class>/... and data_dir/val/<class>/...
    data_dir = '/home/dyy/a/CQ/pg_cls/'
    image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                              data_transforms[x])
                      for x in ['train', 'val']}
    dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=16,
                                                  shuffle=True, num_workers=2)
                   for x in ['train', 'val']}
    dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
    # class_names = image_datasets['train'].classes

    criterion = nn.CrossEntropyLoss()
    # Observe that all parameters are being optimized
    # NOTE(review): lr=0.1 is aggressive for fine-tuning a pretrained net.
    optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
    # Decay LR by a factor of 0.1 at epochs 14 and 20.
    exp_lr_scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[14, 20], gamma=0.1)

    model_ft = train_model(model, criterion, optimizer, exp_lr_scheduler, num_epochs=24)
    # Save on CPU, then rewrite the checkpoint with DataParallel's
    # "module." key prefix stripped so it loads into a plain model.
    torch.save(model_ft.cpu().state_dict(), 'efficientnet-b4_finetuned.pth')

    net = torch.load('efficientnet-b4_finetuned.pth')
    state_dict = OrderedDict()
    for i, (k,v) in enumerate(net.items()):
        name = k.replace('module.', '')
        state_dict[name] = v
    torch.save(state_dict, 'efficientnet-b4_finetuned_pg.pth')
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
import time
import copy
from collections import OrderedDict
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Train ``model`` and return it loaded with the best validation weights.

    Parameters
    ----------
    model : nn.Module
        Model already placed on the GPU (possibly wrapped in DataParallel).
    criterion : callable
        Loss function, e.g. ``nn.CrossEntropyLoss()``.
    optimizer : torch.optim.Optimizer
    scheduler : learning-rate scheduler, stepped once per *training epoch*.
    num_epochs : int

    Relies on the module-level ``dataloaders`` and ``dataset_sizes`` dicts
    (keys ``'train'`` and ``'val'``).
    """
    since = time.time()

    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch + 1, num_epochs))
        print('-' * 10)

        # Each epoch has a training and a validation phase.
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode

            running_loss = 0.0
            running_corrects = 0

            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                # ``Variable`` has been a no-op since PyTorch 0.4; moving the
                # tensors onto the GPU is all that is required.
                inputs = inputs.cuda()
                labels = labels.cuda()

                # zero the parameter gradients
                optimizer.zero_grad()

                # Track gradient history only during training.
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)

                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # statistics
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)

            if phase == 'train':
                # BUGFIX: MultiStepLR milestones are expressed in epochs, so
                # the scheduler must step once per epoch -- stepping it per
                # batch (as before) decays the LR far too early.
                scheduler.step()

            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))

            # Keep a copy of the best weights seen on the validation set.
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())

        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))

    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
if __name__ == '__main__':
    from efficientnet_pytorch import EfficientNet

    # Pretrained EfficientNet-B4 backbone with a 7-way classification head.
    model = EfficientNet.from_pretrained('efficientnet-b4', num_classes=7)
    print(model)
    # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # model = model.to(device)
    model.cuda()
    model = nn.DataParallel(model, device_ids=[0, 1])
    # Data augmentation and normalization for training
    # Just normalization (resize + random crop) for validation
    data_transforms = {
        'train': transforms.Compose([
            transforms.RandomResizedCrop(380),
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            transforms.ColorJitter(0.05, 0.05, 0.05, 0.05),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'val': transforms.Compose([
            transforms.Resize(400),
            transforms.RandomCrop(380),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
    }
    data_dir = '/home/dyy/a/CQ/pg_cls/'
    image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                              data_transforms[x])
                      for x in ['train', 'val']}
    dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=16,
                                                  shuffle=True, num_workers=2)
                   for x in ['train', 'val']}
    dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
    # class_names = image_datasets['train'].classes
    criterion = nn.CrossEntropyLoss()
    # Observe that all parameters are being optimized
    optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
    # Decay LR by a factor of 0.1 at epochs 14 and 20.
    exp_lr_scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[14, 20], gamma=0.1)
    model_ft = train_model(model, criterion, optimizer, exp_lr_scheduler, num_epochs=24)
    torch.save(model_ft.cpu().state_dict(), 'efficientnet-b4_finetuned.pth')

    # The DataParallel wrapper prefixes every state-dict key with 'module.';
    # strip the prefix so the weights can be loaded into a plain model.
    net = torch.load('efficientnet-b4_finetuned.pth')
    state_dict = OrderedDict(
        (k.replace('module.', ''), v) for k, v in net.items())
    torch.save(state_dict, 'efficientnet-b4_finetuned_pg.pth')
| en | 0.717808 | #!/usr/bin/env python # -*- coding: utf-8 -*- # Each epoch has a training and validation phase # Set model to training mode # Set model to evaluate mode # Iterate over data. # zero the parameter gradients # forward # track history if only in train # backward + optimize only if in training phase # statistics # deep copy the model # load best model weights # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # model = model.to(device) # Data augmentation and normalization for training # Just normalization for validation # class_names = image_datasets['train'].classes # Observe that all parameters are being optimized # Decay LR by a factor of 0.1 | 2.485801 | 2 |
SIPSim/Commands/qSIP_atomExcess.py | arischwartz/test | 2 | 6615023 | <reponame>arischwartz/test
#!/usr/bin/env python
"""
qSIP_atomExcess: calculate isotope enrichment from qSIP data
Usage:
qSIP_atomExcess [options] <OTU_table> <exp_design>
qSIP_atomExcess -h | --help
qSIP_atomExcess --version
Options:
<OTU_table> OTU table file
(must contain an 'abs_abund' column).
<exp_design> Experimental design table. (See Description)
-i=<i> Isotope (13C or 18O).
[Default: 13C]
-n=<n> Number of bootstrap replicates to calculate CIs.
[Default: 1000]
-a=<a> Alpha for confidence interval.
[Default: 0.1]
--np=<np> Number of processors.
[Default: 1]
--byBoot Parallelization by bootstrap replicate instead of taxon.
(useful if running many bootstrap reps on few taxa)
--version Show version.
--debug Debug mode (no multiprocessing).
-h --help Show this screen.
Description:
'exp_design' input file
-----------------------
2-column table: <library><tab><control|treatment>
__Example-start__
1<tab>control
2<tab>treatment
3<tab>control
4<tab>treatment
--Example-end--
References:
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, et
al. (2015). Quantitative Microbial Ecology Through Stable Isotope Probing.
Appl Environ Microbiol AEM.02280-15.
"""
# import
## batteries
from docopt import docopt
import sys
import os
from functools import partial
## 3rd party
import numpy as np
import pandas as pd
## application libraries
from SIPSim.OTU_Table import OTU_table
from SIPSim import QSIP_atomExcess
def opt_parse(args=None):
if args is None:
args = docopt(__doc__, version='0.1')
else:
args = docopt(__doc__, version='0.1', argv=args)
otu = QSIP_atomExcess.qSIP_atomExcess(args)
otu.to_csv(sys.stdout, sep='\t', index=False)
| #!/usr/bin/env python
"""
qSIP_atomExcess: calculate isotope enrichment from qSIP data
Usage:
qSIP_atomExcess [options] <OTU_table> <exp_design>
qSIP_atomExcess -h | --help
qSIP_atomExcess --version
Options:
<OTU_table> OTU table file
(must contain an 'abs_abund' column).
<exp_design> Experimental design table. (See Description)
-i=<i> Isotope (13C or 18O).
[Default: 13C]
-n=<n> Number of bootstrap replicates to calculate CIs.
[Default: 1000]
-a=<a> Alpha for confidence interval.
[Default: 0.1]
--np=<np> Number of processors.
[Default: 1]
--byBoot Parallelization by bootstrap replicate instead of taxon.
(useful if running many bootstrap reps on few taxa)
--version Show version.
--debug Debug mode (no multiprocessing).
-h --help Show this screen.
Description:
'exp_design' input file
-----------------------
2-column table: <library><tab><control|treatment>
__Example-start__
1<tab>control
2<tab>treatment
3<tab>control
4<tab>treatment
--Example-end--
References:
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, et
al. (2015). Quantitative Microbial Ecology Through Stable Isotope Probing.
Appl Environ Microbiol AEM.02280-15.
"""
# import
## batteries
from docopt import docopt
import sys
import os
from functools import partial
## 3rd party
import numpy as np
import pandas as pd
## application libraries
from SIPSim.OTU_Table import OTU_table
from SIPSim import QSIP_atomExcess
def opt_parse(args=None):
    """Parse command-line args and write the qSIP atom-excess table to stdout.

    Parameters
    ----------
    args : list of str, optional
        Argument vector for docopt; when ``None``, docopt falls back to
        ``sys.argv[1:]`` itself, so a single call covers both cases.
    """
    args = docopt(__doc__, version='0.1', argv=args)
    otu = QSIP_atomExcess.qSIP_atomExcess(args)
    otu.to_csv(sys.stdout, sep='\t', index=False)
otu.to_csv(sys.stdout, sep='\t', index=False) | en | 0.49053 | #!/usr/bin/env python qSIP_atomExcess: calculate isotope enrichment from qSIP data Usage: qSIP_atomExcess [options] <OTU_table> <exp_design> qSIP_atomExcess -h | --help qSIP_atomExcess --version Options: <OTU_table> OTU table file (must contain an 'abs_abund' column). <exp_design> Experimental design table. (See Description) -i=<i> Isotope (13C or 18O). [Default: 13C] -n=<n> Number of bootstrap replicates to calculate CIs. [Default: 1000] -a=<a> Alpha for confidence interval. [Default: 0.1] --np=<np> Number of processors. [Default: 1] --byBoot Parallelization by bootstrap replicate instead of taxon. (useful if running many bootstrap reps on few taxa) --version Show version. --debug Debug mode (no multiprocessing). -h --help Show this screen. Description: 'exp_design' input file ----------------------- 2-column table: <library><tab><control|treatment> __Example-start__ 1<tab>control 2<tab>treatment 3<tab>control 4<tab>treatment --Example-end-- References: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, et al. (2015). Quantitative Microbial Ecology Through Stable Isotope Probing. Appl Environ Microbiol AEM.02280-15. # import ## batteries ## 3rd party ## application libraries | 1.953594 | 2 |
circular/test_.py | technolingo/AlgoStructuresPy | 0 | 6615024 | <reponame>technolingo/AlgoStructuresPy
from .linkedlist import LinkedList, Node
from .index import is_circular
class TestIsCircular():
def test_circular(self):
d = Node('d')
c = Node('c', d)
b = Node('b', c)
a = Node('a', b)
d.next = b
llst = LinkedList(a)
assert llst.get_first().data == 'a'
assert llst.get_first().next == b
assert is_circular(llst) is True
def test_non_circular(self):
d = Node('d')
c = Node('c', d)
b = Node('b', c)
a = Node('a', b)
llst = LinkedList(a)
assert llst.get_first().data == 'a'
assert llst.get_first().next == b
assert is_circular(llst) is False
| from .linkedlist import LinkedList, Node
from .index import is_circular
class TestIsCircular():
    """Tests for ``is_circular`` over a hand-built linked list a->b->c->d."""

    def test_circular(self):
        # Tail node 'd' points back to 'b', forming a cycle.
        d = Node('d')
        c = Node('c', d)
        b = Node('b', c)
        a = Node('a', b)
        d.next = b

        llst = LinkedList(a)
        first = llst.get_first()
        assert first.data == 'a'
        assert first.next == b
        assert is_circular(llst) is True

    def test_non_circular(self):
        # Same chain without the back-edge: the list terminates at 'd'.
        d = Node('d')
        c = Node('c', d)
        b = Node('b', c)
        a = Node('a', b)

        llst = LinkedList(a)
        first = llst.get_first()
        assert first.data == 'a'
        assert first.next == b
        assert is_circular(llst) is False
wsgi.py | pentestfail/CAPCollector | 11 | 6615025 | <gh_stars>10-100
"""WSGI config for CAPCollector project.
It exposes the WSGI callable as a module-level variable named "application".
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
import sys
# AppEngine third-party apps path include.
if "SERVER_SOFTWARE" in os.environ:
if os.environ.get("SERVER_SOFTWARE"): # AppEngine.
sys.path.insert(
0, os.path.join(os.path.dirname(os.path.dirname(__file__)), "libs"))
else: # AppEngine managed VMs.
sys.path.insert(0, "/usr/local/lib/python2.7/dist-packages/")
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CAPCollector.settings")
application = get_wsgi_application()
| """WSGI config for CAPCollector project.
It exposes the WSGI callable as a module-level variable named "application".
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
import sys
# AppEngine third-party apps path include.
if "SERVER_SOFTWARE" in os.environ:
if os.environ.get("SERVER_SOFTWARE"): # AppEngine.
sys.path.insert(
0, os.path.join(os.path.dirname(os.path.dirname(__file__)), "libs"))
else: # AppEngine managed VMs.
sys.path.insert(0, "/usr/local/lib/python2.7/dist-packages/")
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CAPCollector.settings")
application = get_wsgi_application() | en | 0.795086 | WSGI config for CAPCollector project. It exposes the WSGI callable as a module-level variable named "application". For more information on this file, see https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/ # AppEngine third-party apps path include. # AppEngine. # AppEngine managed VMs. | 2.130332 | 2 |
so-tetris-python/display/so_event_listener.py | soaprasri/so-tetris-python | 0 | 6615026 | <reponame>soaprasri/so-tetris-python
import pygame
from config.board_config import BoardConfig
import logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s | %(name)s | %(levelname)s | %(message)s')
logger = logging.getLogger(__name__)
class TetrisEventListener():
"""PyGame events listener
Receives all pygame events and pushes them to appropriate modules
"""
def __init__(self, board) -> None:
self.board = board
self.app_exit = False
self.current_game_over = False
def on_event(self, event):
if(event.type == pygame.QUIT):
self.app_exit = True
logger.info("User requesting QUIT!!")
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_w or event.key == pygame.K_UP:
self.board.move(BoardConfig.DIRECTION_UP)
elif event.key == pygame.K_s or event.key == pygame.K_DOWN:
self.board.move(BoardConfig.DIRECTION_DOWN)
pass
elif event.key == pygame.K_a or event.key == pygame.K_LEFT:
self.board.move(BoardConfig.DIRECTION_LEFT)
elif event.key == pygame.K_d or event.key == pygame.K_RIGHT:
self.board.move(BoardConfig.DIRECTION_RIGHT)
return (self.app_exit, self.current_game_over) | import pygame
from config.board_config import BoardConfig
import logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s | %(name)s | %(levelname)s | %(message)s')
logger = logging.getLogger(__name__)
class TetrisEventListener():
    """PyGame events listener.

    Receives all pygame events and routes movement keys (WASD / arrows)
    to the board; tracks whether the user asked to quit.
    """

    def __init__(self, board) -> None:
        self.board = board
        self.app_exit = False           # set once the user requests quit
        self.current_game_over = False  # reserved for game-over signalling

    def on_event(self, event):
        """Handle one pygame event and return ``(app_exit, game_over)``."""
        if event.type == pygame.QUIT:
            self.app_exit = True
            logger.info("User requesting QUIT!!")
        elif event.type == pygame.KEYDOWN:
            # Both WASD and the arrow keys drive the piece.
            if event.key == pygame.K_w or event.key == pygame.K_UP:
                self.board.move(BoardConfig.DIRECTION_UP)
            elif event.key == pygame.K_s or event.key == pygame.K_DOWN:
                self.board.move(BoardConfig.DIRECTION_DOWN)
            elif event.key == pygame.K_a or event.key == pygame.K_LEFT:
                self.board.move(BoardConfig.DIRECTION_LEFT)
            elif event.key == pygame.K_d or event.key == pygame.K_RIGHT:
                self.board.move(BoardConfig.DIRECTION_RIGHT)
        return (self.app_exit, self.current_game_over)
return (self.app_exit, self.current_game_over) | en | 0.890448 | PyGame events listener Receives all pygame events and pushes them to appropriate modules | 2.975185 | 3 |
amt/cli.py | bellockk/amtool | 0 | 6615027 | # -*- coding: utf-8 -*-
"""Console script for amtool."""
import os
import sys
import importlib
import click
import logging
import click_log
from pkg_resources import iter_entry_points
from click_plugins import with_plugins
click_log.basic_config()
# Define Entry Point Command
@with_plugins(iter_entry_points('amt.plugins'))
@click.group()
@click_log.simple_verbosity_option(default='WARNING')
def main(args=None):
"""Console script for amtool."""
logging.info('A message')
return 0
# Load Commands from Subdirectories
SCRIPT_PATH = os.path.abspath(os.path.dirname(__file__))
for m in next(os.walk(SCRIPT_PATH))[1]:
try:
commands = importlib.import_module(f"amt.{m}.command")
for command in [c for c in importlib.import_module(
f"amt.{m}.command").__dict__.values() if isinstance(
c, click.core.Command)]:
main.add_command(command)
except ModuleNotFoundError:
pass
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
| # -*- coding: utf-8 -*-
"""Console script for amtool."""
import os
import sys
import importlib
import click
import logging
import click_log
from pkg_resources import iter_entry_points
from click_plugins import with_plugins
click_log.basic_config()
# Define Entry Point Command
# Plugins registered under the 'amt.plugins' entry point are attached to the
# group automatically; click_log adds a --verbosity option (default WARNING).
@with_plugins(iter_entry_points('amt.plugins'))
@click.group()
@click_log.simple_verbosity_option(default='WARNING')
def main(args=None):
    """Console script for amtool."""
    # NOTE(review): ``args`` is currently unused -- click supplies arguments
    # itself; confirm whether the parameter can be dropped.
    logging.info('A message')
    return 0
# Load Commands from Subdirectories
SCRIPT_PATH = os.path.abspath(os.path.dirname(__file__))
for m in next(os.walk(SCRIPT_PATH))[1]:
    try:
        # Import each subpackage's ``command`` module once and register every
        # click Command it defines on the top-level group.  (The original
        # imported the same module a second time inside the comprehension.)
        commands = importlib.import_module(f"amt.{m}.command")
        for command in [c for c in commands.__dict__.values()
                        if isinstance(c, click.core.Command)]:
            main.add_command(command)
    except ModuleNotFoundError:
        # Subdirectories without a command module are simply skipped.
        pass

if __name__ == "__main__":
    sys.exit(main())  # pragma: no cover
| en | 0.733883 | # -*- coding: utf-8 -*- Console script for amtool. # Define Entry Point Command Console script for amtool. # Load Commands from Subdirectories # pragma: no cover | 2.034415 | 2 |
velvet/utils.py | sgwoodjr/velvet | 0 | 6615028 | <filename>velvet/utils.py
"""Utility Functions
"""
#------------------------------------------------------------------------
# Copyright (c) 2015 SGW
#
# Distributed under the terms of the New BSD License.
#
# The full License is in the file LICENSE
#------------------------------------------------------------------------
from __future__ import print_function
import sys
import numpy as np
# Public API
__all__ = ['isodd', 'ProgressBar']
def isodd(x):
""" Check if a number is odd
b = isodd(x) returns 1 (True) if x is odd, otherwise it returns 0
(False).
If x is a ndarray then b is an ndarray with each element
set to 0 or 1, correspdonding to the value in each element of x.
Parameters
-----------
x : {scalar, ndarray} : int value
Input data
Returns
--------
b : {scalar, ndarray}
Examples
---------
>>> import numpy as np
>>> import velvet as vt
>>> x = np.array([1,2,3])
>>> vt.isodd(x)
array([ 1, 0, 1])
"""
# Error check input
if isinstance(x, np.ndarray):
if x.dtype != int:
raise ValueError("input array must have int values")
elif not isinstance(x, int):
raise ValueError("input must be an int")
return x & 1
class ProgressBar:
"""Print a progress bar to the terminal
Code grabbed from an Ipython Notebook example.
Example useage:
P = ProgressBar(10)
for ii in arange(10):
p.animate(ii+1)
"""
def __init__(self, iterations):
self.iterations = iterations
self.prog_bar = '[]'
self.fill_char = '*'
self.width = 50
self.__update_amount(0)
def animate(self, iter):
print('\r', self, end=' ')
sys.stdout.flush()
self.update_iteration(iter + 1)
def update_iteration(self, elapsed_iter):
self.__update_amount((elapsed_iter / float(self.iterations)) * 100.0)
self.prog_bar += ' %d of %s complete' % (elapsed_iter, self.iterations)
def __update_amount(self, new_amount):
percent_done = int(round((new_amount / 100.0) * 100.0))
all_full = self.width - 2
num_hashes = int(round((percent_done / 100.0) * all_full))
self.prog_bar = '[' + self.fill_char * num_hashes + ' ' * (all_full - num_hashes) + ']'
pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
pct_string = '%d%%' % percent_done
self.prog_bar = self.prog_bar[0:pct_place] + \
(pct_string + self.prog_bar[pct_place + len(pct_string):])
def __str__(self):
return str(self.prog_bar)
| <filename>velvet/utils.py
"""Utility Functions
"""
#------------------------------------------------------------------------
# Copyright (c) 2015 SGW
#
# Distributed under the terms of the New BSD License.
#
# The full License is in the file LICENSE
#------------------------------------------------------------------------
from __future__ import print_function
import sys
import numpy as np
# Public API
__all__ = ['isodd', 'ProgressBar']
def isodd(x):
    """ Check if a number is odd

    b = isodd(x) returns 1 (True) if x is odd, otherwise it returns 0
    (False).

    If x is a ndarray then b is an ndarray with each element
    set to 0 or 1, corresponding to the value in each element of x.

    Parameters
    -----------
    x : {scalar, ndarray} : int value
        Input data.  Python ints, NumPy integer scalars, and arrays of any
        integer dtype (signed or unsigned, any width) are accepted.

    Returns
    --------
    b : {scalar, ndarray}

    Examples
    ---------
    >>> import numpy as np
    >>> import velvet as vt
    >>> x = np.array([1,2,3])
    >>> vt.isodd(x)
    array([1, 0, 1])

    """
    # Error check input.  ``np.issubdtype`` accepts every integer dtype;
    # the old ``x.dtype != int`` comparison wrongly rejected e.g. int32
    # arrays on platforms where the default int is int64.
    if isinstance(x, np.ndarray):
        if not np.issubdtype(x.dtype, np.integer):
            raise ValueError("input array must have int values")
    elif not isinstance(x, (int, np.integer)):
        raise ValueError("input must be an int")

    # Odd numbers have their least-significant bit set.
    return x & 1
class ProgressBar:
    """Render a textual progress bar on the terminal.

    (Adapted from an IPython Notebook example.)

    Example::

        p = ProgressBar(10)
        for ii in range(10):
            p.animate(ii + 1)
    """

    def __init__(self, iterations):
        self.iterations = iterations
        self.prog_bar = '[]'
        self.fill_char = '*'
        self.width = 50
        self.__render(0)

    def animate(self, iter):
        # The carriage return rewinds to the start of the line so each call
        # overdraws the previous bar in place.
        print('\r', self, end=' ')
        sys.stdout.flush()
        self.update_iteration(iter + 1)

    def update_iteration(self, elapsed_iter):
        self.__render(elapsed_iter / float(self.iterations) * 100.0)
        self.prog_bar += ' %d of %s complete' % (elapsed_iter, self.iterations)

    def __render(self, amount):
        # The double scaling mirrors the original arithmetic exactly;
        # floating-point rounding makes it observable at .5 boundaries.
        percent_done = int(round((amount / 100.0) * 100.0))
        inner_width = self.width - 2
        n_filled = int(round((percent_done / 100.0) * inner_width))
        bar = '[%s%s]' % (self.fill_char * n_filled,
                          ' ' * (inner_width - n_filled))
        # Overlay the percentage label near the middle of the bar.
        label = '%d%%' % percent_done
        pos = (len(bar) // 2) - len(str(percent_done))
        self.prog_bar = bar[:pos] + label + bar[pos + len(label):]

    def __str__(self):
        return str(self.prog_bar)
| en | 0.48683 | Utility Functions #------------------------------------------------------------------------ # Copyright (c) 2015 SGW # # Distributed under the terms of the New BSD License. # # The full License is in the file LICENSE #------------------------------------------------------------------------ # Public API Check if a number is odd b = isodd(x) returns 1 (True) if x is odd, otherwise it returns 0 (False). If x is a ndarray then b is an ndarray with each element set to 0 or 1, correspdonding to the value in each element of x. Parameters ----------- x : {scalar, ndarray} : int value Input data Returns -------- b : {scalar, ndarray} Examples --------- >>> import numpy as np >>> import velvet as vt >>> x = np.array([1,2,3]) >>> vt.isodd(x) array([ 1, 0, 1]) # Error check input Print a progress bar to the terminal Code grabbed from an Ipython Notebook example. Example useage: P = ProgressBar(10) for ii in arange(10): p.animate(ii+1) | 2.672522 | 3 |
boa3_test/test_sc/native_test/stdlib/MemoryCompareTooManyArguments.py | OnBlockIO/neo3-boa | 25 | 6615029 | from typing import Any, Union
from boa3.builtin.nativecontract.stdlib import StdLib
def main(mem1: Union[bytes, str], mem2: Union[bytes, str], arg: Any) -> int:
return StdLib.memory_compare(mem1, mem2, arg)
| from typing import Any, Union
from boa3.builtin.nativecontract.stdlib import StdLib
def main(mem1: Union[bytes, str], mem2: Union[bytes, str], arg: Any) -> int:
return StdLib.memory_compare(mem1, mem2, arg)
| none | 1 | 2.287324 | 2 | |
pav_banking/pav_banking/doctype/bank_deposit/bank_deposit.py | alkuhlani/PAV-Banking | 0 | 6615030 | # -*- coding: utf-8 -*-
# Copyright (c) 2021, <NAME> and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from erpnext.accounts.general_ledger import make_gl_entries, merge_similar_entries, delete_gl_entries
from frappe.utils import cint, cstr, formatdate, flt, getdate, nowdate, get_link_to_form
from frappe import _, throw
from erpnext.accounts.utils import get_fiscal_years, validate_fiscal_year, get_account_currency
from erpnext.accounts.doctype.accounting_dimension.accounting_dimension import get_accounting_dimensions
class BankDeposit(Document):
    def on_submit(self):
        # On submission the deposit becomes active and its opening GL
        # entries (current account <-> deposit account) are posted.
        self.status = 'Open'
        self.make_gl_entries()
    def on_cancel(self):
        # Cancelling marks the deposit and reverses the posted GL entries.
        self.status = 'Cancel'
        self.make_gl_entries(cancel=True)
    def set_status(self, status=None):
        """Persist ``status``; computed via :meth:`get_status` when omitted."""
        if not status:
            status = self.get_status()
        self.db_set("status", status)
    def get_status(self):
        """Derive the document status from docstatus and journal progress.

        Draft (docstatus 0) / Cancelled (docstatus 2).  A submitted deposit
        is 'Closed' only when every row of ``bank_deposit_journal`` has a
        linked Journal Entry; otherwise it stays 'Open'.
        """
        if self.docstatus == 0:
            status = "Draft"
        elif self.docstatus == 1:
            status = "Closed"
            for d in self.bank_deposit_journal:
                if not d.journal_entry:
                    status = "Open"
        elif self.docstatus == 2:
            status = "Cancelled"
        return status
def fill_detail_journal(self):
from dateutil import rrule
from datetime import date, datetime
import calendar
link_start = datetime.strptime(self.link_start, '%Y-%m-%d')
due_date = datetime.strptime(self.due_date, '%Y-%m-%d')
details = list(rrule.rrule(rrule.MONTHLY, dtstart = link_start.replace(day=1), until = due_date))
details_len = len(details)
details_rows = []
journal_rows = []
days_total = (due_date - link_start).days
amount_per_day = flt(self.deposit_rate_amount) / days_total
last_amount = 0
benefit_txt = _('Proof of accrued interest on the date ')
reverse_txt = _('reversal of interest accrued from ')
count = 1
if details_len > 1:
for idx, val in enumerate(details):
if idx == 0:
last_day = calendar.monthrange(link_start.year, link_start.month)[1]
last_day_date = datetime(year = val.year, month = val.month, day = last_day)
days = (last_day_date - link_start).days
amount = (amount_per_day * days)
details_rows.append({'date': link_start.date(), 'day_count': days, 'deposit_rate': amount, })
#month end from link start
journal_rows.append({'journal_name': self.name + '-'+ str(count), 'date': last_day_date.date(),
'deposit_rate': amount, 'account_debit': self.deferred_income, 'account_credit': self.bank_deposit_benefits,
'debit_remark': benefit_txt + str(last_day_date.date()), 'credit_remark': benefit_txt + str(last_day_date.date()),})
last_amount = amount
last_date_start = link_start.date()
last_date_end = last_day_date.date()
count += 1
elif idx == (details_len - 1):
days = (due_date - val).days + 1
amount = (amount_per_day * days)
details_rows.append({'date': due_date.date(), 'day_count': days, 'deposit_rate': amount, })
#month start
journal_rows.append({'journal_name': self.name + '-'+ str(count), 'date': val.date(), 'deposit_rate': last_amount,
'account_debit': self.deposit_account, 'account_credit': self.deferred_income,
'debit_remark': benefit_txt + "%s to %s" %(last_date_start, last_date_end),
'credit_remark': reverse_txt + "%s to %s" %(last_date_start, last_date_end),})
count += 1
#due date from month start
journal_rows.append({'journal_name': self.name + '-'+ str(count), 'date': due_date.date(), 'deposit_rate': amount,
'account_debit': self.deposit_account, 'account_credit': self.bank_deposit_benefits,
'debit_remark': benefit_txt + "%s to %s" %(val.date(), due_date.date()),
'credit_remark': benefit_txt + "%s to %s" %(val.date(), due_date.date()),})
else:
days = (calendar.monthrange(val.year, val.month))[1]
amount = (amount_per_day * days)
details_rows.append({'date': val.date(), 'day_count': days, 'deposit_rate': amount, })
#month start
journal_rows.append({'journal_name': self.name + '-'+ str(count), 'date': val.date(), 'deposit_rate': last_amount,
'account_debit': self.deposit_account, 'account_credit': self.deferred_income,
'debit_remark': benefit_txt + "%s to %s" %(last_date_start, last_date_end),
'credit_remark': reverse_txt + "%s to %s" %(last_date_start, last_date_end),})
count += 1
#month end
last_day_date = date(val.year, val.month, calendar.monthrange(val.year, val.month)[1])
journal_rows.append({'journal_name': self.name + '-'+ str(count), 'date': last_day_date, 'deposit_rate': amount,
'account_debit': self.deferred_income, 'account_credit': self.bank_deposit_benefits,
'debit_remark': benefit_txt + str(last_day_date), 'credit_remark': benefit_txt + str(last_day_date),})
last_amount = amount
last_date_start = val.date()
last_date_end = last_day_date
count += 1
elif details_len == 1:
details_rows.append({'date': details[0].date(), 'day_count': days_total, 'deposit_rate': self.deposit_rate_amount, })
journal_rows.append({'journal_name': self.name + '-'+ str(count), 'date': link_start.date(),
'deposit_rate': deposit_rate_amount, 'account_debit': self.deposit_account, 'account_credit': self.bank_deposit_benefits,
'debit_remark': benefit_txt + "%s to %s" %(link_start.date(), due_date.date()),
'credit_remark': benefit_txt + "%s to %s" %(link_start.date(), due_date.date()),})
self.days = days_total
self.day_amount = amount_per_day
self.update({"bank_deposit_detail": details_rows})
self.update({"bank_deposit_journal": journal_rows})
#row = self.append('bank_deposit_detail', {})
    def make_gl_entries(self, cancel = False):
        """Post (or, with ``cancel=True``, reverse) this deposit's GL entries.

        On cancel the GL data of every accrual journal row is rebuilt and
        routed through ``get_row_gl_entry`` so the accruals are reversed too.
        """
        # Nothing to post for a zero-amount deposit.
        if not self.amount:
            return
        gl_entries = self.get_gl_entries()
        if cancel:
            for row in self.bank_deposit_journal:
                r = {'date': row.date, 'deposit_rate': row.deposit_rate, 'debit_remark': row.debit_remark,
                    'credit_remark': row.credit_remark, 'account_debit': row.account_debit, 'account_credit': row.account_credit,}
                # NOTE(review): ``get_row_gl_entry`` is not defined on this
                # class in this file -- confirm it exists elsewhere.
                self.get_row_gl_entry(r, gl_entries)
        if gl_entries:
            # The erpnext module-level helper, not this method (no ``self.``).
            make_gl_entries(gl_entries, cancel= cancel)
    def get_gl_entries(self):
        """Build and return the merged list of GL entries for this deposit."""
        gl_entries = []
        self.make_gl_entry(gl_entries)
        # Collapse rows sharing the same account/party into single entries.
        gl_entries = merge_similar_entries(gl_entries)
        return gl_entries
    def make_gl_entry(self, gl_entries):
        """Append the deposit's opening double entry to ``gl_entries``.

        Credits ``deposit_account`` and debits ``current_account`` for
        ``self.amount``, converting through ``exchange_rate`` when the
        document currency differs from the company currency.
        NOTE(review): the credit/debit direction (credit deposit, debit
        current) looks inverted for moving funds *into* a deposit -- confirm
        against the chart-of-accounts conventions in use.
        """
        gl_entries.append(
            self.get_gl_dict({
                "account": self.deposit_account,
                "against": self.current_account,
                "account_currency": self.currency,
                "credit": (self.amount * self.exchange_rate) if self.currency != self.company_currency
                    else self.amount,
                "credit_in_account_currency": (self.amount),
                "conversion_rate":self.exchange_rate if self.currency != self.company_currency else 1.0,
                "remarks": self.get("remarks") or _(""),
            }, self.currency))
        gl_entries.append(
            self.get_gl_dict({
                "account": self.current_account,
                "against": self.deposit_account,
                "account_currency": self.currency,
                "debit": (self.amount * self.exchange_rate) if self.currency != self.company_currency
                    else self.amount,
                "debit_in_account_currency": (self.amount),
                "conversion_rate":self.exchange_rate if self.currency != self.company_currency else 1.0,
                "remarks": self.get("remarks") or _(""),
            }, self.currency))
    def make_journal_entry(self, date):
        """Create Journal Entries for all accrual rows due on or before ``date``.

        For each ``bank_deposit_journal`` row without a linked Journal Entry
        whose date has been reached, a multi-currency JE is created with the
        row's debit/credit accounts, accounting dimensions are copied in, and
        the JE name is written back to the row.  Returns ``self``.
        """
        frappe.has_permission('Journal Entry', throw=True)
        from erpnext.accounts.doctype.accounting_dimension.accounting_dimension import get_checks_for_pl_and_bs_accounts
        accounting_dimensions = get_checks_for_pl_and_bs_accounts()
        for row in self.get("bank_deposit_journal"):
            # Only rows not yet journalled and already due.
            if not row.journal_entry and getdate(row.date) <= getdate(date):
                je = frappe.new_doc("Journal Entry")
                # je.voucher_type = "Bank Deposit"
                je.posting_date = row.date
                je.company = self.company
                # je.remark = "Bank Deposit Entry against {0} worth {1}".format(self.name, row.journal_name)
                je.multi_currency = 1
                credit_entry = self.get_gl_dict({
                    "reference_type": "Bank Deposit",
                    "reference_name": self.name,
                    "account": row.account_credit,
                    "account_currency": self.currency,
                    "credit": (row.deposit_rate * self.exchange_rate) if self.currency != self.company_currency else row.deposit_rate,
                    "credit_in_account_currency": row.deposit_rate,
                    "exchange_rate":self.exchange_rate if self.currency != self.company_currency else 1.0,
                    "user_remark": row.credit_remark,
                    "cost_center": self.cost_center,
                }, self.currency)
                debit_entry = self.get_gl_dict({
                    "reference_type": "Bank Deposit",
                    "reference_name": self.name,
                    "account": row.account_debit,
                    "account_currency": self.currency,
                    "debit": (row.deposit_rate * self.exchange_rate) if self.currency != self.company_currency else row.deposit_rate,
                    "debit_in_account_currency": row.deposit_rate,
                    "exchange_rate":self.exchange_rate if self.currency != self.company_currency else 1.0,
                    "user_remark": row.debit_remark,
                    "cost_center": self.cost_center,
                }, self.currency)
                # Propagate accounting dimensions required for BS (credit) and
                # PL (debit) accounts, falling back to the configured default.
                for dimension in accounting_dimensions:
                    if (self.get(dimension['fieldname']) or dimension.get('mandatory_for_bs')):
                        credit_entry.update({
                            dimension['fieldname']: self.get(dimension['fieldname']) or dimension.get('default_dimension')
                        })
                    if (self.get(dimension['fieldname']) or dimension.get('mandatory_for_pl')):
                        debit_entry.update({
                            dimension['fieldname']: self.get(dimension['fieldname']) or dimension.get('default_dimension')
                        })
                je.append("accounts", credit_entry)
                je.append("accounts", debit_entry)
                je.flags.ignore_permissions = True
                je.save()
                # Auto-submit only when no approval workflow governs JEs.
                if not je.meta.get_workflow():
                    je.submit()
                row.db_set("journal_entry", je.name)
        self.set_status()
        return self
def get_gl_dict(self, args, account_currency=None, item=None):
"""this method populates the common properties of a gl entry record"""
posting_date = args.get('posting_date') or self.get('posting_date')
fiscal_years = get_fiscal_years(posting_date, company=self.company)
if len(fiscal_years) > 1:
frappe.throw(_("Multiple fiscal years exist for the date {0}. Please set company in Fiscal Year").format(
formatdate(posting_date)))
else:
fiscal_year = fiscal_years[0][0]
gl_dict = frappe._dict({
'company': self.company,
'posting_date': posting_date,
'fiscal_year': fiscal_year,
'voucher_type': self.doctype,
'voucher_no': self.name,
'remarks': "",
'debit': 0,
'credit': 0,
'debit_in_account_currency': 0,
'credit_in_account_currency': 0,
'is_opening':"No",
'party_type': None,
'party': None,
'project': self.get("project")
})
accounting_dimensions = get_accounting_dimensions()
dimension_dict = frappe._dict()
for dimension in accounting_dimensions:
dimension_dict[dimension] = self.get(dimension)
if item and item.get(dimension):
dimension_dict[dimension] = item.get(dimension)
gl_dict.update(dimension_dict)
gl_dict.update(args)
if not account_currency:
account_currency = get_account_currency(gl_dict.account)
self.validate_account_currency(gl_dict.account, account_currency)
set_balance_in_account_currency(gl_dict, account_currency, self.get("exchange_rate"), self.company_currency)
return gl_dict
def validate_account_currency(self, account, account_currency=None):
valid_currency = [self.company_currency]
if self.get("currency") and self.currency != self.company_currency:
valid_currency.append(self.currency)
if account_currency not in valid_currency:
frappe.throw(_("Account {0} is invalid. Account Currency must be {1}")
.format(account, _(" or ").join(valid_currency)))
def set_balance_in_account_currency(gl_dict, account_currency=None, conversion_rate=None, company_currency=None):
if (not conversion_rate) and (account_currency != company_currency):
frappe.throw(_("Account: {0} with currency: {1} can not be selected")
.format(gl_dict.account, account_currency))
gl_dict["account_currency"] = company_currency if account_currency == company_currency \
else account_currency
@frappe.whitelist()
def get_bank_account_details(bank_account, date):
from erpnext.accounts.doctype.payment_entry.payment_entry import get_account_details
account = frappe.db.get_value("Bank Account",
bank_account, ['account','from_account', 'bank', 'bank_account_no'], as_dict=1)
details = get_account_details(account['account'], date)
details2 = get_account_details(account['from_account'], date)
if details['account_currency'] != details2['account_currency']:
frappe.throw(_("Deposit Account currency %s not equal Current Account currency %s"
% (details['account_currency'], details2['account_currency'])))
return
account.update(details)
settings = frappe.get_doc("PAV Banking Setttings")
deferred_income = settings.deferred_income_account
bank_deposit_benefits = settings.bank_deposit_benefits_account
deferred_income_acc = frappe.db.sql("""SELECT name FROM `tabAccount`
WHERE parent_account = %s AND account_currency = %s""" , [deferred_income, account['account_currency']], as_dict = 1)
bank_deposit_benefits_acc = frappe.db.sql("""SELECT name from `tabAccount`
WHERE parent_account = %s AND account_currency = %s""", [bank_deposit_benefits, account['account_currency']], as_dict = 1)
if not deferred_income_acc or not bank_deposit_benefits_acc:
frappe.throw(_( "Accounts Not Exist Please Set PAV Banking Setttings "))
account['deferred_income'] = deferred_income_acc[0]['name']
account['bank_deposit_benefits'] = bank_deposit_benefits_acc[0]['name']
return account | # -*- coding: utf-8 -*-
# Copyright (c) 2021, <NAME> and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from erpnext.accounts.general_ledger import make_gl_entries, merge_similar_entries, delete_gl_entries
from frappe.utils import cint, cstr, formatdate, flt, getdate, nowdate, get_link_to_form
from frappe import _, throw
from erpnext.accounts.utils import get_fiscal_years, validate_fiscal_year, get_account_currency
from erpnext.accounts.doctype.accounting_dimension.accounting_dimension import get_accounting_dimensions
class BankDeposit(Document):
def on_submit(self):
self.status = 'Open'
self.make_gl_entries()
def on_cancel(self):
self.status = 'Cancel'
self.make_gl_entries(cancel=True)
def set_status(self, status=None):
if not status:
status = self.get_status()
self.db_set("status", status)
def get_status(self):
if self.docstatus == 0:
status = "Draft"
elif self.docstatus == 1:
status = "Closed"
for d in self.bank_deposit_journal:
if not d.journal_entry:
status = "Open"
elif self.docstatus == 2:
status = "Cancelled"
return status
def fill_detail_journal(self):
from dateutil import rrule
from datetime import date, datetime
import calendar
link_start = datetime.strptime(self.link_start, '%Y-%m-%d')
due_date = datetime.strptime(self.due_date, '%Y-%m-%d')
details = list(rrule.rrule(rrule.MONTHLY, dtstart = link_start.replace(day=1), until = due_date))
details_len = len(details)
details_rows = []
journal_rows = []
days_total = (due_date - link_start).days
amount_per_day = flt(self.deposit_rate_amount) / days_total
last_amount = 0
benefit_txt = _('Proof of accrued interest on the date ')
reverse_txt = _('reversal of interest accrued from ')
count = 1
if details_len > 1:
for idx, val in enumerate(details):
if idx == 0:
last_day = calendar.monthrange(link_start.year, link_start.month)[1]
last_day_date = datetime(year = val.year, month = val.month, day = last_day)
days = (last_day_date - link_start).days
amount = (amount_per_day * days)
details_rows.append({'date': link_start.date(), 'day_count': days, 'deposit_rate': amount, })
#month end from link start
journal_rows.append({'journal_name': self.name + '-'+ str(count), 'date': last_day_date.date(),
'deposit_rate': amount, 'account_debit': self.deferred_income, 'account_credit': self.bank_deposit_benefits,
'debit_remark': benefit_txt + str(last_day_date.date()), 'credit_remark': benefit_txt + str(last_day_date.date()),})
last_amount = amount
last_date_start = link_start.date()
last_date_end = last_day_date.date()
count += 1
elif idx == (details_len - 1):
days = (due_date - val).days + 1
amount = (amount_per_day * days)
details_rows.append({'date': due_date.date(), 'day_count': days, 'deposit_rate': amount, })
#month start
journal_rows.append({'journal_name': self.name + '-'+ str(count), 'date': val.date(), 'deposit_rate': last_amount,
'account_debit': self.deposit_account, 'account_credit': self.deferred_income,
'debit_remark': benefit_txt + "%s to %s" %(last_date_start, last_date_end),
'credit_remark': reverse_txt + "%s to %s" %(last_date_start, last_date_end),})
count += 1
#due date from month start
journal_rows.append({'journal_name': self.name + '-'+ str(count), 'date': due_date.date(), 'deposit_rate': amount,
'account_debit': self.deposit_account, 'account_credit': self.bank_deposit_benefits,
'debit_remark': benefit_txt + "%s to %s" %(val.date(), due_date.date()),
'credit_remark': benefit_txt + "%s to %s" %(val.date(), due_date.date()),})
else:
days = (calendar.monthrange(val.year, val.month))[1]
amount = (amount_per_day * days)
details_rows.append({'date': val.date(), 'day_count': days, 'deposit_rate': amount, })
#month start
journal_rows.append({'journal_name': self.name + '-'+ str(count), 'date': val.date(), 'deposit_rate': last_amount,
'account_debit': self.deposit_account, 'account_credit': self.deferred_income,
'debit_remark': benefit_txt + "%s to %s" %(last_date_start, last_date_end),
'credit_remark': reverse_txt + "%s to %s" %(last_date_start, last_date_end),})
count += 1
#month end
last_day_date = date(val.year, val.month, calendar.monthrange(val.year, val.month)[1])
journal_rows.append({'journal_name': self.name + '-'+ str(count), 'date': last_day_date, 'deposit_rate': amount,
'account_debit': self.deferred_income, 'account_credit': self.bank_deposit_benefits,
'debit_remark': benefit_txt + str(last_day_date), 'credit_remark': benefit_txt + str(last_day_date),})
last_amount = amount
last_date_start = val.date()
last_date_end = last_day_date
count += 1
elif details_len == 1:
details_rows.append({'date': details[0].date(), 'day_count': days_total, 'deposit_rate': self.deposit_rate_amount, })
journal_rows.append({'journal_name': self.name + '-'+ str(count), 'date': link_start.date(),
'deposit_rate': deposit_rate_amount, 'account_debit': self.deposit_account, 'account_credit': self.bank_deposit_benefits,
'debit_remark': benefit_txt + "%s to %s" %(link_start.date(), due_date.date()),
'credit_remark': benefit_txt + "%s to %s" %(link_start.date(), due_date.date()),})
self.days = days_total
self.day_amount = amount_per_day
self.update({"bank_deposit_detail": details_rows})
self.update({"bank_deposit_journal": journal_rows})
#row = self.append('bank_deposit_detail', {})
def make_gl_entries(self, cancel = False):
if not self.amount:
return
gl_entries = self.get_gl_entries()
if cancel:
for row in self.bank_deposit_journal:
r = {'date': row.date, 'deposit_rate': row.deposit_rate, 'debit_remark': row.debit_remark,
'credit_remark': row.credit_remark, 'account_debit': row.account_debit, 'account_credit': row.account_credit,}
self.get_row_gl_entry(r, gl_entries)
if gl_entries:
make_gl_entries(gl_entries, cancel= cancel)
def get_gl_entries(self):
gl_entries = []
self.make_gl_entry(gl_entries)
gl_entries = merge_similar_entries(gl_entries)
return gl_entries
def make_gl_entry(self, gl_entries):
gl_entries.append(
self.get_gl_dict({
"account": self.deposit_account,
"against": self.current_account,
"account_currency": self.currency,
"credit": (self.amount * self.exchange_rate) if self.currency != self.company_currency
else self.amount,
"credit_in_account_currency": (self.amount),
"conversion_rate":self.exchange_rate if self.currency != self.company_currency else 1.0,
"remarks": self.get("remarks") or _(""),
}, self.currency))
gl_entries.append(
self.get_gl_dict({
"account": self.current_account,
"against": self.deposit_account,
"account_currency": self.currency,
"debit": (self.amount * self.exchange_rate) if self.currency != self.company_currency
else self.amount,
"debit_in_account_currency": (self.amount),
"conversion_rate":self.exchange_rate if self.currency != self.company_currency else 1.0,
"remarks": self.get("remarks") or _(""),
}, self.currency))
def make_journal_entry(self, date):
frappe.has_permission('Journal Entry', throw=True)
from erpnext.accounts.doctype.accounting_dimension.accounting_dimension import get_checks_for_pl_and_bs_accounts
accounting_dimensions = get_checks_for_pl_and_bs_accounts()
for row in self.get("bank_deposit_journal"):
if not row.journal_entry and getdate(row.date) <= getdate(date):
je = frappe.new_doc("Journal Entry")
# je.voucher_type = "Bank Deposit"
je.posting_date = row.date
je.company = self.company
# je.remark = "Bank Deposit Entry against {0} worth {1}".format(self.name, row.journal_name)
je.multi_currency = 1
credit_entry = self.get_gl_dict({
"reference_type": "Bank Deposit",
"reference_name": self.name,
"account": row.account_credit,
"account_currency": self.currency,
"credit": (row.deposit_rate * self.exchange_rate) if self.currency != self.company_currency else row.deposit_rate,
"credit_in_account_currency": row.deposit_rate,
"exchange_rate":self.exchange_rate if self.currency != self.company_currency else 1.0,
"user_remark": row.credit_remark,
"cost_center": self.cost_center,
}, self.currency)
debit_entry = self.get_gl_dict({
"reference_type": "Bank Deposit",
"reference_name": self.name,
"account": row.account_debit,
"account_currency": self.currency,
"debit": (row.deposit_rate * self.exchange_rate) if self.currency != self.company_currency else row.deposit_rate,
"debit_in_account_currency": row.deposit_rate,
"exchange_rate":self.exchange_rate if self.currency != self.company_currency else 1.0,
"user_remark": row.debit_remark,
"cost_center": self.cost_center,
}, self.currency)
for dimension in accounting_dimensions:
if (self.get(dimension['fieldname']) or dimension.get('mandatory_for_bs')):
credit_entry.update({
dimension['fieldname']: self.get(dimension['fieldname']) or dimension.get('default_dimension')
})
if (self.get(dimension['fieldname']) or dimension.get('mandatory_for_pl')):
debit_entry.update({
dimension['fieldname']: self.get(dimension['fieldname']) or dimension.get('default_dimension')
})
je.append("accounts", credit_entry)
je.append("accounts", debit_entry)
je.flags.ignore_permissions = True
je.save()
if not je.meta.get_workflow():
je.submit()
row.db_set("journal_entry", je.name)
self.set_status()
return self
def get_gl_dict(self, args, account_currency=None, item=None):
"""this method populates the common properties of a gl entry record"""
posting_date = args.get('posting_date') or self.get('posting_date')
fiscal_years = get_fiscal_years(posting_date, company=self.company)
if len(fiscal_years) > 1:
frappe.throw(_("Multiple fiscal years exist for the date {0}. Please set company in Fiscal Year").format(
formatdate(posting_date)))
else:
fiscal_year = fiscal_years[0][0]
gl_dict = frappe._dict({
'company': self.company,
'posting_date': posting_date,
'fiscal_year': fiscal_year,
'voucher_type': self.doctype,
'voucher_no': self.name,
'remarks': "",
'debit': 0,
'credit': 0,
'debit_in_account_currency': 0,
'credit_in_account_currency': 0,
'is_opening':"No",
'party_type': None,
'party': None,
'project': self.get("project")
})
accounting_dimensions = get_accounting_dimensions()
dimension_dict = frappe._dict()
for dimension in accounting_dimensions:
dimension_dict[dimension] = self.get(dimension)
if item and item.get(dimension):
dimension_dict[dimension] = item.get(dimension)
gl_dict.update(dimension_dict)
gl_dict.update(args)
if not account_currency:
account_currency = get_account_currency(gl_dict.account)
self.validate_account_currency(gl_dict.account, account_currency)
set_balance_in_account_currency(gl_dict, account_currency, self.get("exchange_rate"), self.company_currency)
return gl_dict
def validate_account_currency(self, account, account_currency=None):
valid_currency = [self.company_currency]
if self.get("currency") and self.currency != self.company_currency:
valid_currency.append(self.currency)
if account_currency not in valid_currency:
frappe.throw(_("Account {0} is invalid. Account Currency must be {1}")
.format(account, _(" or ").join(valid_currency)))
def set_balance_in_account_currency(gl_dict, account_currency=None, conversion_rate=None, company_currency=None):
if (not conversion_rate) and (account_currency != company_currency):
frappe.throw(_("Account: {0} with currency: {1} can not be selected")
.format(gl_dict.account, account_currency))
gl_dict["account_currency"] = company_currency if account_currency == company_currency \
else account_currency
@frappe.whitelist()
def get_bank_account_details(bank_account, date):
from erpnext.accounts.doctype.payment_entry.payment_entry import get_account_details
account = frappe.db.get_value("Bank Account",
bank_account, ['account','from_account', 'bank', 'bank_account_no'], as_dict=1)
details = get_account_details(account['account'], date)
details2 = get_account_details(account['from_account'], date)
if details['account_currency'] != details2['account_currency']:
frappe.throw(_("Deposit Account currency %s not equal Current Account currency %s"
% (details['account_currency'], details2['account_currency'])))
return
account.update(details)
settings = frappe.get_doc("PAV Banking Setttings")
deferred_income = settings.deferred_income_account
bank_deposit_benefits = settings.bank_deposit_benefits_account
deferred_income_acc = frappe.db.sql("""SELECT name FROM `tabAccount`
WHERE parent_account = %s AND account_currency = %s""" , [deferred_income, account['account_currency']], as_dict = 1)
bank_deposit_benefits_acc = frappe.db.sql("""SELECT name from `tabAccount`
WHERE parent_account = %s AND account_currency = %s""", [bank_deposit_benefits, account['account_currency']], as_dict = 1)
if not deferred_income_acc or not bank_deposit_benefits_acc:
frappe.throw(_( "Accounts Not Exist Please Set PAV Banking Setttings "))
account['deferred_income'] = deferred_income_acc[0]['name']
account['bank_deposit_benefits'] = bank_deposit_benefits_acc[0]['name']
return account | en | 0.596583 | # -*- coding: utf-8 -*- # Copyright (c) 2021, <NAME> and contributors # For license information, please see license.txt #month end from link start #month start #due date from month start #month start #month end #row = self.append('bank_deposit_detail', {}) # je.voucher_type = "Bank Deposit" # je.remark = "Bank Deposit Entry against {0} worth {1}".format(self.name, row.journal_name) this method populates the common properties of a gl entry record SELECT name FROM `tabAccount` WHERE parent_account = %s AND account_currency = %s SELECT name from `tabAccount` WHERE parent_account = %s AND account_currency = %s | 1.720269 | 2 |
bin/train_transformer.py | mleila/transformers | 0 | 6615031 | #!/bin/env/python
'''
python bin/train_transformer.py \
--data-dir datat_store \
--batch-size 128 \
--max_epochs 5 \
--gpus 2 \
--accelerator ddp
'''
from argparse import ArgumentParser
import torch
from torch.nn import functional as F
import pytorch_lightning as pl
from transformers.lit_models import LitModel
from transformers.torch_models import Transformer
from transformers.lit_data import WMT_DataModule
from transformers.nlp_utils import split_tokenizer
def cli_main():
pl.seed_everything(1234)
# ------------
# args
# ------------
parser = ArgumentParser()
parser = pl.Trainer.add_argparse_args(parser)
parser = LitModel.add_model_specific_args(parser)
parser = WMT_DataModule.add_argparse_args(parser)
args = parser.parse_args()
# ------------
# data
# ------------
dm = WMT_DataModule.from_argparse_args(args)
#dm.prepare_data()
#dm.setup()
# ------------
# model
# ------------
#target_vocab_size = len(dm.trgt_field.vocab)
target_vocab_size = 25252
backbone = Transformer(num_classes=target_vocab_size, max_output_length=32)
#padding_index = dm.src_field.vocab.stoi[dm.src_field.pad_token]
padding_index = 1
model = LitModel(backbone, padding_index, args.learning_rate, args.batch_size)
# ------------
# training
# ------------
trainer = pl.Trainer.from_argparse_args(args)
trainer.fit(model, datamodule=dm)
# ------------
# testing
# ------------
#result = trainer.test(test_dataloaders=test_loader)
#print(result)
print('do some testing')
if __name__ == '__main__':
cli_main()
| #!/bin/env/python
'''
python bin/train_transformer.py \
--data-dir datat_store \
--batch-size 128 \
--max_epochs 5 \
--gpus 2 \
--accelerator ddp
'''
from argparse import ArgumentParser
import torch
from torch.nn import functional as F
import pytorch_lightning as pl
from transformers.lit_models import LitModel
from transformers.torch_models import Transformer
from transformers.lit_data import WMT_DataModule
from transformers.nlp_utils import split_tokenizer
def cli_main():
pl.seed_everything(1234)
# ------------
# args
# ------------
parser = ArgumentParser()
parser = pl.Trainer.add_argparse_args(parser)
parser = LitModel.add_model_specific_args(parser)
parser = WMT_DataModule.add_argparse_args(parser)
args = parser.parse_args()
# ------------
# data
# ------------
dm = WMT_DataModule.from_argparse_args(args)
#dm.prepare_data()
#dm.setup()
# ------------
# model
# ------------
#target_vocab_size = len(dm.trgt_field.vocab)
target_vocab_size = 25252
backbone = Transformer(num_classes=target_vocab_size, max_output_length=32)
#padding_index = dm.src_field.vocab.stoi[dm.src_field.pad_token]
padding_index = 1
model = LitModel(backbone, padding_index, args.learning_rate, args.batch_size)
# ------------
# training
# ------------
trainer = pl.Trainer.from_argparse_args(args)
trainer.fit(model, datamodule=dm)
# ------------
# testing
# ------------
#result = trainer.test(test_dataloaders=test_loader)
#print(result)
print('do some testing')
if __name__ == '__main__':
cli_main()
| en | 0.085639 | #!/bin/env/python python bin/train_transformer.py \ --data-dir datat_store \ --batch-size 128 \ --max_epochs 5 \ --gpus 2 \ --accelerator ddp # ------------ # args # ------------ # ------------ # data # ------------ #dm.prepare_data() #dm.setup() # ------------ # model # ------------ #target_vocab_size = len(dm.trgt_field.vocab) #padding_index = dm.src_field.vocab.stoi[dm.src_field.pad_token] # ------------ # training # ------------ # ------------ # testing # ------------ #result = trainer.test(test_dataloaders=test_loader) #print(result) | 2.278536 | 2 |
src/graccreports/copyfiles.py | shreyb/fife_notes_report | 0 | 6615032 | <gh_stars>0
import shutil
import os
import pkg_resources
import sys
import argparse
import random
import re
basedir = 'gracc-reporting'
etcpath = os.path.join('/etc', basedir)
dirs = ['config', 'html_templates']
def setup_parser():
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', help='Verbose flag',
action='store_true')
parser.add_argument('-d', '--destdir', help='specify destination dir',
dest='destdir')
args = parser.parse_args()
return args
def test_access(d):
"""Test read-write access to dir"""
testfilename = 'test{0}.test'.format(random.randint(0, 100))
testdir = 'test{0}'.format(random.randint(0, 100))
# Try to create the dir if it doesn't exist
if not os.path.exists(d):
try:
os.makedirs(d)
cleanup = True
except OSError as e:
print "Can't create dir {0}".format(d)
print e
return False
else:
cleanup = False
# Can we write a file to the dir?
try:
fn = os.path.join(d, testfilename)
with open(fn, 'w') as f:
f.write('12345')
os.unlink(fn)
except IOError as e:
print "Permission denied to write to {0}".format(fn)
print e
return False
# Can we create a dir inside that dir? (Should be the same answer to above)
try:
dd = os.path.join(d, testdir)
os.makedirs(dd)
shutil.rmtree(dd)
except OSError as e:
print "Permission denied to make dir in {0}".format(d)
print e
return False
# If we had to create a dir in the first test, delete it
if cleanup:
shutil.rmtree(d)
return True
def check_usedir(d):
"""Get confirmation to delete current location of config files"""
answer = raw_input("Directory {0} already exists. Delete and overwrite"
" it? (Y/[n])".format(d))
if answer == 'Y':
return True
else:
return False
def main():
trydirs = [etcpath,]
args = setup_parser()
# Did we override the default dest dir?
if args.destdir is not None:
override = '{0}/{1}'.format(args.destdir, basedir)
trydirs.insert(0, override)
# Test our access, get destination dir
for d in trydirs:
if test_access(d):
usedir = d
print "Writing to {0}".format(d)
break
else:
print "Can't write to any dirs"
sys.exit(1)
if os.path.exists(usedir):
if check_usedir(usedir):
shutil.rmtree(usedir)
else:
print "Not overwriting directory. Please provide a different " \
"directory to use. Exiting."
sys.exit(0)
# Copy files out
for d in dirs:
destpath = os.path.join(usedir, d)
try:
os.makedirs(destpath)
except OSError as e:
print e
sys.exit(1)
files = pkg_resources.resource_listdir('graccreports', d)
for f in files:
if re.search('\.spec$', f):
# Skip our spec file. We don't need to install it outside
continue
fname = pkg_resources.resource_filename('graccreports',
os.path.join(d, f))
try:
shutil.copy(fname, destpath)
except OSError as e:
print e
sys.exit(1)
print "Files copied to {0}".format(usedir)
sys.exit(0) | import shutil
import os
import pkg_resources
import sys
import argparse
import random
import re
basedir = 'gracc-reporting'
etcpath = os.path.join('/etc', basedir)
dirs = ['config', 'html_templates']
def setup_parser():
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', help='Verbose flag',
action='store_true')
parser.add_argument('-d', '--destdir', help='specify destination dir',
dest='destdir')
args = parser.parse_args()
return args
def test_access(d):
"""Test read-write access to dir"""
testfilename = 'test{0}.test'.format(random.randint(0, 100))
testdir = 'test{0}'.format(random.randint(0, 100))
# Try to create the dir if it doesn't exist
if not os.path.exists(d):
try:
os.makedirs(d)
cleanup = True
except OSError as e:
print "Can't create dir {0}".format(d)
print e
return False
else:
cleanup = False
# Can we write a file to the dir?
try:
fn = os.path.join(d, testfilename)
with open(fn, 'w') as f:
f.write('12345')
os.unlink(fn)
except IOError as e:
print "Permission denied to write to {0}".format(fn)
print e
return False
# Can we create a dir inside that dir? (Should be the same answer to above)
try:
dd = os.path.join(d, testdir)
os.makedirs(dd)
shutil.rmtree(dd)
except OSError as e:
print "Permission denied to make dir in {0}".format(d)
print e
return False
# If we had to create a dir in the first test, delete it
if cleanup:
shutil.rmtree(d)
return True
def check_usedir(d):
"""Get confirmation to delete current location of config files"""
answer = raw_input("Directory {0} already exists. Delete and overwrite"
" it? (Y/[n])".format(d))
if answer == 'Y':
return True
else:
return False
def main():
trydirs = [etcpath,]
args = setup_parser()
# Did we override the default dest dir?
if args.destdir is not None:
override = '{0}/{1}'.format(args.destdir, basedir)
trydirs.insert(0, override)
# Test our access, get destination dir
for d in trydirs:
if test_access(d):
usedir = d
print "Writing to {0}".format(d)
break
else:
print "Can't write to any dirs"
sys.exit(1)
if os.path.exists(usedir):
if check_usedir(usedir):
shutil.rmtree(usedir)
else:
print "Not overwriting directory. Please provide a different " \
"directory to use. Exiting."
sys.exit(0)
# Copy files out
for d in dirs:
destpath = os.path.join(usedir, d)
try:
os.makedirs(destpath)
except OSError as e:
print e
sys.exit(1)
files = pkg_resources.resource_listdir('graccreports', d)
for f in files:
if re.search('\.spec$', f):
# Skip our spec file. We don't need to install it outside
continue
fname = pkg_resources.resource_filename('graccreports',
os.path.join(d, f))
try:
shutil.copy(fname, destpath)
except OSError as e:
print e
sys.exit(1)
print "Files copied to {0}".format(usedir)
sys.exit(0) | en | 0.912901 | Test read-write access to dir # Try to create the dir if it doesn't exist # Can we write a file to the dir? # Can we create a dir inside that dir? (Should be the same answer to above) # If we had to create a dir in the first test, delete it Get confirmation to delete current location of config files # Did we override the default dest dir? # Test our access, get destination dir # Copy files out # Skip our spec file. We don't need to install it outside | 2.947926 | 3 |
gemini/example_cryptocompare.py | dctanner/Gemini | 1 | 6615033 | import pandas as pd
import gemini
import cryptocompare as cc
import helpers
from datetime import *
pair = ['BTC','USD'] # Use ETH pricing data on the BTC market
daysBack = 0 # Grab data starting X days ago
daysData = 180 # From there collect X days of data
LookbackPeriod = 18 # How many days to lookback for momentum
TradingInterval = 1 # Run trading logic every X days
FeesSpread = 0.0025+0.001 # Fees 0.25% + Bid/ask spread to account for http://data.bitcoinity.org/markets/spread/6m/USD?c=e&f=m20&st=log&t=l using Kraken 0.1% as worse case
Exchange = 'Bitstamp'
# Request data from cryptocompare
data = cc.getPast(pair, daysBack, daysData, Exchange='CCCAGG')
# Convert to Pandas dataframe with datetime format
data = pd.DataFrame(data)
data['date'] = pd.to_datetime(data['time'], unit='s')
def Logic(Account, Lookback, LookbackPeriod):
try:
# Load into period class to simplify indexing
Lookback = helpers.Period(Lookback)
Today = Lookback.loc(0) # Current candle
Yesterday = Lookback.loc(-LookbackPeriod) # Previous candle
print('Lookback from {} to {}'.format(Yesterday['date'],Today['date']))
if Today['close'] < Yesterday['close']:
ExitPrice = Today['close']
for Position in Account.Positions:
if Position.Type == 'Long':
print("{} Sell {}BTC @ ${} = ${} balance".format(Today['date'],Position.Shares,ExitPrice,Position.Shares*ExitPrice))
Account.ClosePosition(Position, 1, ExitPrice)
if Today['close'] > Yesterday['close']:
EntryPrice = Today['close']+(Today['close']*FeesSpread)
EntryCapital = Account.BuyingPower
if EntryCapital > 0:
Account.EnterPosition('Long', EntryCapital, EntryPrice)
print("{} Buy ${} of BTC @ ${} = {}BTC balance".format(Today['date'],EntryCapital,EntryPrice,EntryCapital/EntryPrice))
except ValueError:
pass # Handles lookback errors in beginning of dataset
# Load the data into a backtesting class called Run
r = gemini.Run(data)
# Start backtesting custom logic with 1000 (BTC) intital capital and 2 day trading interval
r.Start(1000, Logic, TradingInterval, LookbackPeriod)
r.Results()
r.Chart('LookbackPeriod: {}, TradingInterval: {}'.format(LookbackPeriod,TradingInterval),ShowTrades=True)
| import pandas as pd
import gemini
import cryptocompare as cc
import helpers
from datetime import *
pair = ['BTC','USD'] # Use ETH pricing data on the BTC market
daysBack = 0 # Grab data starting X days ago
daysData = 180 # From there collect X days of data
LookbackPeriod = 18 # How many days to lookback for momentum
TradingInterval = 1 # Run trading logic every X days
FeesSpread = 0.0025+0.001 # Fees 0.25% + Bid/ask spread to account for http://data.bitcoinity.org/markets/spread/6m/USD?c=e&f=m20&st=log&t=l using Kraken 0.1% as worse case
Exchange = 'Bitstamp'
# Request data from cryptocompare
data = cc.getPast(pair, daysBack, daysData, Exchange='CCCAGG')
# Convert to Pandas dataframe with datetime format
data = pd.DataFrame(data)
data['date'] = pd.to_datetime(data['time'], unit='s')
def Logic(Account, Lookback, LookbackPeriod):
try:
# Load into period class to simplify indexing
Lookback = helpers.Period(Lookback)
Today = Lookback.loc(0) # Current candle
Yesterday = Lookback.loc(-LookbackPeriod) # Previous candle
print('Lookback from {} to {}'.format(Yesterday['date'],Today['date']))
if Today['close'] < Yesterday['close']:
ExitPrice = Today['close']
for Position in Account.Positions:
if Position.Type == 'Long':
print("{} Sell {}BTC @ ${} = ${} balance".format(Today['date'],Position.Shares,ExitPrice,Position.Shares*ExitPrice))
Account.ClosePosition(Position, 1, ExitPrice)
if Today['close'] > Yesterday['close']:
EntryPrice = Today['close']+(Today['close']*FeesSpread)
EntryCapital = Account.BuyingPower
if EntryCapital > 0:
Account.EnterPosition('Long', EntryCapital, EntryPrice)
print("{} Buy ${} of BTC @ ${} = {}BTC balance".format(Today['date'],EntryCapital,EntryPrice,EntryCapital/EntryPrice))
except ValueError:
pass # Handles lookback errors in beginning of dataset
# Load the data into a backtesting class called Run
r = gemini.Run(data)
# Start backtesting custom logic with 1000 (BTC) intital capital and 2 day trading interval
r.Start(1000, Logic, TradingInterval, LookbackPeriod)
r.Results()
r.Chart('LookbackPeriod: {}, TradingInterval: {}'.format(LookbackPeriod,TradingInterval),ShowTrades=True)
| en | 0.77947 | # Use ETH pricing data on the BTC market # Grab data starting X days ago # From there collect X days of data # How many days to lookback for momentum # Run trading logic every X days # Fees 0.25% + Bid/ask spread to account for http://data.bitcoinity.org/markets/spread/6m/USD?c=e&f=m20&st=log&t=l using Kraken 0.1% as worse case # Request data from cryptocompare # Convert to Pandas dataframe with datetime format # Load into period class to simplify indexing # Current candle # Previous candle # Handles lookback errors in beginning of dataset # Load the data into a backtesting class called Run # Start backtesting custom logic with 1000 (BTC) intital capital and 2 day trading interval | 3.023064 | 3 |
codebase/archs/cluster/da/multi_domain_heads.py | sudipansaha/acids-clustering-domain-shift | 10 | 6615034 | <reponame>sudipansaha/acids-clustering-domain-shift
import torch
import torch.nn as nn
from codebase.archs.cluster.da.dial_resnet18_two_head import DialResNet18TwoHeadHead
class MultiDomainHeads(nn.Module):
def __init__(self, config):
super(MultiDomainHeads, self).__init__()
self.config = config
self.domains_count = config.domains_count
self.num_sub_heads = config.num_sub_heads
# Creates separate heads for each domain
self.modules_map = nn.ModuleDict()
for domain_idx in range(self.domains_count):
for head in ('A', 'B'):
outputs_count = self.config.output_k_B
if head == 'A':
outputs_count = self.config.output_k_A
self.modules_map[self.module_name_from_params(domain_idx, head)] = DialResNet18TwoHeadHead(self.config, outputs_count)
self._initialize_weights()
def module_name_from_params(selfdo, domain_idx, head):
return str(domain_idx) + "_" + str(head)
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
# nn.init.eye_(m.weight) # Not present in original code. Cannot use with multiple heads, otherwise there will be symmetries
m.bias.data.zero_()
def forward(self, x, domain_idx, head):
'''
:param x: Tensors to forward
:param domain_idx: Domain id to which the tensors belong to
:param head: character identifiying the head to which to forward the images
:return:
'''
current_head = self.modules_map[self.module_name_from_params(domain_idx, head)]
return current_head(x) | import torch
import torch.nn as nn
from codebase.archs.cluster.da.dial_resnet18_two_head import DialResNet18TwoHeadHead
class MultiDomainHeads(nn.Module):
def __init__(self, config):
super(MultiDomainHeads, self).__init__()
self.config = config
self.domains_count = config.domains_count
self.num_sub_heads = config.num_sub_heads
# Creates separate heads for each domain
self.modules_map = nn.ModuleDict()
for domain_idx in range(self.domains_count):
for head in ('A', 'B'):
outputs_count = self.config.output_k_B
if head == 'A':
outputs_count = self.config.output_k_A
self.modules_map[self.module_name_from_params(domain_idx, head)] = DialResNet18TwoHeadHead(self.config, outputs_count)
self._initialize_weights()
def module_name_from_params(selfdo, domain_idx, head):
return str(domain_idx) + "_" + str(head)
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
# nn.init.eye_(m.weight) # Not present in original code. Cannot use with multiple heads, otherwise there will be symmetries
m.bias.data.zero_()
def forward(self, x, domain_idx, head):
'''
:param x: Tensors to forward
:param domain_idx: Domain id to which the tensors belong to
:param head: character identifiying the head to which to forward the images
:return:
'''
current_head = self.modules_map[self.module_name_from_params(domain_idx, head)]
return current_head(x) | en | 0.823155 | # Creates separate heads for each domain # nn.init.eye_(m.weight) # Not present in original code. Cannot use with multiple heads, otherwise there will be symmetries :param x: Tensors to forward :param domain_idx: Domain id to which the tensors belong to :param head: character identifiying the head to which to forward the images :return: | 2.183579 | 2 |
pyrez/enumerations/QueueRealmRoyale.py | CLeendert/Pyrez | 25 | 6615035 | <reponame>CLeendert/Pyrez
from .Queue import Queue
class QueueRealmRoyale(Queue):
Live_Solo = 474
Live_Duo = 475
Live_Squad = 476
Live_Wars = 477
Live_Tutorial = 478
Live_Solo_Mid_Level = 479
Live_Solo_Low_Level = 480
Live_Squad_Mid_Level = 481
Live_Squad_Low_Level = 482
Live_Duo_Mid_Level = 483
Live_Duo_Low_Level = 484
#Challenge_Solo = 10188
#Challenge_Duo = 10189
#Challenge_Squad = 10190
#Storm = 10192
#Solo_With_Bots = 10193
#Deathmatch = 10194
#Tutorial = 10195
| from .Queue import Queue
class QueueRealmRoyale(Queue):
Live_Solo = 474
Live_Duo = 475
Live_Squad = 476
Live_Wars = 477
Live_Tutorial = 478
Live_Solo_Mid_Level = 479
Live_Solo_Low_Level = 480
Live_Squad_Mid_Level = 481
Live_Squad_Low_Level = 482
Live_Duo_Mid_Level = 483
Live_Duo_Low_Level = 484
#Challenge_Solo = 10188
#Challenge_Duo = 10189
#Challenge_Squad = 10190
#Storm = 10192
#Solo_With_Bots = 10193
#Deathmatch = 10194
#Tutorial = 10195 | en | 0.363876 | #Challenge_Solo = 10188 #Challenge_Duo = 10189 #Challenge_Squad = 10190 #Storm = 10192 #Solo_With_Bots = 10193 #Deathmatch = 10194 #Tutorial = 10195 | 1.786239 | 2 |
dataloaders/datasets/flood.py | rucnyz/pytorch-deeplab | 0 | 6615036 | # -*- coding: utf-8 -*-
# @Time : 2020/10/29 23:58
# @Author : nieyuzhou
# @File : flood.py.py
# @Software: PyCharm
from time import time
import csv
import os
import numpy as np
import rasterio
import torch
from torchvision import transforms
import torchvision.transforms.functional as F
import random
from PIL import Image
class InMemoryDataset(torch.utils.data.Dataset):
def __init__(self, data_list, preprocess_func):
self.data_list = data_list
self.preprocess_func = preprocess_func
def __getitem__(self, i):
return self.preprocess_func(self.data_list[i])
def __len__(self):
return len(self.data_list)
def processAndAugment(data):
(x, y) = data
im, label = x.copy(), y.copy()
# convert to PIL for easier transforms
im1 = Image.fromarray(im[0])
im2 = Image.fromarray(im[1])
label = Image.fromarray(label.squeeze())
# Get params for random transforms
i, j, h, w = transforms.RandomCrop.get_params(im1, (256, 256))
im1 = F.crop(im1, i, j, h, w)
im2 = F.crop(im2, i, j, h, w)
label = F.crop(label, i, j, h, w)
if random.random() > 0.5:
im1 = F.hflip(im1)
im2 = F.hflip(im2)
label = F.hflip(label)
if random.random() > 0.5:
im1 = F.vflip(im1)
im2 = F.vflip(im2)
label = F.vflip(label)
norm = transforms.Normalize([0.6851, 0.5235], [0.0820, 0.1102])
im = torch.stack([transforms.ToTensor()(im1).squeeze(), transforms.ToTensor()(im2).squeeze()])
im = norm(im)
label = transforms.ToTensor()(label).squeeze()
if torch.sum(label.gt(.003) * label.lt(.004)):
label *= 255
label = label.round()
return im, label
def processTestIm(data):
(x, y) = data
im, label = x.copy(), y.copy()
norm = transforms.Normalize([0.6851, 0.5235], [0.0820, 0.1102])
# convert to PIL for easier transforms
im_c1 = Image.fromarray(im[0]).resize((512, 512))
im_c2 = Image.fromarray(im[1]).resize((512, 512))
label = Image.fromarray(label.squeeze()).resize((512, 512))
im_c1s = [F.crop(im_c1, 0, 0, 256, 256), F.crop(im_c1, 0, 256, 256, 256),
F.crop(im_c1, 256, 0, 256, 256), F.crop(im_c1, 256, 256, 256, 256)]
im_c2s = [F.crop(im_c2, 0, 0, 256, 256), F.crop(im_c2, 0, 256, 256, 256),
F.crop(im_c2, 256, 0, 256, 256), F.crop(im_c2, 256, 256, 256, 256)]
labels = [F.crop(label, 0, 0, 256, 256), F.crop(label, 0, 256, 256, 256),
F.crop(label, 256, 0, 256, 256), F.crop(label, 256, 256, 256, 256)]
ims = [torch.stack((transforms.ToTensor()(x).squeeze(),
transforms.ToTensor()(y).squeeze()))
for (x, y) in zip(im_c1s, im_c2s)]
ims = [norm(im) for im in ims]
ims = torch.stack(ims)
labels = [(transforms.ToTensor()(label).squeeze()) for label in labels]
labels = torch.stack(labels)
if torch.sum(labels.gt(.003) * labels.lt(.004)):
labels *= 255
labels = labels.round()
return ims, labels
def getArrFlood(fname):
return rasterio.open(fname).read()
def download_flood_water_data_from_list(l):
i = 0
tot_nan = 0
tot_good = 0
flood_data = []
for (im_fname, mask_fname) in l:
if not os.path.exists(os.path.join("files/", im_fname)):
continue
arr_x = np.nan_to_num(getArrFlood(os.path.join("files/", im_fname)))
arr_y = getArrFlood(os.path.join("files/", mask_fname))
ignore = (arr_y == -1)
ignore = ((np.uint8(ignore) * -1) * 256) + 1
arr_y *= ignore
arr_y = np.uint8(getArrFlood(os.path.join("files/", mask_fname)))
if np.sum((arr_y != arr_y)) == 0:
arr_x = np.clip(arr_x, -50, 1)
arr_x = (arr_x + 50) / 51
if i % 100 == 0:
print(im_fname, mask_fname)
i += 1
flood_data.append((arr_x, arr_y))
else:
print("skipping nan")
return flood_data
def load_flood_train_data(workpath):
fname = workpath + "flood_train_data.csv"
training_files = []
with open(fname) as f:
for line in csv.reader(f):
training_files.append(tuple((workpath + line[0], workpath + line[1])))
return download_flood_water_data_from_list(training_files)
def load_flood_valid_data(workpath):
fname = workpath + "flood_valid_data.csv"
validation_files = []
with open(fname) as f:
for line in csv.reader(f):
validation_files.append(tuple((workpath + line[0], workpath + line[1])))
return download_flood_water_data_from_list(validation_files)
def load_flood_test_data(workpath):
fname = workpath + "flood_test_data.csv"
testing_files = []
with open(fname) as f:
for line in csv.reader(f):
testing_files.append(tuple((workpath + line[0], workpath + line[1])))
return download_flood_water_data_from_list(testing_files)
| # -*- coding: utf-8 -*-
# @Time : 2020/10/29 23:58
# @Author : nieyuzhou
# @File : flood.py.py
# @Software: PyCharm
from time import time
import csv
import os
import numpy as np
import rasterio
import torch
from torchvision import transforms
import torchvision.transforms.functional as F
import random
from PIL import Image
class InMemoryDataset(torch.utils.data.Dataset):
def __init__(self, data_list, preprocess_func):
self.data_list = data_list
self.preprocess_func = preprocess_func
def __getitem__(self, i):
return self.preprocess_func(self.data_list[i])
def __len__(self):
return len(self.data_list)
def processAndAugment(data):
(x, y) = data
im, label = x.copy(), y.copy()
# convert to PIL for easier transforms
im1 = Image.fromarray(im[0])
im2 = Image.fromarray(im[1])
label = Image.fromarray(label.squeeze())
# Get params for random transforms
i, j, h, w = transforms.RandomCrop.get_params(im1, (256, 256))
im1 = F.crop(im1, i, j, h, w)
im2 = F.crop(im2, i, j, h, w)
label = F.crop(label, i, j, h, w)
if random.random() > 0.5:
im1 = F.hflip(im1)
im2 = F.hflip(im2)
label = F.hflip(label)
if random.random() > 0.5:
im1 = F.vflip(im1)
im2 = F.vflip(im2)
label = F.vflip(label)
norm = transforms.Normalize([0.6851, 0.5235], [0.0820, 0.1102])
im = torch.stack([transforms.ToTensor()(im1).squeeze(), transforms.ToTensor()(im2).squeeze()])
im = norm(im)
label = transforms.ToTensor()(label).squeeze()
if torch.sum(label.gt(.003) * label.lt(.004)):
label *= 255
label = label.round()
return im, label
def processTestIm(data):
(x, y) = data
im, label = x.copy(), y.copy()
norm = transforms.Normalize([0.6851, 0.5235], [0.0820, 0.1102])
# convert to PIL for easier transforms
im_c1 = Image.fromarray(im[0]).resize((512, 512))
im_c2 = Image.fromarray(im[1]).resize((512, 512))
label = Image.fromarray(label.squeeze()).resize((512, 512))
im_c1s = [F.crop(im_c1, 0, 0, 256, 256), F.crop(im_c1, 0, 256, 256, 256),
F.crop(im_c1, 256, 0, 256, 256), F.crop(im_c1, 256, 256, 256, 256)]
im_c2s = [F.crop(im_c2, 0, 0, 256, 256), F.crop(im_c2, 0, 256, 256, 256),
F.crop(im_c2, 256, 0, 256, 256), F.crop(im_c2, 256, 256, 256, 256)]
labels = [F.crop(label, 0, 0, 256, 256), F.crop(label, 0, 256, 256, 256),
F.crop(label, 256, 0, 256, 256), F.crop(label, 256, 256, 256, 256)]
ims = [torch.stack((transforms.ToTensor()(x).squeeze(),
transforms.ToTensor()(y).squeeze()))
for (x, y) in zip(im_c1s, im_c2s)]
ims = [norm(im) for im in ims]
ims = torch.stack(ims)
labels = [(transforms.ToTensor()(label).squeeze()) for label in labels]
labels = torch.stack(labels)
if torch.sum(labels.gt(.003) * labels.lt(.004)):
labels *= 255
labels = labels.round()
return ims, labels
def getArrFlood(fname):
return rasterio.open(fname).read()
def download_flood_water_data_from_list(l):
i = 0
tot_nan = 0
tot_good = 0
flood_data = []
for (im_fname, mask_fname) in l:
if not os.path.exists(os.path.join("files/", im_fname)):
continue
arr_x = np.nan_to_num(getArrFlood(os.path.join("files/", im_fname)))
arr_y = getArrFlood(os.path.join("files/", mask_fname))
ignore = (arr_y == -1)
ignore = ((np.uint8(ignore) * -1) * 256) + 1
arr_y *= ignore
arr_y = np.uint8(getArrFlood(os.path.join("files/", mask_fname)))
if np.sum((arr_y != arr_y)) == 0:
arr_x = np.clip(arr_x, -50, 1)
arr_x = (arr_x + 50) / 51
if i % 100 == 0:
print(im_fname, mask_fname)
i += 1
flood_data.append((arr_x, arr_y))
else:
print("skipping nan")
return flood_data
def load_flood_train_data(workpath):
fname = workpath + "flood_train_data.csv"
training_files = []
with open(fname) as f:
for line in csv.reader(f):
training_files.append(tuple((workpath + line[0], workpath + line[1])))
return download_flood_water_data_from_list(training_files)
def load_flood_valid_data(workpath):
fname = workpath + "flood_valid_data.csv"
validation_files = []
with open(fname) as f:
for line in csv.reader(f):
validation_files.append(tuple((workpath + line[0], workpath + line[1])))
return download_flood_water_data_from_list(validation_files)
def load_flood_test_data(workpath):
fname = workpath + "flood_test_data.csv"
testing_files = []
with open(fname) as f:
for line in csv.reader(f):
testing_files.append(tuple((workpath + line[0], workpath + line[1])))
return download_flood_water_data_from_list(testing_files)
| en | 0.450022 | # -*- coding: utf-8 -*- # @Time : 2020/10/29 23:58 # @Author : nieyuzhou # @File : flood.py.py # @Software: PyCharm # convert to PIL for easier transforms # Get params for random transforms # convert to PIL for easier transforms | 2.530623 | 3 |
geosupportbindingstests.py | murphyd2/NYCSpillsGeocoding | 0 | 6615037 | <filename>geosupportbindingstests.py
"<NAME> 2018-08-16"
import geosupport
def main():
g = geosupport.Geosupport()
spills="1 2 FIFTH AVENUE"
result= g.call(function=1, house_number=2, borough_code= 1, street_name_1= "Fifth Ave")
print(result)
main() | <filename>geosupportbindingstests.py
"<NAME> 2018-08-16"
import geosupport
def main():
g = geosupport.Geosupport()
spills="1 2 FIFTH AVENUE"
result= g.call(function=1, house_number=2, borough_code= 1, street_name_1= "Fifth Ave")
print(result)
main() | none | 1 | 2.342688 | 2 | |
spatial.py | mbrc27/lasREST | 0 | 6615038 | <reponame>mbrc27/lasREST
import numpy as np
import matplotlib.path as mpl_path
# Uwaga nie wspiera roznych ukladow odniesienia
def validate_geom(geometry):
try:
sr = geometry["spatialReference"]
rings = geometry["rings"]
return True
except:
return False
def las_within(points_file, polygon, parameters, point_export = False):
bb_path = mpl_path.Path(np.array(polygon))
coords = np.vstack((points_file.x, points_file.y)).transpose()
point_tester = bb_path.contains_points(coords)
params_list = []
for param in parameters:
params_list.append(getattr(points_file, param, None)[np.where(point_tester)])
return_arr = np.vstack(tuple(params_list)).transpose()
if point_export == True:
return return_arr.tolist()
else:
return_obj = {"params": parameters, "points": return_arr.tolist()}
return return_obj
def las_statistics(z_array):
num_array = np.array(z_array)
minVal = num_array.min()
maxVal = num_array.max()
meanVal = np.mean(num_array)
std = np.std(num_array)
return {"MIN": minVal, "MAX": maxVal, "MEAN": meanVal, "STD": std}
def las_header(hdr):
return {
"version": hdr.version,
"filesource_id": hdr.filesource_id,
#"reserved": 0,
"guid": hdr.guid.urn, #TODO zweryfikowac do konca obiekt {UUID}
"system_id": hdr.system_id,
"software_id": hdr.software_id,
"date": hdr.date.microsecond,
"header_size": hdr.header_size,
"data_offset": hdr.data_offset,
"vlrs_count": len(hdr.vlrs),
"dataformat_id": hdr.dataformat_id,
"data_record_length": hdr.data_record_length,
"number_points": hdr.count,
"point_return_count": hdr.point_return_count,
"scale": hdr.scale,
"offset": hdr.offset,
"min": hdr.min,
"max": hdr.max}
| import numpy as np
import matplotlib.path as mpl_path
# Uwaga nie wspiera roznych ukladow odniesienia
def validate_geom(geometry):
try:
sr = geometry["spatialReference"]
rings = geometry["rings"]
return True
except:
return False
def las_within(points_file, polygon, parameters, point_export = False):
bb_path = mpl_path.Path(np.array(polygon))
coords = np.vstack((points_file.x, points_file.y)).transpose()
point_tester = bb_path.contains_points(coords)
params_list = []
for param in parameters:
params_list.append(getattr(points_file, param, None)[np.where(point_tester)])
return_arr = np.vstack(tuple(params_list)).transpose()
if point_export == True:
return return_arr.tolist()
else:
return_obj = {"params": parameters, "points": return_arr.tolist()}
return return_obj
def las_statistics(z_array):
num_array = np.array(z_array)
minVal = num_array.min()
maxVal = num_array.max()
meanVal = np.mean(num_array)
std = np.std(num_array)
return {"MIN": minVal, "MAX": maxVal, "MEAN": meanVal, "STD": std}
def las_header(hdr):
return {
"version": hdr.version,
"filesource_id": hdr.filesource_id,
#"reserved": 0,
"guid": hdr.guid.urn, #TODO zweryfikowac do konca obiekt {UUID}
"system_id": hdr.system_id,
"software_id": hdr.software_id,
"date": hdr.date.microsecond,
"header_size": hdr.header_size,
"data_offset": hdr.data_offset,
"vlrs_count": len(hdr.vlrs),
"dataformat_id": hdr.dataformat_id,
"data_record_length": hdr.data_record_length,
"number_points": hdr.count,
"point_return_count": hdr.point_return_count,
"scale": hdr.scale,
"offset": hdr.offset,
"min": hdr.min,
"max": hdr.max} | pl | 0.881089 | # Uwaga nie wspiera roznych ukladow odniesienia #"reserved": 0, #TODO zweryfikowac do konca obiekt {UUID} | 2.215512 | 2 |
hashing/hashing_2/stream_of_character_check_current_string_is_palindrom.py | rjsnh1522/geeks-4-geeks-python | 0 | 6615039 | # class Solve:
#
# def __init__(self):
# self.string = ''
#
# def solve(self, char):
# pass
from collections import OrderedDict
st = ["shivesh", "bhavesh", "ramesh", "suresh", "dddddddddddd"]
dd = dict()
k = 9
for i,j in enumerate(st):
dd[j] = k
k-=1
for k,v in dd.items():
print(k, v)
| # class Solve:
#
# def __init__(self):
# self.string = ''
#
# def solve(self, char):
# pass
from collections import OrderedDict
st = ["shivesh", "bhavesh", "ramesh", "suresh", "dddddddddddd"]
dd = dict()
k = 9
for i,j in enumerate(st):
dd[j] = k
k-=1
for k,v in dd.items():
print(k, v)
| en | 0.515749 | # class Solve: # # def __init__(self): # self.string = '' # # def solve(self, char): # pass | 3.220547 | 3 |
polybar/scripts/weather/openweather.py | summerysaturn/lyla-dotfiles | 0 | 6615040 | <reponame>summerysaturn/lyla-dotfiles
import requests
def get_weather(city: str, api_key: str) -> dict[str, str] | None:
try:
r = requests.get(
f"https://api.openweathermap.org/data/2.5/weather?q={city}&units=metric&appid={api_key}",
headers={"User-agent": "Mozilla/5.0"},
)
data = r.json()
temp = data["main"]["temp"]
desc = data["weather"][0]["description"]
unit = "ºC"
return {
"temp": f"{int(temp)}{unit}",
"desc": desc.title(),
}
except Exception as e:
#print(e)
return None
| import requests
def get_weather(city: str, api_key: str) -> dict[str, str] | None:
try:
r = requests.get(
f"https://api.openweathermap.org/data/2.5/weather?q={city}&units=metric&appid={api_key}",
headers={"User-agent": "Mozilla/5.0"},
)
data = r.json()
temp = data["main"]["temp"]
desc = data["weather"][0]["description"]
unit = "ºC"
return {
"temp": f"{int(temp)}{unit}",
"desc": desc.title(),
}
except Exception as e:
#print(e)
return None | none | 1 | 3.200429 | 3 | |
wonderbook.py | adavesik/abebooks | 0 | 6615041 | <filename>wonderbook.py
from re import sub
from decimal import Decimal
import requests
from bs4 import BeautifulSoup
import csv
# here we should store generated links for abebooks
links = []
# here we should store grabbed prices
prices = []
def wonderbook_request(links):
for link in links[0:10]:
tmp_prices = []
page = requests.get(link)
soup = BeautifulSoup(page.text, 'html.parser')
price_list = soup.find_all('p', {'class': ['pcShowProductPrice']})
for price in price_list:
pr = Decimal(sub(r'[^\d.]', '', price.text[4:]))
tmp_prices.append(pr)
prices.append(tmp_prices)
return prices
def build_link(csv_file):
with open(csv_file) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['Book Title']:
links.append("http://www.wonderbk.com/productcart/pc/showsearchresults.asp?idcategory_type=0&idcategory=0&Title="+ row['Book Title']+"&Author="+row['Last Name'].strip()+"%2C+"+row['First Name'].strip()+"&ISBN=&Publisher=&pubDateFrom=yyyy&pubDateTo=yyyy&idbinding=0&priceFrom=0&priceUntil=999999999&withstock=-1&sku=&includeSKU=&resultCnt=15&order=1&Submit.x=85&Submit.y=15")
else:
links.append("http://www.wonderbk.com/productcart/pc/showsearchresults.asp?idcategory_type=0&idcategory=0&Title="+ row['Series Title/Book Title']+"&Author="+row['Last Name'].strip()+"%2C+"+row['First Name'].strip()+"&ISBN=&Publisher=&pubDateFrom=yyyy&pubDateTo=yyyy&idbinding=0&priceFrom=0&priceUntil=999999999&withstock=-1&sku=&includeSKU=&resultCnt=15&order=1&Submit.x=85&Submit.y=15")
return links
# 'albert.csv' - input file
# 'albert_final.csv' - genreted file
def generate_csv():
with open('albert.csv') as csvfile:
reader = csv.DictReader(csvfile)
with open('albert_wonder.csv', 'a', newline='') as csvfinal:
fieldnames = ['Box', 'Last Name', 'First Name', 'Series Title/Book Title', 'Series #', 'Book Title', 'Minimum Price', 'Maximum Price', 'Avg Price', 'Search Result']
writer = csv.DictWriter(csvfinal, fieldnames=fieldnames)
writer.writeheader()
for row, value in zip(reader, prices):
if not value:
writer.writerow({'Box': row['Box'],
'Last Name': row['Last Name'],
'First Name': row['First Name'],
'Series Title/Book Title': row['Series Title/Book Title'],
'Series #': row['Series #'],
'Book Title': row['Book Title'],
'Minimum Price': '',
'Maximum Price': '',
'Avg Price': '',
'Search Result': 'No Result'})
print("Wrote: "+row['Last Name']+" book "+ row['Book Title'])
elif len(value) >= 2:
price1 = min(value)
price2 = max(value)
writer.writerow({'Box': row['Box'],
'Last Name': row['Last Name'],
'First Name': row['First Name'],
'Series Title/Book Title': row['Series Title/Book Title'],
'Series #': row['Series #'],
'Book Title': row['Book Title'],
'Minimum Price': price1,
'Maximum Price': price2,
'Avg Price': price_average(value)})
print("Wrote: " + row['Last Name'] + " book " + row['Book Title'])
else:
price1 = min(value)
price2 = max(value)
writer.writerow({'Box': row['Box'],
'Last Name': row['Last Name'],
'First Name': row['First Name'],
'Series Title/Book Title': row['Series Title/Book Title'],
'Series #': row['Series #'],
'Book Title': row['Book Title'],
'Minimum Price': price1,
'Maximum Price': price2,
'Avg Price': price_average(value)})
print("Wrote: " + row['Last Name'] + " book " + row['Book Title'])
print("Writing complete")
def price_average(lst):
"""
Returns the average price of the given book
:param lst:
:return:
"""
return sum(lst) / len(lst)
build_link("albert.csv") # input CVS file
wonderbook_request(links)
generate_csv()
# for link in links[0:500]:
# print(link)
#print(prices)
| <filename>wonderbook.py
from re import sub
from decimal import Decimal
import requests
from bs4 import BeautifulSoup
import csv
# here we should store generated links for abebooks
links = []
# here we should store grabbed prices
prices = []
def wonderbook_request(links):
for link in links[0:10]:
tmp_prices = []
page = requests.get(link)
soup = BeautifulSoup(page.text, 'html.parser')
price_list = soup.find_all('p', {'class': ['pcShowProductPrice']})
for price in price_list:
pr = Decimal(sub(r'[^\d.]', '', price.text[4:]))
tmp_prices.append(pr)
prices.append(tmp_prices)
return prices
def build_link(csv_file):
with open(csv_file) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['Book Title']:
links.append("http://www.wonderbk.com/productcart/pc/showsearchresults.asp?idcategory_type=0&idcategory=0&Title="+ row['Book Title']+"&Author="+row['Last Name'].strip()+"%2C+"+row['First Name'].strip()+"&ISBN=&Publisher=&pubDateFrom=yyyy&pubDateTo=yyyy&idbinding=0&priceFrom=0&priceUntil=999999999&withstock=-1&sku=&includeSKU=&resultCnt=15&order=1&Submit.x=85&Submit.y=15")
else:
links.append("http://www.wonderbk.com/productcart/pc/showsearchresults.asp?idcategory_type=0&idcategory=0&Title="+ row['Series Title/Book Title']+"&Author="+row['Last Name'].strip()+"%2C+"+row['First Name'].strip()+"&ISBN=&Publisher=&pubDateFrom=yyyy&pubDateTo=yyyy&idbinding=0&priceFrom=0&priceUntil=999999999&withstock=-1&sku=&includeSKU=&resultCnt=15&order=1&Submit.x=85&Submit.y=15")
return links
# 'albert.csv' - input file
# 'albert_final.csv' - genreted file
def generate_csv():
with open('albert.csv') as csvfile:
reader = csv.DictReader(csvfile)
with open('albert_wonder.csv', 'a', newline='') as csvfinal:
fieldnames = ['Box', 'Last Name', 'First Name', 'Series Title/Book Title', 'Series #', 'Book Title', 'Minimum Price', 'Maximum Price', 'Avg Price', 'Search Result']
writer = csv.DictWriter(csvfinal, fieldnames=fieldnames)
writer.writeheader()
for row, value in zip(reader, prices):
if not value:
writer.writerow({'Box': row['Box'],
'Last Name': row['Last Name'],
'First Name': row['First Name'],
'Series Title/Book Title': row['Series Title/Book Title'],
'Series #': row['Series #'],
'Book Title': row['Book Title'],
'Minimum Price': '',
'Maximum Price': '',
'Avg Price': '',
'Search Result': 'No Result'})
print("Wrote: "+row['Last Name']+" book "+ row['Book Title'])
elif len(value) >= 2:
price1 = min(value)
price2 = max(value)
writer.writerow({'Box': row['Box'],
'Last Name': row['Last Name'],
'First Name': row['First Name'],
'Series Title/Book Title': row['Series Title/Book Title'],
'Series #': row['Series #'],
'Book Title': row['Book Title'],
'Minimum Price': price1,
'Maximum Price': price2,
'Avg Price': price_average(value)})
print("Wrote: " + row['Last Name'] + " book " + row['Book Title'])
else:
price1 = min(value)
price2 = max(value)
writer.writerow({'Box': row['Box'],
'Last Name': row['Last Name'],
'First Name': row['First Name'],
'Series Title/Book Title': row['Series Title/Book Title'],
'Series #': row['Series #'],
'Book Title': row['Book Title'],
'Minimum Price': price1,
'Maximum Price': price2,
'Avg Price': price_average(value)})
print("Wrote: " + row['Last Name'] + " book " + row['Book Title'])
print("Writing complete")
def price_average(lst):
"""
Returns the average price of the given book
:param lst:
:return:
"""
return sum(lst) / len(lst)
build_link("albert.csv") # input CVS file
wonderbook_request(links)
generate_csv()
# for link in links[0:500]:
# print(link)
#print(prices)
| en | 0.542896 | # here we should store generated links for abebooks # here we should store grabbed prices # 'albert.csv' - input file # 'albert_final.csv' - genreted file #', 'Book Title', 'Minimum Price', 'Maximum Price', 'Avg Price', 'Search Result'] #': row['Series #'], #': row['Series #'], #': row['Series #'], Returns the average price of the given book
:param lst:
:return: # input CVS file # for link in links[0:500]: # print(link) #print(prices) | 3.132321 | 3 |
App/settings.py | happyoneweek/FlaskAXF | 0 | 6615042 | # import os
#
# BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def get_db_uri(dbinfo):
user = dbinfo.get("USER") or 'root'
password = dbinfo.get("PASSWORD") or '<PASSWORD>'
host = dbinfo.get("HOST") or "zkx1801.top"
port = dbinfo.get("PORT") or '3306'
name = dbinfo.get("NAME") or 'mysql'
db = dbinfo.get("DB") or 'mysql'
driver = dbinfo.get("DRIVER") or 'pymysql'
return "{}+{}://{}:{}@{}:{}/{}".format(db, driver, user, password, host, port, name)
class Config():
DEBUG = False
TESTING = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
SECRET_KEY = "1234567890"
SESSION_TYPE = "redis"
class DevelopConfig(Config):
DEBUG = True
DATABASE = {
"USER": 'root',
"PASSWORD": '<PASSWORD>',
"HOST": '192.168.127.12',
"PORT": '3306',
"NAME": 'FlaskDay06',
'DB': 'mysql',
'DRIVER': 'pymysql'
}
SQLALCHEMY_DATABASE_URI = get_db_uri(DATABASE)
env = {
"develop": DevelopConfig,
}
| # import os
#
# BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def get_db_uri(dbinfo):
user = dbinfo.get("USER") or 'root'
password = dbinfo.get("PASSWORD") or '<PASSWORD>'
host = dbinfo.get("HOST") or "zkx1801.top"
port = dbinfo.get("PORT") or '3306'
name = dbinfo.get("NAME") or 'mysql'
db = dbinfo.get("DB") or 'mysql'
driver = dbinfo.get("DRIVER") or 'pymysql'
return "{}+{}://{}:{}@{}:{}/{}".format(db, driver, user, password, host, port, name)
class Config():
DEBUG = False
TESTING = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
SECRET_KEY = "1234567890"
SESSION_TYPE = "redis"
class DevelopConfig(Config):
DEBUG = True
DATABASE = {
"USER": 'root',
"PASSWORD": '<PASSWORD>',
"HOST": '192.168.127.12',
"PORT": '3306',
"NAME": 'FlaskDay06',
'DB': 'mysql',
'DRIVER': 'pymysql'
}
SQLALCHEMY_DATABASE_URI = get_db_uri(DATABASE)
env = {
"develop": DevelopConfig,
}
| en | 0.141769 | # import os # # BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) | 2.44599 | 2 |
main.py | wandering-robot/carl | 0 | 6615043 | <filename>main.py
"""main entry to code"""
import pygame as py
from regulatory.gui import GUI
from regulatory.keeper import Keeper
from regulatory.handler import Handler
class Main:
"""Main section of code that allows for different sections to communicate"""
def __init__(self):
py.init()
self.gui = GUI()
self.keeper = Keeper()
self.handler = Handler()
def call_gui(self):
"""uses GUI to obtain user input, returns parameters in dict form"""
def unpack_gui(self):
"""goes through the info gathered from GUI and distributes it approproately"""
def get_data(self):
"""uses Keeper to obtain data"""
def create_memory(self):
"""uses knowledge data to create AI's memory"""
def create_environment(self):
"""uses environment data to create AI's environment"""
def handle_input(self):
"""uses Handler to interpret user input"""
def run_program(self):
"""main loop for learning/displaying"""
if __name__ == "__main__":
Main()
| <filename>main.py
"""main entry to code"""
import pygame as py
from regulatory.gui import GUI
from regulatory.keeper import Keeper
from regulatory.handler import Handler
class Main:
"""Main section of code that allows for different sections to communicate"""
def __init__(self):
py.init()
self.gui = GUI()
self.keeper = Keeper()
self.handler = Handler()
def call_gui(self):
"""uses GUI to obtain user input, returns parameters in dict form"""
def unpack_gui(self):
"""goes through the info gathered from GUI and distributes it approproately"""
def get_data(self):
"""uses Keeper to obtain data"""
def create_memory(self):
"""uses knowledge data to create AI's memory"""
def create_environment(self):
"""uses environment data to create AI's environment"""
def handle_input(self):
"""uses Handler to interpret user input"""
def run_program(self):
"""main loop for learning/displaying"""
if __name__ == "__main__":
Main()
| en | 0.821969 | main entry to code Main section of code that allows for different sections to communicate uses GUI to obtain user input, returns parameters in dict form goes through the info gathered from GUI and distributes it approproately uses Keeper to obtain data uses knowledge data to create AI's memory uses environment data to create AI's environment uses Handler to interpret user input main loop for learning/displaying | 3.41471 | 3 |
PSMS/migrations/0009_auto_20181218_0223.py | hellen6654/DataBase_CGHP | 0 | 6615044 | <reponame>hellen6654/DataBase_CGHP
# Generated by Django 2.1.3 on 2018-12-17 18:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('PSMS', '0008_remove_pizza_isvegetarian'),
]
operations = [
migrations.AlterField(
model_name='pizza',
name='description',
field=models.TextField(max_length=50, verbose_name='披薩描述'),
),
]
| # Generated by Django 2.1.3 on 2018-12-17 18:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('PSMS', '0008_remove_pizza_isvegetarian'),
]
operations = [
migrations.AlterField(
model_name='pizza',
name='description',
field=models.TextField(max_length=50, verbose_name='披薩描述'),
),
] | en | 0.790232 | # Generated by Django 2.1.3 on 2018-12-17 18:23 | 1.466722 | 1 |
network_generator.py | mahdi-zafarmand/SNA | 0 | 6615045 | <filename>network_generator.py
import networkx as nx
def create_network_using_lfr(network_info):
    """Build a synthetic LFR benchmark graph from a parameter spec.

    `network_info` must provide 'n', 'tau1', 'tau2', 'mu' and 'min_degree';
    the values are forwarded directly to networkx's LFR generator.
    """
    return nx.LFR_benchmark_graph(
        network_info['n'],
        network_info['tau1'],
        network_info['tau2'],
        network_info['mu'],
        min_degree=network_info['min_degree'],
    )
def write_network_info(graph, network_info, partition):
    """Record node/edge counts, average degree and modularity of `graph`.

    Output goes to the conventional 'info' path derived from `network_info`.
    """
    node_count = graph.number_of_nodes()
    edge_count = graph.number_of_edges()
    lines = [
        'Number of nodes: %s\n' % node_count,
        'Number of edges: %s\n' % edge_count,
        'Average degree: %s\n' % (edge_count * 2 / node_count),
        'Modularity: %s\n' % nx.algorithms.community.modularity(graph, partition),
    ]
    target = make_file_name_out_of_network_info(network_info, 'info')
    with open(target, 'w') as handle:
        handle.writelines(lines)
def extract_partition(graph):
    """Return the ground-truth partition of an LFR benchmark graph.

    Every node of an LFR graph carries its community as the node attribute
    'community'; the sets are deduplicated (as frozensets) and returned as a
    list of node lists.
    """
    communities = {frozenset(graph.nodes[v]['community']) for v in graph}
    # The original kept an unused counter (`n = 0`) and appended in a loop;
    # a comprehension expresses the same construction directly.
    return [list(members) for members in communities]
def extract_communities(partition):
    """Invert a partition into a node -> community-list mapping.

    A node that appears in several communities keeps the last one seen,
    exactly as the original nested-loop version did.
    """
    return {node: community
            for community in partition
            for node in community}
def make_file_name_out_of_network_info(network_info, str_type):
    """Build the on-disk path for one artifact of a generated network.

    The file stem encodes the LFR parameters in a fixed order; 'network'
    and 'info' artifacts use a .mtx extension, everything else .txt.
    """
    parameter_stem = '_'.join(
        str(network_info[key])
        for key in ('n', 'tau1', 'tau2', 'mu', 'min_degree'))
    extension = '.mtx' if str_type in ('network', 'info') else '.txt'
    return 'Datasets/' + str_type + '_' + parameter_stem + extension
def write_network(network_info, network):
    """Dump the edge list of `network` (tab-separated, no attributes)."""
    target = make_file_name_out_of_network_info(network_info, 'network')
    nx.write_edgelist(network, target, delimiter='\t', data=False)
def write_partition(network_info, partition):
    """Write each community (sorted, with its size) on its own line."""
    target = make_file_name_out_of_network_info(network_info, 'partition')
    with open(target, 'w') as handle:
        for community in sorted(partition):
            # In-place sort, as before: the caller's lists end up sorted too.
            community.sort()
            handle.write('%s (%d)\n' % (community, len(community)))
def write_communities(network_info, communities):
    """Write one 'node : community (size)' line per node, ordered by node."""
    target = make_file_name_out_of_network_info(network_info, 'communities')
    with open(target, 'w') as handle:
        for node, community in sorted(communities.items()):
            # In-place sort, as before: the caller's lists end up sorted too.
            community.sort()
            handle.write('%s : %s (%d)\n' % (node, community, len(community)))
def make_networks_for_experiments():
    """Generate and persist every LFR benchmark network used in the experiments.

    Three parameter sweeps are produced: network size, compactness
    (min_degree) and mixing parameter (mu). For each spec the graph, its
    info file, partition and node-community map are written to disk.
    """
    base = {'n': 1000, 'tau1': 20, 'tau2': 10, 'mu': 0.1, 'min_degree': 10}

    specs = []
    # 1. varying network size (7 networks)
    for size in (100, 500, 1000, 5000, 10000, 50000, 100000):
        spec = dict(base)
        spec['n'] = size
        specs.append(spec)
    # 2. varying compactness via min_degree (the min_degree=10 case is
    #    already covered by the n=1000 network above)
    for degree in range(15, 50, 5):
        spec = dict(base)
        spec['min_degree'] = degree
        specs.append(spec)
    # 3. varying the mixing parameter (mu=0.10 already covered above)
    for mu_times_100 in range(15, 50, 5):
        spec = dict(base)
        spec['mu'] = mu_times_100 / 100.0
        specs.append(spec)

    for index, spec in enumerate(specs):
        lfr_network = create_network_using_lfr(spec)
        partition = extract_partition(lfr_network)
        communities = extract_communities(partition)
        write_network_info(lfr_network, spec, partition)
        write_network(spec, lfr_network)
        write_partition(spec, partition)
        write_communities(spec, communities)
        print('Network', index + 1, '/', len(specs), ' is generated.')
    print('All networks are generated.')
| <filename>network_generator.py
import networkx as nx
def create_network_using_lfr(network_info):
n = network_info['n']
tau1 = network_info['tau1']
tau2 = network_info['tau2']
mu = network_info['mu']
min_degree = network_info['min_degree']
# min_community = network_info['min_community']
# return nx.LFR_benchmark_graph(n, tau1, tau2, mu, min_degree=min_degree, min_community=min_community)
return nx.LFR_benchmark_graph(n, tau1, tau2, mu, min_degree=min_degree)
def write_network_info(graph, network_info, partition):
num_nodes = graph.number_of_nodes()
num_edges = graph.number_of_edges()
avg_degree = num_edges * 2 / num_nodes
modularity = nx.algorithms.community.modularity(graph, partition)
file_name = make_file_name_out_of_network_info(network_info, 'info')
with open(file_name, 'w') as file:
file.write('Number of nodes: ' + str(num_nodes) + '\n')
file.write('Number of edges: ' + str(num_edges) + '\n')
file.write('Average degree: ' + str(avg_degree) + '\n')
file.write('Modularity: ' + str(modularity) + '\n')
def extract_partition(graph):
communities = {frozenset(graph.nodes[v]['community']) for v in graph}
partition = list()
n = 0
for nodes_in_community in communities:
partition.append(list(nodes_in_community))
return partition
def extract_communities(partition):
communities = {}
for community in partition:
for node in community:
communities[node] = community
return communities
def make_file_name_out_of_network_info(network_info, str_type):
n = network_info['n']
tau1 = network_info['tau1']
tau2 = network_info['tau2']
mu = network_info['mu']
min_degree = network_info['min_degree']
# min_community = network_info['min_community']
file_name = 'Datasets/' + str_type + '_' + str(n)
file_name += '_' + str(tau1)
file_name += '_' + str(tau2)
file_name += '_' + str(mu)
file_name += '_' + str(min_degree)
# file_name += '_' + str(min_community)
if str_type == 'network' or str_type == 'info':
file_name += '.mtx'
else:
file_name += '.txt'
return file_name
def write_network(network_info, network):
file_name = make_file_name_out_of_network_info(network_info, 'network')
nx.write_edgelist(network, file_name, delimiter='\t', data=False)
def write_partition(network_info, partition):
file_name = make_file_name_out_of_network_info(network_info, 'partition')
with open(file_name, 'w') as file:
for community in sorted(partition):
community.sort()
line = str(community) + ' (' + str(len(community)) + ')\n'
file.write(line)
def write_communities(network_info, communities):
file_name = make_file_name_out_of_network_info(network_info, 'communities')
with open(file_name, 'w') as file:
for key, value in sorted(communities.items()):
value.sort()
line = str(key) + ' : ' + str(value) + ' (' + str(len(value)) + ')\n'
file.write(line)
def make_networks_for_experiments():
network_specs = []
# 1. for testing size of the network (7 networks)
network_specs.append({'n': 100, 'tau1': 20, 'tau2': 10, 'mu': 0.1, 'min_degree': 10})
network_specs.append({'n': 500, 'tau1': 20, 'tau2': 10, 'mu': 0.1, 'min_degree': 10})
network_specs.append({'n': 1000, 'tau1': 20, 'tau2': 10, 'mu': 0.1, 'min_degree': 10})
network_specs.append({'n': 5000, 'tau1': 20, 'tau2': 10, 'mu': 0.1, 'min_degree': 10})
network_specs.append({'n': 10000, 'tau1': 20, 'tau2': 10, 'mu': 0.1, 'min_degree': 10})
network_specs.append({'n': 50000, 'tau1': 20, 'tau2': 10, 'mu': 0.1, 'min_degree': 10})
network_specs.append({'n': 100000, 'tau1': 20, 'tau2': 10, 'mu': 0.1, 'min_degree': 10})
# 2. for testing compactness of the network (8 networks)
# network_specs.append({'n': 1000, 'tau1': 20, 'tau2': 10, 'mu': 0.1, 'min_degree': 10}) # already generated
network_specs.append({'n': 1000, 'tau1': 20, 'tau2': 10, 'mu': 0.1, 'min_degree': 15})
network_specs.append({'n': 1000, 'tau1': 20, 'tau2': 10, 'mu': 0.1, 'min_degree': 20})
network_specs.append({'n': 1000, 'tau1': 20, 'tau2': 10, 'mu': 0.1, 'min_degree': 25})
network_specs.append({'n': 1000, 'tau1': 20, 'tau2': 10, 'mu': 0.1, 'min_degree': 30})
network_specs.append({'n': 1000, 'tau1': 20, 'tau2': 10, 'mu': 0.1, 'min_degree': 35})
network_specs.append({'n': 1000, 'tau1': 20, 'tau2': 10, 'mu': 0.1, 'min_degree': 40})
network_specs.append({'n': 1000, 'tau1': 20, 'tau2': 10, 'mu': 0.1, 'min_degree': 45})
# 3. for testing mixing parameter of the network (8 networks)
# network_specs.append({'n': 1000, 'tau1': 20, 'tau2': 10, 'mu': 0.10, 'min_degree': 10}) # already generated
network_specs.append({'n': 1000, 'tau1': 20, 'tau2': 10, 'mu': 0.15, 'min_degree': 10})
network_specs.append({'n': 1000, 'tau1': 20, 'tau2': 10, 'mu': 0.20, 'min_degree': 10})
network_specs.append({'n': 1000, 'tau1': 20, 'tau2': 10, 'mu': 0.25, 'min_degree': 10})
network_specs.append({'n': 1000, 'tau1': 20, 'tau2': 10, 'mu': 0.30, 'min_degree': 10})
network_specs.append({'n': 1000, 'tau1': 20, 'tau2': 10, 'mu': 0.35, 'min_degree': 10})
network_specs.append({'n': 1000, 'tau1': 20, 'tau2': 10, 'mu': 0.40, 'min_degree': 10})
network_specs.append({'n': 1000, 'tau1': 20, 'tau2': 10, 'mu': 0.45, 'min_degree': 10})
for ith_network in range(len(network_specs)):
network_spec = network_specs[ith_network]
lfr_network = create_network_using_lfr(network_spec)
partition = extract_partition(lfr_network)
communities = extract_communities(partition)
write_network_info(lfr_network, network_spec, partition)
write_network(network_spec, lfr_network)
write_partition(network_spec, partition)
write_communities(network_spec, communities)
print('Network', ith_network + 1, '/', len(network_specs), ' is generated.')
print('All networks are generated.')
| en | 0.509414 | # min_community = network_info['min_community'] # return nx.LFR_benchmark_graph(n, tau1, tau2, mu, min_degree=min_degree, min_community=min_community) # min_community = network_info['min_community'] # file_name += '_' + str(min_community) # 1. for testing size of the network (7 networks) # 2. for testing compactness of the network (8 networks) # network_specs.append({'n': 1000, 'tau1': 20, 'tau2': 10, 'mu': 0.1, 'min_degree': 10}) # already generated # 3. for testing mixing parameter of the network (8 networks) # network_specs.append({'n': 1000, 'tau1': 20, 'tau2': 10, 'mu': 0.10, 'min_degree': 10}) # already generated | 2.77609 | 3 |
mjrl/policies/mpc_actor.py | jean-moorman/mjrl | 239 | 6615046 | import numpy as np
from trajopt.utils import gather_paths_parallel
class MPCActor(object):
    """Sampling-based MPC actor (MPPI-style).

    Each call to :meth:`get_action` shoots `paths_per_cpu * num_cpu`
    perturbed rollouts of length `H` from the given environment state and
    returns the exponentially-weighted average of their first actions.
    """

    def __init__(self, env, H, paths_per_cpu,
                 num_cpu=1,
                 kappa=1.0,
                 gamma=1.0,
                 mean=None,
                 filter_coefs=None,
                 seed=123,
                 ):
        """Store the planning parameters and seed the environment.

        mean defaults to a zero action; filter_coefs default to an identity
        smoothing filter [ones(m), 1.0, 0.0, 0.0].
        """
        self.env = env
        self.seed = seed
        self.n = env.observation_dim
        self.m = env.action_dim
        self.H = H
        self.paths_per_cpu = paths_per_cpu
        self.num_cpu = num_cpu
        self.kappa = kappa
        self.gamma = gamma
        self.mean = np.zeros(self.m) if mean is None else mean
        if filter_coefs is None:
            self.filter_coefs = [np.ones(self.m), 1.0, 0.0, 0.0]
        else:
            self.filter_coefs = filter_coefs
        # Mirror the original setup order: reset, seed, reset with seed.
        self.env.reset()
        self.env.set_seed(seed)
        self.env.reset(seed=seed)
        self.act_sequence = np.ones((self.H, self.m)) * self.mean
        # NOTE(review): self.ctr is never advanced anywhere in this class,
        # so every get_action call derives the same rollout seed.
        self.ctr = 1

    def score_trajectory(self, paths):
        """Return the gamma-discounted return of each rollout in `paths`."""
        returns = np.zeros(len(paths))
        for idx, path in enumerate(paths):
            rewards = path["rewards"]
            returns[idx] = sum((self.gamma ** t) * rewards[t]
                               for t in range(rewards.shape[0]))
        return returns

    def get_action(self, env_state):
        """Roll out perturbed action sequences from `env_state` and return
        the softmax-weighted (temperature kappa) average first action."""
        seed = self.seed + self.ctr * 1000
        paths = gather_paths_parallel(self.env.env_id,
                                      env_state,
                                      self.act_sequence,
                                      self.filter_coefs,
                                      seed,
                                      self.paths_per_cpu,
                                      self.num_cpu,
                                      )
        scores = self.score_trajectory(paths)
        # Shift by the max before exponentiating for numerical stability.
        weights = np.exp(self.kappa * (scores - np.max(scores)))
        weighted_first_actions = [path["actions"][0] * w
                                  for path, w in zip(paths, weights)]
        act = np.sum(weighted_first_actions, axis=0)
        act = act / (np.sum(weights) + 1e-6)
        return act
from trajopt.utils import gather_paths_parallel
class MPCActor(object):
def __init__(self, env, H, paths_per_cpu,
num_cpu=1,
kappa=1.0,
gamma=1.0,
mean=None,
filter_coefs=None,
seed=123,
):
self.env, self.seed = env, seed
self.n, self.m = env.observation_dim, env.action_dim
self.H, self.paths_per_cpu, self.num_cpu = H, paths_per_cpu, num_cpu
self.mean, self.filter_coefs, self.kappa, self.gamma = mean, filter_coefs, kappa, gamma
if mean is None:
self.mean = np.zeros(self.m)
if filter_coefs is None:
self.filter_coefs = [np.ones(self.m), 1.0, 0.0, 0.0]
self.env.reset()
self.env.set_seed(seed)
self.env.reset(seed=seed)
self.act_sequence = np.ones((self.H, self.m)) * self.mean
self.ctr = 1
def score_trajectory(self, paths):
scores = np.zeros(len(paths))
for i in range(len(paths)):
scores[i] = 0.0
for t in range(paths[i]["rewards"].shape[0]):
scores[i] += (self.gamma**t)*paths[i]["rewards"][t]
return scores
def get_action(self, env_state):
# Set to env_state
# Shoot trajectories
# Return optimal action
seed = self.seed + self.ctr * 1000
paths = gather_paths_parallel(self.env.env_id,
env_state,
self.act_sequence,
self.filter_coefs,
seed,
self.paths_per_cpu,
self.num_cpu,
)
num_traj = len(paths)
R = self.score_trajectory(paths)
S = np.exp(self.kappa*(R-np.max(R)))
act = np.sum([paths[i]["actions"][0] * S[i] for i in range(num_traj)], axis=0)
act = act / (np.sum(S) + 1e-6)
return act | en | 0.388086 | # Set to env_state # Shoot trajectories # Return optimal action | 2.460338 | 2 |
tests/validation/helpers.py | alexanderzimmerman/sapphire | 10 | 6615047 | <reponame>alexanderzimmerman/sapphire
"""Helper functions for test code"""
def check_scalar_solution_component(
        solution,
        component,
        coordinates,
        expected_values,
        absolute_tolerances,
        subcomponent=None):
    """Verify the scalar values of a specified solution component.

    Args:
        solution (fe.Function): The solution to be verified; only its
            ``at(point)`` accessor is used.
        component (int): Index to a scalar solution component
            to be verified. The solution is often vector-valued and
            based on a mixed formulation.
        coordinates (List[Tuple[float]]): Spatial coordinates
            to be verified. Each tuple contains a float for each
            spatial dimension.
        expected_values (Tuple[float]): Truth values
            at each coordinate.
        absolute_tolerances (Tuple[float]): Used to assert absolute error
            is not too large. Specify a tolerance for each value.
        subcomponent (int, optional): Index into a vector-valued component
            when a single scalar entry of it should be checked.
    """
    assert len(expected_values) == len(coordinates)
    # Iterate the three parallel sequences directly instead of the original
    # manual index bookkeeping (indices + coordinates[i]).
    for coordinate, expected_value, tolerance in zip(
            coordinates, expected_values, absolute_tolerances):
        value = solution.at(coordinate)[component]
        if subcomponent is not None:  # was `not(subcomponent == None)`
            value = value[subcomponent]
        print("Expected {} and found {}.".format(expected_value, value))
        absolute_error = abs(value - expected_value)
        assert absolute_error <= tolerance
| """Helper functions for test code"""
def check_scalar_solution_component(
solution,
component,
coordinates,
expected_values,
absolute_tolerances,
subcomponent = None):
"""Verify the scalar values of a specified solution component.
Args:
solution (fe.Function): The solution to be verified.
component (int): Index to a scalar solution component
to be verified. The solution is often vector-valued and
based on a mixed formulation.
coordinates (List[Tuple[float]]): Spatial coordinates
to be verified. Each tuple contains a float for each
spatial dimension and will be converted to a `fe.Point`.
expected_values (Tuple[float]): Truth values
at each coordinate.
absolute_tolerances (Tuple[float]): Used to assert absolute error
is not too large. Specify a tolerance for each value.
"""
assert(len(expected_values) == len(coordinates))
indices = range(len(expected_values))
for i, expected_value, tolerance in zip(
indices, expected_values, absolute_tolerances):
values = solution.at(coordinates[i])
value = values[component]
if not(subcomponent == None):
value = value[subcomponent]
print("Expected {} and found {}.".format(expected_value, value))
absolute_error = abs(value - expected_value)
assert absolute_error <= tolerance | en | 0.678497 | Helper functions for test code Verify the scalar values of a specified solution component. Args: solution (fe.Function): The solution to be verified. component (int): Index to a scalar solution component to be verified. The solution is often vector-valued and based on a mixed formulation. coordinates (List[Tuple[float]]): Spatial coordinates to be verified. Each tuple contains a float for each spatial dimension and will be converted to a `fe.Point`. expected_values (Tuple[float]): Truth values at each coordinate. absolute_tolerances (Tuple[float]): Used to assert absolute error is not too large. Specify a tolerance for each value. | 3.607045 | 4 |
eventviz/views/timeline.py | mattoufoutu/EventViz | 1 | 6615048 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from flask import Blueprint, render_template, request, url_for, redirect
import eventviz
from eventviz import settings
from eventviz.db import connection, get_fieldnames, get_event_types, get_item
timeline = Blueprint('timeline', __name__)
@timeline.route('/', methods=['GET', 'POST'])
def index():
    """Render the timeline for the active project.

    GET shows the default view; POST lets the user choose which event
    fields are displayed and an optional field to group events by.
    """
    project = eventviz.project
    if project is None:
        # TODO: send flash message
        return redirect(url_for('main.index'))
    db = connection['eventviz_%s' % project]
    available_fields = get_fieldnames(project)
    displayed_fields = ['method', 'querystring']
    group = None
    if request.method == 'POST':
        chosen_fields = request.form.getlist('fields')
        if chosen_fields:
            displayed_fields = chosen_fields
        group = request.form.get('group')
        if group == 'event_type':
            # Grouping by event type is the default, encoded as group=None.
            group = None
    data = []
    events = []
    for event_type in get_event_types(project):
        for db_item in db[event_type].find():
            db_item_id = str(db_item['_id'])
            if group is None:
                group_name = event_type
            else:
                group_name = db_item.get(group, 'E_NOGROUP')
            content = ' - '.join(
                str(db_item.get(field, 'N/A')) for field in displayed_fields)
            data.append({
                'start': db_item['time'].strftime(settings.JS_DATE_FORMAT),
                'group': group_name,
                'content': content,
                'className': '%s eventtype-%s' % (db_item_id, event_type),
            })
            events.append(db_item_id)
    events.append('event_type')
    filters = {
        'fields': ','.join(displayed_fields),
        'group_by': group or 'event_type',
    }
    return render_template(
        'timeline.html',
        page='timeline',
        project=project,
        event_fields=available_fields,
        data=data,
        events=events,
        filters=filters,
    )
@timeline.route('/<string:event_type>/<string:event_id>')
def event_details(event_type, event_id):
    """Show every stored field of a single event, minus its database id."""
    if event_type in get_event_types(eventviz.project):
        event = get_item(eventviz.project, event_type, event_id)
        del event['_id']
        return render_template('event_details.html', event=event)
    # Unknown event type: fall back to the timeline overview.
    return redirect(url_for('timeline.index'))
| # -*- coding: utf-8 -*-
from flask import Blueprint, render_template, request, url_for, redirect
import eventviz
from eventviz import settings
from eventviz.db import connection, get_fieldnames, get_event_types, get_item
timeline = Blueprint('timeline', __name__)
@timeline.route('/', methods=['GET', 'POST'])
def index():
project = eventviz.project
if project is None:
# TODO: send flash message
return redirect(url_for('main.index'))
db = connection['eventviz_%s' % project]
available_fields = get_fieldnames(project)
displayed_fields = ['method', 'querystring']
group = None
if request.method == 'POST':
form_fields = request.form.getlist('fields')
if form_fields:
displayed_fields = form_fields
if 'group' in request.form:
group = request.form['group']
if group == 'event_type':
group = None
data = []
events = []
for event_type in get_event_types(project):
for db_item in db[event_type].find():
db_item_id = str(db_item['_id'])
item = {
'start': db_item['time'].strftime(settings.JS_DATE_FORMAT),
'group': db_item.get(group, 'E_NOGROUP') if group is not None else event_type,
'content': ' - '.join(map(lambda f: str(db_item.get(f, 'N/A')), displayed_fields)),
'className': '%s eventtype-%s' % (db_item_id, event_type)
}
data.append(item)
events.append(db_item_id)
events.append('event_type')
filters = {
'fields': ','.join(displayed_fields),
'group_by': group or 'event_type'
}
return render_template(
'timeline.html',
page='timeline',
project=project,
event_fields=available_fields,
data=data,
events=events,
filters=filters
)
@timeline.route('/<string:event_type>/<string:event_id>')
def event_details(event_type, event_id):
if event_type not in get_event_types(eventviz.project):
return redirect(url_for('timeline.index'))
event = get_item(eventviz.project, event_type, event_id)
del event['_id']
return render_template('event_details.html', event=event) | en | 0.28715 | # -*- coding: utf-8 -*- # TODO: send flash message | 2.161421 | 2 |
src/models/__init__.py | dwd-umd/cccc-apis | 3 | 6615049 | <reponame>dwd-umd/cccc-apis
"""CATCH-APIs data models."""
| """CATCH-APIs data models.""" | en | 0.338656 | CATCH-APIs data models. | 1.125485 | 1 |
src/Norma_diff_step_2.py | zocean/Norma | 1 | 6615050 | #!/home/yangz6/Software/Python-2.7.5/python-2.7.5
# Programmer : <NAME>
# Contact: <EMAIL>
# Last-modified: 28 Mar 2019 13:02:07
import os,sys,argparse
from progressbar import ProgressBar
from bx.bbi.bigwig_file import BigWigFile
'''import custom function/class'''
from utility import *
def parse_arg():
    '''Parse the command line arguments; print usage and exit when none are given.'''
    p = argparse.ArgumentParser(description='Example: %(prog)s -h', epilog='Library dependency :')
    p.add_argument('-v', '--version', action='version', version='%(prog)s 0.1')
    p.add_argument('--res', type=int, dest="res", help="resolution in kb")
    p.add_argument('--genome', type=str, dest="genome", help="chromosome size file")
    p.add_argument('--bw_list', type=str, dest="bw_list", nargs="+", help="bigwig files")
    p.add_argument('--label', type=str, dest="label", nargs="+", help="label associated with bigwig files")
    p.add_argument('--mode', type=str, dest="mode", nargs="+", help="annotation mode")
    p.add_argument('--output', type=str, dest="output", help="output file")
    if len(sys.argv) < 2:
        # Bug fix: the old code did "print p.print_help()". print_help()
        # writes the usage text itself and returns None, so the extra print
        # emitted a spurious "None" line after the help.
        p.print_help()
        exit(1)
    return p.parse_args()
def make_genome_window(genome_size, res):
    '''
    Cut each chromosome into fixed-size windows of `res` bp.

    Returns a list of Region(chrom, start, stop) objects, ordered by
    chromosome name; the last window of a chromosome is clipped to the
    chromosome length.
    '''
    win_list = []
    for chrom in sorted(genome_size.keys()):
        chrom_size = genome_size[chrom]
        # Chromosome shorter than one window: a single clipped region.
        if chrom_size < res:
            win_list.append(Region(chrom, 0, chrom_size))
        else:
            # ceil(chrom_size / res) windows. Floor division (//) keeps this
            # an integer under both Python 2 and 3 (the old "size/res + 1"
            # became a float under Python 3), and the ceiling count avoids
            # the zero-width trailing window the old "+ 1" produced when
            # chrom_size was an exact multiple of res.
            num_bins = (int(chrom_size) + int(res) - 1) // int(res)
            start = 0
            for _ in range(num_bins):
                stop = min(start + res, chrom_size)
                win_list.append(Region(chrom, start, stop))
                start = stop
    return win_list
def main():
    """Annotate fixed-size genome windows with signal from a set of bigwig
    tracks and write the non-empty rows to the output table.

    Python 2 only: uses the ``print >>file`` statement. Relies on
    load_genome_size / Region from the project's utility module.
    """
    global args
    args = parse_arg()
    # Command line gives the resolution in kb; convert to bp.
    args.res = args.res*1000
    # check parameters: one label is required per bigwig track
    try:
        assert len(args.bw_list) == len(args.label)
    except AssertionError:
        print >>sys.stderr, "number of bigwig file and number of label must be matched"
        exit(1)
    print >>sys.stderr, "# check parameters done"
    # parse the bigwig file
    anno_list = []
    mode_list = args.mode
    label_list = args.label
    for nn in range(len(args.bw_list)):
        # NOTE(review): the file handles opened here are never closed
        # explicitly; they live for the duration of the run.
        anno_list.append(BigWigFile(open(args.bw_list[nn])))
    print >>sys.stderr, "# load data done"
    # build the table of genome windows at the requested resolution
    genome_size = load_genome_size(args.genome)
    win_list = make_genome_window(genome_size, args.res)
    # make the annotation: one column per bigwig track, one pass per window
    print >>sys.stderr, "# begin annotation"
    progress = ProgressBar()
    for nn in progress(range(len(win_list))):
        win = win_list[nn]
        for mm in range(len(anno_list)):
            # Region.get_anno is defined in the project's utility module;
            # presumably it stores the summarised track value under the
            # given label -- confirm against utility.py.
            win.get_anno(anno_list[mm], label_list[mm], mode_list[mm], genome_size)
    progress.finish()
    # report: windows sorted by (chrom, start); windows whose every
    # annotation is 'NA' are skipped entirely
    print >>sys.stderr, "# begin report"
    win_list = sorted(win_list, key = lambda x: (x.chrom, x.start))
    fout = open(args.output, 'w')
    print >>fout, win_list[0].header()
    progress = ProgressBar()
    for nn in progress(range(len(win_list))):
        win = win_list[nn]
        is_na = True
        for label in win.label:
            if win.anno[label] != 'NA':
                is_na = False
                break
        if is_na:
            continue
        else:
            print >>fout, win.write()
    progress.finish()
    fout.close()
# Run the annotation pipeline only when executed as a script (not on import).
if __name__=="__main__":
    main()
| #!/home/yangz6/Software/Python-2.7.5/python-2.7.5
# Programmer : <NAME>
# Contact: <EMAIL>
# Last-modified: 28 Mar 2019 13:02:07
import os,sys,argparse
from progressbar import ProgressBar
from bx.bbi.bigwig_file import BigWigFile
'''import custom function/class'''
from utility import *
def parse_arg():
''' This Function Parse the Argument '''
p=argparse.ArgumentParser( description = 'Example: %(prog)s -h', epilog='Library dependency :')
p.add_argument('-v','--version',action='version',version='%(prog)s 0.1')
p.add_argument('--res',type=int,dest="res",help="resolution in kb")
p.add_argument('--genome',type=str,dest="genome",help="chromosome size file")
p.add_argument('--bw_list',type=str,dest="bw_list",nargs="+",help="bigwig files")
p.add_argument('--label',type=str,dest="label",nargs="+",help="label associated with bigwig files")
p.add_argument('--mode',type=str,dest="mode",nargs="+",help="annotation mode")
p.add_argument('--output',type=str,dest="output",help="output file")
if len(sys.argv) < 2:
print p.print_help()
exit(1)
return p.parse_args()
def make_genome_window(genome_size, res):
'''
cut genome into windows
'''
win_list = []
for chrom in sorted(genome_size.keys()):
# if chromosome size < resolution
if genome_size[chrom] < res:
win_list.append(Region(chrom, 0, genome_size[chrom]))
# cut chromosome into windows
else:
bin_size = int(genome_size[chrom])/int(res) + 1
start = 0
stop = res
for nn in range(bin_size):
# take care the last window
if stop > genome_size[chrom]:
stop = genome_size[chrom]
win = Region(chrom, start, stop)
win_list.append(win)
start = stop
stop += res
return win_list
def main():
global args
args = parse_arg()
args.res = args.res*1000
# check parameters
try:
assert len(args.bw_list) == len(args.label)
except AssertionError:
print >>sys.stderr, "number of bigwig file and number of label must be matched"
exit(1)
print >>sys.stderr, "# check parameters done"
# parse the bigwig file
anno_list = []
mode_list = args.mode
label_list = args.label
for nn in range(len(args.bw_list)):
anno_list.append(BigWigFile(open(args.bw_list[nn])))
print >>sys.stderr, "# load data done"
# build the table
genome_size = load_genome_size(args.genome)
win_list = make_genome_window(genome_size, args.res)
# make the annotation
print >>sys.stderr, "# begin annotation"
progress = ProgressBar()
for nn in progress(range(len(win_list))):
win = win_list[nn]
for mm in range(len(anno_list)):
win.get_anno(anno_list[mm], label_list[mm], mode_list[mm], genome_size)
progress.finish()
# report
print >>sys.stderr, "# begin report"
win_list = sorted(win_list, key = lambda x: (x.chrom, x.start))
fout = open(args.output, 'w')
print >>fout, win_list[0].header()
progress = ProgressBar()
for nn in progress(range(len(win_list))):
win = win_list[nn]
is_na = True
for label in win.label:
if win.anno[label] != 'NA':
is_na = False
break
if is_na:
continue
else:
print >>fout, win.write()
progress.finish()
fout.close()
if __name__=="__main__":
main()
| en | 0.472275 | #!/home/yangz6/Software/Python-2.7.5/python-2.7.5 # Programmer : <NAME> # Contact: <EMAIL> # Last-modified: 28 Mar 2019 13:02:07 import custom function/class This Function Parse the Argument cut genome into windows # if chromosome size < resolution # cut chromosome into windows # take care the last window # check parameters # parse the bigwig file # build the table # make the annotation # report | 2.746628 | 3 |