| Column | Dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
| 2765c7c3e9e989737c9e28cfcc8a7675bc22b1e5 | 907 | py | Python | main.py | denissearenas/face_recognition_image | 63f43ae188cc12ba443d2aeff84959eba95c9049 | ["MIT"] | null | null | null | main.py | denissearenas/face_recognition_image | 63f43ae188cc12ba443d2aeff84959eba95c9049 | ["MIT"] | null | null | null | main.py | denissearenas/face_recognition_image | 63f43ae188cc12ba443d2aeff84959eba95c9049 | ["MIT"] | null | null | null |
import logging
from logging.config import fileConfig
import os
import imageRecognition
# Folder containing the images to run recognition on
TestFolder = 'WorkingFolder/TestImages/'
# Create the working folders; exist_ok avoids errors on reruns
working_folders = ['logs', '.metadata', 'WorkingFolder', 'WorkingFolder/OutputImages', TestFolder]
for folder in working_folders:
    os.makedirs(folder, exist_ok=True)
# Load log config
fileConfig('logging_config.ini')
logger = logging.getLogger()
if __name__ == "__main__":
encodings = imageRecognition.loadEncodings()
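    # Tag people in every image found in the test folder (nothing happens when it is empty)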
if len(os.listdir(TestFolder)) > 0:
for file in os.listdir(TestFolder):
name_image = os.path.join(TestFolder,file)
filename = 'output'
if file.rfind('.') >= 0:
filename = file[:file.rfind('.')]
            imageRecognition.tagPeople_cv2(name_image, encodings, tolerance=0.60, output_filename=filename)
| 24.513514 | 114 | 0.680265 | 100 | 907 | 6.03 | 0.48 | 0.029851 | 0.063018 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00831 | 0.203969 | 907 | 36 | 115 | 25.194444 | 0.82687 | 0.059537 | 0 | 0 | 0 | 0 | 0.133255 | 0.0625 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 2768921c04ac38d6998b1d53e7d2b264cb24e683 | 755 | py | Python | Medio 3/ex056.py | Gustavsantos/python1 | 5520f2d2ee591157942008fdcd6bd42eb521f1a6 | ["MIT"] | null | null | null | Medio 3/ex056.py | Gustavsantos/python1 | 5520f2d2ee591157942008fdcd6bd42eb521f1a6 | ["MIT"] | null | null | null | Medio 3/ex056.py | Gustavsantos/python1 | 5520f2d2ee591157942008fdcd6bd42eb521f1a6 | ["MIT"] | null | null | null |
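# Exercise: read the name, birth year and sex of 4 people; report the average age, the oldest man, and the number of women under 20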
total = 0
media = 0
hmais = 0
no = ''
contm = 0
from datetime import date
atual = date.today().year
for p in range(1,5):
print('{}° Pessoa'.format(p))
nome = str(input('Nome: ')).strip().capitalize()
ns = int(input('O ano em que nasceu: '))
sexo = str(input('Sexo: ')).strip().upper()
idade = atual - ns
total += idade
media = total/4
if p == 1 and sexo == 'M':
hmais = idade
no = nome
if idade > hmais and sexo == 'M':
hmais = idade
no = nome
if idade < 20 and sexo == 'F':
contm += 1
print('Existem {} mulheres com menos de 20 anos'.format(contm))
print('O homem mais velho tem {} anos e se chama {}'.format(hmais, no))
print('A média de idade é de {} anos'.format(media))
| 26.964286 | 67 | 0.564238 | 116 | 755 | 3.681034 | 0.508621 | 0.04918 | 0.037471 | 0.06089 | 0.145199 | 0.145199 | 0.145199 | 0.145199 | 0.145199 | 0 | 0 | 0.023551 | 0.268874 | 755 | 27 | 68 | 27.962963 | 0.748188 | 0 | 0 | 0.153846 | 0 | 0 | 0.209272 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.038462 | 0 | 0.038462 | 0.153846 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 2768edd67418ad70fbc7628b2d0db9a2e7e067b6 | 9,575 | py | Python | modules/cdp.py | experiencedft/defisaver-sim | 1d1f05078efb634286df450b125677a1685a066e | ["MIT"] | 13 | 2021-02-01T11:08:21.000Z | 2022-01-13T05:29:11.000Z | modules/cdp.py | experiencedft/defisaver-sim | 1d1f05078efb634286df450b125677a1685a066e | ["MIT"] | null | null | null | modules/cdp.py | experiencedft/defisaver-sim | 1d1f05078efb634286df450b125677a1685a066e | ["MIT"] | 5 | 2021-01-27T22:01:55.000Z | 2022-02-20T22:14:16.000Z |
'''
The CDP module contains all the tools required to simulate a collateralized debt position, such as increasing or decreasing
its leverage (boost and repay), closing the vault to calculate its lifetime profit, adding collateral or drawing more debt.
The position is represented as an object whose methods provide all the above interactions. It can be leveraged or non-leveraged.
For the purpose of this simulation, a vault is considered leveraged if part or all of the debt is used to buy more collateral.
'''
import numpy as np
class CDP():
    '''
    Attributes
    ----------
    collateral: float
        the amount of collateral in the position, in units of the collateral asset
    debt: float
        the amount of debt of the position, in units of the debt asset
    isAutomated: bool
        a boolean flag indicating whether the CDP is automated
    automation_settings: dictionary
        a dictionary containing the automation settings
        {"repay from": ..., "repay to": ..., "boost from": ..., "boost to": ...}
    min_automation_debt: float
        the minimum debt required for automation to be enabled, in amount of the debt asset
    min_ratio: float
        the minimum collateralization ratio admitted by the protocol, below which liquidation occurs
    '''
def __init__(self, initial_collateral: float, initial_debt: float, min_ratio: float) -> None:
'''
min_ratio in %
'''
self.collateral = initial_collateral
self.debt = initial_debt
self.isAutomated = False
self.automation_settings = {"repay from": 0, "repay to": 0, "boost from": 0, "boost to": 0}
self.min_ratio = min_ratio
# TODO: pass this as an argument later on and include change in simulate.py and related function calls
self.min_automation_debt = 0
def getCollateralizationRatio(self, price: float):
'''
Returns the collateralization ratio in %
'''
return 100*self.collateral*price/self.debt
def changeCollateral(self, deltaCollateral: float):
'''
Add deltaCollateral to the position's collateral. Note: deltaCollateral may be negative.
'''
self.collateral += deltaCollateral
def changeDebt(self, deltaDebt: float):
'''
Add deltaDebt to the position's debt. Note: deltaDebt may be negative.
'''
self.debt += deltaDebt
def close(self, price: float) -> float:
'''
Close the vault by paying back all of the debt and return the amount of collateral left.
Assumes infinite liquidity at the current price.
Param:
price: float
The current price of the collateral denominated in the debt asset.
'''
if self.debt > 0:
# The amount of collateral to sell to pay back the debt
collateralToSell = self.debt/price
self.collateral -= collateralToSell
self.debt = 0
return self.collateral
def automate(self, repay_from: float, repay_to: float, boost_from: float, boost_to: float):
'''
Enable or update automation for a CDP with the given automation settings.
Param:
automation_settings:
each param is an automation setting in the order of repay from, repay to,
boost from, boost to
'''
assert repay_from > self.min_ratio + 10
self.isAutomated = True
self.automation_settings["repay from"] = repay_from
self.automation_settings["repay to"] = repay_to
self.automation_settings["boost from"] = boost_from
self.automation_settings["boost to"] = boost_to
def disableAutomation(self):
self.isAutomated = False
def boostTo(self, target: float, price: float, gas_price_in_gwei: float, service_fee: float):
'''
Given the current price of the collateral asset denominated in the debt asset, check whether
the collateralization ratio is above threshold, and if yes, boost to the target ratio.
A boost is defined as generating more debt from the position and buying collateral with it.
Params:
target:
target collateralization ratio (in %)
price:
current price of the collateral denominated in the debt asset
gas_price_in_gwei:
current on-chain gas price in gwei (nanoETH)
            service_fee:
current fee charged by DeFi Saver (in %)
'''
        # Check that it's possible to boost with the desired target
if self.debt == 0 or target/100 < self.collateral*price/self.debt:
# Fixed estimate of 1M gas consumed by the boost operation to calculate the gas fee in
# ETH
g = 1000000*gas_price_in_gwei*1e-9
# Target collateralization ratio
t = target/100
c = self.collateral
d = self.debt
p = price
gamma = 1 - service_fee/100
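            # Require p*(c + deltaCollateral)/(d + deltaDebt) = t with
            # deltaCollateral = (gamma*deltaDebt - p*g)/p; solving gives
            # deltaDebt = (p*c - p*g - t*d)/(t - gamma)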
# print("gas cost in USD: ", g*p)
# print("gas cost limit: ", (p*c - t*d)/(5*(t - gamma) + 1))
# Gas cost must be below 20% of the boost amount
if p*g < (p*c - t*d)/(5*(t - gamma) + 1):
                # The gas charged to the user is capped at a price of 499 gwei
if gas_price_in_gwei > 499:
g = 1000000*499*1e-9
                # Calculate the debt increase (> 0) required to reach the target collateralization ratio
deltaDebt = (p*c - p*g - t*d)/(t - gamma)
# print("debt change: ", deltaDebt)
# print("gas_cost/debt_change: ", p*g/deltaDebt)
# Calculate corresponding collateral increase (> 0)
deltaCollateral = (gamma*deltaDebt - p*g)/p
# Update position
self.debt += deltaDebt
self.collateral += deltaCollateral
assert self.debt > 0
assert self.collateral > 0
# Return True if boost took place
return True
else:
return False
else:
# If boost not possible with desired parameters
return False
def repayTo(self, target: float, price: float, gas_price_in_gwei: float, service_fee: float):
'''
Given the current price of the collateral asset denominated in the debt asset, check whether
the collateralization ratio is below threshold, and if yes, repay to the target ratio.
A repay is defined as selling some of the collateral from the position to acquire more of the
debt asset and repay part of the debt with it.
Params:
target:
target collateralization ratio in %
price:
current price of the collateral denominated in the debt asset
gas_price_in_gwei:
current on-chain gas price in gwei (nanoETH)
            service_fee:
current fee charged by DeFi Saver (in %)
'''
collateralization = self.collateral*price/self.debt
# Check that it's possible to repay with the desired target
assert self.debt != 0
# The current CRatio must be below the target OR below min_ratio + 10%
if collateralization < target/100:
# Fixed estimate of 1M gas consumed by the repay operation to calculate the gas fee in
# ETH
if gas_price_in_gwei > 499:
gas_price_in_gwei = 499
g = 1000000*gas_price_in_gwei*1e-9
# Target collateralization ratio
t = target/100
c = self.collateral
d = self.debt
p = price
gamma = 1 - service_fee/100
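            # Require p*(c - deltaCollateral)/(d - deltaDebt) = t with
            # deltaDebt = gamma*p*deltaCollateral - p*g; solving gives
            # deltaCollateral = (t*d + t*p*g - p*c)/(p*(gamma*t - 1))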
# print("gas cost in USD: ", p*g)
# print("gas cost in ETH: ", g)
# print("gas cost limit: ", (t*d - p*c)/(5*(gamma*t - 1) + t))
# print("collateralization in %: ", 100*collateralization)
# print("min repay threshold: ", self.min_ratio + 10)
# Gas cost must be lower than 20% of repay amount OR we must be below the min repay ratio
if 100*collateralization < self.min_ratio + 10:
isEmergencyRepay = True
else:
isEmergencyRepay = False
if p*g < (t*d - p*c)/(5*(gamma*t - 1) - t) or isEmergencyRepay:
# In case of an emergency repay, this might exceed the previous 20%. In this case, cap the charged amount to 20%.
if p*g > (t*d - p*c)/(5*(gamma*t - 1) - t):
g = (1/p)*(t*d - p*c)/(5*(gamma*t - 1) - t)
                # Calculate the collateral decrease (> 0) required to reach the target collateralization ratio
deltaCollateral = (t*d + t*p*g - p*c)/(p*(gamma*t-1))
# print("collateral change: ", deltaCollateral)
# print("gas_cost/collateral_change: ", g/deltaCollateral)
deltaDebt = gamma*p*deltaCollateral - p*g
if self.debt < self.min_automation_debt :
self.isAutomated = False
# Update position
self.collateral -= deltaCollateral
self.debt -= deltaDebt
assert self.collateral > 0
assert self.debt > 0
# Return True if repay took place
return True
else:
return False
else:
return False
| 43.522727 | 129 | 0.593211 | 1,195 | 9,575 | 4.680335 | 0.184937 | 0.024316 | 0.019667 | 0.027534 | 0.328983 | 0.302879 | 0.272126 | 0.264616 | 0.211336 | 0.188807 | 0 | 0.018687 | 0.334935 | 9,575 | 220 | 130 | 43.522727 | 0.859611 | 0.497963 | 0 | 0.428571 | 0 | 0 | 0.016893 | 0 | 0 | 0 | 0 | 0.004545 | 0.071429 | 1 | 0.107143 | false | 0 | 0.011905 | 0 | 0.22619 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 276b88f7bc15b02ea6b733a96f259241381fe73b | 5,683 | py | Python | clustertools/test/test_experiment.py | jm-begon/clustertools | 264198d0ffbd60b883b7b6a2af79341425c7729b | ["BSD-3-Clause"] | 7 | 2017-05-31T15:28:28.000Z | 2021-03-25T12:36:48.000Z | clustertools/test/test_experiment.py | jm-begon/clustertools | 264198d0ffbd60b883b7b6a2af79341425c7729b | ["BSD-3-Clause"] | 42 | 2017-06-09T07:35:50.000Z | 2019-08-29T15:23:29.000Z | clustertools/test/test_experiment.py | jm-begon/clustertools | 264198d0ffbd60b883b7b6a2af79341425c7729b | ["BSD-3-Clause"] | 3 | 2017-05-29T13:39:18.000Z | 2019-06-24T09:43:01.000Z |
# -*- coding: utf-8 -*-
from functools import partial
from nose.tools import assert_equal, assert_in, assert_less, assert_raises, \
with_setup, assert_true
from nose.tools import assert_false
from clustertools import ParameterSet, Result, Experiment
from clustertools.state import RunningState, CompletedState, AbortedState, \
CriticalState, PartialState, LaunchableState
from clustertools.storage import PickleStorage
from .util_test import purge, prep, __EXP_NAME__, IntrospectStorage, \
TestComputation, InterruptedComputation, pickle_prep, pickle_purge, \
with_setup_
__author__ = "Begon Jean-Michel <jm.begon@gmail.com>"
__copyright__ = "3-clause BSD License"
# ----------------------------------------------------------------------- Result
def test_result():
expected = {"m"+str(x): x for x in range(1, 5)}
result = Result("m1", m2=2, m3=6)
result.m1 = 1
result.m3 = 3
result["m4"] = 4
assert_equal(len(expected), len(result))
for name, value in expected.items():
assert_equal(result[name], value)
for name, value in result.items():
# redundant
assert_equal(expected[name], value)
dict(result)
repr(result)
# ------------------------------------------------------------------ Computation
@with_setup(prep, purge)
def test_correct_computation():
computation = TestComputation()
intro_storage = computation.storage
result1 = computation(x1=5, x2=2, x3=50)
result2 = intro_storage.load_result(computation.comp_name)
for result in result1, result2:
assert_equal(len(result), 2) # One real metric + repr
assert_equal(result["mult"], 2 * 5)
assert_equal(len(intro_storage.result_history), 1) # Only one computation
assert_equal(len(intro_storage.state_history), 1) # Only one computation
states = list(intro_storage.state_history.values())[0]
# If correct, state should have followed the sequence:
# Running (p=0), Running (p=1), Critical, Partial, Completed
assert_equal(len(states), 5)
assert_true(isinstance(states[0], RunningState))
assert_true(isinstance(states[1], RunningState))
assert_true(isinstance(states[2], CriticalState))
assert_true(isinstance(states[3], PartialState))
assert_true(isinstance(states[4], CompletedState))
assert_equal(states[0].progress, 0.)
assert_equal(states[1].progress, 1.)
@with_setup(prep, purge)
def test_error_computation():
computation = TestComputation()
intro_storage = computation.storage
computation = computation.lazyfy(x1=5, x2=None, x3=50)
assert_raises(TypeError, computation) # 5*None
assert_equal(len(intro_storage.result_history), 0) # Computation not saved
assert_equal(len(intro_storage.state_history), 1) # Only one computation
states = list(intro_storage.state_history.values())[0]
# If correct (i.e. error occurs), state should have evolved as:
# Running, Aborted
assert_equal(len(states), 2)
assert_true(isinstance(states[0], RunningState))
assert_true(isinstance(states[1], AbortedState))
@with_setup(prep, purge)
def test_interrupted_computation():
computation = InterruptedComputation()
intro_storage = computation.storage
assert_raises(KeyboardInterrupt, computation)
assert_equal(len(intro_storage.result_history[computation.comp_name]), 0)
state_history = intro_storage.state_history[computation.comp_name]
# Running -> Launchable
assert_equal(len(state_history), 2)
assert_true(isinstance(state_history[0], RunningState))
assert_true(isinstance(state_history[1], LaunchableState))
@with_setup(prep, purge)
def test_has_parameters():
computation = TestComputation()
computation.lazyfy(p1="1", p2=2)
assert_true(computation.has_parameters(p1="1", p2=2))
assert_true(computation.has_parameters(p1="1"))
assert_true(computation.has_parameters(p2=2))
assert_false(computation.has_parameters(p3=""))
assert_false(computation.has_parameters(p1="1", p3=""))
assert_false(computation.has_parameters(p1="1", p2=2, p3=""))
# ------------------------------------------------------------------- Experiment
@with_setup(prep, purge)
def test_experiment():
parameter_set = ParameterSet()
parameter_set.add_parameters(x1=range(3), x2=range(3))
experiment = Experiment(__EXP_NAME__, parameter_set, TestComputation,
IntrospectStorage)
assert_equal(len(list(experiment.yield_computations())), 9)
# start=3 : skip 0,1,2
assert_equal(len(list(experiment.yield_computations(start=3))), 6)
# capacity=6 : skip 6, 7, 8
assert_equal(len(list(experiment.yield_computations(capacity=6))), 6)
@with_setup_(partial(pickle_prep, exp_name="{}_1".format(__EXP_NAME__)),
partial(pickle_purge, exp_name="{}_1".format(__EXP_NAME__)))
def do_auto_refresh(auto_refresh):
parameter_set = ParameterSet()
parameter_set.add_parameters(x1=range(3), x2=range(3))
experiment = Experiment("{}_1".format(__EXP_NAME__), parameter_set,
TestComputation)
# There should be 9 computations
assert_equal(len(experiment), 9)
count = 0
for i, _ in enumerate(experiment.yield_computations(auto_refresh=auto_refresh)):
if i == 0:
state = CompletedState(
Experiment.name_computation(experiment.exp_name, 6)
)
PickleStorage(experiment.exp_name).update_state(state)
count += 1
print("Auto refresh?", auto_refresh, "--", count)
assert_equal(count, 8 if auto_refresh else 9)
def test_auto_refresh():
do_auto_refresh(True)
do_auto_refresh(False)
| 36.664516 | 84 | 0.688193 | 689 | 5,683 | 5.429608 | 0.216255 | 0.061748 | 0.052392 | 0.04865 | 0.418872 | 0.336006 | 0.285485 | 0.181502 | 0.160118 | 0.160118 | 0 | 0.024005 | 0.16435 | 5,683 | 154 | 85 | 36.902597 | 0.76374 | 0.118599 | 0 | 0.194444 | 0 | 0 | 0.019856 | 0 | 0 | 0 | 0 | 0 | 0.37037 | 1 | 0.074074 | false | 0 | 0.064815 | 0 | 0.138889 | 0.009259 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 276c363a6f57e3c85d7f037af185d706c5abdf10 | 1,657 | py | Python | ed.py | zzx288/words | 477516211cc43701ec4592a686f0bc06cbb9c141 | ["MIT"] | null | null | null | ed.py | zzx288/words | 477516211cc43701ec4592a686f0bc06cbb9c141 | ["MIT"] | null | null | null | ed.py | zzx288/words | 477516211cc43701ec4592a686f0bc06cbb9c141 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import codecs
def find_words(words):
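    # Count every two-character sequence (bigram) and its frequency, skipping punctuation and ASCII alphanumerics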
split_words={}
count_all = 0
unused_words = u" \t\r\n,。:;、“‘”【】『』|=+-——()*&……%¥#@!~·《》?/?<>,.;:'\"[]{}_)(^$!`"
unused_english = u"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
for i in unused_words:
count_all += words.count(i)
for i in unused_english:
count_all += words.count(i)
lens = len(words)
len_deal = lens-count_all
    # Stop at lens-1 because the loop looks ahead to words[i+1]
    for i in range(0, lens - 1):
if(words[i] in unused_words or words[i] in unused_english):
continue
if(words[i+1] in unused_words or words[i+1] in unused_english):
continue
if words[i:i+2] in split_words:
split_words[words[i:i+2]][0]+=1
split_words[words[i:i+2]][1]=float(split_words[words[i:i+2]][0])/float(len_deal)
else:
split_words[words[i:i+2]]=[1,1/float(len_deal)]
return split_words
def read_file(a):
words = ""
i=0
pathdir = os.listdir(a)
for alldir in pathdir:
        test = codecs.open(os.path.join(a, alldir), 'r', encoding='utf-8')
words += test.read()
test.close()
i += 1
print(i)
return words
if __name__ == '__main__':
words = read_file('F:\\cs')
'''
test = codecs.open('F:\\760.xml', 'r',encoding='utf-8')
words = test.read()
test.close()
'''
print ("splitting......")
split=find_words(words)
ci = codecs.open('F:\\result.txt','a',encoding = 'utf-8')
for key in split.keys():
ci.write('('+key[0]+','+key[1]+','+str(split[key][1])+')\r\n')
    ci.close()
print("ok")
| 29.589286 | 92 | 0.556427 | 241 | 1,657 | 3.73029 | 0.290456 | 0.066741 | 0.038932 | 0.044494 | 0.322581 | 0.278087 | 0.231368 | 0.077864 | 0.077864 | 0 | 0 | 0.029506 | 0.243211 | 1,657 | 55 | 93 | 30.127273 | 0.677831 | 0.025951 | 0 | 0.090909 | 0 | 0.068182 | 0.12 | 0.074667 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.045455 | 0 | 0.136364 | 0.068182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 276e00dcc9820a61c4d4ebff8a3b8b4d2a199a5f | 467 | py | Python | HackerRank/MinimumSwaps2.py | kokuraxc/play-ground | 48b5291f3cca117e0cd0a17bf9255ec4dc1a5cdd | ["MIT"] | null | null | null | HackerRank/MinimumSwaps2.py | kokuraxc/play-ground | 48b5291f3cca117e0cd0a17bf9255ec4dc1a5cdd | ["MIT"] | null | null | null | HackerRank/MinimumSwaps2.py | kokuraxc/play-ground | 48b5291f3cca117e0cd0a17bf9255ec4dc1a5cdd | ["MIT"] | null | null | null |
# https://www.hackerrank.com/challenges/minimum-swaps-2
# Complete the minimumSwaps function below.
def minimumSwaps(arr):
steps = 0
# for i, a in enumerate(arr):
for i in range(len(arr)):
while arr[i] != i+1:
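            # Each swap moves the value arr[i] directly to its home slot (index arr[i]-1), so every swap fixes at least one element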
#arr = [a if x == i+1 else x for x in arr]
#print(arr)
left = arr[i]
right = arr[left-1]
arr[i] = right
arr[left-1] = left
steps += 1
return steps
| 25.944444 | 55 | 0.51606 | 67 | 467 | 3.597015 | 0.492537 | 0.049793 | 0.074689 | 0.099585 | 0.141079 | 0.141079 | 0 | 0 | 0 | 0 | 0 | 0.023411 | 0.359743 | 467 | 17 | 56 | 27.470588 | 0.782609 | 0.372591 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 277311deb8c817997aeecabf48fe1ce321691cbf | 3,081 | py | Python | source/conf.py | Kinovea/kinovea-docs | a2c4c9561bd4f8cc663efcaaed017c9c018b6b20 | ["CC0-1.0"] | 4 | 2020-11-17T18:09:42.000Z | 2021-12-29T07:34:29.000Z | source/conf.py | Kinovea/kinovea-docs | a2c4c9561bd4f8cc663efcaaed017c9c018b6b20 | ["CC0-1.0"] | 4 | 2021-07-12T09:41:06.000Z | 2021-11-01T19:22:05.000Z | source/conf.py | Kinovea/kinovea-docs | a2c4c9561bd4f8cc663efcaaed017c9c018b6b20 | ["CC0-1.0"] | 1 | 2021-07-12T05:17:47.000Z | 2021-07-12T05:17:47.000Z |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import sphinx_rtd_theme
# -- Project information -----------------------------------------------------
project = 'Kinovea'
copyright = '2021, Kinovea documentation authors (CC0 1.0)'
author = 'Kinovea documentation authors'
# The full version, including alpha/beta/rc tags
release = '0.9.5'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx_rtd_theme",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
html_theme = "sphinx_rtd_theme"
html_static_path = ['_static']
html_css_files = ['css/kinovea.css']
html_logo = 'images/logo/kinovea.svg'
html_copy_source = False
html_show_sourcelink = False
html_show_sphinx = False
html_theme_options = {
'logo_only': True,
'display_version': False,
'style_external_links': True,
'style_nav_header_background': "#404040",
# Collapse navigation (False makes it tree-like)
#'collapse_navigation': False,
}
html_context = {
'display_github': False,
}
pdf_documents = [('index', u'kinoveadoc', u'Kinovea documentation', u'Kinovea community'),]
# -- Options for Epub output ----------------------------------------------
# EPUB Output
epub_theme = "sphinx_rtd_theme"
#epub_theme = 'epub'
# Bibliographic Dublin Core info.
epub_description = "Kinovea reference manual"
epub_publisher = "Kinovea"
epub_title = project
epub_author = author
epub_copyright = copyright
# The cover page information. This is a tuple containing the filenames of
# the cover image and the html template.
#epub_cover = ('_static/cover.png', 'epub-cover.html')
epub_css_files = ['css/kinovea.css']
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in the file toc.ncx.
epub_tocdepth = 2
# Control whether to display URL addresses.
epub_show_urls = 'no'
| 30.205882 | 91 | 0.683544 | 402 | 3,081 | 5.09204 | 0.457711 | 0.00977 | 0.027357 | 0.018564 | 0.020518 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006823 | 0.143784 | 3,081 | 101 | 92 | 30.504951 | 0.769143 | 0.605972 | 0 | 0 | 0 | 0 | 0.344123 | 0.042589 | 0 | 0 | 0 | 0.009901 | 0 | 1 | 0 | false | 0 | 0.025641 | 0 | 0.025641 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 277599814e255a220a50444d8861eabc112abdd1 | 4,931 | py | Python | financial_fundamentals/xbrl.py | Mahesh-Salunke/financial_fundamentals | 421e7550e2c4b2cc9cc0ee8cb15ce2ad0d89234f | ["Apache-2.0"] | 122 | 2015-01-28T17:57:08.000Z | 2022-02-12T12:24:55.000Z | financial_fundamentals/xbrl.py | Mahesh-Salunke/financial_fundamentals | 421e7550e2c4b2cc9cc0ee8cb15ce2ad0d89234f | ["Apache-2.0"] | 1 | 2016-11-07T17:02:02.000Z | 2016-11-09T20:51:50.000Z | financial_fundamentals/xbrl.py | Mahesh-Salunke/financial_fundamentals | 421e7550e2c4b2cc9cc0ee8cb15ce2ad0d89234f | ["Apache-2.0"] | 49 | 2015-01-01T03:12:27.000Z | 2021-07-06T10:19:31.000Z |
'''
Created on Oct 8, 2013
@author: akittredge
'''
import dateutil.parser
import xmltodict
from financial_fundamentals.exceptions import ValueNotInFilingDocument
class XBRLMetricParams(object):
'''Bundle the parameters sufficient to extract a metric from an xbrl document.
'''
def __init__(self, possible_tags, context_type):
self.possible_tags = possible_tags
self.context_type = context_type
class DurationContext(object):
'''Encapsulate a time span XBRL context.'''
characteristic_key = 'startDate'
def __init__(self, start_date, end_date):
self.start_date = start_date
self.end_date = end_date
@property
def sort_key(self):
return self.start_date
def __repr__(self):
return '{}(start_date={}, end_date={})'.format(self.__class__,
self.start_date,
self.end_date)
@classmethod
def from_period(cls, period):
start_node = XBRLDocument.find_node(xml_dict=period, key='startDate')
start_date = dateutil.parser.parse(start_node).date()
end_node = XBRLDocument.find_node(xml_dict=period, key='endDate')
end_date = dateutil.parser.parse(end_node).date()
return cls(start_date, end_date)
class InstantContext(object):
characteristic_key = 'instant'
def __init__(self, instant):
self.instant = instant
@property
def sort_key(self):
return self.instant
def __repr__(self):
return '{}(instant={}'.format(self.__class__, self.instant)
@classmethod
def from_period(cls, period):
node = XBRLDocument.find_node(xml_dict=period, key='instant')
instant = dateutil.parser.parse(node).date()
return cls(instant=instant)
class XBRLDocument(object):
    '''Wrapper for XBRL documents; lazily downloads the XBRL text.'''
def __init__(self, xbrl_url, gets_xbrl):
self._xbrl_url = xbrl_url
self._xbrl_dict_ = None
self._contexts = {}
self._get_xbrl = gets_xbrl
@property
def _xbrl_dict(self):
if not self._xbrl_dict_:
doc_text = self._get_xbrl(self._xbrl_url)
xml_dict = xmltodict.parse(doc_text)
self._xbrl_dict_ = self.find_node(xml_dict, 'xbrl')
return self._xbrl_dict_
def contexts(self, context_type):
contexts = self._contexts.get(context_type, {})
if not contexts:
context_nodes = self.find_node(xml_dict=self._xbrl_dict, key='context')
for context in context_nodes:
try:
period = self.find_node(xml_dict=context, key='period')
self.find_node(xml_dict=period, key=context_type.characteristic_key)
except KeyError:
continue
else:
contexts[context['@id']] = context_type.from_period(period)
self._contexts[context_type] = contexts
return contexts
    def _latest_metric_value(self, possible_tags, contexts):
        '''possible_tags is a list of candidate XBRL tags, tried in order.
        '''
for tag in possible_tags:
try:
metric_nodes = self._xbrl_dict[tag]
except KeyError:
continue
else:
if type(metric_nodes) != list:
metric_nodes = [metric_nodes]
break
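        # for/else: the else branch runs only when the loop ends without a break, i.e. none of the tags matched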
else:
raise MetricNodeNotFound('Did not find any of {} in the document @ {}'\
.format(possible_tags, self._xbrl_url))
def key_func(value):
context_ref_id = value['@contextRef']
context = contexts[context_ref_id]
return context.sort_key
metric_node = sorted(metric_nodes,
key=key_func,
reverse=True)[0]
return float(metric_node['#text'])
def latest_metric_value(self, metric_params):
contexts = self.contexts(context_type=metric_params.context_type)
return self._latest_metric_value(possible_tags=metric_params.possible_tags,
contexts=contexts)
@staticmethod
def find_node(xml_dict, key):
'''OMG I hate XML.'''
try:
return xml_dict[key]
except KeyError:
return xml_dict['xbrli:{}'.format(key)]
@classmethod
def gets_XBRL_from_edgar(cls, xbrl_url):
from financial_fundamentals import edgar
return cls(xbrl_url=xbrl_url, gets_xbrl=edgar.get)
@classmethod
def gets_XBRL_locally(cls, file_path):
        return cls(xbrl_url=file_path,
                   gets_xbrl=lambda path: open(path).read())
class MetricNodeNotFound(ValueNotInFilingDocument):
pass
| 34.482517 | 88 | 0.602515 | 549 | 4,931 | 5.087432 | 0.216758 | 0.027569 | 0.031507 | 0.042965 | 0.155747 | 0.110634 | 0.065879 | 0.042965 | 0 | 0 | 0 | 0.001761 | 0.309065 | 4,931 | 143 | 89 | 34.482517 | 0.818022 | 0.061651 | 0 | 0.222222 | 0 | 0 | 0.036779 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0.009259 | 0.037037 | 0.046296 | 0.407407 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 277624309012d3684e6506d164e645ba545c1547 | 6,235 | py | Python | geospacelab/datahub/sources/wdc/dst/downloader.py | JouleCai/GeoSpaceLab | 6cc498d3c32501e946931de596a840c73e83edb3 | ["BSD-3-Clause"] | 19 | 2021-08-07T08:49:22.000Z | 2022-03-02T18:26:30.000Z | geospacelab/datahub/sources/wdc/dst/downloader.py | JouleCai/GeoSpaceLab | 6cc498d3c32501e946931de596a840c73e83edb3 | ["BSD-3-Clause"] | 4 | 2021-11-09T05:53:42.000Z | 2022-03-25T11:49:37.000Z | geospacelab/datahub/sources/wdc/dst/downloader.py | JouleCai/GeoSpaceLab | 6cc498d3c32501e946931de596a840c73e83edb3 | ["BSD-3-Clause"] | 3 | 2021-11-07T11:41:20.000Z | 2022-02-14T13:43:11.000Z |
# Licensed under the BSD 3-Clause License
# Copyright (C) 2021 GeospaceLab (geospacelab)
# Author: Lei Cai, Space Physics and Astronomy, University of Oulu
__author__ = "Lei Cai"
__copyright__ = "Copyright 2021, GeospaceLab"
__license__ = "BSD-3-Clause License"
__email__ = "lei.cai@oulu.fi"
__docformat__ = "reStructureText"
import datetime
import numpy as np
import requests
import bs4
import pathlib
import re
import netCDF4
import cftime
import geospacelab.toolbox.utilities.pydatetime as dttool
import geospacelab.toolbox.utilities.pylogging as mylog
import geospacelab.datahub.sources.wdc as wdc
from geospacelab import preferences as prf
class Downloader(object):
def __init__(self, dt_fr, dt_to, data_file_root_dir=None, user_email=wdc.default_user_email):
self.dt_fr = dt_fr
self.dt_to = dt_to
self.user_email = user_email
self.done = False
if data_file_root_dir is None:
self.data_file_root_dir = prf.datahub_data_root_dir / 'WDC' / 'Dst'
else:
self.data_file_root_dir = pathlib.Path(data_file_root_dir)
self.url_base = "http://wdc.kugi.kyoto-u.ac.jp"
self.download()
def download(self):
diff_months = dttool.get_diff_months(self.dt_fr, self.dt_to)
dt0 = datetime.datetime(self.dt_fr.year, self.dt_fr.month, 1)
r = requests.get(self.url_base + '/dstae/')
soup = bs4.BeautifulSoup(r.text, 'html.parser')
form_tag = soup.find_all('form')
r_method = form_tag[0].attrs['method']
r_action_url = self.url_base + form_tag[0].attrs['action']
for i in range(diff_months + 1):
dt_fr = dttool.get_next_n_months(dt0, i)
dt_to = dttool.get_next_n_months(dt0, i + 1) - datetime.timedelta(seconds=1)
delta_seconds = (dt_to - dt_fr).total_seconds()
file_name = 'WDC_Dst_' + dt_fr.strftime('%Y%m') + '.nc'
file_path = self.data_file_root_dir / '{:4d}'.format(dt_fr.year) / file_name
if file_path.is_file():
mylog.simpleinfo.info(
"The file {} exists in the directory {}.".format(file_path.name, file_path.parent.resolve()))
self.done = True
continue
else:
file_path.parent.resolve().mkdir(parents=True, exist_ok=True)
form_dst = {
'SCent': str(int(dt_fr.year/100)),
'STens': str(int((dt_fr.year - np.floor(dt_fr.year/100)*100) / 10)),
'SYear': str(int((dt_fr.year - np.floor(dt_fr.year/10)*10))),
'SMonth': '{:02d}'.format(dt_fr.month),
'ECent': str(int(dt_to.year/100)),
'ETens': str(int((dt_to.year - np.floor(dt_to.year/100)*100) / 10)),
'EYear': str(int((dt_to.year - np.floor(dt_to.year/10)*10))),
'EMonth': '{:02d}'.format(dt_to.month),
"Image Type": "GIF",
"COLOR": "COLOR",
"AE Sensitivity": "100",
"Dst Sensitivity": "20",
"Output": 'DST',
"Out format": "IAGA2002",
"Email": self.user_email,
}
if r_method.lower() == 'get':
mylog.StreamLogger.info("Requesting data from WDC ...")
r_file = requests.get(r_action_url, params=form_dst)
if "No data for your request" in r_file.text or "DATE TIME DOY" not in r_file.text:
mylog.StreamLogger.warning("No data for your request!")
return
with open(file_path.with_suffix('.dat'), 'w') as f:
f.write(r_file.text)
mylog.StreamLogger.info("Preparing to save the data in the netcdf format ...")
self.save_to_netcdf(r_file.text, file_path)
def save_to_netcdf(self, r_text, file_path):
results = re.findall(
r'^(\d+-\d+-\d+ \d+:\d+:\d+.\d+)\s*(\d+)\s*([+\-\d.]+)',
r_text,
re.M
)
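        # Each regex match is (timestamp, day-of-year, Dst value); zip(*...) regroups them into per-column tuples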
results = list(zip(*results))
# time_array = np.array([(datetime.datetime.strptime(dtstr+'000', "%Y-%m-%d %H:%M:%S.%f")
# - datetime.datetime(1970, 1, 1)) / datetime.timedelta(seconds=1)
# for dtstr in results[0]])
dts = [datetime.datetime.strptime(dtstr+'000', "%Y-%m-%d %H:%M:%S.%f") for dtstr in results[0]]
time_array = np.array(cftime.date2num(dts, units='seconds since 1970-01-01 00:00:00.0'))
print('From {} to {}.'.format(
datetime.datetime.utcfromtimestamp(time_array[0]),
datetime.datetime.utcfromtimestamp(time_array[-1]))
)
        dst_array = np.array(results[2], dtype=np.float32)
num_rows = len(results[0])
fnc = netCDF4.Dataset(file_path, 'w')
fnc.createDimension('UNIX_TIME', num_rows)
fnc.title = "WDC DST index"
time = fnc.createVariable('UNIX_TIME', np.float64, ('UNIX_TIME',))
time.units = 'seconds since 1970-01-01 00:00:00.0'
dst = fnc.createVariable('Dst', np.float32, ('UNIX_TIME',))
time[::] = time_array[::]
dst[::] = dst_array[::]
# for i, res in enumerate(results):
# dt = datetime.datetime.strptime(res[0]+'000', "%Y-%m-%d %H:%M:%S.%f")
# time[i] = (dt - datetime.datetime(1970, 1, 1)) / datetime.timedelta(seconds=1)
# asy_d[i] = float(res[2])
# asy_h[i] = float(res[3])
# sym_d[i] = float(res[4])
# sym_h[i] = float(res[5])
fnc.close()
mylog.StreamLogger.info("The requested data has been downloaded and saved in the file {}.".format(file_path))
self.done = True
if __name__ == "__main__":
dt_fr1 = datetime.datetime(2000, 1, 14)
dt_to1 = datetime.datetime(2000, 6, 16)
Downloader(dt_fr1, dt_to1, user_email="lei.cai@oulu.fi")
# form_dst = {'SCent': 20, 'STens': 1, 'SYear': 1, 'SMonth': '01', 'ECent': 20, 'ETens': 1, 'EYear': 1, 'EMonth': 12, "Image Type": "GIF", "COLOR": "COLOR", "AE Sensitivity": "100", "Dst Sensitivity": "20", "Output": 'DST', "Out format": "IAGA2002", "Email": "lei.cai@oulu.fi"}
| 38.018293 | 277 | 0.577225 | 845 | 6,235 | 4.062722 | 0.276923 | 0.018643 | 0.016312 | 0.026216 | 0.284882 | 0.170987 | 0.168948 | 0.154967 | 0.152054 | 0.124672 | 0 | 0.038935 | 0.27089 | 6,235 | 164 | 277 | 38.018293 | 0.716234 | 0.152847 | 0 | 0.037037 | 0 | 0.009259 | 0.155087 | 0.007213 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.111111 | 0 | 0.157407 | 0.009259 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 277b82514a7515e164e8b41c060b3213bef8d2d0 | 473 | py | Python | multi_domain/utils.py | newbieyd/multi-domain_NER | 78443f79cebf7c2fe1058bc6ba2dc793d0907574 | ["Apache-2.0"] | 3 | 2020-10-26T02:23:57.000Z | 2021-01-28T09:29:35.000Z | multi_domain/utils.py | newbieyd/multi-domain_NER | 78443f79cebf7c2fe1058bc6ba2dc793d0907574 | ["Apache-2.0"] | null | null | null | multi_domain/utils.py | newbieyd/multi-domain_NER | 78443f79cebf7c2fe1058bc6ba2dc793d0907574 | ["Apache-2.0"] | 1 | 2021-01-28T09:29:39.000Z | 2021-01-28T09:29:39.000Z |
import random
import torch
import numpy as np
# Set the random seed; once the seed is fixed, the random numbers generated afterwards are all deterministic
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# Compute the evaluation metrics: precision, recall and F1 score
def calculate(data):
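    # data = (predicted count, gold count, correct count); -1 marks a metric that is undefined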
p = -1
r = -1
f1 = -1
if data[0] > 0:
p = data[2] / data[0]
if data[1] > 0:
r = data[2] / data[1]
if p != -1 and r != -1 and p + r != 0:
f1 = 2 * p * r / (p + r)
return p, r, f1
| 19.708333 | 43 | 0.5074 | 77 | 473 | 3.090909 | 0.363636 | 0.134454 | 0.117647 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.061688 | 0.348837 | 473 | 23 | 44 | 20.565217 | 0.711039 | 0.105708 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.166667 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 277dc18fc44eab6c4ec0aeec52c4030e30b5d869 | 967 | py | Python | pysimplegui/DemoPrograms/Demo_Design_Pattern_Multiple_Windows2.py | konsan1101/py-etc | bcca13119b0d2453866988404fd1c4976f55d4d5 | ["MIT"] | null | null | null | pysimplegui/DemoPrograms/Demo_Design_Pattern_Multiple_Windows2.py | konsan1101/py-etc | bcca13119b0d2453866988404fd1c4976f55d4d5 | ["MIT"] | 2 | 2020-06-06T00:30:56.000Z | 2021-06-10T22:30:37.000Z | pysimplegui/DemoPrograms/Demo_Design_Pattern_Multiple_Windows2.py | konsan1101/py-etc | bcca13119b0d2453866988404fd1c4976f55d4d5 | ["MIT"] | null | null | null |
import PySimpleGUI as sg
"""
PySimpleGUI The Complete Course
Lesson 7
Multiple Independent Windows
"""
# Design pattern 2 - First window remains active
layout = [[sg.Text('Window 1')],
[sg.Input()],
[sg.Text('', size=(20,1), key='-OUTPUT-')],
[sg.Button('Launch 2'), sg.Button('Exit')]]
window1 = sg.Window('Window 1', layout)
window2_active = False
while True:
event1, values1 = window1.read(timeout=100)
    # Exit before touching values1; it is None once the window has been closed
    if event1 is None or event1 == 'Exit':
        break
    window1['-OUTPUT-'].update(values1[0])
if not window2_active and event1 == 'Launch 2':
window2_active = True
layout2 = [[sg.Text('Window 2')],
[sg.Button('Exit')]]
window2 = sg.Window('Window 2', layout2)
if window2_active:
ev2, vals2 = window2.read(timeout=100)
if ev2 is None or ev2 == 'Exit':
window2_active = False
window2.close()
window1.close()
| 24.794872 | 53 | 0.584281 | 119 | 967 | 4.705882 | 0.445378 | 0.116071 | 0.042857 | 0.046429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.059659 | 0.271975 | 967 | 38 | 54 | 25.447368 | 0.735795 | 0.04757 | 0 | 0.086957 | 0 | 0 | 0.096386 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.043478 | 0 | 0.043478 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 27824e9b0a8bb25e5664e1e7337a726628d28d71 | 2,000 | py | Python | src/d007/index.py | Yangfan2016/learn-python | 84a375cda9d51349ae0a0faf1dc6444ac83ed948 | ["MIT"] | null | null | null | src/d007/index.py | Yangfan2016/learn-python | 84a375cda9d51349ae0a0faf1dc6444ac83ed948 | ["MIT"] | null | null | null | src/d007/index.py | Yangfan2016/learn-python | 84a375cda9d51349ae0a0faf1dc6444ac83ed948 | ["MIT"] | null | null | null |
# Exercise 1: display scrolling marquee text on the screen
from random import randint
import os
import time
def marquee():
content = "我很开心。。。"
while True:
os.system("clear")
print(content)
time.sleep(.2)
content = content[1:]+content[0]
# marquee()
# Exercise 2: a function that generates a verification code of a given length, made up of letters (upper and lower case) and digits
def generate_code(l=4):
    all = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
res = ''
for _ in range(l):
index = randint(0, len(all)-1)
res += all[index]
return res
# print(generate_code())
# print(generate_code(6))
# Exercise 3: a function that returns the extension (suffix) of a given filename
def get_suffix(filename):
pos = filename.rfind(".")
if pos > 0:
return filename[pos+1:]
return ""
# print(get_suffix("a.doc"))
# print(get_suffix("a.tmp.txt"))
# print(get_suffix("abac"))
# Exercise 4: a function that returns the largest and second-largest elements of a list
def max2(arr):
    # Order the first two elements so that m1 >= m2 from the start
    if arr[0] > arr[1]:
        m1, m2 = arr[0], arr[1]
    else:
        m1, m2 = arr[1], arr[0]
    for x in arr[2:]:
        if x > m1:
            m2 = m1
            m1 = x
        elif x > m2:
            m2 = x
    return m1, m2
# print(max2([1,3,5,7]))
# Exercise 5: compute which day of the year a given year-month-day date falls on
def is_leap_year(y):
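    # A leap year is divisible by 4 but not by 100, or divisible by 400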
return y % 4 == 0 and y % 100 != 0 or y % 400 == 0
def which_day(y, m, d):
map = {
1: 31,
3: 31,
5: 31,
7: 31,
8: 31,
10: 31,
12: 31,
4: 30,
6: 30,
9: 30,
11: 30,
2: 29 if is_leap_year(y) else 28,
}
day = d
for i in range(1, m):
day += map[i]
return day
# print(which_day(2019, 5, 25))
# Exercise 6: print Pascal's (Yang Hui's) triangle
def pascal_triangle(row):
if row < 2:
return print("1")
# if row<3:
# return print("1\n1-1")
arr = [1]
brr = arr
for i in range(1, row+1):
arr = [1]*i
for j in range(1, len(arr)-1):
arr[j] = brr[j-1]+brr[j]
print('-'.join(str(i) for i in arr))
brr = arr
pascal_triangle(5)
| 15.151515 | 74 | 0.515 | 290 | 2,000 | 3.496552 | 0.341379 | 0.034517 | 0.023669 | 0.032544 | 0.023669 | 0 | 0 | 0 | 0 | 0 | 0 | 0.093939 | 0.34 | 2,000 | 131 | 75 | 15.267176 | 0.674242 | 0.184 | 0 | 0.029412 | 0 | 0 | 0.047708 | 0.038414 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102941 | false | 0 | 0.044118 | 0.014706 | 0.264706 | 0.044118 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 2786bc277b70a50e0d89afd7f11a15c26b25b2fa | 1,825 | py | Python | tests/test_platform.py | rennerocha/bottery | a082cfa1c21f9aa32ea1526ea3004b581f9e0cd4 | ["MIT"] | null | null | null | tests/test_platform.py | rennerocha/bottery | a082cfa1c21f9aa32ea1526ea3004b581f9e0cd4 | ["MIT"] | null | null | null | tests/test_platform.py | rennerocha/bottery | a082cfa1c21f9aa32ea1526ea3004b581f9e0cd4 | ["MIT"] | null | null | null |
import inspect
import pytest
from bottery.platform import BaseEngine
def test_baseengine_platform_name_not_implemented():
"""Check if attributes from the public API raise NotImplementedError"""
engine = BaseEngine()
with pytest.raises(NotImplementedError):
getattr(engine, 'platform')
@pytest.mark.asyncio
@pytest.mark.parametrize('method_name', ['build_message', 'configure'])
async def test_baseengine_not_implemented_calls(method_name):
"""Check if method calls from public API raise NotImplementedError"""
engine = BaseEngine()
with pytest.raises(NotImplementedError):
method = getattr(engine, method_name)
if inspect.iscoroutinefunction(method):
await method()
else:
method()
def sync_view(message):
return 'pong'
async def async_view(message):
return 'pong'
@pytest.mark.asyncio
@pytest.mark.parametrize('view', [sync_view, async_view], ids=['sync', 'async']) # noqa
async def test_get_response_from_views(view):
"""
Test if get_response can call an async/sync view and get its response.
"""
engine = BaseEngine()
response = await engine.get_response(view, 'ping')
assert response == 'pong'
def test_baseengine_handling_message():
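    # A stub handler whose check() always matches, so discovery_view must return the registered view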
fake_handler = type('Handler', (object,), {'check': lambda msg: True})
view = True
engine = BaseEngine()
engine.registered_handlers = [(fake_handler, view)]
returned_view = engine.discovery_view('new message')
assert returned_view
def test_baseengine_handler_not_found():
fake_handler = type('Handler', (object,), {'check': lambda msg: False})
view = True
engine = BaseEngine()
engine.registered_handlers = [(fake_handler, view)]
returned_view = engine.discovery_view('new message')
assert not returned_view
| 27.651515 | 88 | 0.706301 | 214 | 1,825 | 5.827103 | 0.313084 | 0.028067 | 0.054531 | 0.052927 | 0.439455 | 0.439455 | 0.378508 | 0.378508 | 0.311147 | 0.311147 | 0 | 0 | 0.18411 | 1,825 | 65 | 89 | 28.076923 | 0.837475 | 0.038904 | 0 | 0.414634 | 0 | 0 | 0.072819 | 0 | 0 | 0 | 0 | 0 | 0.073171 | 1 | 0.097561 | false | 0 | 0.073171 | 0.02439 | 0.219512 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
| 27871567eec68506ebdf03b82d43a00ac9173647 | 26,071 | py | Python | sarkas/potentials/core.py | lucianogsilvestri/sarkas | f4ab00014d09976561fbd4349b9d0610e47a61e1 | ["MIT"] | null | null | null | sarkas/potentials/core.py | lucianogsilvestri/sarkas | f4ab00014d09976561fbd4349b9d0610e47a61e1 | ["MIT"] | null | null | null | sarkas/potentials/core.py | lucianogsilvestri/sarkas | f4ab00014d09976561fbd4349b9d0610e47a61e1 | ["MIT"] | null | null | null |
"""
Module handling the potential class.
"""
from copy import deepcopy
from fmm3dpy import hfmm3d, lfmm3d
from numpy import array, ndarray, pi, sqrt, tanh
from warnings import warn
from ..utilities.exceptions import AlgorithmWarning
from ..utilities.fdints import fdm1h, invfd1h
from .force_pm import force_optimized_green_function as gf_opt
from .force_pm import update as pm_update
from .force_pp import update as pp_update
from .force_pp import update_0D as pp_update_0D
class Potential:
r"""
Parameters specific to potential choice.
Attributes
----------
a_rs : float
Short-range cutoff to deal with divergence of the potential for r -> 0.
box_lengths : array
Pointer to :attr:`sarkas.core.Parameters.box_lengths`.
box_volume : float
Pointer to :attr:`sarkas.core.Parameters.box_volume`.
force_error : float
Force error due to the choice of the algorithm.
fourpie0 : float
Coulomb constant :math:`4 \pi \epsilon_0`.
kappa : float
Inverse screening length.
linked_list_on : bool
Flag for choosing the Linked cell list algorithm.
matrix : numpy.ndarray
Matrix of potential's parameters.
measure : bool
Flag for calculating the histogram for the radial distribution function.
It is set to `False` during equilibration phase and changed to `True` during production phase.
method : str
Algorithm method. Choices = `["PP", "PPPM", "FMM", "Brute"]`. \n
`"PP"` = Linked Cell List (default).
`"PPPM"` = Particle-Particle Particle-Mesh.
`"FMM"` = Fast Multipole Method.
`"Brute"` = corresponds to calculating the distance between all pair of particles within a distance :math:`L/2`.
pbox_lengths : numpy.ndarray
Pointer to :attr:`sarkas.core.Parameters.pbox_lengths`
pbox_volume : float
Pointer to :attr:`sarkas.core.Parameters.pbox_lengths`
pppm_on : bool
Flag for turning on the PPPM algorithm.
QFactor : float
Sum of the squared of the charges.
rc : float
Cutoff radius for the Linked Cell List algorithm.
screening_length_type : str
Choice of ways to calculate the screening length. \n
        Choices = `[thomas-fermi, tf, debye, debye-huckel, dh, moliere, custom, unscreened]`. \n
Default = thomas-fermi
screening_length : float
Value of the screening length.
total_net_charge : float
Sum of all the charges.
type : str
Type of potential. \n
Choices = [`"coulomb"`, `"egs"`, `"lennardjones"`, `"moliere"`, `"qsp"`].
"""
a_rs: float = 0.0
box_lengths: ndarray = None
box_volume: float = 0.0
force_error: float = 0.0
fourpie0: float = 0.0
kappa: float = None
linked_list_on: bool = True
matrix: ndarray = None
measure: bool = False
method: str = "pp"
pbox_lengths: ndarray = None
pbox_volume: float = 0.0
pppm_on: bool = False
pppm_aliases: ndarray = array([3, 3, 3], dtype=int)
pppm_alpha_ewald: float = 0.0
pppm_cao: ndarray = array([3, 3, 3], dtype=int)
pppm_mesh: ndarray = array([8, 8, 8], dtype=int)
pppm_h_array: ndarray = array([1.0, 1.0, 1.0], dtype=float)
pppm_pm_err: float = 0.0
pppm_pp_err: float = 0.0
QFactor: float = 0.0
rc: float = None
num_species: ndarray = None
screening_length_type: str = "thomas-fermi"
screening_length: float = None
species_charges: ndarray = None
species_masses: ndarray = None
total_net_charge: float = 0.0
total_num_density: float = 0.0
total_num_ptcls: float = 0.0
type: str = "yukawa"
def __copy__(self):
"""
Make a shallow copy of the object using copy by creating a new instance of the object and copying its __dict__.
"""
# Create a new object
_copy = type(self)()
# copy the dictionary
_copy.from_dict(input_dict=self.__dict__)
return _copy
def __deepcopy__(self, memodict={}):
"""
Make a deepcopy of the object.
Parameters
----------
memodict: dict
Dictionary of id's to copies
Returns
-------
_copy: :class:`sarkas.potentials.core.Potential`
A new Potential class.
"""
        id_self = id(self)  # memoization avoids unnecessary recursion
_copy = memodict.get(id_self)
if _copy is None:
_copy = type(self)()
# Make a deepcopy of the mutable arrays using numpy copy function
for k, v in self.__dict__.items():
_copy.__dict__[k] = deepcopy(v, memodict)
return _copy
def __repr__(self):
sortedDict = dict(sorted(self.__dict__.items(), key=lambda x: x[0].lower()))
disp = "Potential( \n"
for key, value in sortedDict.items():
disp += "\t{} : {}\n".format(key, value)
disp += ")"
return disp
@staticmethod
def calc_electron_properties(params):
"""Calculate electronic parameters.
See Electron Properties webpage in documentation website.
Parameters
----------
params : :class:`sarkas.core.Parameters`
Simulation's parameters.
"""
warn(
"Deprecated feature. It will be removed in the v2.0.0 release. \n"
"Use parameters.calc_electron_properties(species). You need to pass the species list.",
category=DeprecationWarning,
)
twopi = 2.0 * pi
spin_degeneracy = 2.0 # g in the notes
# Inverse temperature for convenience
beta_e = 1.0 / (params.kB * params.electron_temperature)
# Plasma frequency
params.electron_plasma_frequency = sqrt(
4.0 * pi * params.qe**2 * params.electron_number_density / (params.fourpie0 * params.me)
)
params.electron_debye_length = sqrt(
params.fourpie0 / (4.0 * pi * params.qe**2 * params.electron_number_density * beta_e)
)
# de Broglie wavelength
params.electron_deBroglie_wavelength = sqrt(twopi * params.hbar2 * beta_e / params.me)
lambda3 = params.electron_deBroglie_wavelength**3
# Landau length 4pi e^2 beta. The division by fourpie0 is needed for MKS units
params.electron_landau_length = 4.0 * pi * params.qe**2 * beta_e / params.fourpie0
# chemical potential of electron gas/(kB T), obtained by inverting the density equation.
params.electron_dimensionless_chemical_potential = invfd1h(
lambda3 * sqrt(pi) * params.electron_number_density / 4.0
)
# Thomas-Fermi length obtained from compressibility. See eq.(10) in Ref. [3]_
lambda_TF_sq = lambda3 / params.electron_landau_length
lambda_TF_sq /= spin_degeneracy / sqrt(pi) * fdm1h(params.electron_dimensionless_chemical_potential)
params.electron_TF_wavelength = sqrt(lambda_TF_sq)
# Electron WS radius
params.electron_WS_radius = (3.0 / (4.0 * pi * params.electron_number_density)) ** (1.0 / 3.0)
# Brueckner parameters
params.electron_rs = params.electron_WS_radius / params.a0
# Fermi wave number
params.electron_Fermi_wavenumber = (3.0 * pi**2 * params.electron_number_density) ** (1.0 / 3.0)
# Fermi energy
params.electron_Fermi_energy = params.hbar2 * params.electron_Fermi_wavenumber**2 / (2.0 * params.me)
# Other electron parameters
params.electron_degeneracy_parameter = params.kB * params.electron_temperature / params.electron_Fermi_energy
params.electron_relativistic_parameter = params.hbar * params.electron_Fermi_wavenumber / (params.me * params.c0)
# Eq. 1 in Murillo Phys Rev E 81 036403 (2010)
params.electron_coupling = params.qe**2 / (
params.fourpie0
* params.electron_Fermi_energy
* params.electron_WS_radius
* sqrt(1 + params.electron_degeneracy_parameter**2)
)
# Warm Dense Matter Parameter, Eq.3 in Murillo Phys Rev E 81 036403 (2010)
params.wdm_parameter = 2.0 / (params.electron_degeneracy_parameter + 1.0 / params.electron_degeneracy_parameter)
params.wdm_parameter *= 2.0 / (params.electron_coupling + 1.0 / params.electron_coupling)
if params.magnetized:
b_mag = sqrt((params.magnetic_field**2).sum()) # magnitude of B
if params.units == "cgs":
params.electron_cyclotron_frequency = params.qe * b_mag / params.c0 / params.me
else:
params.electron_cyclotron_frequency = params.qe * b_mag / params.me
params.electron_magnetic_energy = params.hbar * params.electron_cyclotron_frequency
tan_arg = 0.5 * params.hbar * params.electron_cyclotron_frequency * beta_e
# Perpendicular correction
params.horing_perp_correction = (params.electron_plasma_frequency / params.electron_cyclotron_frequency) ** 2
params.horing_perp_correction *= 1.0 - tan_arg / tanh(tan_arg)
params.horing_perp_correction += 1
# Parallel correction
params.horing_par_correction = 1 - (params.hbar * beta_e * params.electron_plasma_frequency) ** 2 / 12.0
# Quantum Anisotropy Parameter
params.horing_delta = params.horing_perp_correction - 1
params.horing_delta += (params.hbar * beta_e * params.electron_cyclotron_frequency) ** 2 / 12
params.horing_delta /= params.horing_par_correction
    def calc_screening_length(self, species):
        """Calculate the screening length according to ``screening_length_type``.

        Parameters
        ----------
        species : list
            List of species; the electron species is assumed to be the last element.
        """
        # Enforce consistency
self.screening_length_type = self.screening_length_type.lower()
if self.screening_length_type in ["thomas-fermi", "tf"]:
# Check electron properties
if hasattr(self, "electron_temperature_eV"):
self.electron_temperature = self.eV2K * self.electron_temperature_eV
else:
self.electron_temperature = species[-1].temperature
self.screening_length = species[-1].ThomasFermi_wavelength
elif self.screening_length_type in ["debye", "debye-huckel", "dh"]:
self.screening_length = species[-1].debye_length
elif self.screening_length_type in ["kappa", "from_kappa"]:
self.screening_length = self.a_ws / self.kappa
elif self.screening_length_type in ["custom"]:
if self.screening_length is None:
raise AttributeError("potential.screening_length not defined!")
        if not self.screening_length and not self.kappa:
            warn("Neither screening_length nor kappa is defined. Falling back to the Thomas-Fermi length.")
self.screening_length_type = "thomas-fermi"
self.screening_length = species[-1].ThomasFermi_wavelength
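        # Example (illustrative values): with screening_length_type = "kappa" and
        # kappa = 2.0, the screening length becomes a_ws / 2.0, i.e. half a
        # Wigner-Seitz radius.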
def copy_params(self, params):
"""
Copy necessary parameters.
Parameters
----------
params: :class:`sarkas.core.Parameters`
Simulation's parameters.
"""
self.measure = params.measure
self.units = params.units
self.dimensions = params.dimensions
# Copy needed parameters
self.box_lengths = params.box_lengths.copy()
self.pbox_lengths = params.pbox_lengths.copy()
self.box_volume = params.box_volume
self.pbox_volume = params.pbox_volume
# Needed physical constants
self.fourpie0 = params.fourpie0
self.a_ws = params.a_ws
self.kB = params.kB
self.eV2K = params.eV2K
self.eV2J = params.eV2J
self.hbar = params.hbar
self.QFactor = params.QFactor
self.T_desired = params.T_desired
self.coupling_constant = params.coupling_constant
self.total_num_ptcls = params.total_num_ptcls
self.total_net_charge = params.total_net_charge
self.total_num_density = params.total_num_density
self.num_species = params.num_species
self.species_charges = params.species_charges.copy()
self.species_masses = params.species_masses.copy()
if self.type == "lj":
self.species_lj_sigmas = params.species_lj_sigmas.copy()
def from_dict(self, input_dict: dict) -> None:
"""
Update attributes from input dictionary.
Parameters
----------
input_dict: dict
Dictionary to be copied.
"""
self.__dict__.update(input_dict)
def method_pretty_print(self):
"""Print algorithm information."""
print("\nALGORITHM: ", self.method)
# PP section
if self.method != "fmm":
print(f"rcut = {self.rc / self.a_ws:.4f} a_ws = {self.rc:.6e} ", end="")
print("[cm]" if self.units == "cgs" else "[m]")
pp_cells = (self.box_lengths / self.rc).astype(int)
print(f"No. of PP cells per dimension = {pp_cells}")
ptcls_in_loop = int(self.total_num_density * (self.dimensions * self.rc) ** self.dimensions)
print(f"No. of particles in PP loop = {ptcls_in_loop}")
dim_const = (self.dimensions + 1) / 3.0 * pi
pp_neighbors = int(self.total_num_density * dim_const * self.rc**self.dimensions)
print(f"No. of PP neighbors per particle = {pp_neighbors}")
if self.method == "pppm":
# PM Section
print(f"Charge assignment orders: {self.pppm_cao}")
print(f"FFT aliases: {self.pppm_aliases}")
print(f"Mesh: {self.pppm_mesh}")
print(
f"Ewald parameter alpha = {self.pppm_alpha_ewald * self.a_ws:.4f} / a_ws = {self.pppm_alpha_ewald:.6e} ",
end="",
)
print("[1/cm]" if self.units == "cgs" else "[1/m]")
h_a = self.pppm_h_array / self.a_ws
print(f"Mesh width = {h_a[0]:.4f}, {h_a[1]:.4f}, {h_a[2]:.4f} a_ws")
print(
f" = {self.pppm_h_array[0]:.4e}, {self.pppm_h_array[1]:.4e}, {self.pppm_h_array[2]:.4e} ",
end="",
)
print("[cm]" if self.units == "cgs" else "[m]")
halpha = self.pppm_h_array * self.pppm_alpha_ewald
inv_halpha = (1.0 / halpha).astype(int)
print(f"Mesh size * Ewald_parameter (h * alpha) = {halpha[0]:.4f}, {halpha[1]:.4f}, {halpha[2]:.4f} ")
print(f" ~ 1/{inv_halpha[0]}, 1/{inv_halpha[1]}, 1/{inv_halpha[2]}")
print(f"PP Force Error = {self.pppm_pp_err:.6e}")
print(f"PM Force Error = {self.pppm_pm_err:.6e}")
print(f"Tot Force Error = {self.force_error:.6e}")
def method_setup(self):
"""Setup algorithm's specific parameters."""
# Check for cutoff radius
if not self.method == "fmm":
self.linked_list_on = True # linked list on
mask = self.box_lengths > 0.0
min_length = self.box_lengths[mask].min()
if not self.rc:
warn(
f"\nThe cut-off radius is not defined. I will use the brute force method.",
category=AlgorithmWarning,
)
self.rc = min_length / 2.0
self.linked_list_on = False # linked list off
if self.rc > min_length / 2.0:
warn(
f"\nThe cut-off radius is larger than half of the minimum box length. "
f"I will use the brute force method.",
# f"L_min/ 2 = {0.5 * min_length:.4e} will be used as rc",
category=AlgorithmWarning,
)
self.rc = min_length / 2.0
self.linked_list_on = False # linked list off
if self.a_rs != 0.0:
warn("\nShort-range cut-off enabled. Use this feature with care!", category=AlgorithmWarning)
        # rename the deprecated p3m alias to pppm
        if self.method == "p3m":
            self.method = "pppm"
# Compute pppm parameters
if self.method == "pppm":
self.pppm_on = True
self.pppm_setup()
else:
self.linked_list_on = False
self.pppm_on = False
if self.type == "coulomb":
self.force_error = self.fmm_precision
else:
self.force_error = self.fmm_precision
def pppm_setup(self):
"""Calculate the pppm parameters."""
# Change lists to numpy arrays for Numba compatibility
if isinstance(self.pppm_mesh, list):
self.pppm_mesh = array(self.pppm_mesh, dtype=int)
elif not isinstance(self.pppm_mesh, ndarray):
raise TypeError(f"pppm_mesh is a {type(self.pppm_mesh)}. Please pass a list or numpy array.")
# Mesh array should be 3 even in 2D
if not len(self.pppm_mesh) == 3:
raise AlgorithmWarning(
f"len(potential.pppm_mesh) = {len(self.pppm_mesh)}.\n"
f"The PPPM mesh array should be of length 3 even in non 3D simulations."
)
if isinstance(self.pppm_aliases, list):
self.pppm_aliases = array(self.pppm_aliases, dtype=int)
elif not isinstance(self.pppm_aliases, ndarray):
raise TypeError(f"pppm_aliases is a {type(self.pppm_aliases)}. Please pass a list or numpy array.")
# In case you pass one number and not a list
if isinstance(self.pppm_cao, int):
caos = array([1, 1, 1], dtype=int) * self.pppm_cao
self.pppm_cao = caos.copy()
elif isinstance(self.pppm_cao, list):
self.pppm_cao = array(self.pppm_cao, dtype=int)
elif not isinstance(self.pppm_cao, ndarray):
raise TypeError(f"pppm_cao is a {type(self.pppm_cao)}. Please pass a list or numpy array.")
if self.pppm_cao.max() > 7:
raise AttributeError("\nYou have chosen a charge assignment order bigger than 7. Please choose a value <= 7")
# pppm parameters
self.pppm_h_array = self.box_lengths / self.pppm_mesh
# To avoid division by zero
mask = self.pppm_h_array == 0.0
self.pppm_h_array[mask] = 1.0
self.pppm_h_volume = self.pppm_h_array.prod()
# To avoid unnecessary loops
self.pppm_aliases[mask] = 0
# Pack constants together for brevity in input list
kappa = 1.0 / self.screening_length if self.type == "yukawa" else 0.0
constants = array([kappa, self.pppm_alpha_ewald, self.fourpie0])
# Calculate the Optimized Green's Function
self.pppm_green_function, self.pppm_kx, self.pppm_ky, self.pppm_kz, self.pppm_pm_err = gf_opt(
self.box_lengths, self.pppm_h_array, self.pppm_mesh, self.pppm_aliases, self.pppm_cao, constants
)
# Complete PM Force error calculation
self.pppm_pm_err *= sqrt(self.total_num_ptcls) * self.a_ws**2 * self.fourpie0
self.pppm_pm_err /= self.box_volume ** (2.0 / 3.0)
# Total Force Error
self.force_error = sqrt(self.pppm_pm_err**2 + self.pppm_pp_err**2)
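        # i.e. the PP and PM errors are treated as independent and combined in quadrature.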
def pretty_print(self):
"""Print potential information in a user-friendly way."""
print("\nPOTENTIAL: ", self.type)
self.pot_pretty_print(potential=self)
self.method_pretty_print()
    def setup(self, params, species) -> None:
        """Setup the potential class.

        Parameters
        ----------
        params : :class:`sarkas.core.Parameters`
            Simulation's parameters.

        species : list
            List of simulated species, used when the screening length must be computed.
        """
# Enforce consistency
self.type = self.type.lower()
self.method = self.method.lower()
self.copy_params(params)
self.type_setup(species)
self.method_setup()
def type_setup(self, species):
# Update potential-specific parameters
# Coulomb potential
if self.type == "coulomb":
if self.method == "pp":
warn("Use the PP method with care for pure Coulomb interactions.", category=AlgorithmWarning)
from .coulomb import pretty_print_info, update_params
self.pot_update_params = update_params
update_params(self)
elif self.type == "yukawa":
# Yukawa potential
from .yukawa import pretty_print_info, update_params
self.calc_screening_length(species)
self.pot_update_params = update_params
update_params(self)
elif self.type == "egs":
# exact gradient-corrected screening (EGS) potential
from .egs import pretty_print_info, update_params
self.calc_screening_length(species)
self.pot_update_params = update_params
update_params(self)
elif self.type == "lj":
# Lennard-Jones potential
from .lennardjones import pretty_print_info, update_params
self.pot_update_params = update_params
update_params(self)
elif self.type == "moliere":
# Moliere potential
from .moliere import pretty_print_info, update_params
self.pot_update_params = update_params
update_params(self)
elif self.type == "qsp":
# QSP potential
from .qsp import pretty_print_info, update_params
self.pot_update_params = update_params
update_params(self, species)
elif self.type == "hs_yukawa":
# Hard-Sphere Yukawa
from .hs_yukawa import update_params
self.calc_screening_length(species)
self.pot_update_params = update_params
update_params(self)
self.pot_pretty_print = pretty_print_info
def update_linked_list(self, ptcls):
"""
Calculate the pp part of the acceleration.
Parameters
----------
ptcls : :class:`sarkas.particles.Particles`
Particles data.
"""
ptcls.potential_energy, ptcls.acc, ptcls.virial = pp_update(
ptcls.pos,
ptcls.id,
ptcls.masses,
self.box_lengths,
self.rc,
self.matrix,
self.force,
self.measure,
ptcls.rdf_hist,
)
if self.type != "lj":
# Mie Energy of charged systems
            # J-M. Caillol, J. Chem. Phys. 101, 6080 (1994). https://doi.org/10.1063/1.468422
dipole = ptcls.charges @ ptcls.pos
ptcls.potential_energy += 2.0 * pi * (dipole**2).sum() / (3.0 * self.box_volume * self.fourpie0)
def update_brute(self, ptcls):
"""
        Calculate particles' acceleration and potential with a brute-force all-pairs loop.
Parameters
----------
ptcls: :class:`sarkas.particles.Particles`
Particles data.
"""
ptcls.potential_energy, ptcls.acc, ptcls.virial = pp_update_0D(
ptcls.pos,
ptcls.id,
ptcls.masses,
self.box_lengths,
self.rc,
self.matrix,
self.force,
self.measure,
ptcls.rdf_hist,
)
if self.type != "lj":
# Mie Energy of charged systems
            # J-M. Caillol, J. Chem. Phys. 101, 6080 (1994). https://doi.org/10.1063/1.468422
dipole = ptcls.charges @ ptcls.pos
ptcls.potential_energy += 2.0 * pi * (dipole**2).sum() / (3.0 * self.box_volume * self.fourpie0)
def update_pm(self, ptcls):
"""Calculate the pm part of the potential and acceleration.
Parameters
----------
ptcls : :class:`sarkas.particles.Particles`
Particles' data
"""
U_long, acc_l_r = pm_update(
ptcls.pos,
ptcls.charges,
ptcls.masses,
self.pppm_mesh,
self.pppm_h_array,
self.pppm_h_volume,
self.box_volume,
self.pppm_green_function,
self.pppm_kx,
self.pppm_ky,
self.pppm_kz,
self.pppm_cao,
)
# Ewald Self-energy
U_long += self.QFactor * self.pppm_alpha_ewald / sqrt(pi)
# Neutrality condition
U_long += -pi * self.total_net_charge**2.0 / (2.0 * self.box_volume * self.pppm_alpha_ewald**2)
ptcls.potential_energy += U_long
ptcls.acc += acc_l_r
def update_pppm(self, ptcls):
"""Calculate particles' potential and accelerations using pppm method.
Parameters
----------
ptcls : :class:`sarkas.particles.Particles`
Particles' data.
"""
self.update_linked_list(ptcls)
self.update_pm(ptcls)
def update_fmm_coulomb(self, ptcls):
"""Calculate particles' potential and accelerations using FMM method.
Parameters
----------
ptcls : sarkas.core.Particles
Particles' data
"""
out_fmm = lfmm3d(eps=self.fmm_precision, sources=ptcls.pos.transpose(), charges=ptcls.charges, pg=2)
potential_energy = ptcls.charges @ out_fmm.pot.real / self.fourpie0
acc = -(ptcls.charges * out_fmm.grad.real / ptcls.masses) / self.fourpie0
ptcls.acc = acc.transpose()
return potential_energy
def update_fmm_yukawa(self, ptcls):
"""Calculate particles' potential and accelerations using FMM method.
Parameters
----------
ptcls : sarkas.core.Particles
Particles' data
"""
out_fmm = hfmm3d(
eps=self.fmm_precision,
zk=1j / self.screening_length,
sources=ptcls.pos.transpose(),
charges=ptcls.charges,
pg=2,
)
potential_energy = ptcls.charges @ out_fmm.pot.real / self.fourpie0
acc = -(ptcls.charges * out_fmm.grad.real / ptcls.masses) / self.fourpie0
ptcls.acc = acc.transpose()
return potential_energy
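
# --- Minimal usage sketch (illustrative, not part of the original module) ---
# pot = Potential()
# pot.from_dict(input_dict={"type": "yukawa", "method": "pppm"})
# pot_backup = deepcopy(pot)  # dispatches to __deepcopy__ defined above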

278e170fcb4a1f505f51883094b18a872922bd6e | 2,459 | py | Python | scripts/create_grids.py | edwardoughton/taddle (also edwardoughton/arpu_predictor) | ["MIT"] | stars: 9 | forks: 4
"""
Create 10km x 10km grid using the country shapefile.
Written by Ed Oughton.
Winter 2020
"""
import argparse
import os
import configparser
import geopandas as gpd
from shapely.geometry import Polygon, mapping
import pandas as pd
import numpy as np
import rasterio
from rasterstats import zonal_stats
BASE_DIR = '.'
# repo imports
import sys
sys.path.append(BASE_DIR)
from config import VIS_CONFIG
COUNTRY_ABBRV = VIS_CONFIG['COUNTRY_ABBRV']
COUNTRIES_DIR = os.path.join(BASE_DIR, 'data', 'countries')
SHAPEFILE_DIR = os.path.join(COUNTRIES_DIR, COUNTRY_ABBRV, 'shapefile')
GRID_DIR = os.path.join(COUNTRIES_DIR, COUNTRY_ABBRV, 'grid')
def create_folders():
"""
Function to create new folder.
"""
os.makedirs(GRID_DIR, exist_ok=True)
def generate_grid(country):
"""
Generate a 10x10km spatial grid for the chosen country.
"""
filename = 'national_outline_{}.shp'.format(country)
country_outline = gpd.read_file(os.path.join(SHAPEFILE_DIR, filename))
country_outline.crs = "epsg:4326"
country_outline = country_outline.to_crs("epsg:3857")
    xmin, ymin, xmax, ymax = country_outline.total_bounds
    # 10 km sides, giving a 100 km^2 cell area
    length = 1e4
    wide = 1e4
cols = list(range(int(np.floor(xmin)), int(np.ceil(xmax)), int(wide)))
rows = list(range(int(np.floor(ymin)), int(np.ceil(ymax)), int(length)))
rows.reverse()
polygons = []
for x in cols:
for y in rows:
polygons.append( Polygon([(x,y), (x+wide, y), (x+wide, y-length), (x, y-length)]))
grid = gpd.GeoDataFrame({'geometry': polygons})
intersection = gpd.overlay(grid, country_outline, how='intersection')
intersection.crs = "epsg:3857"
intersection = intersection.to_crs("epsg:4326")
final_grid = query_settlement_layer(intersection)
final_grid = final_grid[final_grid.geometry.notnull()]
final_grid.to_file(os.path.join(GRID_DIR, 'grid.shp'))
print('Completed grid generation process')
def query_settlement_layer(grid):
"""
Query the settlement layer to get an estimated population for each grid square.
"""
path = os.path.join(SHAPEFILE_DIR, f'{COUNTRY_ABBRV}.tif')
grid['population'] = pd.DataFrame(
zonal_stats(vectors=grid['geometry'], raster=path, stats='sum'))['sum']
grid = grid.replace([np.inf, -np.inf], np.nan)
return grid
if __name__ == '__main__':
create_folders()
generate_grid(COUNTRY_ABBRV)
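
# Example (hypothetical values): config.py is expected to expose something like
#   VIS_CONFIG = {'COUNTRY_ABBRV': 'MWI'}
# so this script reads data/countries/MWI/shapefile/national_outline_MWI.shp and
# the settlement raster data/countries/MWI/shapefile/MWI.tif.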

27907e0aff23ef4fbe2d4a38e27570505c4caa34 | 366 | py | Python | test/test_all.py | beremaran/spdown | ["MIT"] | stars: 2 | issues: 4
#!/usr/bin/env python
import unittest
test_modules = [
'test.test_config',
'test.test_secrets',
'test.test_spotify',
'test.test_youtube'
]
if __name__ == "__main__":
suite = unittest.TestSuite()
for tm in test_modules:
suite.addTest(unittest.defaultTestLoader.loadTestsFromName(tm))
unittest.TextTestRunner().run(test=suite)
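
# Usage (assuming the test package layout above, run from the repository root):
#   python -m test.test_all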

2790d75b4b157f35c41640a672fd75216eb8137c | 1,281 | py | Python | tests/rec_util.py | pablohawz/tfg-Scan-Paint-clone | ["MIT"]
import os
import tempfile
from time import time
import numpy as np
import sounddevice as sd
from PySide2.QtWidgets import QApplication, QFileDialog
from scipy.io.wavfile import write
# Config
t = 3 # s
fs = 44100
def save(x, fs):
# You have to create a QApp in order to use a
# Widget (QFileDialg)
app = QApplication([])
fname, _ = QFileDialog.getSaveFileName(
None,
caption='Save audio to disk',
dir='C:/users/pablo/tfg',
filter='Audio Wav File (.wav)')
if fname == '':
return
if not fname.endswith('.wav'):
fname += '.wav'
write(fname, fs, x)
def main():
    with tempfile.TemporaryDirectory() as tmpdir:  # avoid shadowing the built-in dir()
# Rec
print('Rec!')
audio = sd.rec(frames=int(t*fs), samplerate=fs, channels=2)
sd.wait()
print('End!')
# Sum to mono
audio_mono = np.sum(audio, axis=1)
# Calculate dB
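        # SPL = 20*log10(p_rms / p0) with p0 = 2e-5 Pa, the standard reference
        # pressure; this assumes the recorded samples are calibrated in pascals.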
spl = 20 * np.log10(np.std(audio_mono) / 2e-5)
print(round(spl, 2))
        path = os.path.join(tmpdir, repr(time())+'.wav')
        write(path, fs, audio_mono)  # use the configured sample rate rather than a hard-coded literal
r = input('Do you want to save it? [y]/n: ')
if r == '' or r == 'y':
save(audio_mono, fs)
print('Ciao')
if __name__ == '__main__':
main()

2792236e3960ae778ac604767d58c8cfaef78404 | 10,977 | py | Python | test/comptests/TestHybridQuasiGaussian.py | sschlenkrich/HybridMonteCarlo | ["MIT"] | stars: 3 | forks: 3
#!/usr/bin/python
import sys
sys.path.append('./')
import unittest
import copy
import numpy as np
from hybmc.mathutils.Helpers import BlackImpliedVol, BlackVega
from hybmc.termstructures.YieldCurve import YieldCurve
from hybmc.models.AssetModel import AssetModel
from hybmc.models.HybridModel import HybridModel
from hybmc.models.HullWhiteModel import HullWhiteModel
from hybmc.models.QuasiGaussianModel import QuasiGaussianModel
from hybmc.simulations.McSimulation import McSimulation
from hybmc.simulations.Payoffs import Fixed, Pay, Asset, LiborRate, Max
import matplotlib.pyplot as plt
# a quick way to get a model
def HWModel(rate=0.01, vol=0.0050, mean=0.03):
curve = YieldCurve(rate)
times = np.array([ 10.0 ])
vols = np.array([ vol ])
return HullWhiteModel(curve, mean, times, vols)
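
# Monte Carlo forward estimator: average the discounted payoff over all paths and
# rescale by the domestic discount factor; the quoted error is the one-sigma
# statistical error std / sqrt(nPaths).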
def fwd(mcSim,p):
samples = np.array([
p.discountedAt(mcSim.path(k)) for k in range(mcSim.nPaths) ])
fwd = np.average(samples) / \
mcSim.model.domRatesModel.yieldCurve.discount(p.obsTime)
err = np.std(samples) / np.sqrt(samples.shape[0]) / \
mcSim.model.domRatesModel.yieldCurve.discount(p.obsTime)
return fwd, err
class TestHybridQuasiGaussian(unittest.TestCase):
# set up the stage for testing the models
def setUp(self):
### full smile/skew model
# domestic rates
domAlias = 'EUR'
eurCurve = YieldCurve(0.03)
d = 2
times = np.array([ 10.0 ])
sigma = np.array([ [ 0.0060 ],
[ 0.0040 ] ])
slope = np.array([ [ 0.10 ],
[ 0.15 ] ])
curve = np.array([ [ 0.05 ],
[ 0.10 ] ])
delta = np.array([ 1.0, 10.0 ])
chi = np.array([ 0.01, 0.15 ])
Gamma = np.array([ [1.0, 0.6],
[0.6, 1.0] ])
eurRatesModel = QuasiGaussianModel(eurCurve,d,times,sigma,slope,curve,delta,chi,Gamma)
# assets
forAliases = [ 'USD', 'GBP' ]
spotS0 = [ 1.0, 2.0 ]
spotVol = [ 0.3, 0.2 ]
forAssetModels = [
AssetModel(S0, vol) for S0, vol in zip(spotS0,spotVol) ]
# USD rates
usdCurve = YieldCurve(0.02)
d = 3
times = np.array([ 10.0 ])
sigma = np.array([ [ 0.0060 ],
[ 0.0050 ],
[ 0.0040 ] ])
slope = np.array([ [ 0.10 ],
[ 0.20 ],
[ 0.30 ] ])
curve = np.array([ [ 0.05 ],
[ 0.10 ],
[ 0.20 ] ])
delta = np.array([ 1.0, 5.0, 20.0 ])
chi = np.array([ 0.01, 0.05, 0.15 ])
Gamma = np.array([ [1.0, 0.8, 0.6],
[0.8, 1.0, 0.8],
[0.6, 0.8, 1.0] ])
usdRatesModel = QuasiGaussianModel(usdCurve,d,times,sigma,slope,curve,delta,chi,Gamma)
#
gbpRatesModel = HWModel()
#
# 'EUR_x_0', 'EUR_x_1', 'USD_logS', 'USD_x_0', 'USD_x_1', 'USD_x_2', 'GBP_logS', 'GBP_x'
corr = np.array([
[ 1.0, 0.0, 0.5, -0.5, 0.0, 0.0, -0.5, -0.5 ], # EUR_x_0
[ 0.0, 1.0, 0.0, 0.0, -0.5, 0.0, -0.5, 0.0 ], # EUR_x_1
[ 0.5, 0.0, 1.0, -0.5, -0.5, -0.5, 0.0, 0.0 ], # USD_logS
[ -0.5, 0.0, -0.5, 1.0, 0.0, 0.0, 0.0, 0.0 ], # USD_x_0
[ 0.0, -0.5, -0.5, 0.0, 1.0, 0.0, 0.0, 0.0 ], # USD_x_1
[ 0.0, 0.0, -0.5, 0.0, 0.0, 1.0, 0.0, 0.0 ], # USD_x_2
[ -0.5, -0.5, 0.0, 0.0, 0.0, 0.0, 1.0, 0.5 ], # GBP_logS
[ -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 1.0 ], # GBP_x
])
#
# corr = np.identity(2 + 1 + 3 + 1 + 1 ) # overwrite
#
self.model = HybridModel(domAlias,eurRatesModel,forAliases,forAssetModels,[usdRatesModel,gbpRatesModel],corr)
### Gaussian model
# domestic rates
domAlias = 'EUR'
eurCurve = YieldCurve(0.03)
d = 2
times = np.array([ 10.0 ])
sigma = np.array([ [ 0.0060 ],
[ 0.0040 ] ])
slope = np.array([ [ 0.00 ],
[ 0.00 ] ])
curve = np.array([ [ 0.00 ],
[ 0.00 ] ])
delta = np.array([ 1.0, 10.0 ])
chi = np.array([ 0.01, 0.15 ])
Gamma = np.array([ [1.0, 0.6],
[0.6, 1.0] ])
eurRatesModel = QuasiGaussianModel(eurCurve,d,times,sigma,slope,curve,delta,chi,Gamma)
# assets
forAliases = [ 'USD', 'GBP' ]
spotS0 = [ 1.0, 2.0 ]
spotVol = [ 0.3, 0.2 ]
forAssetModels = [
AssetModel(S0, vol) for S0, vol in zip(spotS0,spotVol) ]
# USD rates
usdCurve = YieldCurve(0.02)
d = 3
times = np.array([ 10.0 ])
sigma = np.array([ [ 0.0060 ],
[ 0.0050 ],
[ 0.0040 ] ])
slope = np.array([ [ 0.10 ],
[ 0.20 ],
[ 0.30 ] ])
curve = np.array([ [ 0.05 ],
[ 0.10 ],
[ 0.20 ] ])
delta = np.array([ 1.0, 5.0, 20.0 ])
chi = np.array([ 0.01, 0.05, 0.15 ])
Gamma = np.array([ [1.0, 0.8, 0.6],
[0.8, 1.0, 0.8],
[0.6, 0.8, 1.0] ])
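        # Note: gbpRatesModel and corr are reused from the full smile/skew setup above.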
self.gaussianModel = HybridModel(domAlias,eurRatesModel,forAliases,forAssetModels,[usdRatesModel,gbpRatesModel],corr)
def test_ModelSetup(self):
self.assertListEqual(self.model.stateAliases(),
['EUR_x_0', 'EUR_x_1',
'EUR_y_0_0', 'EUR_y_0_1',
'EUR_y_1_0', 'EUR_y_1_1',
'EUR_s',
'USD_logS', 'USD_x_0', 'USD_x_1', 'USD_x_2',
'USD_y_0_0', 'USD_y_0_1', 'USD_y_0_2',
'USD_y_1_0', 'USD_y_1_1', 'USD_y_1_2',
'USD_y_2_0', 'USD_y_2_1', 'USD_y_2_2',
'USD_s',
'GBP_logS', 'GBP_x', 'GBP_s'])
self.assertListEqual(self.model.factorAliases(),
['EUR_x_0', 'EUR_x_1',
'USD_logS', 'USD_x_0', 'USD_x_1', 'USD_x_2',
'GBP_logS', 'GBP_x'])
# @unittest.skip('Too time consuming')
def test_HybridSimulation(self):
times = np.concatenate([ np.linspace(0.0, 10.0, 11), [10.5] ])
nPaths = 2**13
seed = 314159265359
# risk-neutral simulation
print('')
mcSim = McSimulation(self.model,times,nPaths,seed,False)
#
T = 10.0
P = Pay(Fixed(1.0),T)
fw, err = fwd(mcSim,P)
# domestic numeraire
print('1.0 @ %4.1lfy %8.6lf - mc_err = %8.6lf' % (T,fw,err))
# foreign assets
for k, alias in enumerate(self.model.forAliases):
p = Asset(T,alias)
xT = self.model.forAssetModels[k].X0 * \
self.model.forRatesModels[k].yieldCurve.discount(T) / \
self.model.domRatesModel.yieldCurve.discount(T)
fw, err = fwd(mcSim,p)
print(alias + ' @ %4.1lfy %8.6lf vs %8.6lf (curve) - mc_err = %8.6lf' % (T,fw,xT,err))
# domestic Libor rate
Tstart = 10.0
Tend = 10.5
L = Pay(LiborRate(T,Tstart,Tend,alias='EUR'),Tend)
fw, err = fwd(mcSim,L)
Lref = (mcSim.model.domRatesModel.yieldCurve.discount(Tstart) / \
mcSim.model.domRatesModel.yieldCurve.discount(Tend) - 1) / \
(Tend - Tstart)
print('L_EUR @ %4.1lfy %8.6lf vs %8.6lf (curve) - mc_err = %8.6lf' % (T,fw,Lref,err))
        # foreign Libor rates
for k, alias in enumerate(self.model.forAliases):
L = Pay(LiborRate(T,Tstart,Tend,alias=alias)*Asset(Tend,alias),Tend)
fw, err = fwd(mcSim,L)
fw *= mcSim.model.domRatesModel.yieldCurve.discount(Tend) / \
mcSim.model.forRatesModels[k].yieldCurve.discount(Tend) / \
mcSim.model.forAssetModels[k].X0
err *= mcSim.model.domRatesModel.yieldCurve.discount(Tend) / \
mcSim.model.forRatesModels[k].yieldCurve.discount(Tend) / \
mcSim.model.forAssetModels[k].X0
Lref = (mcSim.model.forRatesModels[k].yieldCurve.discount(Tstart) / \
mcSim.model.forRatesModels[k].yieldCurve.discount(Tend) - 1) / \
(Tend - Tstart)
print('L_%s @ %4.1lfy %8.6lf vs %8.6lf (curve) - mc_err = %8.6lf' % (alias,T,fw,Lref,err))
def test_HybridVolAdjusterCalculation(self):
model = copy.deepcopy(self.model)
# model = copy.deepcopy(self.gaussianModel)
hybVolAdjTimes = np.linspace(0.0, 20.0, 21)
model.recalculateHybridVolAdjuster(hybVolAdjTimes)
plt.plot(model.hybAdjTimes,model.hybVolAdj[0], 'r*', label='USD')
plt.plot(model.hybAdjTimes,model.hybVolAdj[1], 'b*', label='GBP')
plt.legend()
#
times = np.linspace(0.0,20.0,101)
plt.plot(times,[ model.hybridVolAdjuster(0,t) for t in times ] , 'r-')
plt.plot(times,[ model.hybridVolAdjuster(1,t) for t in times ] , 'b-')
plt.show()
#
# return
times = np.linspace(0.0, 10.0, 11)
nPaths = 2**13
seed = 314159265359
# risk-neutral simulation
print('')
mcSim = McSimulation(model,times,nPaths,seed,False)
#
T = 10.0
for k, alias in enumerate(model.forAliases):
# ATM forward
xT = model.forAssetModels[k].X0 * \
model.forRatesModels[k].yieldCurve.discount(T) / \
model.domRatesModel.yieldCurve.discount(T)
K = Fixed(xT)
Z = Fixed(0.0)
C = Pay(Max(Asset(T,alias)-K,Z),T)
fw, err = fwd(mcSim,C)
vol = BlackImpliedVol(fw,xT,xT,T,1.0)
vega = BlackVega(xT,xT,vol,T)
err /= vega
volRef = model.forAssetModels[k].sigma
print('C_%s @ %4.1lfy %8.6lf vs %8.6lf (curve) - mc_err = %8.6lf' % (alias,T,vol,volRef,err))
P = Pay(Max(K-Asset(T,alias),Z),T)
fw, err = fwd(mcSim,P)
vol = BlackImpliedVol(fw,xT,xT,T,-1.0)
vega = BlackVega(xT,xT,vol,T)
err /= vega
volRef = model.forAssetModels[k].sigma
print('P_%s @ %4.1lfy %8.6lf vs %8.6lf (curve) - mc_err = %8.6lf' % (alias,T,vol,volRef,err))
if __name__ == '__main__':
unittest.main()

279582d504d9da0d858f00c0d357db9ba41aecb7 | 3,387 | py | Python | champ_bringup/scripts/joint_calibrator_relay.py | lubitz99/champ | ["BSD-3-Clause"] | stars: 923 | issues: 73 | forks: 229
#!/usr/bin/env python
'''
Copyright (c) 2019-2020, Juan Miguel Jimeno
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import rospy
from champ_msgs.msg import Joints
from sensor_msgs.msg import JointState
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
import rosparam
import os, sys
class JointsCalibratorRelay:
def __init__(self):
rospy.Subscriber("joints_calibrator", JointState, self.joints_cmd_callback)
joint_controller_topic = rospy.get_param('champ_controller/joint_controller_topic')
self.joint_minimal_pub = rospy.Publisher('cmd_joints', Joints, queue_size = 100)
self.joint_trajectory_pub = rospy.Publisher(joint_controller_topic, JointTrajectory, queue_size = 100)
joints_map = [None,None,None,None]
joints_map[3] = rospy.get_param('/joints_map/left_front')
joints_map[2] = rospy.get_param('/joints_map/right_front')
joints_map[1] = rospy.get_param('/joints_map/left_hind')
joints_map[0] = rospy.get_param('/joints_map/right_hind')
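        # joints_map is filled right_hind..left_front (indices 0..3); iterating it in
        # reverse below yields names in left_front, right_front, left_hind, right_hind order.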
self.joint_names = []
for leg in reversed(joints_map):
for joint in leg:
self.joint_names.append(joint)
def joints_cmd_callback(self, joints):
joint_minimal_msg = Joints()
for i in range(12):
joint_minimal_msg.position.append(joints.position[i])
self.joint_minimal_pub.publish(joint_minimal_msg)
joint_trajectory_msg = JointTrajectory()
joint_trajectory_msg.joint_names = self.joint_names
point = JointTrajectoryPoint()
point.time_from_start = rospy.Duration(1.0 / 60.0)
point.positions = joint_minimal_msg.position
joint_trajectory_msg.points.append(point)
self.joint_trajectory_pub.publish(joint_trajectory_msg)
if __name__ == "__main__":
rospy.init_node('joints_calibrator_relay', anonymous=True)
j = JointsCalibratorRelay()
rospy.spin()

279641443118aebc70b220bf9dae1dc53a9d2fc4 | 3,909 | py | Python | touchdown/aws/vpc/vpc.py | yaybu/touchdown | ["Apache-2.0"] | stars: 14 | issues: 106 | forks: 5
# Copyright 2014 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from touchdown.core import argument, serializers
from touchdown.core.plan import Plan
from touchdown.core.resource import Resource
from ..account import BaseAccount
from ..common import SimpleApply, SimpleDescribe, SimpleDestroy, TagsMixin
class VPC(Resource):
resource_name = "vpc"
name = argument.String(field="Name", group="tags")
cidr_block = argument.IPNetwork(field="CidrBlock")
tenancy = argument.String(
default="default", choices=["default", "dedicated"], field="InstanceTenancy"
)
tags = argument.Dict()
account = argument.Resource(BaseAccount)
enable_dns_support = argument.Boolean(
default=True,
field="EnableDnsSupport",
serializer=serializers.Dict(Value=serializers.Identity()),
group="dns_support_attribute",
)
enable_dns_hostnames = argument.Boolean(
default=True,
field="EnableDnsHostnames",
serializer=serializers.Dict(Value=serializers.Identity()),
group="dns_hostnames_attribute",
)
class Describe(SimpleDescribe, Plan):
resource = VPC
service_name = "ec2"
api_version = "2015-10-01"
describe_action = "describe_vpcs"
describe_envelope = "Vpcs"
key = "VpcId"
def get_describe_filters(self):
return {"Filters": [{"Name": "tag:Name", "Values": [self.resource.name]}]}
def annotate_object(self, obj):
obj["EnableDnsSupport"] = self.client.describe_vpc_attribute(
Attribute="enableDnsSupport", VpcId=obj["VpcId"]
)["EnableDnsSupport"]
obj["EnableDnsHostnames"] = self.client.describe_vpc_attribute(
Attribute="enableDnsHostnames", VpcId=obj["VpcId"]
)["EnableDnsHostnames"]
return obj
class Apply(TagsMixin, SimpleApply, Describe):
create_action = "create_vpc"
waiter = "vpc_available"
def update_dnssupport_attribute(self):
diff = self.resource.diff(
self.runner,
self.object.get("EnableDnsSupport", {}),
group="dns_support_attribute",
)
if not diff.matches():
yield self.generic_action(
["Configure DNS Support Setting"] + list(diff.lines()),
self.client.modify_vpc_attribute,
VpcId=serializers.Identifier(),
EnableDnsSupport=serializers.Argument("enable_dns_support"),
)
def update_dnshostnames_attribute(self):
diff = self.resource.diff(
self.runner,
self.object.get("EnableDnsHostnames", {}),
group="dns_hostnames_attribute",
)
if not diff.matches():
yield self.generic_action(
["Configure DNS Hostnames Setting"] + list(diff.lines()),
self.client.modify_vpc_attribute,
VpcId=serializers.Identifier(),
EnableDnsHostnames=serializers.Argument("enable_dns_hostnames"),
)
def update_object(self):
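        # Chain the update actions from the base classes with the two VPC attribute
        # updates; each yielded action is executed later by the touchdown planner.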
for action in super(Apply, self).update_object():
yield action
for action in self.update_dnssupport_attribute():
yield action
for action in self.update_dnshostnames_attribute():
yield action
class Destroy(SimpleDestroy, Describe):
destroy_action = "delete_vpc"
# waiter = 'vpc_terminated'

2797b6dfc818a3de2bc52aaf5906014401475627 | 793 | py | Python | estructuras de control secuenciales/ejercicio10.py | svcuellar/algoritmos_programacion | ["MIT"]
"""
entradas
cantidadchelinesaustriacos-->c-->float
cantidaddragmasgriegos-->dg-->float
cantidadpesetas-->p-->float
salidas
chelines_a_pesetas-->c_p-->float
dragmas_a_francosfrancese-->dg_ff-->float
pesetas_a_dolares-->p_d-->float
pesetas_a_lirasitalianas-->p_li-->float
"""
#entradas
c=float(input("Ingrese la cantidad de chelines austriacos "))
dg=float(input("Ingrese la cantidad de dragmas griegos "))
p=float(input("Ingrese la cantidad de pesetas "))
#caja negra
c_p=round((c*9.57), 2)
dg_ff=round(((c*0.957)/20.110), 2)
p_d=round((p/122.499), 2)
p_li=round((p/0.092289), 2)
#salidas
print(c, " chelines equivalen a", c_p, " pesetas")
print(dg, " dragmas griegos equivalen a", dg_ff, " francos franceses")
print(p, " pesetas equivalen a", p_d, " dolares y ", p_li, " liras italianas")

27981338330ee315b120f4f29b8d0163c165b34b | 4,453 | py | Python | st_model.py | saras108/Sentiment_Analysis | ["MIT"]
# importing necessary libraries
import numpy as np
import pandas as pd
import string
import streamlit as st
header = st.container()
dataset = st.container()
feature = st.container()
model_training = st.container()
def get_data(file_name):
df = pd.read_csv(file_name , header = None)
return df
with header:
st.title("Emotion detection using Text")
with dataset:
st.header("Emotion Detection Datasets")
df = get_data("1-P-3-ISEAR.csv")
df.columns = ['sn','Target','Sentence']
df.drop('sn',inplace=True,axis =1)
df.head()
df.duplicated().sum()
df.drop_duplicates(inplace = True)
st.subheader("Lets check if the dataset is fairly distrributed.")
col1 , col2 = st.columns(2)
target_count = df['Target'].value_counts()
col1.table(target_count)
col2.text("Line Chart of the total output counts")
col2.line_chart(target_count )
st.markdown("From the above data, we can easily say the data iss fairly distributed.")
with feature:
st.header("Learning about Feature and converting them")
def lowercase(text):
text = text.lower()
return text
# df['Sentence'] = df['Sentence'].apply(lowercase)
def remove_punc(text):
text = "".join([char for char in text if char not in string.punctuation and not char.isdigit()])
return text
df['Sentence'] = df['Sentence'].apply(lowercase).apply(remove_punc)
#Removing the stop words
import nltk
nltk.download('omw-1.4')
nltk.download('stopwords')
from nltk.corpus import stopwords
def remove_stopwords(text):
text = [w for w in text.split() if w not in stopwords.words('english')]
return ' '.join(text)
df['Sentence'] = df['Sentence'].apply(remove_stopwords)
    # Lemmatization, i.e. changing words into their root form
from nltk.stem import WordNetLemmatizer
nltk.download('wordnet')
from nltk.corpus import wordnet
lemmatizer = WordNetLemmatizer()
def lemmatize(text):
text = [lemmatizer.lemmatize(word,'v') for word in text.split()]
return ' '.join(text)
df['Sentence'] = df['Sentence'].apply(lemmatize)
    st.markdown('As part of the data pre-processing, we have done the following things:')
    st.text(" - Converting the sentence to lowercase")
    st.text(" - Removing the punctuation")
    st.text(" - Removing the stop words")
    st.text(" - Lemmatization, i.e. changing words into their root form")
st.markdown("After all these our data looks like-")
st.dataframe(df.head())
st.markdown("After doing Train Test split we will apply TGIF, It is technique to transform text into a meaningful vector of numbers. TFIDF penalizes words that come up too often and dont really have much use. So it rescales the frequency of words that are common which makes scoring more balanced")
with model_training:
from sklearn.model_selection import train_test_split
X = df['Sentence']
y = df['Target']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2,random_state=10)
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer(min_df=2, max_df=0.5, ngram_range=(1, 2))
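    # min_df=2 drops terms seen in fewer than two documents, max_df=0.5 drops terms
    # appearing in more than half of them, and ngram_range=(1, 2) keeps unigrams and bigrams.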
train_tfidf = tfidf.fit_transform(X_train)
test_tfidf = tfidf.transform(X_test)
from sklearn.linear_model import LogisticRegression
logistic = LogisticRegression(max_iter=1000)
logistic.fit(train_tfidf,y_train)
from sklearn.naive_bayes import MultinomialNB
nb = MultinomialNB()
nb.fit(train_tfidf,y_train)
    st.header('Checking the accuracy using different models.')
import joblib
joblib.dump(logistic, './mymodel/logistic_model.joblib')
joblib.dump(nb, './mymodel/naive_bayes_model.joblib')
joblib.dump(tfidf, './mymodel/tfidf_model.joblib')
sel_col , disp_col = st.columns(2)
with sel_col:
sel_col.subheader("Logistic Regression")
sel_col.markdown("Logistic Regression Train Error")
sel_col.write(logistic.score(train_tfidf, y_train))
sel_col.markdown("Logistic Regression Test Error")
sel_col.write( logistic.score(test_tfidf, y_test))
with disp_col:
disp_col.subheader("Naive Bias")
disp_col.markdown("Naive Bias Train Error")
disp_col.write(nb.score(train_tfidf, y_train))
disp_col.markdown("Naive Bias Test Error")
disp_col.write(nb.score(test_tfidf, y_test))

279b776bfdce89147881347913d489e839a74293 | 3,989 | py | Python | PyPrometheus.py | RusDavies/PyPrometheus | ["MIT"]
from PyPrometheusQueryClient import PrometheusQueryClient
import json
from pathlib import Path
from datetime import datetime
class Prometheus:
def __init__(self, url, metrics_config_file=None, cache_path=None, cache_ttl=3600, ssl_verify=True, starttime=None, endtime=None):
self._metrics_config_file = metrics_config_file
self._starttime = starttime
self._endtime = endtime
self.pqc = PrometheusQueryClient(url=url, cache_path=cache_path,
cache_ttl=cache_ttl, ssl_verify=ssl_verify)
self._load_metrics_config()
self.prometheus_data = {}
#---
def _load_metrics_config(self, metrics_config_file=None):
if (metrics_config_file):
self._metrics_config_file = metrics_config_file
if (not self._metrics_config_file):
raise ValueError('No metrics config file set. Cannot continue.')
path = Path(self._metrics_config_file)
if(not path.exists()):
raise ValueError("The configuration file '{}' does not exist".format(self._metrics_config_file))
with open(path, 'r') as f:
self._metrics_config = json.loads( f.read() )
return
def get_metrics(self, report_progress):
for (metric, metadata) in self._metrics_config.items():
if metadata['active'] == False:
continue
if (not metric in self.pqc.metrics):
raise ValueError("Metric '{}' is unknown".format(metric))
if (report_progress):
print("Getting results for metric '{}'{}".format(metric, ' ' * 40), end='\r')
_ = self.get_metric(metric, metadata)
def get_metric(self, metric, metadata=None, starttime:datetime=None, endtime:datetime=None):
        # Order of precedence: start and end times passed as params first; otherwise those set on the class.
if(not starttime):
starttime = self._starttime
if(not endtime):
endtime = self._endtime
# Make sure we have actual start and end times
if(not starttime or not endtime):
raise ValueError('Both starttime and endtime must be set')
        # Convert str objects to the expected datetime formats
# if( isinstance(starttime, str) ):
# starttime = datetime.strptime(starttime, '%Y-%m-%dT%H:%M:%SZ')
# if( isinstance(endtime, str) ):
# endtime = datetime.strptime(endtime, '%Y-%m-%dT%H:%M:%SZ')
        # Make sure we're given an actual metric name
if (not metric or len(metric) == 0):
raise ValueError("Metric '{}' cannot be None")
        # Make sure the metric is present in the list retrieved from the server
if (not metric in self.pqc.metrics):
raise ValueError("Metric '{}' is not available on the server".format(metric))
        # If we're not passed the metadata, try to recover it from our metrics config.
if (not metadata):
metadata = self._metrics_config.get(metric, {})
#
# Now do the real work
#
# Set up the stub of the result
self.prometheus_data[metric] = {}
self.prometheus_data[metric]['metadata'] = metadata
self.prometheus_data[metric]['title'] = metric
        # Pull the data via the PrometheusQueryClient, depending on whether deltas were requested in the metadata
deltas = metadata.get('deltas', None)
if (deltas == None):
(data, df) = self.pqc.get_metric(metric, start=starttime, end=endtime)
elif (deltas == True):
(data, df) = self.pqc.get_with_deltas(metric, start=starttime, end=endtime)
else:
(data, df) = self.pqc.get_without_deltas(metric, start=starttime, end=endtime)
self.prometheus_data[metric]['data'] = data
self.prometheus_data[metric]['df'] = df
return self.prometheus_data[metric]
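
# --- Minimal usage sketch (illustrative values, not part of the original file) ---
# prom = Prometheus(url='http://localhost:9090', metrics_config_file='metrics.json',
#                   starttime=datetime(2024, 1, 1), endtime=datetime(2024, 1, 2))
# prom.get_metrics(report_progress=True)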

279d9301e8e9b967d31f0c36c000b8b79e8eab38 | 5,557 | py | Python | tests/validate_schema_guide.py | dieghernan/citation-file-format (also Seanpm2001-DIGITAL-Command-Language/citation-file-format) | ["CC-BY-4.0"] | stars: 257 | issues: 307 | forks: 344
import pytest
import os
import json
import jsonschema
from ruamel.yaml import YAML
def test():
def extract_snippets():
start = 0
end = len(markdown)
while start < end:
snippet_start = markdown.find("```yaml\n", start, end)
if snippet_start == -1:
break
snippet_end = markdown.find("```\n", snippet_start + 8, end)
text = markdown[snippet_start:snippet_end + 4]
indent_size = 0
while text[8:][indent_size] == " ":
indent_size += 1
unindented = "\n"
for line in text[8:-4].split("\n"):
unindented += line[indent_size:]
unindented += "\n"
snippets.append(dict(start=snippet_start, end=snippet_end + 4, text=unindented))
start = snippet_end + 4
return snippets
with open("schema-guide.md", "r") as f:
markdown = f.read()
snippets = list()
snippets = extract_snippets()
yaml = YAML(typ='safe')
yaml.constructor.yaml_constructors[u'tag:yaml.org,2002:timestamp'] = yaml.constructor.yaml_constructors[u'tag:yaml.org,2002:str']
schema_path = os.path.join(os.path.dirname(__file__), "..", "schema.json")
with open(schema_path, "r") as sf:
schema_data = json.loads(sf.read())
for i_snippet, snippet in enumerate(snippets):
if "# incorrect" in snippet["text"]:
continue
instance = yaml.load(snippet["text"])
passes = False
while not passes:
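            # The guide's snippets are intentionally partial: whenever validation fails on a
            # missing required property, patch in a placeholder value and re-validate until
            # the snippet passes (or an unrecognized error is raised).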
try:
jsonschema.validate(instance=instance, schema=schema_data, format_checker=jsonschema.FormatChecker())
passes = True
print("snippet {0}/{1} (chars {2}-{3}): OK".format(i_snippet + 1, len(snippets), snippet["start"], snippet["end"]))
except jsonschema.ValidationError as e:
path = "" if len(e.relative_path) == 0 else "/".join([str(p) for p in e.relative_path]) + "/"
if path == "":
if e.message.startswith("'authors' is a required property"):
instance["authors"] = []
elif e.message.startswith("'cff-version' is a required property"):
instance["cff-version"] = "1.2.0"
elif e.message.startswith("'message' is a required property"):
instance["message"] = "testmessage"
elif e.message.startswith("'title' is a required property"):
instance["title"] = "testtitle"
else:
raise Exception("undefined behavior: " + e.message)
elif path.startswith("authors"):
if e.message.startswith("[] is too short"):
instance["authors"].append({"name": "testname"})
else:
raise Exception("undefined behavior: " + e.message)
elif path.startswith("references"):
index = int(path.split("/")[1])
if e.message.startswith("'authors' is a required property"):
instance["references"][index]["authors"] = []
elif e.message.startswith("'title' is a required property"):
instance["references"][index]["title"] = "testtitle"
elif e.message.startswith("'type' is a required property"):
instance["references"][index]["type"] = "generic"
elif e.message.startswith("[] is too short"):
instance["references"][index]["authors"].append({"name": "testname"})
elif path.startswith("references/{0}/conference".format(index)):
if e.message.startswith("'name' is a required property"):
instance["references"][index]["conference"]["name"] = "testname"
else:
raise Exception("undefined behavior: " + e.message)
elif path.startswith("preferred-citation"):
if e.message.startswith("'authors' is a required property"):
instance["preferred-citation"]["authors"] = []
elif e.message.startswith("'title' is a required property"):
instance["preferred-citation"]["title"] = "testtitle"
elif e.message.startswith("'type' is a required property"):
instance["preferred-citation"]["type"] = "generic"
elif e.message.startswith("[] is too short"):
instance["preferred-citation"]["authors"].append({"name": "testname"})
else:
raise Exception("undefined behavior: " + e.message)
else:
print("Found a problem with snippet at char position {0}-{1}:\n {2}\n{3}".format(snippet["start"], snippet["end"], snippet["text"], e.message))
raise e

27a5750fd3834a5dd24fb63cbde3fd11a0fdfdd0 | 4,613 | py | Python | flaskprediction/routes.py | killswitchh/flask-prediction-app | ["MIT"] | issues: 1
import os
import secrets

from PIL import Image
from flask import render_template, send_from_directory

from flaskprediction import app
from flaskprediction.forms import (CarDetailsForm, TitanicDetailsForm,
                                   BostonDetailsForm, HeightDetailsForm,
                                   CatImageForm)
from flaskprediction.utils.predict import Predictor


@app.route('/favicon.ico')
def favicon():
    return send_from_directory(os.path.join(app.root_path, 'static'),
                               'favicon.ico',
                               mimetype='image/vnd.microsoft.icon')


@app.route("/")
@app.route("/home")
def home():
    return render_template('home.html')


@app.route("/classifier", methods=['GET', 'POST'])
def classifier():
    return render_template('classification.html')


@app.route("/regressor", methods=['GET', 'POST'])
def regressor():
    return render_template('regression.html')


@app.route("/classifier/titanic", methods=['GET', 'POST'])
def titanic():
    form = TitanicDetailsForm()
    if form.validate_on_submit():
        parameter_list = [form.p_id.data, form.p_class.data, form.sex.data,
                          form.age.data, form.sibsp.data, form.parch.data,
                          form.fare.data, form.embarked.data]
        predictor = Predictor()
        answer = predictor.calculate_probability_titanic(parameter_list)
        return render_template('titanic.html', title='Titanic Classifier',
                               form=form, message="", answer=answer)
    message = "Enter Passenger Details"
    return render_template('titanic.html', title='Titanic Classifier',
                           form=form, message=message)


@app.route("/classifier/car", methods=['GET', 'POST'])
def car():
    form = CarDetailsForm()
    if form.validate_on_submit():
        parameter_list = list(map(int, [form.price.data, form.maintenance.data,
                                        form.no_of_doors.data,
                                        form.capacity.data,
                                        form.size_of_luggage_boot.data,
                                        form.safety.data]))
        predictor = Predictor()
        answer = predictor.calculate_probability_car(parameter_list)
        return render_template('car.html', title='Car Classifier',
                               form=form, message="", answer=answer)
    message = "Select All Values"
    return render_template('car.html', title='Car Classifier',
                           form=form, message=message)


@app.route("/regressor/boston", methods=['GET', 'POST'])
def boston():
    form = BostonDetailsForm()
    if form.validate_on_submit():
        parameter_list = [form.crim.data, form.zn.data, form.chas.data,
                          form.nox.data, form.rm.data, form.age.data,
                          form.dis.data, form.ptratio.data, form.black.data,
                          form.lstat.data]
        predictor = Predictor()
        answer = predictor.calculate_price_boston(parameter_list)
        return render_template('boston.html', title='Boston Regressor',
                               form=form, message="", answer=answer)
    message = "Select All Values"
    return render_template('boston.html', title='Boston Regressor',
                           form=form, message=message)


@app.route("/regressor/height", methods=['GET', 'POST'])
def height():
    form = HeightDetailsForm()
    if form.validate_on_submit():
        parameter_list = [form.sex.data, form.height.data]
        predictor = Predictor()
        answer = predictor.calculate_weight(parameter_list)
        return render_template('height.html', title='Weight Prediction',
                               form=form, message="", answer=answer)
    message = "Select All Values"
    return render_template('height.html', title='Weight Prediction',
                           form=form, message=message)


def save_picture(form_picture):
    # Save an uploaded picture under static/pics with a random filename,
    # downscaled to a 64x64 thumbnail.
    random_hex = secrets.token_hex(8)
    _, f_ext = os.path.splitext(form_picture.filename)
    picture_fn = random_hex + f_ext
    picture_path = os.path.join(app.root_path, 'static/pics', picture_fn)
    output_size = (64, 64)
    i = Image.open(form_picture)
    i.thumbnail(output_size)
    i.save(picture_path)
    return picture_path


@app.route("/classifier/cat", methods=['GET', 'POST'])
def cat():
    form = CatImageForm()
    if form.validate_on_submit():
        picture_file = form.cat_picture.data
        image_file = save_picture(picture_file)
        predictor = Predictor()
        answer = predictor.find_cat(image_file)
        return render_template('cat.html', title='Cat Prediction',
                               form=form, message="", answer=answer)
    message = "Upload A Picture"
    return render_template('cat.html', title='Cat Prediction',
                           form=form, message=message)
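Every handler above follows the same validate-predict-render shape, so the whole app can be smoke-tested with Flask's built-in test client. A minimal sketch (routes taken from the file above; it assumes the bundled templates render without extra setup):

from flaskprediction import app

def smoke_test_routes():
    client = app.test_client()
    # GET requests should render each page template with status 200
    for route in ("/", "/home", "/classifier", "/regressor",
                  "/classifier/titanic", "/classifier/car",
                  "/regressor/boston", "/regressor/height"):
        resp = client.get(route)
        assert resp.status_code == 200, route

if __name__ == "__main__":
    smoke_test_routes()
    print("all routes responded with 200")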
| 39.767241
| 182
| 0.681986
| 556
| 4,613
| 5.517986
| 0.22482
| 0.057366
| 0.084746
| 0.071708
| 0.454368
| 0.434811
| 0.363755
| 0.321056
| 0.28292
| 0.267927
| 0
| 0.00133
| 0.185346
| 4,613
| 116
| 183
| 39.767241
| 0.815061
| 0
| 0
| 0.28866
| 0
| 0
| 0.133073
| 0.005202
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103093
| false
| 0.010309
| 0.072165
| 0.041237
| 0.329897
| 0.010309
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27a6c1cdc477a10a4c9b691137650bb8e9980229
| 11,859
|
py
|
Python
|
examples/cadre_dymos.py
|
johnjasa/CADRE
|
a4ffd61582b8474953fc309aa540838a14f29dcf
|
[
"Apache-2.0"
] | null | null | null |
examples/cadre_dymos.py
|
johnjasa/CADRE
|
a4ffd61582b8474953fc309aa540838a14f29dcf
|
[
"Apache-2.0"
] | null | null | null |
examples/cadre_dymos.py
|
johnjasa/CADRE
|
a4ffd61582b8474953fc309aa540838a14f29dcf
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import numpy as np
from openmdao.api import Problem, Group, pyOptSparseDriver, DirectSolver, SqliteRecorder
from dymos import Phase
from dymos.utils.indexing import get_src_indices_by_row
from dymos.phases.components import ControlInterpComp
from CADRE.odes_dymos.cadre_orbit_ode import CadreOrbitODE
from CADRE.attitude_dymos.angular_velocity_comp import AngularVelocityComp
from CADRE.odes_dymos.cadre_systems_ode import CadreSystemsODE
GM = 398600.44
rmag = 7000.0
period = 2 * np.pi * np.sqrt(rmag ** 3 / GM)
vcirc = np.sqrt(GM / rmag)
# Use a six-hour duration rather than one full orbit period.
duration = 6 * 3600.0
p = Problem(model=Group())
p.driver = pyOptSparseDriver()
p.driver.options['optimizer'] = 'SNOPT'
p.driver.options['dynamic_simul_derivs'] = True
p.driver.opt_settings['Major iterations limit'] = 1000
p.driver.opt_settings['Major feasibility tolerance'] = 1.0E-4
p.driver.opt_settings['Major optimality tolerance'] = 1.0E-4
p.driver.opt_settings['Major step limit'] = 0.1
p.driver.opt_settings['iSumm'] = 6
p.driver.recording_options['includes'] = ['*']
p.driver.recording_options['record_objectives'] = True
p.driver.recording_options['record_constraints'] = True
p.driver.recording_options['record_desvars'] = True
recorder = SqliteRecorder("cases.sql")
p.driver.add_recorder(recorder)
NUM_SEG = 30
TRANSCRIPTION_ORDER = 3
orbit_phase = Phase('radau-ps',
ode_class=CadreOrbitODE,
num_segments=NUM_SEG,
transcription_order=TRANSCRIPTION_ORDER,
compressed=False)
p.model.add_subsystem('orbit_phase', orbit_phase)
orbit_phase.set_time_options(fix_initial=True, fix_duration=True, duration_ref=duration)
orbit_phase.set_state_options('r_e2b_I', defect_scaler=1000, fix_initial=True, units='km')
orbit_phase.set_state_options('v_e2b_I', defect_scaler=1000, fix_initial=True, units='km/s')
# orbit_phase.set_state_options('SOC', defect_scaler=1, fix_initial=True, units=None)
# orbit_phase.add_design_parameter('P_bat', opt=False, units='W')
orbit_phase.add_control('Gamma', opt=True, lower=-90, upper=90, units='deg', ref0=-90, ref=90,
continuity=True, rate_continuity=True)
# Add a control interp comp to interpolate the rates of O_BI from the orbit phase.
faux_control_options = {'O_BI': {'units': None, 'shape': (3, 3)}}
p.model.add_subsystem('obi_rate_interp_comp',
ControlInterpComp(control_options=faux_control_options,
time_units='s',
grid_data=orbit_phase.grid_data),
promotes_outputs=[('control_rates:O_BI_rate', 'Odot_BI')])
control_input_nodes_idxs = orbit_phase.grid_data.subset_node_indices['control_input']
src_idxs = get_src_indices_by_row(control_input_nodes_idxs, shape=(3, 3))
p.model.connect('orbit_phase.rhs_all.O_BI', 'obi_rate_interp_comp.controls:O_BI',
src_indices=src_idxs, flat_src_indices=True)
p.model.connect('orbit_phase.time.dt_dstau',
('obi_rate_interp_comp.dt_dstau', 'w_B_rate_interp_comp.dt_dstau'))
# Use O_BI and Odot_BI to compute the angular velocity vector
p.model.add_subsystem('angular_velocity_comp',
AngularVelocityComp(num_nodes=orbit_phase.grid_data.num_nodes))
p.model.connect('orbit_phase.rhs_all.O_BI', 'angular_velocity_comp.O_BI')
p.model.connect('Odot_BI', 'angular_velocity_comp.Odot_BI')
# Add another interpolation comp to compute the rate of w_B
faux_control_options = {'w_B': {'units': '1/s', 'shape': (3,)}}
p.model.add_subsystem('w_B_rate_interp_comp',
ControlInterpComp(control_options=faux_control_options,
time_units='s',
grid_data=orbit_phase.grid_data),
promotes_outputs=[('control_rates:w_B_rate', 'wdot_B')])
src_idxs = get_src_indices_by_row(control_input_nodes_idxs, shape=(3,))
p.model.connect('angular_velocity_comp.w_B', 'w_B_rate_interp_comp.controls:w_B',
src_indices=src_idxs, flat_src_indices=True)
# Now add the systems phase
systems_phase = Phase('radau-ps',
ode_class=CadreSystemsODE,
num_segments=NUM_SEG,
transcription_order=TRANSCRIPTION_ORDER,
compressed=False)
p.model.add_subsystem('systems_phase', systems_phase)
systems_phase.set_time_options(fix_initial=True, fix_duration=True, duration_ref=duration)
systems_phase.set_state_options('SOC', defect_ref=10, lower=0.2, fix_initial=True, units=None)
systems_phase.set_state_options('w_RW', defect_ref=10000, fix_initial=True, units='1/s')
systems_phase.set_state_options('data', defect_ref=10, fix_initial=True, units='Gibyte')
systems_phase.set_state_options('temperature', ref0=273, ref=373, defect_ref=1000,
fix_initial=True, units='degK')
systems_phase.add_design_parameter('LD', opt=False, units='d')
systems_phase.add_design_parameter('fin_angle', opt=True, lower=0., upper=np.pi / 2.)
systems_phase.add_design_parameter('antAngle', opt=True, lower=-np.pi / 4, upper=np.pi / 4)
systems_phase.add_design_parameter('cellInstd', opt=True, lower=0.0, upper=1.0, ref=1.0)
# Add r_e2b_I and O_BI as non-optimized controls, allowing them to be connected to external sources
systems_phase.add_control('r_e2b_I', opt=False, units='km')
systems_phase.add_control('O_BI', opt=False)
systems_phase.add_control('w_B', opt=False)
systems_phase.add_control('wdot_B', opt=False)
systems_phase.add_control('P_comm', opt=True, lower=0.0, upper=30.0, units='W')
systems_phase.add_control('Isetpt', opt=True, lower=1.0E-4, upper=0.4, units='A')
systems_phase.add_objective('data', loc='final', ref=-1.0)
# Connect r_e2b_I and O_BI values from all nodes in the orbit phase to the input values
# in the attitude phase.
src_idxs = get_src_indices_by_row(control_input_nodes_idxs, shape=(3,))
p.model.connect('orbit_phase.states:r_e2b_I', 'systems_phase.controls:r_e2b_I',
src_indices=src_idxs, flat_src_indices=True)
p.model.connect('angular_velocity_comp.w_B', 'systems_phase.controls:w_B',
src_indices=src_idxs, flat_src_indices=True)
p.model.connect('wdot_B', 'systems_phase.controls:wdot_B',
src_indices=src_idxs, flat_src_indices=True)
src_idxs = get_src_indices_by_row(control_input_nodes_idxs, shape=(3, 3))
p.model.connect('orbit_phase.rhs_all.O_BI', 'systems_phase.controls:O_BI',
src_indices=src_idxs, flat_src_indices=True)
p.model.options['assembled_jac_type'] = 'csc'
p.model.linear_solver = DirectSolver(assemble_jac=True)
p.setup(check=True)
# from openmdao.api import view_model
# view_model(p.model)
# Initialize values in the orbit phase
p['orbit_phase.t_initial'] = 0.0
p['orbit_phase.t_duration'] = duration
# p['systems_phase.states:w_RW'][:, 0] = 0.0
# p['systems_phase.states:w_RW'][:, 1] = 0.0
# p['systems_phase.states:w_RW'][:, 2] = 0.0
# Default starting orbit
# [ 2.89078958e+03 5.69493134e+03 -2.55340189e+03 2.56640460e-01
# 3.00387409e+00 6.99018448e+00]
p['orbit_phase.states:r_e2b_I'][:, 0] = 2.89078958e+03
p['orbit_phase.states:r_e2b_I'][:, 1] = 5.69493134e+03
p['orbit_phase.states:r_e2b_I'][:, 2] = -2.55340189e+03
p['orbit_phase.states:v_e2b_I'][:, 0] = 2.56640460e-01
p['orbit_phase.states:v_e2b_I'][:, 1] = 3.00387409e+00
p['orbit_phase.states:v_e2b_I'][:, 2] = 6.99018448e+00
# Initialize values in the systems phase
p['systems_phase.t_initial'] = 0.0
p['systems_phase.t_duration'] = duration
# p['systems_phase.states:w_RW'][:, 0] = 0.0
# p['systems_phase.states:w_RW'][:, 1] = 0.0
# p['systems_phase.states:w_RW'][:, 2] = 0.0
p['systems_phase.states:SOC'] = systems_phase.interpolate(ys=[1, .5], nodes='state_input')
p['systems_phase.states:w_RW'] = 100.0
p['systems_phase.states:data'] = systems_phase.interpolate(ys=[0, 10], nodes='state_input')
p['systems_phase.states:temperature'] = 273.0
# p['systems_phase.states:v_e2b_I'][:, 0] = 0.0
# p['systems_phase.states:v_e2b_I'][:, 1] = vcirc
# p['systems_phase.states:v_e2b_I'][:, 2] = 0.0
p['systems_phase.controls:P_comm'] = 0.01
p['systems_phase.controls:Isetpt'] = 0.1
p['systems_phase.design_parameters:LD'] = 5233.5
p['systems_phase.design_parameters:fin_angle'] = np.radians(70.0)
p['systems_phase.design_parameters:cellInstd'] = 0.0
p.run_model()
# Simulate the orbit phase to get an (exact) guess for the orbit history solution.
exp_out = orbit_phase.simulate()
# import matplotlib.pyplot as plt
# from mpl_toolkits import mplot3d
#
# plt.figure()
# ax = plt.axes(projection='3d')
# # plt.plot(exp_out.get_values('r_e2b_I')[:, 0], exp_out.get_values('r_e2b_I')[:, 1], 'b-')
# ax.plot3D(exp_out.get_values('r_e2b_I')[:, 0], exp_out.get_values('r_e2b_I')[:, 1], exp_out.get_values('r_e2b_I')[:, 2], 'b-')
# plt.show()
p['orbit_phase.states:r_e2b_I'] = orbit_phase.interpolate(ys=exp_out.get_values('r_e2b_I'), xs=exp_out.get_values('time'), nodes='state_input')
p['orbit_phase.states:v_e2b_I'] = orbit_phase.interpolate(ys=exp_out.get_values('v_e2b_I'), xs=exp_out.get_values('time'), nodes='state_input')
p.run_driver()
r_e2b_I = p.model.orbit_phase.get_values('r_e2b_I')
v_e2b_I = p.model.orbit_phase.get_values('v_e2b_I')
rmag_e2b = p.model.orbit_phase.get_values('rmag_e2b_I')
# exp_out = systems_phase.simulate(times=500)
import matplotlib.pyplot as plt
plt.figure()
plt.plot(orbit_phase.get_values('r_e2b_I')[:, 0], orbit_phase.get_values('r_e2b_I')[:, 1], 'ro')
plt.figure()
# plt.plot(exp_out.get_values('time')[:, 0], exp_out.get_values('data')[:, 1], 'b-')
plt.plot(systems_phase.get_values('time'), systems_phase.get_values('data'), 'ro')
plt.figure()
# plt.plot(exp_out.get_values('time')[:, 0], exp_out.get_values('data')[:, 1], 'b-')
plt.plot(systems_phase.get_values('time'), systems_phase.get_values('P_comm'), 'r-')
plt.plot(systems_phase.get_values('time'), systems_phase.get_values('P_sol'), 'b-')
plt.plot(systems_phase.get_values('time'), systems_phase.get_values('P_RW'), 'g-')
plt.plot(systems_phase.get_values('time'), systems_phase.get_values('P_bat'), 'k-')
plt.figure()
plt.plot(systems_phase.get_values('time'), systems_phase.get_values('SOC'), 'r-')
plt.plot(systems_phase.get_values('time'), systems_phase.get_values('dXdt:SOC'), 'r--')
plt.show()
# plt.figure()
# plt.plot(exp_out.get_values('time'), exp_out.get_values('SOC'), 'b-')
# plt.plot(phase.get_values('time'), phase.get_values('SOC'), 'ro')
# assert_rel_error(self, rmag_e2b, rmag * np.ones_like(rmag_e2b), tolerance=1.0E-9)
# delta_trua = 2 * np.pi * (duration / period)
# assert_rel_error(self, r_e2b_I[-1, :],
# rmag * np.array([np.cos(delta_trua), np.sin(delta_trua), 0]),
# tolerance=1.0E-9)
# assert_rel_error(self, v_e2b_I[-1, :],
# vcirc * np.array([-np.sin(delta_trua), np.cos(delta_trua), 0]),
# tolerance=1.0E-9)
# def test_partials(self):
# np.set_printoptions(linewidth=10000, edgeitems=1024)
# cpd = self.p.check_partials(compact_print=True, out_stream=None)
# assert_check_partials(cpd, atol=1.0E-4, rtol=1.0)
#
# def test_simulate(self):
# phase = self.p.model.orbit_phase
# exp_out = phase.simulate(times=500)
#
# import matplotlib.pyplot as plt
#
# plt.figure()
# plt.plot(exp_out.get_values('r_e2b_I')[:, 0], exp_out.get_values('r_e2b_I')[:, 1], 'b-')
# plt.plot(phase.get_values('r_e2b_I')[:, 0], phase.get_values('r_e2b_I')[:, 1], 'ro')
#
# # plt.figure()
# # plt.plot(exp_out.get_values('time'), exp_out.get_values('SOC'), 'b-')
# # plt.plot(phase.get_values('time'), phase.get_values('SOC'), 'ro')
#
# plt.show()
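The get_src_indices_by_row calls above select whole rows of node-indexed arrays when wiring outputs of one phase into controls of another. As a rough sketch of what such a helper computes (this is an assumption about its behavior, not the dymos implementation):

import numpy as np

def src_indices_by_row(row_idxs, shape):
    # Hypothetical sketch: flat indices covering entire rows of a
    # (num_nodes, *shape) array, one block of prod(shape) entries per row.
    block = int(np.prod(shape))
    return np.concatenate(
        [np.arange(r * block, (r + 1) * block) for r in row_idxs])

# e.g. rows 0 and 2 of a (num_nodes, 3) array -> [0 1 2 6 7 8]
print(src_indices_by_row([0, 2], (3,)))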
| 42.812274
| 143
| 0.70436
| 1,872
| 11,859
| 4.162927
| 0.151175
| 0.09547
| 0.01604
| 0.036571
| 0.570384
| 0.473374
| 0.4232
| 0.371744
| 0.346336
| 0.328372
| 0
| 0.039112
| 0.141918
| 11,859
| 276
| 144
| 42.967391
| 0.72671
| 0.267645
| 0
| 0.178082
| 0
| 0
| 0.208895
| 0.128774
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.075342
| 0
| 0.075342
| 0.006849
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27a730a5c6d3019f232b6aef55d357908663ff70
| 959
|
py
|
Python
|
deso/Media.py
|
AdityaChaudhary0005/DeSo.py
|
5cb3c757fb21bad472da921c0148675c8957eb17
|
[
"MIT"
] | 11
|
2021-11-12T18:20:22.000Z
|
2022-03-16T02:12:06.000Z
|
deso/Media.py
|
AdityaChaudhary0005/DeSo.py
|
5cb3c757fb21bad472da921c0148675c8957eb17
|
[
"MIT"
] | 6
|
2021-11-25T04:30:44.000Z
|
2021-12-15T12:33:24.000Z
|
deso/Media.py
|
AdityaChaudhary0005/DeSo.py
|
5cb3c757fb21bad472da921c0148675c8957eb17
|
[
"MIT"
] | 8
|
2021-11-19T19:14:50.000Z
|
2022-01-31T21:27:32.000Z
|
from deso.utils import getUserJWT
import requests
class Media:
def __init__(self, publicKey=None, seedHex=None, nodeURL="https://node.deso.org/api/v0/"):
self.SEED_HEX = seedHex
self.PUBLIC_KEY = publicKey
self.NODE_URL = nodeURL
    def uploadImage(self, fileList):
        # Uploads an image to images.deso.org.
        try:
            # Accept a single file path as shorthand for a one-element list.
            if isinstance(fileList, str):
                fileList = [
                    ('file', (fileList, open(fileList, "rb"), 'image/png'))
                ]
            jwt_token = getUserJWT(self.SEED_HEX)
            endpointURL = self.NODE_URL + "upload-image"
payload = {'UserPublicKeyBase58Check': self.PUBLIC_KEY,
'JWT': jwt_token}
response = requests.post(endpointURL, data=payload, files=fileList)
return response
except Exception as e:
return e
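A minimal usage sketch for the class above (the key values are placeholders; a real seed hex must never be hard-coded or committed):

from deso.Media import Media

media = Media(publicKey="BC1YL...placeholder",
              seedHex="0123abcd...placeholder")
response = media.uploadImage("cat.png")
# uploadImage returns a requests.Response on success and the caught
# exception object on failure, so check before using it
if hasattr(response, "status_code"):
    print(response.status_code, response.text)
else:
    print("upload failed:", response)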
| 31.966667
| 95
| 0.554745
| 98
| 959
| 5.295918
| 0.571429
| 0.026975
| 0.042389
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004769
| 0.344108
| 959
| 29
| 96
| 33.068966
| 0.82035
| 0.05318
| 0
| 0
| 0
| 0
| 0.094923
| 0.02649
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.090909
| 0
| 0.318182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27a8cc8eee02c003f65618c441f8c80b6ada0052
| 1,790
|
py
|
Python
|
s3-scan-tar/tests/test_models.py
|
omBratteng/mottak
|
b7d2e1d063b31c2ad89c66e5414297612f91ebe9
|
[
"Apache-2.0"
] | 4
|
2021-03-05T15:39:24.000Z
|
2021-09-15T06:11:45.000Z
|
s3-scan-tar/tests/test_models.py
|
omBratteng/mottak
|
b7d2e1d063b31c2ad89c66e5414297612f91ebe9
|
[
"Apache-2.0"
] | 631
|
2020-04-27T10:39:18.000Z
|
2022-03-31T14:51:38.000Z
|
s3-scan-tar/tests/test_models.py
|
omBratteng/mottak
|
b7d2e1d063b31c2ad89c66e5414297612f91ebe9
|
[
"Apache-2.0"
] | 3
|
2020-02-20T15:48:03.000Z
|
2021-12-16T22:50:40.000Z
|
import pytest
from app.models import AVScanResult
@pytest.fixture
def _scan_result() -> AVScanResult:
clean = 10
virus = 0
skipped = 0
return AVScanResult(clean, virus, skipped)
def test_avscanresult_init(_scan_result):
result = AVScanResult(10, 0, 0)
assert result == _scan_result
def test_status_has_correct_values():
scan_found_virus = AVScanResult(9, 1, 0)
scan_found_nothing = AVScanResult(10, 0, 0)
assert scan_found_virus.get_status() == "Ikke ok"
assert scan_found_nothing.get_status() == "ok"
def test_correct_message_when_no_virus_found(_scan_result):
expected_message = (
"Status etter virus scan: ok\n\n"
"Antall filer kontrollert: 10 av 10\n"
" - Filer uten virus: 10\n"
" - Filer med virus: 0\n"
" - Filer ikke kontrollert pga. filstørrelse: 0"
)
    assert expected_message == _scan_result.generate_message()
def test_correct_message_when_virus_found():
expected_message = (
"Status etter virus scan: Ikke ok\n\n"
"Antall filer kontrollert: 10 av 10\n"
" - Filer uten virus: 8\n"
" - Filer med virus: 2\n"
" - Filer ikke kontrollert pga. filstørrelse: 0"
)
actual = AVScanResult(8, 2, 0)
assert expected_message == actual.generate_message()
def test_correct_message_when_skipped_files():
expected_message = (
"Status etter virus scan: Ikke ok\n\n"
"Antall filer kontrollert: 10 av 15\n"
" - Filer uten virus: 8\n"
" - Filer med virus: 2\n"
" - Filer ikke kontrollert pga. filstørrelse: 5"
)
actual = AVScanResult(8, 2, 5)
assert expected_message == actual.generate_message()
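The fixtures and assertions pin down the AVScanResult contract without showing the class itself. A sketch consistent with the expected statuses and messages above (not the project's actual app.models implementation; the dataclass choice and the skipped-files status rule are assumptions):

from dataclasses import dataclass

@dataclass
class AVScanResult:
    clean: int
    virus: int
    skipped: int

    def get_status(self) -> str:
        # Assumption: any virus hit (or skipped file) means "Ikke ok".
        return "ok" if self.virus == 0 and self.skipped == 0 else "Ikke ok"

    def generate_message(self) -> str:
        total = self.clean + self.virus + self.skipped
        checked = self.clean + self.virus
        return (
            "Status etter virus scan: {}\n\n".format(self.get_status())
            + "Antall filer kontrollert: {} av {}\n".format(checked, total)
            + " - Filer uten virus: {}\n".format(self.clean)
            + " - Filer med virus: {}\n".format(self.virus)
            + " - Filer ikke kontrollert pga. filstørrelse: {}".format(self.skipped)
        )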
| 29.833333
| 62
| 0.655307
| 232
| 1,790
| 4.818966
| 0.219828
| 0.048301
| 0.037567
| 0.056351
| 0.54025
| 0.478533
| 0.321109
| 0.28712
| 0.28712
| 0.28712
| 0
| 0.031947
| 0.248045
| 1,790
| 59
| 63
| 30.338983
| 0.798663
| 0.029609
| 0
| 0.333333
| 0
| 0
| 0.303746
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 1
| 0.133333
| false
| 0
| 0.044444
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27a97aed4e6639ade2261db847e3a6e16989a40c
| 1,424
|
py
|
Python
|
autoload/activate_this.py
|
BonaBeavis/vim-venom
|
a4ed892bd844de51c92e7b59dbc975db02c939b9
|
[
"Vim"
] | 24
|
2020-04-26T11:50:40.000Z
|
2022-02-22T08:05:36.000Z
|
autoload/activate_this.py
|
BonaBeavis/vim-venom
|
a4ed892bd844de51c92e7b59dbc975db02c939b9
|
[
"Vim"
] | 5
|
2021-01-26T12:41:12.000Z
|
2022-01-11T15:40:43.000Z
|
autoload/activate_this.py
|
BonaBeavis/vim-venom
|
a4ed892bd844de51c92e7b59dbc975db02c939b9
|
[
"Vim"
] | 4
|
2020-05-02T21:45:36.000Z
|
2022-03-25T13:51:00.000Z
|
# -*- coding: utf-8 -*-
"""Activate virtualenv for current interpreter:
Source: https://github.com/pypa/virtualenv
Use exec(open(this_file).read(), {'__file__': this_file}).
"""
import os
import site
import sys
try:
abs_file = os.path.abspath(__file__)
except NameError:
    raise AssertionError(
        "You must use exec(open(this_file).read(), {'__file__': this_file})")
# Prepend bin to PATH (this file is inside the bin directory)
bin_dir = os.path.dirname(abs_file)
os.environ["PATH"] = os.pathsep.join(
[bin_dir] + os.environ.get("PATH", "").split(os.pathsep))
# Virtual env is right above bin directory
base = os.path.dirname(bin_dir)
os.environ["VIRTUAL_ENV"] = base
# Concat site-packages library path
IS_WIN = sys.platform == "win32"
IS_PYPY = hasattr(sys, "pypy_version_info")
IS_JYTHON = sys.platform.startswith("java")
if IS_JYTHON or IS_WIN:
site_packages = os.path.join(base, "Lib", "site-packages")
elif IS_PYPY:
site_packages = os.path.join(base, "site-packages")
else:
python_lib = "python{}.{}".format(*sys.version_info)
site_packages = os.path.join(base, "lib", python_lib, "site-packages")
# Add the virtual environment libraries to the host python import mechanism
prev_length = len(sys.path)
site.addsitedir(site_packages)
sys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length]
sys.real_prefix = sys.prefix
sys.prefix = base
# vim: set ts=4 sw=4 tw=80 et :
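Typical usage from an already-running interpreter, per the module docstring (the virtualenv path is a placeholder):

# Activate a virtualenv inside the current interpreter.
activator = "/path/to/venv/bin/activate_this.py"  # placeholder path
exec(open(activator).read(), {'__file__': activator})

import sys
print(sys.prefix)  # now points at the virtualenv base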
| 30.297872
| 78
| 0.714185
| 218
| 1,424
| 4.477064
| 0.422018
| 0.098361
| 0.02459
| 0.055328
| 0.157787
| 0.157787
| 0.131148
| 0.071721
| 0.071721
| 0
| 0
| 0.006509
| 0.136938
| 1,424
| 46
| 79
| 30.956522
| 0.787632
| 0.287921
| 0
| 0
| 0
| 0
| 0.167665
| 0.027944
| 0
| 0
| 0
| 0
| 0.035714
| 1
| 0
| false
| 0
| 0.107143
| 0
| 0.107143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27abc06bb50512111945d911b3687183e05cd80c
| 2,731
|
py
|
Python
|
tattrdb/models.py
|
gmjosack/tattrdb
|
88d46eb049d05a1f0531531c49c2209c2bbbf562
|
[
"MIT"
] | 1
|
2018-11-24T02:33:15.000Z
|
2018-11-24T02:33:15.000Z
|
tattrdb/models.py
|
gmjosack/tattrdb
|
88d46eb049d05a1f0531531c49c2209c2bbbf562
|
[
"MIT"
] | null | null | null |
tattrdb/models.py
|
gmjosack/tattrdb
|
88d46eb049d05a1f0531531c49c2209c2bbbf562
|
[
"MIT"
] | null | null | null |
from sqlalchemy import create_engine
from sqlalchemy import (
Table, Column, Integer, String, Text, Boolean,
ForeignKey, Enum, DateTime
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, backref, sessionmaker
Session = sessionmaker()
Model = declarative_base()
def connect(uri):
engine = create_engine(uri)
Session.configure(bind=engine)
return Session()
def _sync(connection):
""" This will build the database for whatever connection you pass."""
Model.metadata.create_all(connection.bind)
host_tags = Table("host_tags", Model.metadata,
Column("host_id", Integer, ForeignKey("hosts.id"), primary_key=True),
Column("tag_id", Integer, ForeignKey("tags.id"), primary_key=True)
)
class Tag(Model):
__tablename__ = 'tags'
id = Column(Integer(), primary_key=True, nullable=False)
tagname = Column(String(length=255), unique=True)
def as_dict(self):
return {
"id": self.id,
"tagname": self.tagname,
"hosts": [host.hostname for host in self.hosts],
}
class HostAttributes(Model):
__tablename__ = "host_attributes"
host_id = Column(Integer, ForeignKey("hosts.id"), primary_key=True)
attribute_id = Column(Integer, ForeignKey("attributes.id"), primary_key=True)
value = Column(String(length=255), nullable=False)
attribute = relationship("Attribute", lazy="joined", backref="host_assocs")
class Attribute(Model):
__tablename__ = 'attributes'
id = Column(Integer(), primary_key=True, nullable=False)
attrname = Column(String(length=255), unique=True)
hosts = relationship("Host", secondary="host_attributes", lazy="joined", backref="real_attributes")
def as_dict(self):
values = {}
for host_assoc in self.host_assocs:
if host_assoc.value not in values:
values[host_assoc.value] = []
values[host_assoc.value].append(host_assoc.host.hostname)
return {
"id": self.id,
"attrname": self.attrname,
"values": values,
}
class Host(Model):
__tablename__ = 'hosts'
id = Column(Integer(), primary_key=True, nullable=False)
hostname = Column(String(length=255), unique=True)
tags = relationship(
"Tag", secondary=host_tags, lazy="joined", backref="hosts")
attributes = relationship("HostAttributes", lazy="joined", backref="host")
def as_dict(self):
return {
"id": self.id,
"hostname": self.hostname,
"tags": [tag.tagname for tag in self.tags],
"attributes": {attr.attribute.attrname: attr.value for attr in self.attributes}
}
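A short end-to-end sketch against an in-memory SQLite database (table and column names taken from the models above):

session = connect("sqlite://")   # in-memory SQLite engine
_sync(session)                   # create all tables on that connection

web = Tag(tagname="web")
host = Host(hostname="web01.example.com", tags=[web])
session.add(host)
session.commit()

print(host.as_dict())
# {'id': 1, 'hostname': 'web01.example.com', 'tags': ['web'], 'attributes': {}}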
| 28.154639
| 103
| 0.656902
| 314
| 2,731
| 5.557325
| 0.251592
| 0.040115
| 0.05616
| 0.036676
| 0.2
| 0.2
| 0.146705
| 0.103152
| 0
| 0
| 0
| 0.005618
| 0.217869
| 2,731
| 96
| 104
| 28.447917
| 0.81133
| 0.022702
| 0
| 0.184615
| 0
| 0
| 0.093985
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.061538
| 0.030769
| 0.523077
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27ae7ed160d61ff6977fb0ea0dc61ee80279d33b
| 152,955
|
py
|
Python
|
modules/cockatoo/_knitnetwork.py
|
fstwn/Cockatoo
|
0c5f9c515053bfc31e62d20fddc4ae9bece09d88
|
[
"MIT"
] | 9
|
2020-09-26T03:41:21.000Z
|
2021-11-29T06:52:35.000Z
|
modules/cockatoo/_knitnetwork.py
|
fstwn/Cockatoo
|
0c5f9c515053bfc31e62d20fddc4ae9bece09d88
|
[
"MIT"
] | 9
|
2020-08-10T19:38:03.000Z
|
2022-02-24T08:41:32.000Z
|
modules/cockatoo/_knitnetwork.py
|
fstwn/Cockatoo
|
0c5f9c515053bfc31e62d20fddc4ae9bece09d88
|
[
"MIT"
] | 3
|
2020-12-26T08:43:56.000Z
|
2021-10-17T19:37:52.000Z
|
# PYTHON STANDARD LIBRARY IMPORTS ---------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import deque
from collections import OrderedDict
from math import radians
from math import pi
from operator import itemgetter
# DUNDER ----------------------------------------------------------------------
__all__ = [
"KnitNetwork"
]
# THIRD PARTY MODULE IMPORTS --------------------------------------------------
import networkx as nx
# LOCAL MODULE IMPORTS --------------------------------------------------------
from cockatoo._knitnetworkbase import KnitNetworkBase
from cockatoo._knitmappingnetwork import KnitMappingNetwork
from cockatoo._knitdinetwork import KnitDiNetwork
from cockatoo.environment import RHINOINSIDE
from cockatoo.exception import KnitNetworkError
from cockatoo.exception import KnitNetworkGeometryError
from cockatoo.exception import NoEndNodesError
from cockatoo.exception import NoWeftEdgesError
from cockatoo.exception import MappingNetworkError
from cockatoo.utilities import pairwise
# RHINO IMPORTS ---------------------------------------------------------------
if RHINOINSIDE:
import rhinoinside
rhinoinside.load()
from Rhino.Geometry import Brep as RhinoBrep
from Rhino.Geometry import Curve as RhinoCurve
from Rhino.Geometry import Line as RhinoLine
from Rhino.Geometry import Interval as RhinoInterval
from Rhino.Geometry import Mesh as RhinoMesh
from Rhino.Geometry import NurbsSurface as RhinoNurbsSurface
from Rhino.Geometry import Point3d as RhinoPoint3d
from Rhino.Geometry import Polyline as RhinoPolyline
from Rhino.Geometry import Surface as RhinoSurface
from Rhino.Geometry import Vector3d as RhinoVector3d
else:
from Rhino.Geometry import Brep as RhinoBrep
from Rhino.Geometry import Curve as RhinoCurve
from Rhino.Geometry import Line as RhinoLine
from Rhino.Geometry import Interval as RhinoInterval
from Rhino.Geometry import Mesh as RhinoMesh
from Rhino.Geometry import NurbsSurface as RhinoNurbsSurface
from Rhino.Geometry import Point3d as RhinoPoint3d
from Rhino.Geometry import Polyline as RhinoPolyline
from Rhino.Geometry import Surface as RhinoSurface
from Rhino.Geometry import Vector3d as RhinoVector3d
# CLASS DECLARATION -----------------------------------------------------------
class KnitNetwork(KnitNetworkBase):
"""
Datastructure for representing a network (graph) consisting of nodes with
    special attributes as well as 'warp' edges, 'weft' edges and contour edges
which are neither 'warp' nor 'weft'.
Used for the automatic generation of knitting patterns based on mesh or
NURBS surface geometry.
Inherits from :class:`KnitNetworkBase`.
Notes
-----
The implemented algorithms are strongly based on the paper
*Automated Generation of Knit Patterns for Non-developable Surfaces* [1]_.
Also see *KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
The implementation was further influenced by concepts and ideas presented
in the papers *Automatic Machine Knitting of 3D Meshes* [3]_,
*Visual Knitting Machine Programming* [4]_ and
*A Compiler for 3D Machine Knitting* [5]_.
References
----------
.. [1] Popescu, Mariana et al. *Automated Generation of Knit Patterns
for Non-developable Surfaces*
See: `Automated Generation of Knit Patterns for Non-developable
Surfaces <https://block.arch.ethz.ch/brg/files/
POPESCU_DMSP-2017_automated-generation-knit-patterns_1505737906.
pdf>`_
.. [2] Popescu, Mariana *KnitCrete - Stay-in-place knitted formworks for
complex concrete structures*
See: `KnitCrete - Stay-in-place knitted formworks for complex
concrete structures <https://block.arch.ethz.ch/brg/files/
POPESCU_2019_ETHZ_PhD_KnitCrete-Stay-in-place-knitted-fabric-
formwork-for-complex-concrete-structures_small_1586266206.pdf>`_
.. [3] Narayanan, Vidya; Albaugh, Lea; Hodgins, Jessica; Coros, Stelian;
McCann, James *Automatic Machine Knitting of 3D Meshes*
See: `Automatic Machine Knitting of 3D Meshes
<https://textiles-lab.github.io/publications/2018-autoknit/>`_
.. [4] Narayanan, Vidya; Wu, Kui et al. *Visual Knitting Machine
Programming*
See: `Visual Knitting Machine Programming
<https://textiles-lab.github.io/publications/2019-visualknit/>`_
.. [5] McCann, James; Albaugh, Lea; Narayanan, Vidya; Grow, April;
Matusik, Wojciech; Mankoff, Jen; Hodgins, Jessica
*A Compiler for 3D Machine Knitting*
See: `A Compiler for 3D Machine Knitting
<https://la.disneyresearch.com/publication/machine-knitting-
compiler/>`_
"""
# INITIALIZATION ----------------------------------------------------------
def __init__(self, data=None, **attr):
"""
Initialize a KnitNetwork (inherits NetworkX graph) with edges, name,
graph attributes.
Parameters
----------
data : input graph
Data to initialize graph. If data=None (default) an empty
network is created. The data can be an edge list, any
KnitNetworkBase or NetworkX graph object.
name : string, optional (default='')
An optional name for the graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to graph as key=value pairs.
"""
# initialize using original init method
super(KnitNetwork, self).__init__(data=data, **attr)
# also copy the mapping_network attribute if it is already available
if data and isinstance(data, KnitNetwork) and data.mapping_network:
self.mapping_network = data.mapping_network
else:
self.mapping_network = None
@classmethod
def create_from_contours(cls, contours, course_height,
reference_geometry=None):
"""
Create and initialize a KnitNetwork based on a set of contours, a
given course height and an optional reference geometry.
The reference geometry is a mesh or surface which should be described
by the network. While it is optional, it is **HIGHLY** recommended to
provide it!
Parameters
----------
contours : :obj:`list` of :class:`Rhino.Geometry.Polyline`
or :class:`Rhino.Geometry.Curve`
Ordered contours (i.e. isocurves, isolines) to initialize the
KnitNetwork with.
course_height : float
The course height for sampling the contours.
reference_geometry : :class:`Rhino.Geometry.Mesh`
or :class:`Rhino.Geometry.Surface`
Optional underlying geometry that this network is based on.
Returns
-------
KnitNetwork : KnitNetwork
A new, initialized KnitNetwork instance.
Notes
-----
This method will automatically call initialize_position_contour_edges()
on the newly created network!
Raises
------
KnitNetworkGeometryError
If a supplied contour is not a valid instance of
:obj:`Rhino.Geometry.Polyline` or :obj:`Rhino.Geometry.Curve`.
"""
# create network
network = cls(reference_geometry=reference_geometry)
# assign reference_geometry if present and valid
if reference_geometry:
if isinstance(reference_geometry, RhinoMesh):
network.graph["reference_geometry"] = reference_geometry
elif isinstance(reference_geometry, RhinoBrep):
if reference_geometry.IsSurface:
network.graph["reference_geometry"] = RhinoNurbsSurface(
reference_geometry.Surfaces[0])
elif isinstance(reference_geometry, RhinoSurface):
network.graph["reference_geometry"] = reference_geometry
else:
network.graph["reference_geometry"] = None
# divide the contours and fill network with nodes
nodenum = 0
for i, crv in enumerate(contours):
# check input
if not isinstance(crv, RhinoCurve):
if isinstance(crv, RhinoPolyline):
crv = crv.ToPolylineCurve()
else:
errMsg = ("Contour at index {} is not ".format(i) +
"a valid Curve or Polyline!")
raise KnitNetworkGeometryError(errMsg)
# compute divisioncount and divide contour
dc = round(crv.GetLength() / course_height)
tcrv = crv.DivideByCount(dc, True)
if not tcrv:
dpts = [crv.PointAtStart, crv.PointAtEnd]
else:
dpts = [crv.PointAt(t) for t in tcrv]
# loop over all nodes on the current contour
for j, point in enumerate(dpts):
# declare node attributes
vpos = i
vnum = j
if j == 0 or j == len(dpts) - 1:
vleaf = True
else:
vleaf = False
# create network node from rhino point
network.node_from_point3d(nodenum,
point,
position=vpos,
num=vnum,
leaf=vleaf,
start=False,
end=False,
segment=None,
increase=False,
decrease=False,
color=None)
# increment counter
nodenum += 1
# call position contour initialization
network.initialize_position_contour_edges()
return network
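    # NOTE (sketch): a typical end-to-end initialization built from the
    # methods in this class, assuming contours sampled from a surface:
    #
    #   network = KnitNetwork.create_from_contours(contours, course_height)
    #   network.initialize_leaf_connections()
    #   network.initialize_weft_edges()
    #   network.initialize_warp_edges()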
# TEXTUAL REPRESENTATION OF NETWORK ---------------------------------------
def __repr__(self):
"""
Return a textual description of the network.
Returns
-------
description : str
A textual description of the network.
"""
if self.name != "":
name = self.name
else:
name = "KnitNetwork"
nn = len(self.nodes())
ce = len(self.contour_edges)
wee = len(self.weft_edges)
wae = len(self.warp_edges)
data = ("({} Nodes, {} Position Contours, {} Weft, {} Warp)")
data = data.format(nn, ce, wee, wae)
return name + data
def ToString(self):
"""
Return a textual description of the network.
Returns
-------
description : str
A textual description of the network.
Notes
-----
Used for overloading the Grasshopper display in data parameters.
"""
return repr(self)
# INITIALIZATION OF POSITION CONTOUR EDGES --------------------------------
def initialize_position_contour_edges(self):
"""
Creates all initial position contour edges as neither 'warp' nor 'weft'
by iterating over all nodes in the network and grouping them based on
their 'position' attribute.
Notes
-----
This method is automatically called when creating a KnitNetwork using
the create_from_contours method!
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
"""
# get all nodes by position
posList = self.all_nodes_by_position(data=True)
for i, pos in enumerate(posList):
for j, node in enumerate(pos):
k = j + 1
if k < len(pos):
self.create_contour_edge(node, pos[k])
# INITIALIZATION OF 'WEFT' EDGES BETWEEN 'LEAF' NODES ---------------------
def initialize_leaf_connections(self):
"""
Create all initial connections of the 'leaf' nodes by iterating over
all position contours and creating 'weft' edges between the 'leaf'
nodes of the position contours.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
"""
# get all leaves
leafNodes = self.all_leaves_by_position(True)
# loop through all the positions leaves
for i, lpos in enumerate(leafNodes):
j = i + 1
# loop through pairs of leaves
if j < len(leafNodes):
startLeaf = lpos[0]
endLeaf = lpos[1]
nextStart = leafNodes[j][0]
nextEnd = leafNodes[j][1]
# add edges to the network
self.create_weft_edge(startLeaf, nextStart)
self.create_weft_edge(endLeaf, nextEnd)
# INITIALIZATION OF PRELIMINARY 'WEFT' EDGES ------------------------------
def attempt_weft_connection(self, node, candidate, source_nodes,
max_connections=4, verbose=False):
"""
Method for attempting a 'weft' connection to a candidate
node based on certain parameters.
Parameters
----------
node : :obj:`tuple`
2-tuple representing the source node for the possible 'weft' edge.
        candidate : :obj:`tuple`
            2-tuple representing the target node for the possible 'weft' edge.
source_nodes : :obj:`list`
List of nodes on the position contour of node. Used to check if
the candidate node already has a connection.
max_connections : int, optional
The new 'weft' connection will only be made if the candidate nodes
number of connected neighbors is below this.
Defaults to ``4``.
verbose : bool, optional
If ``True``, this routine and all its subroutines will print
messages about what is happening to the console.
Defaults to ``False``.
Returns
-------
bool
``True`` if the connection has been made,
``False`` otherwise.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
"""
# define verbose print function
v_print = print if verbose else lambda *a, **k: None
# get connected neighbors
connecting_neighbors = self[candidate[0]]
# only do something if the maximum is not reached
if len(connecting_neighbors) < max_connections:
# determine if the node is already connected to a node from
# the input source nodes
isConnected = False
for cn in connecting_neighbors:
if cn in [v[0] for v in source_nodes]:
isConnected = True
# print info on verbose setting
v_print("Candidate node {} is ".format(candidate[0]) +
"already connected! " +
"Skipping to next " +
"node...")
break
# check the flag and act accordingly
if not isConnected:
# print info on verbose setting
v_print("Connecting node {} to best ".format(node[0]) +
"candidate {}.".format(candidate[0]))
# if all conditions are met, make the 'weft' connection
if node[1]["position"] < candidate[1]["position"]:
self.create_weft_edge(node, candidate)
else:
self.create_weft_edge(candidate, node)
return True
else:
return False
else:
return False
def _create_initial_weft_connections(self,
contour_set,
force_continuous_start=False,
force_continuous_end=False,
max_connections=4,
precise=False,
verbose=False):
"""
Private method for creating initial 'weft' connections for the supplied
set of contours, starting from the first contour in the set and
propagating to the last contour in the set.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
"""
# define verbose print function
v_print = print if verbose else lambda *a, **k: None
if len(contour_set) < 2:
v_print("Not enough contours in contour set!")
return
# print info on verbose output
v_print("Creating initial 'weft' connections for contour set...")
# loop over all nodes of positions (list of lists of tuples)
for i, pos in enumerate(contour_set):
# pos is a list of tuples (nodes)
if i < len(contour_set):
j = i + 1
if j == len(contour_set):
break
# get initial and target nodes without 'leaf' nodes
initial_nodes = contour_set[i][1:-1]
target_nodes = contour_set[j][1:-1]
# options for continuous start and end
if force_continuous_start:
initial_nodes = initial_nodes[1:]
target_nodes = target_nodes[1:]
if force_continuous_end:
initial_nodes = initial_nodes[:-1]
target_nodes = target_nodes[:-1]
# skip if one of the contours has no nodes
if len(initial_nodes) == 0 or len(target_nodes) == 0:
continue
# define forbidden node index
forbidden_node = -1
# loop through all nodes on the current position
for k, node in enumerate(initial_nodes):
# print info on verbose setting
v_print("Processing node {} on position {}:".format(
node[0], node[1]["position"]))
# get the geometry for the current node
thisPt = node[1]["geo"]
# filtering according to forbidden nodes
target_nodes = [tn for tn in target_nodes
if tn[0] >= forbidden_node]
if len(target_nodes) == 0:
continue
# get four closest nodes on adjacent contour
if precise:
allDists = [thisPt.DistanceTo(tv[1]["geo"])
for tv in target_nodes]
else:
allDists = [thisPt.DistanceToSquared(tv[1]["geo"])
for tv in target_nodes]
# sort the target nodes by distance to current node
allDists, sorted_target_nodes = zip(
*sorted(zip(allDists,
target_nodes),
key=itemgetter(0)))
# the four closest nodes are the possible connections
possible_connections = sorted_target_nodes[:4]
# print info on verbose setting
v_print("Possible connections: {}".format(
[pc[0] for pc in possible_connections]))
# handle edge case where there is no possible
# connection or just one
if len(possible_connections) == 0:
# skip if there are no possible connections
continue
elif len(possible_connections) == 1:
# attempt to connect to only possible candidate
fCand = possible_connections[0]
res = self.attempt_weft_connection(
node,
fCand,
initial_nodes,
max_connections=max_connections,
verbose=verbose)
# set forbidden node
if res:
forbidden_node = fCand[0]
continue
# get the contours current direction
if k < len(initial_nodes)-1:
contourDir = RhinoLine(
thisPt,
initial_nodes[k+1][1]["geo"]).Direction
elif k == len(initial_nodes)-1:
contourDir = RhinoLine(
initial_nodes[k-1][1]["geo"], thisPt).Direction
contourDir.Unitize()
# get the directions of the possible connections
candidatePoints = [pc[1]["geo"]
for pc in possible_connections]
candidateDirections = [RhinoLine(
thisPt, cp).Direction for cp in candidatePoints]
[cd.Unitize() for cd in candidateDirections]
# get the angles between contour dir and possible conn dir
normals = [RhinoVector3d.CrossProduct(
contourDir, cd) for cd in candidateDirections]
angles = [RhinoVector3d.VectorAngle(
contourDir, cd, n) for cd, n in zip(
candidateDirections, normals)]
                    # compute deltas as a measure of perpendicularity
deltas = [abs(a - (0.5 * pi)) for a in angles]
# sort possible connections by distance, then by delta
allDists, deltas, angles, most_perpendicular = zip(
*sorted(zip(
allDists,
deltas,
angles,
possible_connections[:]),
key=itemgetter(0, 1)))
# get node neighbors
nNeighbors = self[node[0]]
# compute angle difference
aDelta = angles[0] - angles[1]
# CONNECTION FOR LEAST ANGLE CHANGE -----------------------
if len(nNeighbors) > 2 and aDelta < radians(6.0):
# print info on verbose setting
v_print("Using procedure for least angle " +
"change connection...")
# get previous connected edge and its direction
prevEdges = self.node_weft_edges(node[0], data=True)
                        if len(prevEdges) > 1:
                            raise KnitNetworkError(
                                "More than one previous 'weft' connection! "
                                "This was unexpected...")
                        prevDir = prevEdges[0][2]["geo"].Direction
prevDir.Unitize()
# get directions for the best two candidates
mpA = most_perpendicular[0]
mpB = most_perpendicular[1]
dirA = RhinoLine(thisPt, mpA[1]["geo"]).Direction
dirB = RhinoLine(thisPt, mpB[1]["geo"]).Direction
dirA.Unitize()
dirB.Unitize()
# get normals for angle measurement
normalA = RhinoVector3d.CrossProduct(prevDir, dirA)
normalB = RhinoVector3d.CrossProduct(prevDir, dirB)
# measure the angles
angleA = RhinoVector3d.VectorAngle(
prevDir,
dirA,
normalA)
angleB = RhinoVector3d.VectorAngle(
prevDir,
dirB,
normalB)
# select final candidate for connection by angle
if angleA < angleB:
fCand = mpA
else:
fCand = mpB
# attempt to connect to final candidate
res = self.attempt_weft_connection(
node,
fCand,
initial_nodes,
max_connections=max_connections,
verbose=verbose)
# set forbidden node for next pass
if res:
forbidden_node = fCand[0]
# CONNECTION FOR MOST PERPENDICULAR --------------------
else:
# print info on verbose setting
v_print("Using procedure for most " +
"perpendicular connection...")
# define final candidate
fCand = most_perpendicular[0]
# attempt to connect to final candidate node
res = self.attempt_weft_connection(
node,
fCand,
initial_nodes,
max_connections=max_connections,
verbose=verbose)
# set forbidden node if connection has been made
if res:
forbidden_node = fCand[0]
def _create_second_pass_weft_connections(self,
contour_set,
include_leaves=False,
least_connected=False,
precise=False,
verbose=False):
"""
Private method for creating second pass 'weft' connections for the
given set of contours.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
"""
v_print = print if verbose else lambda *a, **k: None
# get attributes only once
position_attributes = nx.get_node_attributes(self, "position")
num_attributes = nx.get_node_attributes(self, "num")
if len(contour_set) < 2:
v_print("Not enough contours in contour set!")
return
# print info on verbose output
v_print("Creating second pass 'weft' connections for contour set...")
# loop over all nodes of positions (list of lists of tuples)
for i, pos in enumerate(contour_set):
# get initial nodes
initial_nodes = contour_set[i]
# get target position candidates
            if 0 < i < len(contour_set) - 1:
target_positionA = contour_set[i-1][0][1]["position"]
target_positionB = contour_set[i+1][0][1]["position"]
elif i == 0:
target_positionA = None
target_positionB = contour_set[i+1][0][1]["position"]
elif i == len(contour_set)-1:
target_positionA = contour_set[i-1][0][1]["position"]
target_positionB = None
# loop through all nodes on current position
for k, node in enumerate(initial_nodes):
# print info on verbose setting
v_print(
"Processing node {} on position {}:".format(
node[0], node[1]["position"]))
# get connecting edges on target position
conWeftEdges = self.node_weft_edges(node[0], data=True)
conPos = []
                if len(conWeftEdges) == 0:
                    # print info on verbose setting
                    v_print("No previously connected weft edges...")
for weftEdge in conWeftEdges:
weftEdgeFrom = weftEdge[0]
weftEdgeTo = weftEdge[1]
if weftEdgeFrom != node[0]:
posEdgeTarget = position_attributes[weftEdgeFrom]
elif weftEdgeTo != node[0]:
posEdgeTarget = position_attributes[weftEdgeTo]
if posEdgeTarget not in conPos:
conPos.append(posEdgeTarget)
# select target position and continue in edge case scenarios
target_positions = []
                if target_positionA is None:
if target_positionB in conPos:
v_print("Node is connected. Skipping...")
continue
target_positions.append(target_positionB)
                elif target_positionB is None:
if target_positionA in conPos:
v_print("Node is connected. Skipping...")
continue
target_positions.append(target_positionA)
elif ((target_positionA in conPos) and
(target_positionB in conPos)):
v_print("Node is connected. Skipping...")
continue
elif ((target_positionB in conPos) and
(target_positionA not in conPos)):
target_positions.append(target_positionA)
elif ((target_positionA in conPos) and
(target_positionB not in conPos)):
target_positions.append(target_positionB)
                elif (target_positionA is not None and
                      target_positionB is not None and len(conPos) == 0):
target_positions = [target_positionA, target_positionB]
# print info on verbose setting
if verbose and len(target_positions) > 1:
v_print("Two target positions: {}, {}".format(
*target_positions))
elif verbose and len(target_positions) == 1:
v_print("Target position: {}".format(target_positions[0]))
# skip if there are no target positions
if len(target_positions) == 0:
v_print("No target position! Skipping...")
continue
# only proceed if there is a target position
for target_position in target_positions:
# get target nodes
target_nodes = self.nodes_on_position(
target_position, True)
# get the point geo of this node
thisPt = node[1]["geo"]
# get a window of possible connections on the target
                    # position by looking for the previous node on this contour
# connected to target position, then propagating along
# the target position to the next node that is connected
# to this position. these two nodes will define the window
# NOTE: the current node should never have a connection
# to target position (theoretically!), otherwise it should
# have fallen through the checks by now
# print info on verbose setting
v_print("Target position is {}. ".format(target_position) +
"Computing window...")
# get the previous node on this contour
prevNode = initial_nodes[k-1]
# assume that the previous node has a connection
prevCon = self.node_weft_edges(prevNode[0], data=True)
# get possible connections from previous connection
possible_connections = []
for edge in prevCon:
edgeFrom = edge[0]
edgeTo = edge[1]
if edgeFrom != prevNode[0]:
prevNodeTargetPos = position_attributes[edgeFrom]
prevNodeTargetIndex = num_attributes[edgeFrom]
elif edgeTo != prevNode[0]:
prevNodeTargetPos = position_attributes[edgeTo]
prevNodeTargetIndex = num_attributes[edgeTo]
if prevNodeTargetPos == target_position:
possible_connections.append(
target_nodes[prevNodeTargetIndex])
# the farthest connection of the previous node is the first
# point for our window
if len(possible_connections) > 1:
possible_connections.sort(key=lambda x: x[1]["num"])
possible_connections.reverse()
start_of_window = possible_connections[0]
elif len(possible_connections) == 1:
start_of_window = possible_connections[0]
elif len(possible_connections) == 0:
# print info on verbose setting
v_print("No possible connection, skipping...")
continue
# get the next node on this pos that is
# connected to target position
if k < len(initial_nodes)-1:
future_nodes = initial_nodes[k+1:]
for futurenode in future_nodes:
filteredWeftEdges = []
futureWeftEdges = self.node_weft_edges(
futurenode[0], data=True)
for futureweft in futureWeftEdges:
fwn = (futureweft[1], self.node[futureweft[1]])
fwn_pos = fwn[1]["position"]
fwn_num = fwn[1]["num"]
if (fwn_pos == target_position and
fwn_num == start_of_window[1]["num"]):
# if the start of the window is found,
# it is the only possible connection
filteredWeftEdges = [futureweft]
break
if (fwn_pos == target_position and
fwn_num > start_of_window[1]["num"]):
filteredWeftEdges.append(futureweft)
else:
continue
if (not filteredWeftEdges or
len(filteredWeftEdges) == 0):
end_of_window = None
continue
# sort the filtered weft edges based on the 'num'
# attribute of their target node
filteredWeftEdges.sort(
key=lambda x: self.node[x[1]]["num"])
# get the end of the window from the first edge on
# the target position
end_of_window = (
filteredWeftEdges[0][1],
self.node[filteredWeftEdges[0][1]])
break
else:
end_of_window = None
# define the window
                    if end_of_window is None:
window = [start_of_window]
elif end_of_window == start_of_window:
window = [start_of_window]
else:
window = [(n, d) for n, d
in self.nodes_iter(data=True)
if n >= start_of_window[0]
and n <= end_of_window[0]]
if len(window) == 0:
# print info on verbose setting
v_print("Length of window is 0, skipping...")
elif len(window) == 1:
# print info on verbose setting
v_print("Window has only one node.")
v_print("Connecting to node {}".format(window[0][0]) +
" on position {}...".format(
window[0][1]["position"]))
# connect weft edge
if node[1]["position"] < window[0][1]["position"]:
self.create_weft_edge(node, window[0])
else:
self.create_weft_edge(window[0], node)
else:
# print info on verbose setting
v_print("Processing window nodes: {}".format(
[w[0] for w in window]))
# sort nodes in window by distance
if precise:
allDists = [thisPt.DistanceTo(pc[1]["geo"])
for pc in window]
else:
allDists = [thisPt.DistanceToSquared(pc[1]["geo"])
for pc in window]
allDists, window = zip(*sorted(zip(allDists, window),
key=itemgetter(0)))
if least_connected:
wn_count = [len(self[n[0]]) for n in window]
wn_count, allDists, window = zip(
*sorted(zip(allDists, wn_count, window),
key=itemgetter(0, 1)))
# set final candidate node
fCand = window[0]
else:
# get the contours current direction
if k < len(initial_nodes)-1:
contourDir = RhinoLine(
thisPt,
initial_nodes[k+1][1]["geo"]).Direction
elif k == len(initial_nodes)-1:
contourDir = RhinoLine(
initial_nodes[k-1][1]["geo"],
thisPt).Direction
contourDir.Unitize()
# get the directions of the possible connections
candidatePoints = [pc[1]["geo"]
for pc in window]
candidateDirections = [
RhinoLine(thisPt, cp).Direction
for cp in candidatePoints]
[cd.Unitize() for cd in candidateDirections]
# get the angles between contour dir and window dir
normals = [RhinoVector3d.CrossProduct(
contourDir, cd)
for cd in candidateDirections]
angles = [RhinoVector3d.VectorAngle(
contourDir, cd, n) for cd, n in zip(
candidateDirections, normals)]
                            # compute deltas as a measure of perpendicularity
deltas = [abs(a - (0.5 * pi)) for a in angles]
# sort window by distance, then by delta
allDists, deltas, most_perpendicular = zip(*sorted(
zip(allDists,
deltas,
window),
key=itemgetter(0, 1)))
# set final candidate node for connection
fCand = most_perpendicular[0]
# print info on verbose setting
v_print("Connecting to node " +
"{} on position {}...".format(
fCand[0],
fCand[1]["position"]))
# connect weft edge to best target
if node[1]["position"] < fCand[1]["position"]:
self.create_weft_edge(node, fCand)
else:
self.create_weft_edge(fCand, node)
def initialize_weft_edges(self,
start_index=None,
propagate_from_center=False,
force_continuous_start=False,
force_continuous_end=False,
angle_threshold=radians(6.0),
max_connections=4,
least_connected=False,
precise=False,
verbose=False):
"""
Attempts to create all the preliminary 'weft' connections for the
network.
Parameters
----------
start_index : int, optional
This value defines at which index the list of contours is split.
If no index is supplied, will split the list at the longest
contour.
Defaults to ``None``.
propagate_from_center : bool, optional
If ``True``, will propagate left and right set of contours from
the center contour defined by start_index or the longest contour
( < | > ). Otherwise, the propagation of the contours left to the
center will start at the left boundary ( > | > ).
Defaults to ``False``
force_continuous_start : bool, optional
If ``True``, forces the first row of stitches to be continuous.
Defaults to ``False``.
force_continuous_end : bool, optional
If ``True``, forces the last row of stitches to be continuous.
Defaults to ``False``.
max_connections : int, optional
The maximum connections a node is allowed to have to be considered
for an additional 'weft' connection.
Defaults to ``4``.
least_connected : bool, optional
If ``True``, uses the least connected node from the found
candidates.
Defaults to ``False``
precise : bool, optional
If ``True``, the distance between nodes will be calculated using
the Rhino.Geometry.Point3d.DistanceTo method, otherwise the much
faster Rhino.Geometry.Point3d.DistanceToSquared method is used.
Defaults to ``False``.
verbose : bool, optional
If ``True``, this routine and all its subroutines will print
messages about what is happening to the console. Great for
debugging and analysis.
Defaults to ``False``.
Raises
------
KnitNetworkError
If the supplied splitting index is too high.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
"""
# get all the positions / contours
AllPositions = self.all_nodes_by_position(data=True)
        if start_index is None:
# get index of longest contour
start_index = self.longest_position_contour()[0]
elif start_index >= len(AllPositions):
raise KnitNetworkError("Supplied splitting index is too high!")
# if continuous start is True, connect the whole first row
if force_continuous_start:
chain = [pos[1] for pos in AllPositions]
for pair in pairwise(chain):
self.create_weft_edge(pair[0], pair[1])
# if continuous end is True, connect the whole last row
if force_continuous_end:
chain = [pos[-2] for pos in AllPositions]
for pair in pairwise(chain):
self.create_weft_edge(pair[0], pair[1])
# split position list into two sets based on start index
leftContours = AllPositions[0:start_index+1]
# optional propagation from center
# NOTE: this has shown problems / weird stitch geometries
if propagate_from_center:
leftContours.reverse()
rightContours = AllPositions[start_index:]
# create the initial weft connections
self._create_initial_weft_connections(
leftContours,
force_continuous_start=force_continuous_start,
force_continuous_end=force_continuous_end,
max_connections=max_connections,
precise=precise,
verbose=verbose)
self._create_initial_weft_connections(
rightContours,
force_continuous_start=force_continuous_start,
force_continuous_end=force_continuous_end,
max_connections=max_connections,
precise=precise,
verbose=verbose)
# create second pass weft connections
self._create_second_pass_weft_connections(
leftContours,
least_connected,
precise=precise,
verbose=verbose)
self._create_second_pass_weft_connections(
rightContours,
least_connected,
precise=precise,
verbose=verbose)
return True
# INITIALIZATION OF PRELIMINARY 'WARP' EDGES ------------------------------
def initialize_warp_edges(self, contour_set=None, verbose=False):
"""
Method for initializing first 'warp' connections once all preliminary
'weft' connections are made.
Parameters
----------
contour_set : :obj:`list`, optional
List of lists of nodes to initialize 'warp' edges. If none are
supplied, all nodes ordered by their 'position' attributes are
used.
Defaults to ``None``.
verbose : bool, optional
If ``True``, will print verbose output to the console.
Defaults to ``False``.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
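Examples
--------
A minimal usage sketch; ``network`` is an assumed KnitNetwork on
which initialize_weft_edges has already been called.
>>> network.initialize_warp_edges(verbose=False)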
"""
# if no contour set is provided, use all contours of this network
if contour_set is None:
contour_set = self.all_nodes_by_position(data=True)
# loop through all positions in the set of contours
for i, pos in enumerate(contour_set):
# get all nodes on current contour
initial_nodes = pos
# loop through all nodes on this contour
for k, node in enumerate(initial_nodes):
connected_edges = self.edges(node[0], data=True)
numweft = len(self.node_weft_edges(node[0]))
if (len(connected_edges) > 4 or numweft > 2
or i == 0 or i == len(contour_set)-1):
# set 'end' attribute for this node
self.node[node[0]]["end"] = True
# loop through all candidate edges
for j, edge in enumerate(connected_edges):
# if it's not a 'weft' edge, assign attributes
if not edge[2]["weft"]:
connected_node = edge[1]
# set 'end' attribute on the connected node
self.node[connected_node]["end"] = True
# set 'warp' attribute to current edge
self[edge[0]][edge[1]]["warp"] = True
# ASSIGNING OF 'SEGMENT' ATTRIBUTES FOR MAPPING NETWORK -------------------
def _traverse_weft_edge_until_end(self, start_end_node, start_node,
seen_segments, way_nodes=None,
way_edges=None, end_nodes=None):
"""
Private method for traversing a path of 'weft' edges until another
'end' node is discovered.
"""
# initialize output lists
if way_nodes is None:
way_nodes = deque()
way_nodes.append(start_node[0])
if way_edges is None:
way_edges = deque()
if end_nodes is None:
end_nodes = deque()
# get the connected edges and filter them, sort out the ones that
# already have a 'segment' attribute assigned
connected_weft_edges = self.node_weft_edges(start_node[0], data=True)
filtered_weft_edges = []
for cwe in connected_weft_edges:
if cwe[2]["segment"] != None:
continue
if cwe in way_edges:
continue
elif (cwe[1], cwe[0], cwe[2]) in way_edges:
continue
filtered_weft_edges.append(cwe)
if len(filtered_weft_edges) > 1:
print(filtered_weft_edges)
print("More than one filtered candidate weft edge! " +
"Segment complete...?")
elif len(filtered_weft_edges) == 1:
fwec = filtered_weft_edges[0]
connected_node = (fwec[1], self.node[fwec[1]])
# if the connected node is an end node, the segment is finished
if connected_node[1]["end"]:
# find out which order to set segment attributes
if start_end_node > connected_node[0]:
segStart = connected_node[0]
segEnd = start_end_node
else:
segStart = start_end_node
segEnd = connected_node[0]
if (segStart, segEnd) in seen_segments:
segIndex = len([s for s in seen_segments
if s == (segStart, segEnd)])
else:
segIndex = 0
# append the relevant data to the lists
end_nodes.append(connected_node[0])
way_edges.append(fwec)
seen_segments.append((segStart, segEnd))
# set final 'segment' attributes to all the way nodes
for waynode in way_nodes:
self.node[waynode]["segment"] = (segStart,
segEnd,
segIndex)
# set final 'segment' attributes to all the way edges
for wayedge in way_edges:
self[wayedge[0]][wayedge[1]]["segment"] = (segStart,
segEnd,
segIndex)
# return the seen segments
return seen_segments
else:
# set the initial segment attribute to the node
self.node[connected_node[0]]["segment"] = (start_end_node,
None,
None)
# set the initial segment attribute to the edge
self[fwec[0]][fwec[1]]["segment"] = (start_end_node,
None,
None)
# append the relevant data to the lists
way_nodes.append(connected_node[0])
way_edges.append(fwec)
# call this method recursively until an 'end' node is found
return self._traverse_weft_edge_until_end(
start_end_node,
connected_node,
seen_segments,
way_nodes,
way_edges,
end_nodes)
else:
return seen_segments
def traverse_weft_edges_and_set_attributes(self, start_end_node):
"""
Traverse a path of 'weft' edges starting from an 'end' node until
another 'end' node is discovered. Set 'segment' attributes to nodes
and edges along the way.
Parameters
----------
start_end_node : :obj:`tuple`
2-tuple representing the node to start the traversal.
"""
# get connected weft edges and sort them by their connected node
weft_connections = self.node_weft_edges(start_end_node[0], data=True)
weft_connections.sort(key=lambda x: x[1])
# loop through all connected weft edges
seen_segments = []
for cwe in weft_connections:
# check if connected weft edge already has a segment attribute
if cwe[2]["segment"]:
continue
# get connected node
connected_node = (cwe[1], self.node[cwe[1]])
# check the connected node. if it is an end node, we are done
if connected_node[1]["end"]:
# get segment start and end
if start_end_node[0] > connected_node[0]:
segStart = connected_node[0]
segEnd = start_end_node[0]
else:
segStart = start_end_node[0]
segEnd = connected_node[0]
# get segment index
if (segStart, segEnd) in seen_segments:
segIndex = len([s for s in seen_segments
if s == (segStart, segEnd)])
else:
segIndex = 0
# set the final segment attribute to the edge
self[cwe[0]][cwe[1]]["segment"] = (segStart, segEnd, segIndex)
seen_segments.append((segStart, segEnd))
# if the connected node is not an end node, we need to travel
# until we find one
else:
seen_segments = self._traverse_weft_edge_until_end(
start_end_node[0],
connected_node,
seen_segments,
way_edges=[cwe])
def assign_segment_attributes(self):
"""
Get the segmentation for loop generation and assign 'segment'
attributes to 'weft' edges and nodes.
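Examples
--------
A minimal sketch of the assumed call order; ``network`` is an
assumed KnitNetwork instance with initialized 'position' contours.
>>> network.initialize_weft_edges()
True
>>> network.initialize_warp_edges()
>>> network.assign_segment_attributes()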
"""
if len(self.weft_edges) == 0:
errMsg = ("No 'weft' edges in KnitNetwork! Segmentation " +
"is impossible.")
raise NoWeftEdgesError(errMsg)
if len(self.end_nodes) == 0:
errMsg = ("No 'end' nodes in KnitNetwork! Segmentation " +
"is impossible.")
raise NoEndNodesError(errMsg)
# remove contour and 'warp' edges and store them
warp_storage = []
contour_storage = []
for edge in self.edges(data=True):
if not edge[2]["weft"]:
if edge[2]["warp"]:
warp_storage.append(edge)
else:
contour_storage.append(edge)
self.remove_edge(edge[0], edge[1])
# get all 'end' nodes ordered by their 'position' attribute
all_ends_by_position = self.all_ends_by_position(data=True)
# loop through all 'end' nodes
for position in all_ends_by_position:
for endnode in position:
self.traverse_weft_edges_and_set_attributes(endnode)
# add all previously removed edges back into the network
for edge in warp_storage + contour_storage:
self.add_edge(edge[0], edge[1], attr_dict=edge[2])
# CREATION OF MAPPING NETWORK ---------------------------------------------
def create_mapping_network(self):
"""
Creates the corresponding mapping network for the final loop generation
from a KnitNetwork instance with fully assigned 'segment' attributes.
The created mapping network will be part of the KnitNetwork instance.
It can be accessed using the mapping_network property.
Notes
-----
All nodes without an 'end' attribute as well as all 'weft' edges are
removed by this step. Final nodes as well as final 'weft' and 'warp'
edges can only be created using the mapping network.
Returns
-------
success : bool
``True`` if the mapping network has been successfully created,
``False`` otherwise.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
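Examples
--------
A minimal usage sketch; ``network`` is an assumed KnitNetwork with
fully assigned 'segment' attributes.
>>> network.create_mapping_network()
True
>>> mapnet = network.mapping_network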
"""
# create a new KnitMappingNetwork instance
MappingNetwork = KnitMappingNetwork()
# get all edges of the current network by segment
weft_edges = sorted(self.weft_edges, key=lambda x: x[2]["segment"])
warp_edges = self.warp_edges
# initialize deque container for segment ids
segment_ids = deque()
# loop through all 'weft' edges and fill container with unique ids
for edge in weft_edges:
segment_id = edge[2]["segment"]
if segment_id not in segment_ids:
segment_ids.append(segment_id)
# error checking
if len(segment_ids) == 0:
errMsg = (
"The network contains no 'weft' edges with a 'segment' " +
"attribute assigned to them. A KnitMappingNetwork can " +
"only be created from a KnitNetwork with initialized " +
"'weft' edges for courses and corresponding 'warp' " +
"edges connecting their 'end' nodes.")
raise NoWeftEdgesError(errMsg)
# loop through all unique segment ids
for id in segment_ids:
# get the corresponding edges for this id and sort them
segment_edges = [e for e in weft_edges if e[2]["segment"] == id]
segment_edges.sort(key=lambda x: x[0])
# extract start and end nodes
start_node = (id[0], self.node[id[0]])
endNode = (id[1], self.node[id[1]])
# get all the geometry of the individual edges
segment_geo = [e[2]["geo"] for e in segment_edges]
# create a segment contour edge in the mapping network
res = MappingNetwork.create_segment_contour_edge(
start_node,
endNode,
id,
segment_geo)
if not res:
errMsg = ("SegmentContourEdge at segment id {} could not be " +
"created!")
raise KnitNetworkError(errMsg)
# add all warp edges to the mapping network to avoid lookup hassle
for warp_edge in warp_edges:
if warp_edge[0] > warp_edge[1]:
warp_from = warp_edge[1]
warp_to = warp_edge[0]
else:
warp_from = warp_edge[0]
warp_to = warp_edge[1]
MappingNetwork.add_edge(warp_from, warp_to, attr_dict=warp_edge[2])
# set mapping network property for this instance
self.mapping_network = MappingNetwork
# ditch all edges that are not 'warp' and nodes without 'end' attribute
# (materialize node and edge lists first so removal is safe)
for n, d in self.nodes(data=True):
if not d["end"]:
self.remove_node(n)
for s, e, d in self.edges(data=True):
if not d["warp"]:
self.remove_edge(s, e)
return True
# MAPPING NETWORK PROPERTY ------------------------------------------------
def _get_mapping_network(self):
"""
Gets the associated mapping network for this KnitNetwork instance.
"""
return self._mapping_network
def _set_mapping_network(self, mapping_network):
"""
Setter for this instance's associated mapping network.
"""
# set mapping network to instance
if (isinstance(mapping_network, KnitMappingNetwork)
or mapping_network is None):
self._mapping_network = mapping_network
else:
raise ValueError("Input is not of type KnitMappingNetwork!")
mapping_network = property(_get_mapping_network,
_set_mapping_network,
None,
"The associated mapping network of this " +
"KnitNetwork instance.")
# RETRIEVAL OF NODES AND EDGES FROM MAPPING NETWORK -----------------------
def all_nodes_by_segment(self, data=False, edges=False):
"""
Returns all nodes of the network ordered by 'segment' attribute.
Note: 'end' nodes are not included!
Parameters
----------
data : bool, optional
If ``True``, the nodes contained in the output will be represented
as 2-tuples in the form of (node_identifier, node_data).
Defaults to ``False``
edges : bool, optional
If ``True``, the returned output list will contain 3-tuples in the
form of (segment_value, segment_nodes, segment_edge).
Defaults to ``False``.
Returns
-------
nodes_by_segment : :obj:`list` of :obj:`tuple`
List of 2-tuples in the form of (segment_value, segment_nodes) or
3-tuples in the form of (segment_value, segment_nodes,
segment_edge) depending on the ``edges`` argument.
Raises
------
MappingNetworkError
If the mapping network is not available for this instance.
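Examples
--------
A minimal usage sketch mirroring the internal use of this method;
``network`` is an assumed KnitNetwork with a built mapping network.
>>> segment_values, nodes_by_segment = zip(
...     *network.all_nodes_by_segment(data=True))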
"""
# retrieve mappingnetwork
mapnet = self.mapping_network
if not mapnet:
errMsg = ("Mapping network has not been built for this instance!")
raise MappingNetworkError(errMsg)
allSegments = mapnet.segment_contour_edges
allSegmentNodes = [(n, d) for n, d
in self.nodes_iter(data=True) if d["segment"]]
segdict = {}
for n in allSegmentNodes:
if n[1]["segment"] not in segdict:
segdict[n[1]["segment"]] = [n]
else:
segdict[n[1]["segment"]].append(n)
anbs = []
if data and edges:
for segment in allSegments:
segval = segment[2]["segment"]
try:
segnodes = sorted(segdict[segval])
except KeyError:
segnodes = []
anbs.append((segval, segnodes, segment))
elif data and not edges:
for segment in allSegments:
segval = segment[2]["segment"]
try:
segnodes = sorted(segdict[segval])
except KeyError:
segnodes = []
anbs.append((segval, segnodes))
elif not data and edges:
for segment in allSegments:
segval = segment[2]["segment"]
try:
segnodes = sorted(segdict[segval])
except KeyError:
segnodes = []
anbs.append((segval, [sn[0] for sn in segnodes], segment))
elif not data and not edges:
for segment in allSegments:
segval = segment[2]["segment"]
try:
segnodes = sorted(segdict[segval])
except KeyError:
segnodes = []
anbs.append((segval, [sn[0] for sn in segnodes]))
return anbs
# STITCH WIDTH SAMPLING ---------------------------------------------------
def sample_segment_contours(self, stitch_width):
"""
Samples the segment contours of the mapping network with the given
stitch width. The resulting points are added to the network as nodes
and a 'segment' attribute is assigned to them based on their origin
segment contour edge.
Parameters
----------
stitch_width : float
The width of a single stitch inside the knit.
Raises
------
MappingNetworkError
If the mapping network is not available for this instance.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
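Examples
--------
A minimal usage sketch; ``network`` is an assumed KnitNetwork with a
built mapping network, and the stitch width value is arbitrary.
>>> network.sample_segment_contours(stitch_width=2.0)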
"""
# retrieve mapping network
mapnet = self.mapping_network
if not mapnet:
errMsg = ("Mapping network has not been built for this " +
"instance, sampling segment contours is impossible!")
raise MappingNetworkError(errMsg)
# get the highest index of all the nodes in the network
maxNode = max(self.nodes())
# get all the segment geometry ordered by segment number
segment_contours = mapnet.segment_contour_edges
# sample all segments with the stitch width
nodeindex = maxNode + 1
for i, seg in enumerate(segment_contours):
# get the geometry of the contour and reparameterize its domain
geo = seg[2]["geo"]
geo = geo.ToPolylineCurve()
geo.Domain = RhinoInterval(0.0, 1.0)
# compute the division points
crvlen = geo.GetLength()
density = int(round(crvlen / stitch_width))
if density == 0:
continue
divT = geo.DivideByCount(density, False)
divPts = [geo.PointAt(t) for t in divT]
# set leaf attribute
# TODO: better leaf strategy - this works but assigns false
# 'leaf' nodes. Usually not a problem, but it should be fixed anyway.
if self.node[seg[0]]["leaf"] and self.node[seg[1]]["leaf"]:
nodeLeaf = True
else:
nodeLeaf = False
# add all the nodes to the network
for j, pt in enumerate(divPts):
# add node to network
self.node_from_point3d(
nodeindex,
pt,
position=None,
num=j,
leaf=nodeLeaf,
start=False,
end=False,
segment=seg[2]["segment"],
increase=False,
decrease=False,
color=None)
# increment node index
nodeindex += 1
# CREATION OF FINAL 'WEFT' CONNECTIONS ------------------------------------
def create_final_weft_connections(self):
"""
Loop through all the segment contour edges and create all 'weft'
connections for this network.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
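Examples
--------
A minimal usage sketch; ``network`` is an assumed KnitNetwork whose
segment contours have already been sampled with a stitch width.
>>> network.create_final_weft_connections()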
"""
# get all nodes by segment contour
SegmentValues, AllNodesBySegment = zip(*self.all_nodes_by_segment(
data=True))
# loop through all the segment contours
for i, segment in enumerate(AllNodesBySegment):
segval = SegmentValues[i]
firstNode = (segval[0], self.node[segval[0]])
lastNode = (segval[1], self.node[segval[1]])
if len(segment) == 0:
self.create_weft_edge(firstNode, lastNode, segval)
elif len(segment) == 1:
self.create_weft_edge(firstNode, segment[0], segval)
self.create_weft_edge(segment[0], lastNode, segval)
else:
# loop through all nodes on the current segment and create
# the final 'weft' edges
for j, node in enumerate(segment):
if j == 0:
self.create_weft_edge(firstNode, node, segval)
self.create_weft_edge(node, segment[j+1], segval)
elif j < len(segment)-1:
self.create_weft_edge(node, segment[j+1], segval)
elif j == len(segment)-1:
self.create_weft_edge(node, lastNode, segval)
# CREATION OF FINAL 'WARP' CONNECTIONS ------------------------------------
def attempt_warp_connection(self, node, candidate, source_nodes,
max_connections=4, verbose=False):
"""
Method for attempting a 'warp' connection to a candidate
node based on certain parameters.
Parameters
----------
node : node
The starting node for the possible 'warp' edge.
candidate : node
The target node for the possible 'warp' edge.
source_nodes : :obj:`list`
List of nodes on the position contour of the node. Used to check
if the candidate node already has a connection.
max_connections : int, optional
The new 'warp' connection will only be made if the candidate
node's number of connected neighbors is below this.
Defaults to ``4``.
verbose : bool, optional
If ``True``, this routine and all its subroutines will print
messages about what is happening to the console.
Defaults to ``False``.
Returns
-------
result : bool
``True`` if the connection has been made, otherwise ``False``.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
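Examples
--------
A minimal sketch; ``network`` is an assumed KnitNetwork instance,
``node`` and ``candidate`` are assumed 2-tuples of
(identifier, attribute_dict) and ``source_nodes`` is a list of such
tuples.
>>> res = network.attempt_warp_connection(
...     node, candidate, source_nodes, max_connections=4)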
"""
# define verbose print function
v_print = print if verbose else lambda *a, **k: None
connecting_neighbors = self[candidate[0]]
if len(connecting_neighbors) < max_connections:
isConnected = False
for cn in connecting_neighbors:
if cn in [v[0] for v in source_nodes]:
isConnected = True
# print info on verbose setting
v_print("Candidate node {} is ".format(candidate[0]) +
"already connected! Skipping to next node...")
break
if not isConnected:
# print info on verbose setting
v_print("Connecting node {} to best candidate {}.".format(
node[0],
candidate[0]))
# finally create the warp edge for good
self.create_warp_edge(node, candidate)
return True
else:
return False
else:
return False
def _create_initial_warp_connections(self, segment_pair, max_connections=4,
precise=False, verbose=False):
"""
Private method for creating first pass 'warp' connections for the
supplied pair of segment chains.
The pair is only defined as a list of nodes; the nodes have to be
supplied with their attribute data!
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
"""
# define verbose print function
v_print = print if verbose else lambda *a, **k: None
if len(segment_pair) < 2:
v_print("Not enough contour segments in supplied set!")
return
# print info on verbose output
v_print("Creating initial 'warp' connections for contour set...")
# get initial and target nodes without 'end' nodes
initial_nodes = segment_pair[0]
target_nodes = segment_pair[1]
# define forbidden node index
forbidden_node = -1
# do nothing if one of the sets is empty
if len(initial_nodes) == 0 or len(target_nodes) == 0:
return
# loop through all nodes on the current segment
for k, node in enumerate(initial_nodes):
# get geometry from current node
thisPt = node[1]["geo"]
# print info on verbose setting
v_print("Processing node {} on segment {}:".format(
node[0],
node[1]["segment"]))
# filtering according to forbidden nodes
if forbidden_node != -1:
target_nodes = [tnode for tx, tnode in enumerate(target_nodes)
if tx >= target_nodes.index(forbidden_node)]
if len(target_nodes) == 0:
continue
# compute distances to target nodes
if precise:
allDists = [thisPt.DistanceTo(tn[1]["geo"])
for tn in target_nodes]
else:
allDists = [thisPt.DistanceToSquared(tn[1]["geo"])
for tn in target_nodes]
# sort nodes after distances
allDists, sorted_target_nodes = zip(*sorted(
zip(allDists, target_nodes),
key=itemgetter(0)))
# the four nearest nodes are the possible connections
possible_connections = sorted_target_nodes[:4]
# print info on verbose setting
v_print("Possible connections: {}".format([pc[0] for pc in
possible_connections]))
# handle edge case where there is no possible connection or just
# one
if len(possible_connections) == 0:
continue
elif len(possible_connections) == 1:
# attempt to connect to only possible candidate
fCand = possible_connections[0]
res = self.attempt_warp_connection(
node,
fCand,
initial_nodes,
max_connections=max_connections,
verbose=verbose)
# set forbidden node
if res:
forbidden_node = fCand
continue
# get the segment contour's current direction
if k < len(initial_nodes)-1:
contourDir = RhinoLine(thisPt,
initial_nodes[k+1][1]["geo"]).Direction
elif k == len(initial_nodes)-1:
contourDir = RhinoLine(
initial_nodes[k-1][1]["geo"], thisPt).Direction
contourDir.Unitize()
# get the directions of the possible connections
candidatePoints = [pc[1]["geo"] for pc in possible_connections]
candidateDirections = [RhinoLine(
thisPt, cp).Direction for cp in candidatePoints]
for cd in candidateDirections:
cd.Unitize()
# get the angles between segment contour dir and possible conn dir
normals = [RhinoVector3d.CrossProduct(
contourDir, cd) for cd in candidateDirections]
angles = [RhinoVector3d.VectorAngle(
contourDir, cd, n) for cd, n in zip(
candidateDirections, normals)]
# compute deltas as a measure of perpendicularity
deltas = [abs(a - (0.5 * pi)) for a in angles]
# sort possible connections first by distance, then by delta
(allDists,
deltas,
angles,
most_perpendicular) = zip(*sorted(zip(allDists,
deltas,
angles,
possible_connections[:]),
key=itemgetter(0, 1)))
# compute angle difference
aDelta = angles[0] - angles[1]
# get node neighbors
nNeighbors = self[node[0]]
# CONNECTION FOR LEAST ANGLE CHANGE -------------------------------
if len(nNeighbors) > 2 and aDelta < radians(6.0):
# print info on verbose setting
v_print("Using procedure for least angle " +
"change connection...")
# get previous connected edge and its direction
prevEdges = self.node_warp_edges(node[0], data=True)
if len(prevEdges) > 1:
print("More than one previous 'warp' connection! " +
"This was unexpected. Taking the first one...")
prevDir = prevEdges[0][2]["geo"].Direction
prevDir.Unitize()
# get directions for the best two candidates
mpA = most_perpendicular[0]
mpB = most_perpendicular[1]
dirA = RhinoLine(thisPt, mpA[1]["geo"]).Direction
dirB = RhinoLine(thisPt, mpB[1]["geo"]).Direction
dirA.Unitize()
dirB.Unitize()
# get normals for angle measurement
normalA = RhinoVector3d.CrossProduct(prevDir, dirA)
normalB = RhinoVector3d.CrossProduct(prevDir, dirB)
# measure the angles
angleA = RhinoVector3d.VectorAngle(prevDir, dirA, normalA)
angleB = RhinoVector3d.VectorAngle(prevDir, dirB, normalB)
# select final candidate for connection
if angleA < angleB:
fCand = mpA
else:
fCand = mpB
# attempt connection to final candidate
res = self.attempt_warp_connection(
node,
fCand,
initial_nodes,
max_connections=max_connections,
verbose=verbose)
# set forbidden node
if res:
forbidden_node = fCand
continue
# CONNECTION FOR MOST PERPENDICULAR -------------------------------
else:
# print info on verbose setting
v_print("Using procedure for most " +
"perpendicular connection...")
# define final candidate node
fCand = most_perpendicular[0]
# attempt connection to final candidate
res = self.attempt_warp_connection(
node,
fCand,
initial_nodes,
max_connections=max_connections,
verbose=verbose)
# set forbidden node
if res:
forbidden_node = fCand
def _create_second_pass_warp_connection(self, source_nodes, source_index,
window, precise=False,
verbose=False, reverse=False):
"""
Private method for creating second pass 'warp' connections for the
given set of contours.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
"""
# define verbose print function
v_print = print if verbose else lambda *a, **k: None
if len(window) == 0:
# print info on verbose setting
v_print("Length of window is 0, skipping...")
elif len(window) == 1:
# print info on verbose setting
v_print("Window has only one node.")
v_print("Connecting to node {}.".format(window[0][0]))
# connect 'warp' edge
if reverse:
self.create_warp_edge(window[0], source_nodes[source_index])
else:
self.create_warp_edge(source_nodes[source_index], window[0])
else:
# retrieve the point of the current source node
thisPt = source_nodes[source_index][1]["geo"]
# print info on verbose setting
v_print("Processing window nodes: {}".format(
[w[0] for w in window]))
# sort nodes in window by distance
if precise:
allDists = [thisPt.DistanceTo(pc[1]["geo"])
for pc in window]
else:
allDists = [thisPt.DistanceToSquared(pc[1]["geo"])
for pc in window]
allDists, window = zip(*sorted(zip(allDists, window),
key=itemgetter(0)))
# get the contour's current direction
if source_index < len(source_nodes)-1:
sourceDir = RhinoLine(
thisPt,
source_nodes[source_index+1][1]["geo"]).Direction
elif source_index == len(source_nodes)-1:
sourceDir = RhinoLine(source_nodes[source_index-1][1]["geo"],
thisPt).Direction
sourceDir.Unitize()
# get the directions of the possible connections
candidatePoints = [pc[1]["geo"] for pc in window]
candidateDirections = [RhinoLine(thisPt, cp).Direction for cp
in candidatePoints]
for cd in candidateDirections:
cd.Unitize()
# get the angles between contour dir and window dir
normals = [RhinoVector3d.CrossProduct(sourceDir, cd)
for cd in candidateDirections]
angles = [RhinoVector3d.VectorAngle(sourceDir, cd, n) for cd, n
in zip(candidateDirections, normals)]
# compute deltas as a measure of perpendicularity
deltas = [abs(a - (0.5 * pi)) for a in angles]
# sort window by distance, then by delta
allDists, deltas, most_perpendicular = zip(*sorted(
zip(allDists,
deltas,
window),
key=itemgetter(0, 1)))
# set final candidate node for connection
fCand = most_perpendicular[0]
# print info on verbose setting
v_print("Connecting to node " +
"{} on segment {}...".format(fCand[0],
fCand[1]["segment"]))
# connect warp edge to best target
if reverse:
self.create_warp_edge(fCand, source_nodes[source_index])
else:
self.create_warp_edge(source_nodes[source_index], fCand)
def create_final_warp_connections(self, max_connections=4,
include_end_nodes=True, precise=False,
verbose=False):
"""
Create the final 'warp' connections by building chains of segment
contour edges and connecting them.
For each source chain, a target chain is found using an
'educated guessing' strategy. This means that the possible target
chains are guessed by leveraging known topology facts about the network
and its special 'end' nodes.
Parameters
----------
max_connections : int, optional
The number of maximum previous connections a candidate node for a
'warp' connection is allowed to have.
Defaults to ``4``.
include_end_nodes : bool, optional
If ``True``, 'end' nodes between adjacent segment contours in a
source chain will be included in the first pass of connecting
'warp' edges.
Defaults to ``True``.
precise : bool, optional
If ``True``, the distance between nodes will be calculated using
the Rhino.Geometry.Point3d.DistanceTo method, otherwise the much
faster Rhino.Geometry.Point3d.DistanceToSquared method is used.
Defaults to ``False``.
verbose : bool, optional
If ``True``, this routine and all its subroutines will print
messages about what is happening to the console. Great for
debugging and analysis.
Defaults to ``False``.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
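Examples
--------
A minimal usage sketch; ``network`` is an assumed KnitNetwork on
which create_final_weft_connections has already been called.
>>> network.create_final_warp_connections(
...     max_connections=4,
...     include_end_nodes=True,
...     precise=False)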
"""
# define verbose print function
v_print = print if verbose else lambda *a, **k: None
# get all segment ids, nodes per segment and edges
SegmentValues, AllNodesBySegment, SegmentContourEdges = zip(
*self.all_nodes_by_segment(data=True, edges=True))
# build a dictionary of the segments by their index
SegmentDict = dict(zip(SegmentValues,
zip(SegmentContourEdges, AllNodesBySegment)))
# build source and target chains
source_chains, target_chain_dict = self.mapping_network.build_chains(
False,
True)
# initialize container dict for connected chains
connected_chains = dict()
# initialize segment mapping dictionaries
source_to_target = OrderedDict()
target_to_source = OrderedDict()
source_to_key = dict()
target_to_key = dict()
# ITERATE OVER SOURCE SEGMENT CHAINS ----------------------------------
# loop through all source chains and find targets in target chains
# using an 'educated guess strategy'
for i, source_chain in enumerate(source_chains):
# get the first and last node ('end' nodes)
firstNode = (source_chain[0][0][0],
self.node[source_chain[0][0][0]])
lastNode = (source_chain[0][-1][1],
self.node[source_chain[0][-1][1]])
# get the chain value of the current chain
chain_value = source_chain[1]
# extract the ids of the current chain
current_ids = tuple(source_chain[0])
# extract the current chains geometry
current_chain_geo_list = [SegmentDict[id][0][2]["geo"]
for id in current_ids]
current_chain_geo = RhinoCurve.JoinCurves(
[ccg.ToPolylineCurve() for ccg in current_chain_geo_list])[0]
current_chain_spt = current_chain_geo.PointAtNormalizedLength(0.5)
# retrieve the current segments from the segment dictionary by id
current_segment_nodes = [SegmentDict[id][1] for id in current_ids]
# retrieve the current nodes from the list of current segments
current_nodes = []
for j, csn in enumerate(current_segment_nodes):
if include_end_nodes and j > 0:
current_nodes.append((current_ids[j][0],
self.node[current_ids[j][0]]))
current_nodes.extend(csn)
# reset the target key
target_key = None
# print info on verbose setting
v_print("--------------------------------------------------------")
v_print("Processing segment chain {} ...".format(source_chain))
# CASE 1 - ENCLOSED SHORT ROW <====> ALL CASES --------------------
# look for possible targets using a guess about the chain value
possible_target_keys = [key for key in target_chain_dict
if key[0] == chain_value[0]
and key[1] == chain_value[1]
and key not in connected_chains]
if len(possible_target_keys) > 0:
# find the correct chain by using geometric distance
possible_target_chains = [target_chain_dict[tk] for tk
in possible_target_keys]
# for every chain in the possible target chains, get the
# geometry and compute a sample distance
filtered_target_keys = []
possible_target_chain_dists = []
for j, ptc in enumerate(possible_target_chains):
# retrieve possible target geometry and join into one crv
ptc_geo_list = [SegmentDict[id][0][2]["geo"] for id in ptc]
if ptc_geo_list == current_chain_geo_list:
continue
ptc_geo = RhinoCurve.JoinCurves(
[ptcg.ToPolylineCurve() for ptcg in ptc_geo_list])[0]
# get a sample point and measure the distance to the
# source chain sample point
ptc_spt = ptc_geo.PointAtNormalizedLength(0.5)
if precise:
ptc_dist = current_chain_spt.DistanceTo(ptc_spt)
else:
ptc_dist = current_chain_spt.DistanceToSquared(ptc_spt)
# append the filtered key to the key list
filtered_target_keys.append(possible_target_keys[j])
# append the measured distance to the distance list
possible_target_chain_dists.append(ptc_dist)
if len(filtered_target_keys) > 0:
# sort filtered target keys using the distances
possible_target_chain_dists, filtered_target_keys = zip(
*sorted(zip(
possible_target_chain_dists,
filtered_target_keys),
key=itemgetter(0)))
# set target key
target_key = filtered_target_keys[0]
else:
target_key = None
else:
target_key = None
# attempt warp connections if we have found a correct key
if target_key:
# get the guessed target chain from the chain dictionary
target_chain = target_chain_dict[target_key]
# extract the ids for node retrieval
target_ids = tuple(target_chain)
# retrieve the target nodes from the segment dictionary by id
target_segment_nodes = [SegmentDict[id][1]
for id in target_ids]
target_nodes = []
for j, tsn in enumerate(target_segment_nodes):
if include_end_nodes and j > 0:
target_nodes.append((
target_ids[j][0], self.node[target_ids[j][0]]))
target_nodes.extend(tsn)
# print info on verbose setting
v_print("<=====> detected. Connecting to " +
"segment chain {}.".format(target_key))
# we have successfully verified our target segment and
# can create some warp edges!
segment_pair = [current_nodes, target_nodes]
# fill mapping dictionaries
if current_ids not in source_to_target:
source_to_target[current_ids] = target_ids
if current_ids not in source_to_key:
source_to_key[current_ids] = chain_value
if target_ids not in target_to_source:
target_to_source[target_ids] = current_ids
if target_ids not in target_to_key:
target_to_key[target_ids] = target_key
# create initial warp connections between the chains
connected_chains[target_key] = True
self._create_initial_warp_connections(
segment_pair,
max_connections=max_connections,
precise=precise,
verbose=verbose)
continue
# CASE 2 - SHORT ROW TO THE RIGHT <=====/ ALL CASES ---------------
# look for possible targets using a guess about the chain value
possible_target_keys = [key for key in target_chain_dict
if key[0] == chain_value[0]
and key[1] == chain_value[1]+1
and key not in connected_chains]
if len(possible_target_keys) == 1:
target_key = possible_target_keys[0]
elif len(possible_target_keys) > 1:
# find the correct chain by using geometric distance
possible_target_chains = [target_chain_dict[tk] for tk
in possible_target_keys]
# for every chain in the possible target chains, get the
# geometry and compute a sample distance
possible_target_chain_dists = []
for ptc in possible_target_chains:
# retrieve possible target geometry and join into one crv
ptc_geo = [SegmentDict[id][0][2]["geo"] for id in ptc]
ptc_geo = RhinoCurve.JoinCurves([pg.ToPolylineCurve()
for pg in ptc_geo])[0]
# get a sample point and measure the distance to the
# source chain sample point
ptc_spt = ptc_geo.PointAtNormalizedLength(0.5)
if precise:
ptc_dist = current_chain_spt.DistanceTo(ptc_spt)
else:
ptc_dist = current_chain_spt.DistanceToSquared(ptc_spt)
# append the measured distance to the list
possible_target_chain_dists.append(ptc_dist)
# sort possible target keys using the distances
possible_target_chain_dists, possible_target_keys = zip(
*sorted(zip(possible_target_chain_dists,
possible_target_keys),
key=itemgetter(0)))
target_key = possible_target_keys[0]
else:
target_key = None
# attempt warp connections if we have found a correct key
if target_key:
# get the guessed target chain from the chain dictionary
target_chain = target_chain_dict[target_key]
# extract the ids for node retrieval
target_ids = tuple(target_chain)
# retrieve the target nodes from the segment dictionary by id
target_segment_nodes = [SegmentDict[id][1]
for id in target_ids]
target_nodes = []
for j, tsn in enumerate(target_segment_nodes):
if include_end_nodes and j > 0:
target_nodes.append((target_ids[j][0],
self.node[target_ids[j][0]]))
target_nodes.extend(tsn)
targetFirstNode = target_ids[0][0]
targetLastNode = target_ids[-1][1]
# check if firstNode and targetFirstNode are connected via a
# 'warp' edge to verify
if (targetFirstNode == firstNode[0]
and targetLastNode in self[lastNode[0]]):
# print info on verbose setting
v_print("<=====/ detected. Connecting " +
"to segment {}.".format(target_key))
# we have successfully verified our target segment and
# can create some warp edges!
segment_pair = [current_nodes, target_nodes]
connected_chains[target_key] = True
# fill mapping dictionaries
if current_ids not in source_to_target:
source_to_target[current_ids] = target_ids
if current_ids not in source_to_key:
source_to_key[current_ids] = chain_value
if target_ids not in target_to_source:
target_to_source[target_ids] = current_ids
if target_ids not in target_to_key:
target_to_key[target_ids] = target_key
# create initial 'warp' connections between the chains
self._create_initial_warp_connections(
segment_pair,
max_connections=max_connections,
precise=precise,
verbose=verbose)
continue
else:
v_print("No real connection for <=====/. Next case...")
# CASE 3 - SHORT ROW TO THE LEFT /====> ALL CASES -----------------
# look for possible targets using a guess about the chain value
possible_target_keys = [key for key in target_chain_dict
if key[0] == chain_value[0]+1
and key[1] == chain_value[1]
and key not in connected_chains]
if len(possible_target_keys) == 1:
target_key = possible_target_keys[0]
elif len(possible_target_keys) > 1:
# find the correct chain by using geometric distance
possible_target_chains = [target_chain_dict[tk] for tk
in possible_target_keys]
# for every chain in the possible target chains, get the
# geometry and compute a sample distance
possible_target_chain_dists = []
for ptc in possible_target_chains:
# retrieve possible target geometry and join into one crv
ptc_geo = [SegmentDict[id][0][2]["geo"] for id in ptc]
ptc_geo = RhinoCurve.JoinCurves(
[pg.ToPolylineCurve() for pg in ptc_geo])[0]
# get a sample point and measure the distance to the
# source chain sample point
ptc_spt = ptc_geo.PointAtNormalizedLength(0.5)
if precise:
ptc_dist = current_chain_spt.DistanceTo(ptc_spt)
else:
ptc_dist = current_chain_spt.DistanceToSquared(ptc_spt)
# append the measured distance to the list
possible_target_chain_dists.append(ptc_dist)
# sort possible target keys using the distances
possible_target_chain_dists, possible_target_keys = zip(
*sorted(zip(possible_target_chain_dists,
possible_target_keys),
key=itemgetter(0)))
target_key = possible_target_keys[0]
else:
target_key = None
# attempt warp connections if we have found a correct key
if target_key:
# get the guessed target chain from the chain dictionary
target_chain = target_chain_dict[target_key]
# extract the ids for node retrieval
target_ids = tuple(target_chain)
# retrieve the target nodes from the segment dictionary by id
target_segment_nodes = [SegmentDict[id][1]
for id in target_ids]
target_nodes = []
for j, tsn in enumerate(target_segment_nodes):
if include_end_nodes and j > 0:
target_nodes.append((target_ids[j][0],
self.node[target_ids[j][0]]))
target_nodes.extend(tsn)
targetFirstNode = target_ids[0][0]
targetLastNode = target_ids[-1][1]
# check if firstNode and targetFirstNode are connected via a
# 'warp' edge to verify
if (targetFirstNode in self[firstNode[0]]
and targetLastNode == lastNode[0]):
# print info on verbose setting
v_print("/=====> detected. Connecting " +
"to segment {}.".format(target_key))
# we have successfully verified our target segment and
# can create some warp edges!
segment_pair = [current_nodes, target_nodes]
connected_chains[target_key] = True
# fill mapping dictionaries
if current_ids not in source_to_target:
source_to_target[current_ids] = target_ids
if current_ids not in source_to_key:
source_to_key[current_ids] = chain_value
if target_ids not in target_to_source:
target_to_source[target_ids] = current_ids
if target_ids not in target_to_key:
target_to_key[target_ids] = target_key
self._create_initial_warp_connections(
segment_pair,
max_connections=max_connections,
precise=precise,
verbose=verbose)
continue
else:
v_print("No real connection for /=====>. Next case...")
# CASE 4 - REGULAR ROW /=====/ ALL CASES --------------------------
# look for possible targets using a guess about the chain value
possible_target_keys = [key for key in target_chain_dict
if key[0] == chain_value[0]+1
and key[1] == chain_value[1]+1
and key not in connected_chains]
if len(possible_target_keys) == 1:
target_key = possible_target_keys[0]
elif len(possible_target_keys) > 1:
# find the correct chain by using geometric distance
possible_target_chains = [target_chain_dict[tk] for tk
in possible_target_keys]
# for every chain in the possible target chains, get the
# geometry and compute a sample distance
possible_target_chain_dists = []
for ptc in possible_target_chains:
# retrieve possible target geometry and join into one crv
ptc_geo = [SegmentDict[id][0][2]["geo"] for id in ptc]
ptc_geo = RhinoCurve.JoinCurves([pg.ToPolylineCurve()
for pg in ptc_geo])[0]
# get a sample point and measure the distance to the
# source chain sample point
ptc_spt = ptc_geo.PointAtNormalizedLength(0.5)
if precise:
ptc_dist = current_chain_spt.DistanceTo(ptc_spt)
else:
ptc_dist = current_chain_spt.DistanceToSquared(ptc_spt)
# append the measured distance to the list
possible_target_chain_dists.append(ptc_dist)
# sort possible target keys using the distances
possible_target_chain_dists, possible_target_keys = zip(
*sorted(zip(possible_target_chain_dists,
possible_target_keys),
key=itemgetter(0)))
target_key = possible_target_keys[0]
else:
target_key = None
# attempt warp connections if we have found a correct key
if target_key:
# get the guessed target chain from the chain dictionary
target_chain = target_chain_dict[target_key]
# extract the ids for node retrieval
target_ids = tuple(target_chain)
# retrieve the target nodes from the segment dictionary by id
target_segment_nodes = [SegmentDict[id][1]
for id in target_ids]
target_nodes = []
for j, tsn in enumerate(target_segment_nodes):
if include_end_nodes and j > 0:
target_nodes.append((target_ids[j][0],
self.node[target_ids[j][0]]))
target_nodes.extend(tsn)
# set target first and last node ('end' nodes)
targetFirstNode = target_ids[0][0]
targetLastNode = target_ids[-1][1]
# check if firstNode and targetFirstNode are connected via a
# 'warp' edge to verify
if (targetFirstNode in self[firstNode[0]]
and targetLastNode in self[lastNode[0]]):
# print info on verbose setting
v_print("/=====/ detected. Connecting " +
"to segment {}.".format(target_key))
# we have successfully verified our target segment and
# can create some warp edges!
segment_pair = [current_nodes, target_nodes]
connected_chains[target_key] = True
# fill mapping dictionaries
if current_ids not in source_to_target:
source_to_target[current_ids] = target_ids
if current_ids not in source_to_key:
source_to_key[current_ids] = chain_value
if target_ids not in target_to_source:
target_to_source[target_ids] = current_ids
if target_ids not in target_to_key:
target_to_key[target_ids] = target_key
self._create_initial_warp_connections(
segment_pair,
max_connections=max_connections,
precise=precise,
verbose=verbose)
continue
else:
v_print("No real connection for /=====/. No cases match.")
# INVOKE SECOND PASS FOR SOURCE ---> TARGET ---------------------------
for i, current_chain in enumerate(source_to_target):
v_print("--------------------------------------------------------")
v_print("S>T Current Chain: {}".format(current_chain))
# build a list of nodes containing all nodes in the current chain
# including all 'end' nodes
current_chain_nodes = []
for j, ccid in enumerate(current_chain):
current_chain_nodes.append((ccid[0], self.node[ccid[0]]))
current_chain_nodes.extend(SegmentDict[ccid][1])
current_chain_nodes.append((current_chain[-1][1],
self.node[current_chain[-1][1]]))
# retrieve target chain from the source to target mapping
target_chain = source_to_target[current_chain]
cckey = source_to_key[current_chain]
tckey = target_to_key[target_chain]
# build a list of nodes containing all nodes in the target chain
# including all 'end' nodes
target_chain_nodes = []
for j, tcid in enumerate(target_chain):
target_chain_nodes.append((tcid[0], self.node[tcid[0]]))
target_chain_nodes.extend(SegmentDict[tcid][1])
target_chain_nodes.append((target_chain[-1][1],
self.node[target_chain[-1][1]]))
# initialize start of window marker
start_of_window = -1
# loop through all nodes on the current chain
for k, node in enumerate(current_chain_nodes):
# find out if the current node is already principally connected
node_connected = False
# if the node is the first or the last node, it is defined as
# connected per se
if k == 0 or k == len(current_chain_nodes)-1:
node_connected = True
# find out if the current node is already connected to the
# target chain, get node warp edges and their target nodes
node_warp_edges = self.node_warp_edges(node[0], data=False)
warp_edge_targets = [we[1] for we in node_warp_edges]
# loop over warp edge targets to get the start of the window
for wet in warp_edge_targets:
# loop over target chain nodes
for n, tcn in enumerate(target_chain_nodes):
# if a warp edge target is in the target chain, the
# node is connected and the start of the window for
# the next node is defined
if wet == tcn[0]:
if n > start_of_window or start_of_window == -1:
start_of_window = n
node_connected = True
# if the node is not connected to the target chain, we
# need to find the end of the window
if not node_connected:
v_print("Node: {}".format(node[0]))
v_print("Start of window: {}".format(start_of_window))
# re-check start of window for <.====/ case
if len(target_chain_nodes) >= 2 and start_of_window == -1:
if target_chain_nodes[0] == current_chain_nodes[0]:
start_of_window = 1
else:
start_of_window = 0
end_of_window = None
# loop over target chain nodes
for n, tcn in enumerate(target_chain_nodes):
if n >= start_of_window:
if tcn[0] == current_chain_nodes[-1][0]:
end_of_window = n
# get all warp edges of the current target node
# and their targets
tcn_warp_edges = self.node_warp_edges(tcn[0],
data=False)
tcn_warp_edge_targets = [we[1] for we
in tcn_warp_edges]
# loop over warp edge targets
for twet in tcn_warp_edge_targets:
if (twet in [cn[0] for cn
in current_chain_nodes]):
end_of_window = n
break
if end_of_window and end_of_window > start_of_window:
break
# re-check end of window for /====.> case
if end_of_window:
tcn_we = target_chain_nodes[end_of_window]
ccn_end = current_chain_nodes[-1]
ccn_len = len(current_chain_nodes)
if tcn_we == ccn_end and k == ccn_len-2:
end_of_window -= 1
if end_of_window < start_of_window:
start_of_window = -1
end_of_window = None
# if we have a valid window, set the target nodes
if start_of_window != -1 and end_of_window is not None:
if end_of_window == len(target_chain_nodes)-1:
window = target_chain_nodes[start_of_window:]
else:
window = target_chain_nodes[start_of_window:
end_of_window+1]
v_print("End of window: {}".format(end_of_window))
# execute connection to target
if cckey <= tckey:
rev = False
else:
rev = True
v_print("Connecting chain {} to chain {}".format(
cckey,
tckey))
self._create_second_pass_warp_connection(
current_chain_nodes,
k,
window,
precise=precise,
verbose=verbose,
reverse=rev)
else:
# print info on verbose setting
v_print("No valid window for current chain!")
# INVOKE SECOND PASS FOR TARGET ---> SOURCE ---------------------------
for i, current_chain in enumerate(target_to_source):
v_print("--------------------------------------------------------")
v_print("T>S Current Chain: {}".format(current_chain))
# build a list of nodes containing all nodes in the current chain
# including all 'end' nodes
current_chain_nodes = []
for j, ccid in enumerate(current_chain):
current_chain_nodes.append((ccid[0], self.node[ccid[0]]))
current_chain_nodes.extend(SegmentDict[ccid][1])
current_chain_nodes.append((current_chain[-1][1],
self.node[current_chain[-1][1]]))
# retrieve target chain from the source to target mapping
target_chain = target_to_source[current_chain]
cckey = target_to_key[current_chain]
tckey = source_to_key[target_chain]
# build a list of nodes containing all nodes in the target chain
# including all 'end' nodes
target_chain_nodes = []
for j, tcid in enumerate(target_chain):
target_chain_nodes.append((tcid[0], self.node[tcid[0]]))
target_chain_nodes.extend(SegmentDict[tcid][1])
target_chain_nodes.append((target_chain[-1][1],
self.node[target_chain[-1][1]]))
# initialize start of window marker
start_of_window = -1
# loop through all nodes on the current chain
for k, node in enumerate(current_chain_nodes):
# find out if the current node is already principally connected
node_connected = False
if k == 0 or k == len(current_chain_nodes)-1:
node_connected = True
# find out if the current node is already connected to the
# target chain
node_warp_edges = self.node_warp_edges(node[0], data=False)
warp_edge_targets = [we[1] for we in node_warp_edges]
# loop over warp edge targets
for wet in warp_edge_targets:
# if a warp edge target is in the target chain nodes, the
# node is connected and the start of the window for the
# next node is defined
for n, tcn in enumerate(target_chain_nodes):
if wet == tcn[0]:
if n > start_of_window or start_of_window == -1:
start_of_window = n
node_connected = True
# if the node is not connected to the target chain, we
# need to find the end of the window
if not node_connected:
# print info on verbose output
v_print("Node: {}".format(node[0]))
v_print("Start of window: {}".format(start_of_window))
# re-check start of window for <.====/ case
if len(target_chain_nodes) >= 2 and start_of_window == -1:
if target_chain_nodes[0] == current_chain_nodes[0]:
start_of_window = 1
else:
start_of_window = 0
end_of_window = None
# loop over target chain nodes
for n, tcn in enumerate(target_chain_nodes):
if n >= start_of_window:
if tcn[0] == current_chain_nodes[-1][0]:
end_of_window = n
# get all warp edges of the current target node and
# their targets
tcn_warp_edges = self.node_warp_edges(tcn[0],
data=False)
tcn_warp_edge_targets = [we[1] for we
in tcn_warp_edges]
# loop over warp edge targets of current target
# node
for twet in tcn_warp_edge_targets:
# if warp edge target is in current chain,
# it is the end of the window
if (twet in [cn[0] for cn
in current_chain_nodes]):
end_of_window = n
break
if end_of_window and end_of_window > start_of_window:
break
# re-check end of window for /====.> case
if end_of_window:
tcn_we = target_chain_nodes[end_of_window]
ccn_end = current_chain_nodes[-1]
ccn_len = len(current_chain_nodes)
if tcn_we == ccn_end and k == ccn_len-2:
end_of_window -= 1
if end_of_window < start_of_window:
start_of_window = -1
end_of_window = None
# if there is a valid window, set the target chain nodes
if start_of_window != -1 and end_of_window is not None:
if end_of_window == len(target_chain_nodes)-1:
window = target_chain_nodes[start_of_window:]
else:
window = target_chain_nodes[start_of_window:
end_of_window+1]
# print info on verbose output
v_print("End of window: {}".format(end_of_window))
# execute connection
if cckey < tckey:
rev = False
else:
rev = True
v_print("Connecting chain {} to chain {}.".format(
cckey,
tckey))
self._create_second_pass_warp_connection(
current_chain_nodes,
k,
window,
precise=precise,
verbose=verbose,
reverse=rev)
else:
v_print("No valid window for current chain!")
# FIND FACES OF NETWORK ---------------------------------------------------
def to_KnitDiNetwork(self):
"""
Constructs and returns a directed KnitDiNetwork based on this network
by duplicating all edges so that [u -> v] and [v -> u] for every
edge [u - v] in this undirected network.
Returns
-------
directed_network : :class:`KnitDiNetwork`
The directed representation of this network.
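Examples
--------
A minimal usage sketch; ``network`` is an assumed KnitNetwork
instance.
>>> dirnet = network.to_KnitDiNetwork()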
"""
# create a directed network with duplicate edges in opposing directions
dirnet = KnitDiNetwork()
dirnet.name = self.name
dirnet.add_nodes_from(self)
dirnet.add_edges_from((u, v, data)
for u, nbrs in self.adjacency_iter()
for v, data in nbrs.items())
dirnet.graph = self.graph
dirnet.node = self.node
dirnet.mapping_network = self.mapping_network
return dirnet
def find_cycles(self, mode=-1):
"""
Finds the cycles (faces) of this network by utilizing a wall-follower
mechanism.
Parameters
----------
mode : int, optional
Determines how the neighbors of each node are sorted when finding
cycles for the network.
``-1`` corresponds to using the world XY plane.
``0`` corresponds to using a plane normal to the origin node's
closest point on the reference geometry.
``1`` corresponds to using a plane normal to the average of the
origin and neighbor nodes' closest points on the reference geometry.
``2`` corresponds to using an average plane between a plane fit to
the origin and its neighbor nodes and a plane normal to the origin
node's closest point on the reference geometry.
Defaults to ``-1``.
Warning
-------
Modes other than ``-1`` are only possible if this network has an
underlying reference geometry in the form of a Mesh or NurbsSurface. The
reference geometry should be assigned when initializing the network by
assigning the geometry to the "reference_geometry" attribute of the
network.
Notes
-----
Based on an implementation inside the COMPAS framework.
For more info see [16]_.
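Examples
--------
A minimal usage sketch; ``network`` is an assumed KnitNetwork with
final 'weft' and 'warp' edges.
>>> # mode -1 sorts node neighbors using the world XY plane
>>> cycles = network.find_cycles(mode=-1)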
"""
return self.to_KnitDiNetwork().find_cycles(mode=mode)
def create_mesh(self, mode=-1, max_valence=4):
"""
Constructs a mesh from this network by finding cycles and using them as
mesh faces.
Parameters
----------
mode : int, optional
Determines how the neighbors of each node are sorted when finding
cycles for the network.
``-1`` corresponds to using the world XY plane.
``0`` corresponds to using a plane normal to the origin node's
closest point on the reference geometry.
``1`` corresponds to using a plane normal to the average of the
origin and neighbor nodes' closest points on the reference geometry.
``2`` corresponds to using an average plane between a plane fit to
the origin and its neighbor nodes and a plane normal to the origin
node's closest point on the reference geometry.
Defaults to ``-1``.
max_valence : int, optional
Sets the maximum edge valence of the faces. If this is set to > 4,
n-gon faces (more than 4 edges) are allowed. Otherwise, their
cycles are treated as invalid and will be ignored.
Defaults to ``4``.
Warning
-------
Modes other than ``-1`` are only possible if this network has an
underlying reference geometry in the form of a Mesh or NurbsSurface. The
reference geometry should be assigned when initializing the network by
assigning the geometry to the "reference_geometry" attribute of the
network.
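Examples
--------
A minimal usage sketch; ``network`` is an assumed KnitNetwork with
final 'weft' and 'warp' edges.
>>> # with max_valence=4, cycles with more than 4 edges are ignored
>>> mesh = network.create_mesh(mode=-1, max_valence=4)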
"""
return self.to_KnitDiNetwork().create_mesh(mode=mode,
max_valence=max_valence)
# DUALITY -----------------------------------------------------------------
def create_dual(self, mode=-1, merge_adj_creases=False,
mend_trailing_rows=False):
"""
Creates the dual of this KnitNetwork while translating current edge
attributes to the edges of the dual network.
Parameters
----------
mode : int, optional
Determines how the neighbors of each node are sorted when finding
cycles for the network.
``-1`` corresponds to using the world XY plane.
``0`` corresponds to using a plane normal to the origin node's
closest point on the reference geometry.
``1`` corresponds to using a plane normal to the average of the
origin and neighbor nodes' closest points on the reference geometry.
``2`` corresponds to using an average plane between a plane fit to
the origin and its neighbor nodes and a plane normal to the origin
node's closest point on the reference geometry.
Defaults to ``-1``.
merge_adj_creases : bool, optional
If ``True``, will merge adjacent 'increase' and 'decrease' nodes
connected by a 'weft' edge into a single node. This effectively
simplifies the pattern, as performing a decrease is unnecessary
if an increase is right beside it - both nodes can be replaced by a
single regular node (stitch).
Defaults to ``False``.
mend_trailing_rows : bool, optional
If ``True``, will attempt to mend trailing rows by reconnecting
nodes.
Defaults to ``False``.
Returns
-------
dual_network : :class:`KnitDiNetwork`
The dual network of this KnitNetwork.
Warning
-------
Modes other than -1 (default) are only possible if this network has an
        underlying reference geometry in the form of a Mesh or NurbsSurface. The
reference geometry should be assigned when initializing the network by
assigning the geometry to the 'reference_geometry' attribute of the
network.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
"""
# first find the cycles of this network
cycles = self.find_cycles(mode=mode)
# get node data for all nodes once
node_data = {k: self.node[k] for k in self.nodes_iter()}
# create new directed KnitDiNetwork for dual network
DualNetwork = KnitDiNetwork(
reference_geometry=self.graph["reference_geometry"])
# create mapping dict for edges to adjacent cycles
edge_to_cycle = {(u, v): None for u, v in self.edges_iter()}
edge_to_cycle.update({(v, u): None for u, v in self.edges_iter()})
# CREATE NODES OF DUAL ------------------------------------------------
# for each cycle, find the centroid node
for ckey in sorted(cycles.keys()):
cycle = cycles[ckey]
clen = len(cycle)
# skip invalid cycles (ngons and self-loops)
if clen > 4 or clen < 3:
continue
# loop over cycle edges and fill mapping dicts
closed_cycle = cycle[:]
closed_cycle.append(cycle[0])
for u, v in pairwise(closed_cycle):
edge_to_cycle[(u, v)] = ckey
# get coords of cycle nodes
cycle_coords = [[node_data[k]["x"],
node_data[k]["y"],
node_data[k]["z"]] for k in cycle]
# compute centroid
cx, cy, cz = zip(*cycle_coords)
centroid = [sum(cx) / clen, sum(cy) / clen, sum(cz) / clen]
centroid_pt = RhinoPoint3d(*centroid)
# get node 'leaf' attributes
            is_leaf = any(node_data[k]["leaf"] for k in cycle)
            # get node 'color' attributes. the color attribute is only set
            # if all colors in the cycle match!
colors = [node_data[k]["color"] for k in cycle]
if all(x == colors[0] for x in colors):
cycle_color = colors[0]
else:
cycle_color = None
# add node to dual network
DualNetwork.node_from_point3d(ckey,
centroid_pt,
position=None,
num=None,
leaf=is_leaf,
start=False,
end=False,
segment=None,
increase=False,
decrease=False,
color=cycle_color)
# CREATE EDGES IN DUAL ------------------------------------------------
# loop over original edges and create corresponding edges in dual
for u, v, d in self.edges_iter(data=True):
u, v = self.edge_geometry_direction(u, v)
cycle_a = edge_to_cycle[(u, v)]
cycle_b = edge_to_cycle[(v, u)]
            if cycle_a is not None and cycle_b is not None:
node_a = (cycle_a, DualNetwork.node[cycle_a])
node_b = (cycle_b, DualNetwork.node[cycle_b])
if d["warp"]:
DualNetwork.create_weft_edge(node_b, node_a)
elif d["weft"]:
DualNetwork.create_warp_edge(node_a, node_b)
# SET ATTRIBUTES OF DUAL NODES ----------------------------------------
# loop over all nodes of the network and set crease and end attributes
for node in DualNetwork.nodes_iter():
node_data = DualNetwork.node[node]
warp_in = DualNetwork.node_warp_edges_in(node)
warp_out = DualNetwork.node_warp_edges_out(node)
weft_in = DualNetwork.node_weft_edges_in(node)
weft_out = DualNetwork.node_weft_edges_out(node)
warplen = len(warp_in) + len(warp_out)
weftlen = len(weft_in) + len(weft_out)
# 2 warp edges and 1 weft edge >> end
if warplen == 2 and weftlen == 1:
node_data["end"] = True
if weft_out:
node_data["start"] = True
# 1 warp edge and 1 weft edge >> end and increase / decrease
elif warplen == 1 and weftlen == 1:
node_data["end"] = True
if weft_out:
node_data["start"] = True
if warp_out and not node_data["leaf"]:
node_data["increase"] = True
elif warp_in and not node_data["leaf"]:
node_data["decrease"] = True
# 2 warp edges and 0 weft edges >> end
elif warplen == 2 and weftlen == 0:
node_data["end"] = True
node_data["start"] = True
# 1 warp edge and 0 weft edges >> end
elif warplen == 1 and weftlen == 0:
node_data["end"] = True
node_data["start"] = True
# 0 warp edges and 1 weft edge >> end
elif warplen == 0 and weftlen == 1:
node_data["end"] = True
if weft_out:
node_data["start"] = True
# 1 warp edge and 2 weft edges >> increase or decrease
elif warplen == 1 and weftlen == 2:
if not node_data["leaf"]:
if warp_out:
node_data["increase"] = True
elif warp_in:
node_data["decrease"] = True
# MERGE ADJACENT INCREASES/DECREASES ----------------------------------
if merge_adj_creases:
increase_nodes = [inc for inc in DualNetwork.nodes_iter(data=True)
if inc[1]["increase"]]
for increase, data in increase_nodes:
pred = DualNetwork.predecessors(increase)
suc = DualNetwork.successors(increase)
pred = [p for p in pred if DualNetwork.node[p]["decrease"]]
suc = [s for s in suc if DualNetwork.node[s]["decrease"]]
# merge only with pred or with suc but not both
if (len(pred) == 1 and
DualNetwork.edge[pred[0]][increase]["weft"]):
# merge nodes, edge is pred, increase
pred = pred[0]
pd = DualNetwork.node[pred]
# remove the connecting edge
DualNetwork.remove_edge(pred, increase)
# get the points of the nodes
increase_pt = data["geo"]
pred_pt = pd["geo"]
# compute the new merged point
new_vec = RhinoVector3d(increase_pt - pred_pt)
new_pt = pred_pt + (new_vec * 0.5)
# replace the increase with the new pt and invert the
# increase attribute
data["geo"] = new_pt
data["x"] = new_pt.X
data["y"] = new_pt.Y
data["z"] = new_pt.Z
data["increase"] = False
# edit the edges of the increase
for edge in DualNetwork.edges_iter(increase, data=True):
edge[2]["geo"] = RhinoLine(
data["geo"],
DualNetwork.node[edge[1]]["geo"])
# edit edges of decrease
for edge in DualNetwork.in_edges_iter(pred, data=True):
if edge[2]["warp"]:
fromNode = (edge[0], DualNetwork.node[edge[0]])
toNode = (increase, data)
DualNetwork.create_warp_edge(fromNode, toNode)
DualNetwork.remove_edge(edge[0], edge[1])
elif edge[2]["weft"]:
fromNode = (edge[0], DualNetwork.node[edge[0]])
toNode = (increase, data)
DualNetwork.create_weft_edge(fromNode, toNode)
DualNetwork.remove_edge(edge[0], edge[1])
DualNetwork.remove_node(pred)
elif (not pred and len(suc) == 1 and
DualNetwork.edge[increase][suc[0]]["weft"]):
# merge nodes, edge is increase, suc
suc = suc[0]
sd = DualNetwork.node[suc]
# remove the connecting edge
DualNetwork.remove_edge(increase, suc)
# get the points of the nodes
increase_pt = data["geo"]
suc_pt = sd["geo"]
# compute the new merged point
new_vec = RhinoVector3d(suc_pt - increase_pt)
new_pt = increase_pt + (new_vec * 0.5)
# replace the increase with the new pt and invert the
# increase attribute
data["geo"] = new_pt
data["x"] = new_pt.X
data["y"] = new_pt.Y
data["z"] = new_pt.Z
data["increase"] = False
# edit the edges of the increase
for edge in DualNetwork.edges_iter(increase, data=True):
edge[2]["geo"] = RhinoLine(
data["geo"],
DualNetwork.node[edge[1]]["geo"])
for edge in DualNetwork.in_edges_iter(increase, data=True):
edge[2]["geo"] = RhinoLine(
DualNetwork.node[edge[0]]["geo"],
data["geo"])
# edit incoming edges of decrease
for edge in DualNetwork.in_edges_iter(suc, data=True):
if edge[2]["warp"]:
fromNode = (edge[0], DualNetwork.node[edge[0]])
toNode = (increase, data)
DualNetwork.create_warp_edge(fromNode, toNode)
DualNetwork.remove_edge(edge[0], edge[1])
elif edge[2]["weft"]:
fromNode = (edge[0], DualNetwork.node[edge[0]])
toNode = (increase, data)
DualNetwork.create_weft_edge(fromNode, toNode)
DualNetwork.remove_edge(edge[0], edge[1])
# edit outgoing edges of decrease
for edge in DualNetwork.edges_iter(suc, data=True):
if edge[2]["warp"]:
fromNode = (increase, data)
toNode = (edge[1], DualNetwork.node[edge[1]])
DualNetwork.create_warp_edge(fromNode, toNode)
DualNetwork.remove_edge(edge[0], edge[1])
elif edge[2]["weft"]:
fromNode = (increase, data)
toNode = (edge[1], DualNetwork.node[edge[1]])
DualNetwork.create_weft_edge(fromNode, toNode)
DualNetwork.remove_edge(edge[0], edge[1])
DualNetwork.remove_node(suc)
# ATTEMPT TO MEND TRAILING ROWS ---------------------------------------
if mend_trailing_rows:
# TODO: find a safer / more robust implementation attempt!
errMsg = ("This option is not satisfyingly implemented for this " +
"method, yet. Therefore, it is deactivated for now.")
raise NotImplementedError(errMsg)
# get all nodes which are 'leaf' and 'end' (right side)
# and all nodes which are 'leaf' and 'start' (left side)
trailing = sorted([(n, d) for n, d in
DualNetwork.nodes_iter(data=True)
if d["leaf"]
and d["end"]], key=lambda x: x[0])
trailing_left = deque([t for t in trailing if t[1]["start"]])
trailing_right = deque([t for t in trailing if not t[1]["start"]])
# from the trailing left nodes...
# travel one outgoing 'weft'
# from there travel one incoming 'warp'
# if the resulting node is 'start', 'end' and has 3 edges in total
# >> take its outgoing 'warp' edge (we already traveled that so
# we should already have it)
# >> connect it to the trailing left node
# >> remove the 'leaf' attribute from the trailing node as it is no
# longer trailing
# >> add the 'increase' attribute to the previous target of the
# 'warp' edge
while len(trailing_left) > 0:
# pop an item from the deque
trail = trailing_left.popleft()
# travel one outgoing 'weft' edge
weft_out = DualNetwork.node_weft_edges_out(trail[0], data=True)
if not weft_out:
continue
weft_out = weft_out[0]
# check the target of the 'weft' edge for incoming 'warp'
warp_in = DualNetwork.node_warp_edges_in(
weft_out[1],
data=True)
warp_out = DualNetwork.node_warp_edges_out(
weft_out[1],
data=True)
if not warp_in:
continue
warp_in = warp_in[0]
candidate = (warp_in[0], DualNetwork.node[warp_in[0]])
nce = len(DualNetwork.in_edges(warp_in[0]))
nce += len(DualNetwork.edges(warp_in[0]))
# if this condition holds, we have a trailing increase
if (candidate[1]["start"]
and candidate[1]["end"]
and nce == 3):
# remove found 'warp' edge
DualNetwork.remove_edge(warp_in[0], warp_in[1])
# assign 'increase' attribute to former 'warp' edge target
DualNetwork.node[warp_in[1]]["increase"] = True
# connect candidate to trail with new 'warp' edge
DualNetwork.create_warp_edge(candidate, trail)
# remove 'leaf' attribute of former trail
trail[1]["leaf"] = False
else:
if warp_out:
warp_out = warp_out[0]
candidate = (warp_out[1],
DualNetwork.node[warp_out[1]])
nce = len(DualNetwork.in_edges(warp_out[1]))
nce += len(DualNetwork.edges(warp_out[1]))
# if this condition holds, we have a trailing decrease
if (candidate[1]["start"]
and candidate[1]["end"]
and nce == 3):
# remove found 'warp' edge
DualNetwork.remove_edge(warp_out[0], warp_out[1])
# assign 'decrease' attribute to former 'warp'
# edge source
DualNetwork.node[warp_out[0]]["decrease"] = True
# connect former trail to candidate with new
# 'warp' edge
DualNetwork.create_warp_edge(trail, candidate)
# remove 'leaf' attribute of former trail
trail[1]["leaf"] = False
while len(trailing_right) > 0:
# pop an item from the deque
trail = trailing_right.popleft()
# travel one incoming 'weft' edge
weft_in = DualNetwork.node_weft_edges_in(trail[0], data=True)
if not weft_in:
continue
weft_in = weft_in[0]
# check the target of the 'weft' edge for incoming 'warp'
warp_in = DualNetwork.node_warp_edges_in(weft_in[0],
data=True)
warp_out = DualNetwork.node_warp_edges_out(weft_in[0],
data=True)
if not warp_in:
continue
warp_in = warp_in[0]
candidate = (warp_in[0], DualNetwork.node[warp_in[0]])
nce = len(DualNetwork.in_edges(warp_in[0]))
nce += len(DualNetwork.edges(warp_in[0]))
# if this condition holds, we have a trailing increase
if candidate[1]["end"] and nce == 3:
# remove found 'warp' edge
DualNetwork.remove_edge(warp_in[0], warp_in[1])
# assign 'increase' attribute to former 'warp' edge target
DualNetwork.node[warp_in[1]]["increase"] = True
# connect candidate to trail with new 'warp' edge
DualNetwork.create_warp_edge(candidate, trail)
# remove 'leaf' attribute of former trail
trail[1]["leaf"] = False
else:
if warp_out:
warp_out = warp_out[0]
candidate = (warp_out[1],
DualNetwork.node[warp_out[1]])
nce = len(DualNetwork.in_edges(warp_out[1]))
nce += len(DualNetwork.edges(warp_out[1]))
# if this condition holds, we have a trailing decrease
if (candidate[1]["start"]
and candidate[1]["end"]
and nce == 3):
# remove found 'warp' edge
DualNetwork.remove_edge(warp_out[0], warp_out[1])
# assign 'decrease' attribute to former 'warp'
# edge source
DualNetwork.node[warp_out[0]]["decrease"] = True
# connect former trail to candidate with new
# 'warp' edge
DualNetwork.create_warp_edge(trail, candidate)
# remove 'leaf' attribute of former trail
trail[1]["leaf"] = False
return DualNetwork
# MAIN ------------------------------------------------------------------------
if __name__ == '__main__':
pass
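# --- Editor's sketch (hedged addition, not part of the original module) ---
# create_dual() above places one dual node at the centroid of every valid
# cycle (triangles and quads) and walks each closed cycle pairwise to map
# directed edges to cycles. A minimal, Rhino-free sketch of those two steps,
# assuming plain (x, y, z) tuples in place of network node data:
from itertools import tee

def _pairwise_sketch(iterable):
    # itertools recipe matching the pairwise() helper used in create_dual()
    a, b = tee(iterable)
    next(b, None)
    return zip(a, b)

def _cycle_centroid_sketch(coords):
    # coords: list of (x, y, z) tuples describing one cycle
    n = float(len(coords))
    xs, ys, zs = zip(*coords)
    return (sum(xs) / n, sum(ys) / n, sum(zs) / n)

if __name__ == '__main__':
    quad = [(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0)]
    closed = quad + [quad[0]]                 # close the cycle first
    print(list(_pairwise_sketch(closed)))     # the four directed cycle edges
    print(_cycle_centroid_sketch(quad))       # (0.5, 0.5, 0.0)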
| 45.454681
| 79
| 0.495106
| 15,195
| 152,955
| 4.844817
| 0.063244
| 0.013448
| 0.00777
| 0.008802
| 0.641129
| 0.600418
| 0.571335
| 0.543883
| 0.523629
| 0.509611
| 0
| 0.009948
| 0.428904
| 152,955
| 3,364
| 80
| 45.468193
| 0.832814
| 0.279599
| 0
| 0.587319
| 0
| 0
| 0.040063
| 0.001593
| 0
| 0
| 0
| 0.000595
| 0
| 1
| 0.015573
| false
| 0.004449
| 0.022247
| 0
| 0.05228
| 0.040044
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27af82c734c9c172d86f1e925df82c41889d2af8
| 5,388
|
py
|
Python
|
main.py
|
GuruOfPython/Python-Tkinter-GUI
|
de17e819cc6008274077d8347d722e779cb9166b
|
[
"MIT"
] | null | null | null |
main.py
|
GuruOfPython/Python-Tkinter-GUI
|
de17e819cc6008274077d8347d722e779cb9166b
|
[
"MIT"
] | null | null | null |
main.py
|
GuruOfPython/Python-Tkinter-GUI
|
de17e819cc6008274077d8347d722e779cb9166b
|
[
"MIT"
] | null | null | null |
# from binary_tree import *
#
# root = Node(8)
#
# root.insert(3)
# root.insert(10)
# root.insert(1)
# root.insert(6)
# root.insert(4)
# root.insert(7)
# root.insert(14)
# root.insert(13)
# node, parent = root.lookup(6)
# print(node, parent)
# root.print_tree()
#
# root.delete(10)
#
# root.print_tree()
import tkinter as tk
from tkinter import *
# import tkMessageBox as messagesbox
import tkinter.messagebox as messagebox
from tkinter import ttk
from tkinter import simpledialog
from treeview import TreeView
from random import shuffle
from naive import NaiveBST, perfect_inserter
from random import *
import random
class main_GUI(Tk):
def __init__(self, parent):
tk.Tk.__init__(self, parent)
self.parent = parent
self.resizable(0, 0)
self.geometry("1200x800")
self.setting_frame = LabelFrame(self, text="Setting")
create_btn = Button(self.setting_frame, text="Create", height=1, width=10, command=self.create)
create_btn.grid(row=0, padx=5, pady=5)
insert_btn = Button(self.setting_frame, text="Insert", height=1, width=10, command=self.insert)
insert_btn.grid(row=2, padx=5, pady=5)
# self.insert_e = Entry(self.setting_frame, height=1, width=10)
self.insert_e = Entry(self.setting_frame)
self.insert_e.grid(row=2, column=1, padx=5, pady=5)
delete_btn = Button(self.setting_frame, text="Delete", height=1, width=10, command=self.delete)
delete_btn.grid(row=4, padx=5, pady=5)
# self.delete_e = Entry(self.setting_frame, height=1, width=10)
self.delete_e = Entry(self.setting_frame)
self.delete_e.grid(row=4, column=1, padx=5, pady=5)
search_btn = Button(self.setting_frame, text="Search", height=1, width=10, command=self.search)
search_btn.grid(row=6, padx=5, pady=5)
# self.search_e = Entry(self.setting_frame, height=1, width=10)
self.search_e = Entry(self.setting_frame)
self.search_e.grid(row=6, column=1, padx=5, pady=5)
# self.setting_frame.grid(row=1, padx=5, pady=5, sticky=N+S)
self.setting_frame.pack(padx=5, pady=5, side=LEFT)
self.drawing_frame = tk.LabelFrame(self, text="Drawing")
# self.drawing_frame.grid(row=1, column=2, padx=5, pady=5, sticky=N+S)
self.drawing_frame.pack(padx=5, pady=5, fill=BOTH, expand=1)
self.tree = NaiveBST()
self.treeview = TreeView(self.drawing_frame, tree=self.tree)
def callback():
if messagebox.askokcancel("Quit", "Do you really wish to quit?"):
self.destroy()
self.treeview.end_pause = True
self.protocol("WM_DELETE_WINDOW", callback)
def create(self):
# keys = list(range(20))
# shuffle(keys)
# print(keys)
# keys = [randint(1,30) for i in range(20)]
keys = random.sample(range(1, 30), 20)
self.tree.root = None
print(keys)
for i in keys:
self.tree.insert(i)
# perfect_inserter(self.tree, sorted(keys))
self.tree.view()
def insert(self):
if self.tree.root is None:
messagebox.showerror("No Tree", "There is no tree. Please create a tree")
return
if not self.insert_e.get():
messagebox.showerror("No Value", "Please enter a node key")
return
elif not self.insert_e.get().isdigit():
messagebox.showerror("Invalid Value", "Please enter an integer value")
return
node_key = int(self.insert_e.get())
[flag, p] = self.tree.search(node_key)
if not flag:
self.tree.insert(node_key)
self.tree.view()
else:
messagebox.showerror("Invalid Value", "The key already exists. Please enter another value")
return
def delete(self):
if self.tree.root is None:
messagebox.showerror("No Tree", "There is no tree. Please create a tree")
return
if not self.delete_e.get():
messagebox.showerror("No Value", "Please enter a node key")
return
elif not self.delete_e.get().isdigit():
messagebox.showerror("Invalid Value", "Please enter an integer value")
return
node_key = int(self.delete_e.get())
[flag, p] = self.tree.search(node_key)
if flag:
self.tree.delete(node_key)
self.tree.view()
else:
messagebox.showerror("Invalid Value", "The key doesn't exists. Please enter another value")
return
def search(self):
if self.tree.root is None:
messagebox.showerror("No Tree", "There is no tree. Please create a tree")
return
if not self.search_e.get():
messagebox.showerror("No Value", "Please enter a node key")
return
elif not self.search_e.get().isdigit():
messagebox.showerror("Invalid Value", "Please enter an integer value")
return
node_key = int(self.search_e.get())
[flag, p] = self.tree.search(node_key)
if flag and p:
self.tree.view(highlight_nodes=[p])
else:
messagebox.showerror("Invalid Value", "The key can't be found")
if __name__ == '__main__':
app = main_GUI(None)
app.title("Binary Search Tree")
app.mainloop()
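# --- Editor's sketch (hedged addition) ---
# insert(), delete() and search() above repeat the same three entry checks.
# A minimal refactoring sketch under the same messagebox API; it returns the
# parsed key, or None so callers can bail out early:
def validated_key(entry, tree):
    if tree.root is None:
        messagebox.showerror("No Tree", "There is no tree. Please create a tree")
        return None
    value = entry.get()
    if not value:
        messagebox.showerror("No Value", "Please enter a node key")
        return None
    if not value.isdigit():
        messagebox.showerror("Invalid Value", "Please enter an integer value")
        return None
    return int(value)
# usage inside main_GUI.insert, for example:
#     node_key = validated_key(self.insert_e, self.tree)
#     if node_key is None:
#         return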
| 32.853659
| 103
| 0.615256
| 742
| 5,388
| 4.362534
| 0.190027
| 0.042014
| 0.064257
| 0.033982
| 0.540624
| 0.518072
| 0.420142
| 0.34878
| 0.335187
| 0.335187
| 0
| 0.023355
| 0.26095
| 5,388
| 163
| 104
| 33.055215
| 0.789553
| 0.138641
| 0
| 0.307692
| 0
| 0
| 0.137527
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057692
| false
| 0
| 0.096154
| 0
| 0.269231
| 0.009615
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27b12ffdc16386ed1ffaa3ad7820397e93894fcc
| 4,634
|
py
|
Python
|
cbagent/collectors/sgimport_latency.py
|
sharujayaram/perfrunner
|
8fe8ff42a5c74c274b569ba2c45cd43b320f48eb
|
[
"Apache-2.0"
] | null | null | null |
cbagent/collectors/sgimport_latency.py
|
sharujayaram/perfrunner
|
8fe8ff42a5c74c274b569ba2c45cd43b320f48eb
|
[
"Apache-2.0"
] | null | null | null |
cbagent/collectors/sgimport_latency.py
|
sharujayaram/perfrunner
|
8fe8ff42a5c74c274b569ba2c45cd43b320f48eb
|
[
"Apache-2.0"
] | 1
|
2019-05-20T13:44:29.000Z
|
2019-05-20T13:44:29.000Z
|
import requests
import json
from concurrent.futures import ProcessPoolExecutor as Executor
from concurrent.futures import ThreadPoolExecutor
from time import sleep, time
from couchbase.bucket import Bucket
from cbagent.collectors import Latency, Collector
from logger import logger
from perfrunner.helpers.misc import uhex
from spring.docgen import Document
from cbagent.metadata_client import MetadataClient
from cbagent.stores import PerfStore
from perfrunner.settings import (
ClusterSpec,
PhaseSettings,
TargetIterator,
TestConfig,
)
def new_client(host, bucket, password, timeout):
connection_string = 'couchbase://{}/{}?password={}'
connection_string = connection_string.format(host,
bucket,
password)
client = Bucket(connection_string=connection_string)
client.timeout = timeout
return client
class SGImport_latency(Collector):
COLLECTOR = "sgimport_latency"
METRICS = "sgimport_latency"
INITIAL_POLLING_INTERVAL = 0.001 # 1 ms
    TIMEOUT = 3600  # 1 hour, in seconds
    MAX_SAMPLING_INTERVAL = 10  # 10 s
def __init__(self, settings,
cluster_spec: ClusterSpec,
test_config: TestConfig
):
self.cluster_spec = cluster_spec
self.test_config = test_config
self.mc = MetadataClient(settings)
self.store = PerfStore(settings.cbmonitor_host)
self.workload_setting = PhaseSettings
self.interval = self.MAX_SAMPLING_INTERVAL
self.cluster = settings.cluster
self.clients = []
self.cb_host = self.cluster_spec.servers[int(self.test_config.nodes)]
self.sg_host = next(self.cluster_spec.masters)
src_client = new_client(host=self.cb_host,
bucket='bucket-1',
password='password',
timeout=self.TIMEOUT)
self.clients.append(('bucket-1', src_client))
self.new_docs = Document(1024)
def check_longpoll_changefeed(self, host: str, key: str, last_sequence: str):
sg_db = 'db'
api = 'http://{}:4985/{}/_changes'.format(host, sg_db)
last_sequence_str = "{}".format(last_sequence)
data = {'filter': 'sync_gateway/bychannel',
'feed': 'longpoll',
"channels": "123",
"since": last_sequence_str,
"heartbeat": 3600000}
response = requests.post(url=api, data=json.dumps(data))
t1 = time()
record_found = 0
if response.status_code == 200:
for record in response.json()['results']:
if record['id'] == key:
record_found = 1
break
        if record_found != 1:
            # propagate the timestamp from the retry instead of discarding it
            return self.check_longpoll_changefeed(
                host=host, key=key, last_sequence=last_sequence)
        return t1
def insert_doc(self, src_client, key: str, doc):
src_client.upsert(key, doc)
return time()
def get_lastsequence(self, host: str):
sg_db = 'db'
api = 'http://{}:4985/{}/_changes'.format(host, sg_db)
data = {'filter': 'sync_gateway/bychannel',
'feed': 'normal',
"channels": "123",
"since": "0"
}
response = requests.post(url=api, data=json.dumps(data))
last_sequence = response.json()['last_seq']
return last_sequence
def measure(self, src_client):
key = "sgimport_{}".format(uhex())
doc = self.new_docs.next(key)
last_sequence = self.get_lastsequence(host=self.sg_host)
executor = ThreadPoolExecutor(max_workers=2)
future1 = executor.submit(self.check_longpoll_changefeed, host=self.sg_host,
key=key,
last_sequence=last_sequence)
future2 = executor.submit(self.insert_doc, src_client=src_client, key=key, doc=doc)
t1, t0 = future1.result(), future2.result()
print('import latency t1, t0', t1, t0, (t1 - t0) * 1000)
return {'sgimport_latency': (t1 - t0) * 1000} # s -> ms
def sample(self):
for bucket, src_client in self.clients:
lags = self.measure(src_client)
self.store.append(lags,
cluster=self.cluster,
collector=self.COLLECTOR)
def update_metadata(self):
self.mc.add_cluster()
self.mc.add_metric(self.METRICS, collector=self.COLLECTOR)
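# --- Editor's sketch (hedged addition, independent of Couchbase/Sync Gateway) ---
# measure() above times import latency by racing two futures: one thread
# blocks on the changes feed and timestamps when the key appears (t1), the
# other writes the document and timestamps the write (t0). A self-contained
# toy with the same shape, using a queue in place of the feed (the latency
# is near zero here because nothing actually processes the "import"):
import queue

def _toy_measure():
    feed = queue.Queue()

    def wait_for_key(key):
        while feed.get() != key:
            pass
        return time()                    # t1: change observed

    def write(key):
        sleep(0.05)                      # simulated client-side delay
        feed.put(key)                    # the "write"
        return time()                    # t0: write acknowledged

    executor = ThreadPoolExecutor(max_workers=2)
    future1 = executor.submit(wait_for_key, 'doc-1')
    future2 = executor.submit(write, 'doc-1')
    t1, t0 = future1.result(), future2.result()
    return (t1 - t0) * 1000              # ms, matching measure()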
| 30.486842
| 95
| 0.594087
| 500
| 4,634
| 5.326
| 0.294
| 0.049568
| 0.016898
| 0.020278
| 0.137439
| 0.114157
| 0.088622
| 0.063087
| 0.063087
| 0.030792
| 0
| 0.023024
| 0.306431
| 4,634
| 151
| 96
| 30.688742
| 0.805538
| 0.00669
| 0
| 0.093458
| 0
| 0
| 0.071335
| 0.015876
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074766
| false
| 0.037383
| 0.17757
| 0
| 0.35514
| 0.009346
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27b4b4442e8234ce781c98d6ea27cb6fba57c3a9
| 5,000
|
py
|
Python
|
Tools/renew-navi-npc.py
|
vakhet/ragnarok-navigation
|
df7d3ff95a9bd1c0497744113ad664a31d248de6
|
[
"MIT"
] | 3
|
2017-12-02T16:40:32.000Z
|
2020-02-11T17:44:02.000Z
|
Tools/renew-navi-npc.py
|
vakhet/ragnarok-navigation
|
df7d3ff95a9bd1c0497744113ad664a31d248de6
|
[
"MIT"
] | null | null | null |
Tools/renew-navi-npc.py
|
vakhet/ragnarok-navigation
|
df7d3ff95a9bd1c0497744113ad664a31d248de6
|
[
"MIT"
] | null | null | null |
"""
Author : vakhet at gmail.com
This script gets all your NPC names from the original rAthena folder
and updates their lines in navi_npc_krpri.lub
wherever the map_name and coords match
"""
import re
import os
import random
import sqlite3
NPC_match = r'^[\w\d_]+,\d+,\d+,\d+\tscript\t[\w\d_ -]+#*[\w\d_ -]*\t[\d,{]+$'
allfiles = []
log = open('result.log', 'w', errors='ignore')
conn = sqlite3.connect('db.sqlite')
db = conn.cursor()
intro = '''
Renew navi_npc_krpri.lub | Version 0.2 | (C) 2017 vakhet @ gmail.com
Changes:
v0.2 - *.new file is now created in the same folder as the original *.lub
'''
outro = '''
Check results in result.log
NEW file generated: navi_npc_krpri.new
'''
db.executescript('''
DROP TABLE IF EXISTS npc;
CREATE TABLE npc (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
map TEXT,
thing1 INTEGER,
thing2 INTEGER,
thing3 INTEGER,
name TEXT,
shadow TEXT,
x INTEGER,
y INTEGER
)
''')
def parse_npc(line):
ln = line.split(',')
map_name, x, y = ln[0], int(ln[1]), int(ln[2])
fullname = ln[3].split('\t')
fullname = fullname[2]
if re.search('#', fullname):
ln = fullname.split('#')
name = ln[0]
shadow = ln[1]
# print(line,'\n',shadow,'<\n=====')
else:
name = fullname
shadow = ''
return name, map_name, x, y, shadow
def parse_navi(line):
    line = re.sub(r'^.*{\s*', '', line)
    line = re.sub(r'\s*}.*$', '', line)
line = line.split(', ')
for i in range(len(line)):
line[i] = re.sub('"', '', line[i], count=2)
try:
line[i] = int(line[i])
except ValueError:
pass
return tuple(line)
def stage_1():
for root, dirs, files in os.walk(path_rathena):
for file in files:
if file.endswith('.txt'):
line = os.path.join(root, file)
allfiles.append(line)
def stage_2():
fh = open(path_navi+'\\navi_npc_krpri.lub', 'r', errors='ignore')
for line in fh.readlines():
navi = parse_navi(line)
if len(navi) != 8:
continue
db.execute('''INSERT INTO npc
(map, thing1, thing2, thing3, name, shadow, x, y)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)''', navi)
conn.commit()
fh.close()
def stage_3():
total, updated = 0, 0
print('Working... ', end='')
for file in allfiles:
fh = open(file, 'r', errors='ignore')
for line in fh.readlines():
print('\b'+chr(random.randint(65, 122)), end='')
if re.match(NPC_match, line) is None:
continue
npc = parse_npc(line)
total = total + 1
db.execute('''SELECT COUNT(id), id, name, map, x, y, shadow FROM npc
WHERE map=? AND x=? AND y=?''', (npc[1], npc[2], npc[3]))
sql = db.fetchone()
if sql[0] == 0 or (sql[2] == npc[0] and sql[6] == npc[4]):
continue
log.writelines('({},{},{}) {} -> {}#{}\n'.format(
sql[3], str(sql[4]), str(sql[5]), sql[2], npc[0], npc[4]))
db.execute('UPDATE npc SET name=?, shadow=? WHERE id=?',
(npc[0], npc[4], sql[1]))
conn.commit()
updated += 1
fh.close()
log.close()
print('\bOK!')
print('Found {} NPC definitions (warps not included)'.format(total))
print('Updated {} NPC names'.format(updated))
def stage_4():
    file = open(path_navi+'\\navi_npc_krpri.new', 'w', errors='ignore')
file.writelines('Navi_Npc = {\n')
sql = db.execute('SELECT * FROM npc WHERE thing1<>0 ORDER BY map, thing1')
for row in sql:
line = '\t{ '
for i in range(1, 9):
try:
item = str(row[i])
except (ValueError, TypeError):
pass
if i in (1, 5, 6):
item = '"{}"'.format(row[i])
line += item + ', '
line = line[:-2] + ' },\n'
file.writelines(line)
file.writelines('\t{ "NULL", 0, 0, 0, "", "", 0, 0 }\n}\n\n')
file.close()
# The Beginning
print(intro)
while True:
path_rathena = input('Enter path to NPC: ')
if not os.path.exists(path_rathena):
print('Wrong path!\n\n')
continue
else:
break
while True:
path_navi = input('Enter path to navi_npc_krpri.lub: ')
if not os.path.exists(path_navi+'\\navi_npc_krpri.lub'):
print('Wrong path!\n\n')
continue
else:
break
stage_1() # scan for *.txt in \npc directory
stage_2() # build DB from navi_npc_krpri.lub
stage_3() # update NPC names in DB from *.txt
stage_4() # building navi_npc_krpri.new
print('See the complete list of changes in result.log')
print('NEW file generated: navi_npc_krpri.new')
input('\nPress any key')
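# --- Editor's sketch (hedged addition) ---
# parse_navi() above strips the Lua table braces from one lub line and splits
# it into the 8-tuple (map, thing1, thing2, thing3, name, shadow, x, y),
# converting numeric fields to int. A quick check on a made-up sample line
# (map and NPC values are illustrative; pasted here it would only run after
# the interactive flow above):
sample_line = '\t{ "prontera", 101, 1, 0, "Tool Dealer", "tool01", 134, 221 },'
print(parse_navi(sample_line))
# -> ('prontera', 101, 1, 0, 'Tool Dealer', 'tool01', 134, 221)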
| 28.571429
| 82
| 0.523
| 671
| 5,000
| 3.825633
| 0.28465
| 0.029996
| 0.046747
| 0.03506
| 0.133229
| 0.133229
| 0.089599
| 0.051422
| 0
| 0
| 0
| 0.021555
| 0.3134
| 5,000
| 174
| 83
| 28.735632
| 0.726187
| 0.0726
| 0
| 0.185714
| 0
| 0.014286
| 0.297346
| 0.008322
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042857
| false
| 0.014286
| 0.028571
| 0
| 0.085714
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27b801a71ed41ab9ae80dc219943a39cdead01b2
| 712
|
py
|
Python
|
tests/components/rtsp_to_webrtc/test_diagnostics.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
tests/components/rtsp_to_webrtc/test_diagnostics.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 24,710
|
2016-04-13T08:27:26.000Z
|
2020-03-02T12:59:13.000Z
|
tests/components/rtsp_to_webrtc/test_diagnostics.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""Test nest diagnostics."""
from typing import Any
from .conftest import ComponentSetup
from tests.common import MockConfigEntry
from tests.components.diagnostics import get_diagnostics_for_config_entry
THERMOSTAT_TYPE = "sdm.devices.types.THERMOSTAT"
async def test_entry_diagnostics(
hass,
hass_client,
config_entry: MockConfigEntry,
rtsp_to_webrtc_client: Any,
setup_integration: ComponentSetup,
):
"""Test config entry diagnostics."""
await setup_integration()
assert await get_diagnostics_for_config_entry(hass, hass_client, config_entry) == {
"discovery": {"attempt": 1, "web.failure": 1, "webrtc.success": 1},
"web": {},
"webrtc": {},
}
| 25.428571
| 87
| 0.716292
| 81
| 712
| 6.049383
| 0.481481
| 0.112245
| 0.069388
| 0.093878
| 0.216327
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005119
| 0.176966
| 712
| 27
| 88
| 26.37037
| 0.831058
| 0.030899
| 0
| 0
| 0
| 0
| 0.12037
| 0.04321
| 0
| 0
| 0
| 0
| 0.055556
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27b8f98dbc5944c52c7fdf99ecb0474a2db0ffed
| 3,477
|
py
|
Python
|
reachweb/models.py
|
kamauvick/ReachOutDash
|
ceb7da731982bc9d1b1bb4185f34822b4dcf6526
|
[
"MIT"
] | null | null | null |
reachweb/models.py
|
kamauvick/ReachOutDash
|
ceb7da731982bc9d1b1bb4185f34822b4dcf6526
|
[
"MIT"
] | 9
|
2020-02-12T02:44:31.000Z
|
2022-03-12T00:03:57.000Z
|
reachweb/models.py
|
kamauvick/ReachOutDash
|
ceb7da731982bc9d1b1bb4185f34822b4dcf6526
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import User
from django.db import models
class Chv(models.Model):
name = models.OneToOneField(User, on_delete=models.PROTECT, related_name='profile')
age = models.IntegerField()
phonenumber = models.CharField(max_length=255)
profile_picture = models.ImageField(upload_to='chv_profiles/', blank=True, default='prof.jpg')
location = models.CharField(max_length=200)
class Meta:
db_table = 'chv'
ordering = ['-name']
def __str__(self):
return f'{self.name}'
@classmethod
def get_all_chvs(cls):
chvs = cls.objects.all()
return chvs
# @receiver(post_save, sender=User)
# def create_chv(sender, instance, created, **kwargs):
# if created:
# Chv.objects.create(name=instance)
#
# @receiver(post_save, sender=User)
# def save_chv(sender, instance, **kwargs):
# instance.profile.save()
class Patient(models.Model):
URGENCY_LEVELS = (
('red', 'High severity'),
('yellow', 'Moderate severity'),
('green', 'Low severity'),
('blue', 'Unknown severity'),
)
LOCATIONS = (
('Juja', 'Gachororo'),
('High Point', 'Sewage'),
('K-road', 'Stage'),
('Gwa-Kairu', 'Estate'),
('Ruiru', 'Kimbo'),
('Kasarani', 'Nairobi'),
)
name = models.CharField(max_length=255)
examiner = models.ForeignKey('Chv', on_delete=models.CASCADE, related_name='chv')
age = models.IntegerField()
gender = models.CharField(max_length=200)
location = models.CharField(choices=LOCATIONS, max_length=200, default='Ruiru')
time = models.DateTimeField()
symptoms = models.TextField()
urgency = models.CharField(max_length=200, choices=URGENCY_LEVELS, default='blue')
action_taken = models.TextField()
class Meta:
db_table = 'patient'
ordering = ['-name']
def __str__(self):
return f'{self.name},::: {self.location}'
@classmethod
def get_all_patients(cls):
patients = cls.objects.all()
return patients
class Emergencies(models.Model):
Emergency_TYPES = (
('Road', 'Road accidents'),
('Fire', 'Fire emergencies'),
('Water', 'Water related accidents'),
('Sickness', 'Sick people emergencies'),
)
type = models.CharField(max_length=200, choices=Emergency_TYPES, default='Sickness')
location = models.ForeignKey('Location', on_delete=models.CASCADE, related_name='locale')
reported_by = models.ForeignKey('Chv', on_delete=models.CASCADE, related_name='reporter')
class Meta:
db_table = 'emergencies'
ordering = ['type']
@classmethod
def get_all_emergencies(cls):
emergencies = cls.objects.all()
return emergencies
class Location(models.Model):
ROAD_ACCESS = (
('Great', 'The roads are well passable in all weather conditions'),
('Good', 'The roads are passable in favourable weather conditions'),
('Bad', 'The roads are not passable'),
)
name = models.CharField(max_length=200)
county = models.CharField(max_length=200)
accessibility = models.CharField(max_length=200, choices=ROAD_ACCESS)
class Meta:
db_table = 'location'
ordering = ['-name']
def __str__(self):
return f'{self.name}'
@classmethod
def get_all_locations(cls):
locations = cls.objects.all()
return locations
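# --- Editor's sketch (hedged addition) ---
# Each choices tuple above pairs a stored value with a human-readable label.
# Django generates a get_<field>_display() accessor from such fields, so views
# never need to re-map the codes by hand. Illustrative only (an unsaved
# instance; assumes a configured Django project):
#
#     patient = Patient(name='Jane', urgency='red', location='Juja')
#     patient.urgency                 # 'red' (value stored in the DB)
#     patient.get_urgency_display()   # 'High severity' (from URGENCY_LEVELS)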
| 30.5
| 98
| 0.632442
| 384
| 3,477
| 5.578125
| 0.335938
| 0.070028
| 0.07563
| 0.10084
| 0.287115
| 0.20775
| 0.118114
| 0.118114
| 0.118114
| 0.053221
| 0
| 0.011223
| 0.231234
| 3,477
| 113
| 99
| 30.769912
| 0.790123
| 0.071326
| 0
| 0.211765
| 0
| 0
| 0.178882
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082353
| false
| 0.035294
| 0.023529
| 0.035294
| 0.564706
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27bb547681e27f63805f0e3f2bcfba62a6d181f3
| 4,876
|
py
|
Python
|
distances/symmetric_amd_distance.py
|
npielawski/py_alpha_amd_release
|
6fb5b3cdef65ba8902daea050785dd73970002c2
|
[
"MIT"
] | 14
|
2019-02-12T20:30:23.000Z
|
2021-11-04T01:10:34.000Z
|
distances/symmetric_amd_distance.py
|
npielawski/py_alpha_amd_release
|
6fb5b3cdef65ba8902daea050785dd73970002c2
|
[
"MIT"
] | 2
|
2021-05-12T05:02:59.000Z
|
2021-10-11T14:40:10.000Z
|
distances/symmetric_amd_distance.py
|
npielawski/py_alpha_amd_release
|
6fb5b3cdef65ba8902daea050785dd73970002c2
|
[
"MIT"
] | 7
|
2019-02-20T12:19:28.000Z
|
2021-02-09T10:12:06.000Z
|
#
# Py-Alpha-AMD Registration Framework
# Author: Johan Ofverstedt
# Reference: Fast and Robust Symmetric Image Registration Based on Distances Combining Intensity and Spatial Information
#
# Copyright 2019 Johan Ofverstedt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
#
# Symmetric Average Minimal Distances (AMD) Distance implemented as a class.
#
import numpy as np
class SymmetricAMDDistance:
def __init__(self, symmetric_measure = True, squared_measure = False):
self.ref_image_source = None
self.flo_image_source = None
self.ref_image_target = None
self.flo_image_target = None
self.sampling_fraction = 1.0
self.sampling_count = np.nan
self.symmetric_measure = symmetric_measure
self.squared_measure = squared_measure
def set_ref_image_source(self, image):
self.ref_image_source = image
def set_flo_image_source(self, image):
self.flo_image_source = image
def set_ref_image_target(self, image):
self.ref_image_target = image
def set_flo_image_target(self, image):
self.flo_image_target = image
def set_sampling_fraction(self, sampling_fraction):
self.sampling_fraction = sampling_fraction
def initialize(self):
self.sampling_count_forward = self.ref_image_source.get_sampling_fraction_count(self.sampling_fraction)
self.sampling_count_inverse = self.flo_image_source.get_sampling_fraction_count(self.sampling_fraction)
def asymmetric_value_and_derivatives(self, transform, source, target, target_cp, sampling_count):
w_acc = 0.0
value_acc = 0.0
grad_acc = np.zeros(transform.get_param_count())
sampled_points = source.random_sample(sampling_count)
for q in range(len(sampled_points)):
sampled_points_q = sampled_points[q]
if sampled_points_q.size == 0:
continue
w_q = sampled_points_q[:, -1:]
pnts_q = sampled_points_q[:, 0:-1]
tf_pnts = transform.transform(pnts_q) + target_cp
(eval_pnts, eval_w) = target.compute_spatial_grad_and_value(tf_pnts, w_q, q)
values_q = eval_pnts[:, -1:]
grads_q = eval_pnts[:, :-1]
if self.squared_measure:
grads_q = 2.0 * values_q * grads_q
values_q = np.square(values_q)
value_acc = value_acc + np.sum(values_q)
w_acc = w_acc + np.sum(eval_w)
grad_q_2 = transform.grad(pnts_q, grads_q, False)
grad_acc[:] = grad_acc[:] + grad_q_2
#print("grad_acc: " + str(grad_acc))
if w_acc < 0.000001:
w_acc = 1.0
#print("w_acc: " + str(w_acc))
#print("grad_acc: " + str(grad_acc))
w_rec = 1.0 / w_acc
value_acc = value_acc * w_rec
grad_acc[:] = grad_acc[:] * w_rec
#print("grad_acc: " + str(grad_acc))
return (value_acc, grad_acc)
def value_and_derivatives(self, transform):
ref_cp = self.ref_image_source.get_center_point()
flo_cp = self.flo_image_source.get_center_point()
(forward_value, forward_grad) = self.asymmetric_value_and_derivatives(transform, self.ref_image_source, self.flo_image_target, flo_cp, self.sampling_count_forward)
if self.symmetric_measure:
inv_transform = transform.invert()
(inverse_value, inverse_grad) = self.asymmetric_value_and_derivatives(inv_transform, self.flo_image_source, self.ref_image_target, ref_cp, self.sampling_count_inverse)
inverse_grad = transform.grad_inverse_to_forward(inverse_grad)
value = 0.5 * (forward_value + inverse_value)
grad = 0.5 * (forward_grad + inverse_grad)
else:
value = forward_value
grad = forward_grad
return (value, grad)
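# --- Editor's sketch (hedged addition) ---
# value_and_derivatives() above symmetrizes the measure: evaluate forward
# (ref -> flo) and inverse (flo -> ref), map the inverse gradient back into
# forward parameters, then average values and gradients. The averaging step
# in isolation, with toy numbers:
if __name__ == '__main__':
    forward_value, forward_grad = 0.40, np.array([0.2, -0.1])
    inverse_value, inverse_grad = 0.30, np.array([0.1, 0.3])  # already mapped forward
    value = 0.5 * (forward_value + inverse_value)   # 0.35
    grad = 0.5 * (forward_grad + inverse_grad)      # array([0.15, 0.1])
    print(value, grad)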
| 41.322034
| 179
| 0.689295
| 671
| 4,876
| 4.724292
| 0.274218
| 0.04164
| 0.030284
| 0.028391
| 0.208833
| 0.078864
| 0.0347
| 0.0347
| 0.0347
| 0
| 0
| 0.009376
| 0.234413
| 4,876
| 117
| 180
| 41.675214
| 0.839807
| 0.295529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136364
| false
| 0
| 0.015152
| 0
| 0.19697
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27be070f86ae724315deda03de85e57e9b0b008d
| 5,645
|
py
|
Python
|
misc/util.py
|
winder/indexer
|
18f48f026f022cdeef92dcac558d3900d6ea798d
|
[
"MIT"
] | 87
|
2020-08-20T19:14:02.000Z
|
2022-03-30T21:31:59.000Z
|
misc/util.py
|
hassoon1986/indexer
|
0a58e9a78ba7684c7f4cfb4fe7cb24b3d4622d9b
|
[
"MIT"
] | 615
|
2020-06-03T14:13:29.000Z
|
2022-03-31T12:08:38.000Z
|
misc/util.py
|
hassoon1986/indexer
|
0a58e9a78ba7684c7f4cfb4fe7cb24b3d4622d9b
|
[
"MIT"
] | 58
|
2020-06-03T21:33:48.000Z
|
2022-03-26T15:39:50.000Z
|
#!/usr/bin/env python3
import atexit
import logging
import os
import random
import subprocess
import sys
import time
import msgpack
logger = logging.getLogger(__name__)
def maybedecode(x):
if hasattr(x, 'decode'):
return x.decode()
return x
def mloads(x):
return msgpack.loads(x, strict_map_key=False, raw=True)
def unmsgpack(ob):
"convert dict from msgpack.loads() with byte string keys to text string keys"
if isinstance(ob, dict):
od = {}
for k,v in ob.items():
k = maybedecode(k)
okv = False
if (not okv) and (k == 'note'):
try:
v = unmsgpack(mloads(v))
okv = True
except:
pass
if (not okv) and k in ('type', 'note'):
try:
v = v.decode()
okv = True
except:
pass
if not okv:
v = unmsgpack(v)
od[k] = v
return od
if isinstance(ob, list):
return [unmsgpack(v) for v in ob]
#if isinstance(ob, bytes):
# return base64.b64encode(ob).decode()
return ob
def _getio(p, od, ed):
if od is not None:
od = maybedecode(od)
elif p.stdout:
try:
od = maybedecode(p.stdout.read())
except:
            logger.error('subcommand out', exc_info=True)
if ed is not None:
ed = maybedecode(ed)
elif p.stderr:
try:
ed = maybedecode(p.stderr.read())
except:
            logger.error('subcommand err', exc_info=True)
return od, ed
def xrun(cmd, *args, **kwargs):
timeout = kwargs.pop('timeout', None)
kwargs['stdout'] = subprocess.PIPE
kwargs['stderr'] = subprocess.STDOUT
cmdr = ' '.join(map(repr,cmd))
try:
p = subprocess.Popen(cmd, *args, **kwargs)
except Exception as e:
logger.error('subprocess failed {}'.format(cmdr), exc_info=True)
raise
stdout_data, stderr_data = None, None
try:
if timeout:
stdout_data, stderr_data = p.communicate(timeout=timeout)
else:
stdout_data, stderr_data = p.communicate()
except subprocess.TimeoutExpired as te:
logger.error('subprocess timed out {}'.format(cmdr), exc_info=True)
stdout_data, stderr_data = _getio(p, stdout_data, stderr_data)
if stdout_data:
sys.stderr.write('output from {}:\n{}\n\n'.format(cmdr, stdout_data))
if stderr_data:
sys.stderr.write('stderr from {}:\n{}\n\n'.format(cmdr, stderr_data))
raise
except Exception as e:
logger.error('subprocess exception {}'.format(cmdr), exc_info=True)
stdout_data, stderr_data = _getio(p, stdout_data, stderr_data)
if stdout_data:
sys.stderr.write('output from {}:\n{}\n\n'.format(cmdr, stdout_data))
if stderr_data:
sys.stderr.write('stderr from {}:\n{}\n\n'.format(cmdr, stderr_data))
raise
if p.returncode != 0:
logger.error('cmd failed ({}) {}'.format(p.returncode, cmdr))
stdout_data, stderr_data = _getio(p, stdout_data, stderr_data)
if stdout_data:
sys.stderr.write('output from {}:\n{}\n\n'.format(cmdr, stdout_data))
if stderr_data:
sys.stderr.write('stderr from {}:\n{}\n\n'.format(cmdr, stderr_data))
raise Exception('error: cmd failed: {}'.format(cmdr))
if logger.isEnabledFor(logging.DEBUG):
logger.debug('cmd success: %s\n%s\n%s\n', cmdr, maybedecode(stdout_data), maybedecode(stderr_data))
def atexitrun(cmd, *args, **kwargs):
cargs = [cmd]+list(args)
atexit.register(xrun, *cargs, **kwargs)
def find_indexer(indexer_bin, exc=True):
if indexer_bin:
return indexer_bin
# manually search local build and PATH for algorand-indexer
path = ['cmd/algorand-indexer'] + os.getenv('PATH').split(':')
for pd in path:
ib = os.path.join(pd, 'algorand-indexer')
if os.path.exists(ib):
return ib
msg = 'could not find algorand-indexer. use --indexer-bin or PATH environment variable.'
if exc:
raise Exception(msg)
logger.error(msg)
return None
def ensure_test_db(connection_string, keep_temps=False):
if connection_string:
# use the passed db
return connection_string
# create a temporary database
dbname = 'e2eindex_{}_{}'.format(int(time.time()), random.randrange(1000))
xrun(['dropdb', '--if-exists', dbname], timeout=5)
xrun(['createdb', dbname], timeout=5)
if not keep_temps:
atexitrun(['dropdb', '--if-exists', dbname], timeout=5)
else:
logger.info("leaving db %r", dbname)
return 'dbname={} sslmode=disable'.format(dbname)
# whoever calls this will need to import boto and get the s3 client
def firstFromS3Prefix(s3, bucket, prefix, desired_filename, outdir=None, outpath=None):
response = s3.list_objects_v2(Bucket=bucket, Prefix=prefix, MaxKeys=10)
if (not response.get('KeyCount')) or ('Contents' not in response):
raise Exception('nothing found in s3://{}/{}'.format(bucket, prefix))
for x in response['Contents']:
path = x['Key']
_, fname = path.rsplit('/', 1)
if fname == desired_filename:
if outpath is None:
if outdir is None:
outdir = '.'
outpath = os.path.join(outdir, desired_filename)
logger.info('s3://%s/%s -> %s', bucket, x['Key'], outpath)
s3.download_file(bucket, x['Key'], outpath)
return
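# --- Editor's sketch (hedged addition) ---
# Typical wiring of the helpers above in a local test driver: create a
# throwaway Postgres database (requires dropdb/createdb on PATH), register
# cleanup via atexit, and shell out with full output capture on failure.
# The commands below are illustrative:
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    connstr = ensure_test_db(None)      # e.g. 'dbname=e2eindex_... sslmode=disable'
    logger.info('using %s', connstr)
    xrun(['echo', 'hello'], timeout=5)  # success is logged at DEBUG level
    atexitrun(['echo', 'cleanup ran'])  # queued to run at interpreter exit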
| 34.845679
| 107
| 0.589725
| 719
| 5,645
| 4.536857
| 0.255911
| 0.04905
| 0.044145
| 0.055181
| 0.274065
| 0.244329
| 0.207541
| 0.168302
| 0.168302
| 0.168302
| 0
| 0.006158
| 0.280779
| 5,645
| 161
| 108
| 35.062112
| 0.797291
| 0.058813
| 0
| 0.255319
| 0
| 0
| 0.130063
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06383
| false
| 0.014184
| 0.056738
| 0.007092
| 0.212766
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27c0f66f70a59c9a16bcacfd772c973fa3bad2e9
| 11,093
|
py
|
Python
|
coconut/_pyparsing.py
|
evhub/coconut
|
27a4af9dc06667870f736f20c862930001b8cbb2
|
[
"Apache-2.0"
] | 3,624
|
2015-02-22T07:06:18.000Z
|
2022-03-31T03:38:00.000Z
|
coconut/_pyparsing.py
|
evhub/coconut
|
27a4af9dc06667870f736f20c862930001b8cbb2
|
[
"Apache-2.0"
] | 627
|
2015-03-31T01:18:53.000Z
|
2022-03-28T07:48:31.000Z
|
coconut/_pyparsing.py
|
evhub/coconut
|
27a4af9dc06667870f736f20c862930001b8cbb2
|
[
"Apache-2.0"
] | 162
|
2016-03-02T05:22:55.000Z
|
2022-03-31T23:42:55.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------------------------------------------------
# INFO:
# -----------------------------------------------------------------------------------------------------------------------
"""
Author: Evan Hubinger
License: Apache 2.0
Description: Wrapper around PyParsing that selects the best available implementation.
"""
# -----------------------------------------------------------------------------------------------------------------------
# IMPORTS:
# -----------------------------------------------------------------------------------------------------------------------
from __future__ import print_function, absolute_import, unicode_literals, division
from coconut.root import * # NOQA
import os
import sys
import traceback
import functools
import inspect
from warnings import warn
from collections import defaultdict
from coconut.constants import (
PURE_PYTHON,
PYPY,
use_fast_pyparsing_reprs,
use_packrat_parser,
packrat_cache_size,
default_whitespace_chars,
varchars,
min_versions,
pure_python_env_var,
enable_pyparsing_warnings,
use_left_recursion_if_available,
)
from coconut.util import get_clock_time # NOQA
from coconut.util import (
ver_str_to_tuple,
ver_tuple_to_str,
get_next_version,
)
# warning: do not name this file cPyparsing or pyparsing or it might collide with the following imports
try:
if PURE_PYTHON:
raise ImportError("skipping cPyparsing check due to " + pure_python_env_var + " = " + os.environ.get(pure_python_env_var, ""))
import cPyparsing as _pyparsing
from cPyparsing import * # NOQA
from cPyparsing import __version__
PYPARSING_PACKAGE = "cPyparsing"
PYPARSING_INFO = "Cython cPyparsing v" + __version__
except ImportError:
try:
import pyparsing as _pyparsing
from pyparsing import * # NOQA
from pyparsing import __version__
PYPARSING_PACKAGE = "pyparsing"
PYPARSING_INFO = "Python pyparsing v" + __version__
except ImportError:
traceback.print_exc()
__version__ = None
PYPARSING_PACKAGE = "cPyparsing"
PYPARSING_INFO = None
# -----------------------------------------------------------------------------------------------------------------------
# VERSION CHECKING:
# -----------------------------------------------------------------------------------------------------------------------
min_ver = min(min_versions["pyparsing"], min_versions["cPyparsing"][:3]) # inclusive
max_ver = get_next_version(max(min_versions["pyparsing"], min_versions["cPyparsing"][:3])) # exclusive
cur_ver = None if __version__ is None else ver_str_to_tuple(__version__)
if cur_ver is None or cur_ver < min_ver:
min_ver_str = ver_tuple_to_str(min_ver)
raise ImportError(
"Coconut requires pyparsing/cPyparsing version >= " + min_ver_str
+ ("; got " + PYPARSING_INFO if PYPARSING_INFO is not None else "")
+ " (run '{python} -m pip install --upgrade {package}' to fix)".format(python=sys.executable, package=PYPARSING_PACKAGE),
)
elif cur_ver >= max_ver:
max_ver_str = ver_tuple_to_str(max_ver)
warn(
"This version of Coconut was built for pyparsing/cPyparsing versions < " + max_ver_str
+ ("; got " + PYPARSING_INFO if PYPARSING_INFO is not None else "")
+ " (run '{python} -m pip install {package}<{max_ver}' to fix)".format(python=sys.executable, package=PYPARSING_PACKAGE, max_ver=max_ver_str),
)
# -----------------------------------------------------------------------------------------------------------------------
# SETUP:
# -----------------------------------------------------------------------------------------------------------------------
if cur_ver >= (3,):
MODERN_PYPARSING = True
_trim_arity = _pyparsing.core._trim_arity
_ParseResultsWithOffset = _pyparsing.core._ParseResultsWithOffset
else:
MODERN_PYPARSING = False
_trim_arity = _pyparsing._trim_arity
_ParseResultsWithOffset = _pyparsing._ParseResultsWithOffset
USE_COMPUTATION_GRAPH = (
not MODERN_PYPARSING # not yet supported
and not PYPY # experimentally determined
)
if enable_pyparsing_warnings:
if MODERN_PYPARSING:
_pyparsing.enable_all_warnings()
else:
_pyparsing._enable_all_warnings()
_pyparsing.__diag__.warn_name_set_on_empty_Forward = False
if MODERN_PYPARSING and use_left_recursion_if_available:
ParserElement.enable_left_recursion()
elif use_packrat_parser:
ParserElement.enablePackrat(packrat_cache_size)
ParserElement.setDefaultWhitespaceChars(default_whitespace_chars)
Keyword.setDefaultKeywordChars(varchars)
# -----------------------------------------------------------------------------------------------------------------------
# FAST REPRS:
# -----------------------------------------------------------------------------------------------------------------------
if PY2:
def fast_repr(cls):
"""A very simple, fast __repr__/__str__ implementation."""
return "<" + cls.__name__ + ">"
else:
fast_repr = object.__repr__
_old_pyparsing_reprs = []
def set_fast_pyparsing_reprs():
"""Make pyparsing much faster by preventing it from computing expensive nested string representations."""
for obj in vars(_pyparsing).values():
try:
if issubclass(obj, ParserElement):
_old_pyparsing_reprs.append((obj, (obj.__repr__, obj.__str__)))
obj.__repr__ = functools.partial(fast_repr, obj)
obj.__str__ = functools.partial(fast_repr, obj)
except TypeError:
pass
def unset_fast_pyparsing_reprs():
"""Restore pyparsing's default string representations for ease of debugging."""
for obj, (repr_method, str_method) in _old_pyparsing_reprs:
obj.__repr__ = repr_method
obj.__str__ = str_method
if use_fast_pyparsing_reprs:
set_fast_pyparsing_reprs()
# -----------------------------------------------------------------------------------------------------------------------
# PROFILING:
# -----------------------------------------------------------------------------------------------------------------------
_timing_info = [None] # in list to allow reassignment
class _timing_sentinel(object):
pass
def add_timing_to_method(cls, method_name, method):
"""Add timing collection to the given method.
It's a monstrosity, but it's only used for profiling."""
from coconut.terminal import internal_assert # hide to avoid circular import
args, varargs, keywords, defaults = inspect.getargspec(method)
internal_assert(args[:1] == ["self"], "cannot add timing to method", method_name)
if not defaults:
defaults = []
num_undefaulted_args = len(args) - len(defaults)
def_args = []
call_args = []
fix_arg_defaults = []
defaults_dict = {}
for i, arg in enumerate(args):
if i >= num_undefaulted_args:
default = defaults[i - num_undefaulted_args]
def_args.append(arg + "=_timing_sentinel")
defaults_dict[arg] = default
fix_arg_defaults.append(
"""
if {arg} is _timing_sentinel:
{arg} = _exec_dict["defaults_dict"]["{arg}"]
""".strip("\n").format(
arg=arg,
),
)
else:
def_args.append(arg)
call_args.append(arg)
if varargs:
def_args.append("*" + varargs)
call_args.append("*" + varargs)
if keywords:
def_args.append("**" + keywords)
call_args.append("**" + keywords)
new_method_name = "new_" + method_name + "_func"
_exec_dict = globals().copy()
_exec_dict.update(locals())
new_method_code = """
def {new_method_name}({def_args}):
{fix_arg_defaults}
_all_args = (lambda *args, **kwargs: args + tuple(kwargs.values()))({call_args})
_exec_dict["internal_assert"](not any(_arg is _timing_sentinel for _arg in _all_args), "error handling arguments in timed method {new_method_name}({def_args}); got", _all_args)
_start_time = _exec_dict["get_clock_time"]()
try:
return _exec_dict["method"]({call_args})
finally:
_timing_info[0][str(self)] += _exec_dict["get_clock_time"]() - _start_time
{new_method_name}._timed = True
""".format(
fix_arg_defaults="\n".join(fix_arg_defaults),
new_method_name=new_method_name,
def_args=", ".join(def_args),
call_args=", ".join(call_args),
)
exec(new_method_code, _exec_dict)
setattr(cls, method_name, _exec_dict[new_method_name])
return True
def collect_timing_info():
"""Modifies pyparsing elements to time how long they're executed for.
It's a monstrosity, but it's only used for profiling."""
from coconut.terminal import logger # hide to avoid circular imports
logger.log("adding timing to pyparsing elements:")
_timing_info[0] = defaultdict(float)
for obj in vars(_pyparsing).values():
if isinstance(obj, type) and issubclass(obj, ParserElement):
added_timing = False
for attr_name in dir(obj):
attr = getattr(obj, attr_name)
if (
callable(attr)
and not isinstance(attr, ParserElement)
and not getattr(attr, "_timed", False)
and attr_name not in (
"__getattribute__",
"__setattribute__",
"__init_subclass__",
"__subclasshook__",
"__class__",
"__setattr__",
"__getattr__",
"__new__",
"__init__",
"__str__",
"__repr__",
"__hash__",
"__eq__",
"_trim_traceback",
"_ErrorStop",
"enablePackrat",
"inlineLiteralsUsing",
"setDefaultWhitespaceChars",
"setDefaultKeywordChars",
"resetCache",
)
):
added_timing |= add_timing_to_method(obj, attr_name, attr)
if added_timing:
logger.log("\tadded timing to", obj)
def print_timing_info():
"""Print timing_info collected by collect_timing_info()."""
print(
"""
=====================================
Timing info:
(timed {num} total pyparsing objects)
=====================================
""".rstrip().format(
num=len(_timing_info[0]),
),
)
sorted_timing_info = sorted(_timing_info[0].items(), key=lambda kv: kv[1])
for method_name, total_time in sorted_timing_info:
print("{method_name}:\t{total_time}".format(method_name=method_name, total_time=total_time))
| 35.554487
| 180
| 0.554945
| 1,099
| 11,093
| 5.187443
| 0.259327
| 0.026311
| 0.018242
| 0.00842
| 0.167164
| 0.106297
| 0.081038
| 0.066304
| 0.066304
| 0.047711
| 0
| 0.001516
| 0.22672
| 11,093
| 311
| 181
| 35.66881
| 0.663092
| 0.22077
| 0
| 0.093458
| 0
| 0.004673
| 0.163846
| 0.041652
| 0
| 0
| 0
| 0
| 0.014019
| 1
| 0.028037
| false
| 0.009346
| 0.11215
| 0
| 0.158879
| 0.023364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27c1c2dd0bdd326bf942be3440f758392e7db45f
| 4,948
|
py
|
Python
|
tests/test_explicit_hll.py
|
aholyoke/python-hll
|
30793aeb18103600fce0f3ad0b0c9e99e8b756fe
|
[
"MIT"
] | 13
|
2019-11-19T07:38:46.000Z
|
2022-02-11T13:23:25.000Z
|
tests/test_explicit_hll.py
|
aholyoke/python-hll
|
30793aeb18103600fce0f3ad0b0c9e99e8b756fe
|
[
"MIT"
] | 4
|
2019-12-12T04:19:34.000Z
|
2021-06-09T17:52:52.000Z
|
tests/test_explicit_hll.py
|
aholyoke/python-hll
|
30793aeb18103600fce0f3ad0b0c9e99e8b756fe
|
[
"MIT"
] | 6
|
2019-11-06T21:33:25.000Z
|
2022-02-21T14:43:35.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
from python_hll.hlltype import HLLType
from python_hll.hll import HLL
from python_hll.serialization import SerializationUtil
"""Unit tests for BitVector."""
def test_add_basic():
"""
Tests basic set semantics of ``HLL.add_raw()``.
"""
# Adding a single positive value to an empty set should work.
hll = new_hll(128) # arbitrary
hll.add_raw(1) # positive
assert hll.cardinality() == 1
# Adding a single negative value to an empty set should work.
hll = new_hll(128) # arbitrary
hll.add_raw(-1) # negative
assert hll.cardinality() == 1
# Adding a duplicate value to a set should be a no-op.
hll = new_hll(128) # arbitrary
hll.add_raw(1) # positive
hll.add_raw(1) # dupe
assert hll.cardinality() == 1
def test_union():
"""
Tests ``HLL.union()``.
"""
# Unioning two distinct sets should work
hll_a = new_hll(128) # arbitrary
hll_b = new_hll(128) # arbitrary
hll_a.add_raw(1)
hll_a.add_raw(2)
hll_b.add_raw(3)
hll_a.union(hll_b)
assert hll_a.cardinality() == 3
# Unioning two sets whose union doesn't exceed the cardinality cap should not promote
hll_a = new_hll(128) # arbitrary
hll_b = new_hll(128) # arbitrary
hll_a.add_raw(1)
hll_a.add_raw(2)
hll_b.add_raw(1)
hll_a.union(hll_b)
assert hll_a.cardinality() == 2
assert hll_a.get_type() == HLLType.EXPLICIT
# Unioning two sets whose union exceeds the cardinality cap should promote
hll_a = new_hll(128) # arbitrary
hll_b = new_hll(128) # arbitrary
for i in range(0, 128):
hll_a.add_raw(i)
hll_b.add_raw(i+128)
hll_a.union(hll_b)
assert hll_a.get_type() == HLLType.SPARSE
def test_clear():
"""
Tests ``HLL.clear()``
"""
hll = new_hll(128) # arbitrary
hll.add_raw(1)
hll.clear()
assert hll.cardinality() == 0
def test_to_from_bytes():
"""
Tests ``HLL.to_bytes()`` and ``HLL.from_bytes()``.
"""
schema_version = SerializationUtil.DEFAULT_SCHEMA_VERSION
hll_type = HLLType.EXPLICIT
padding = schema_version.padding_bytes(hll_type)
bytes_per_word = 8
# Should work on an empty set
hll = new_hll(128)
bytes = hll.to_bytes(schema_version)
assert len(bytes) == padding # no elements, just padding
in_hll = HLL.from_bytes(bytes)
assert_elements_equal(hll, in_hll)
# Should work on a partially filled set
hll = new_hll(128)
for i in range(0, 3):
hll.add_raw(i)
bytes = hll.to_bytes(schema_version)
assert len(bytes) == padding + bytes_per_word * 3
in_hll = HLL.from_bytes(bytes)
assert_elements_equal(hll, in_hll)
# Should work on a full set
explicit_threshold = 128
hll = new_hll(explicit_threshold)
for i in range(0, explicit_threshold):
hll.add_raw(27 + i)
bytes = hll.to_bytes(schema_version)
assert len(bytes) == padding + bytes_per_word * explicit_threshold
in_hll = HLL.from_bytes(bytes)
assert_elements_equal(hll, in_hll)
def test_random_values():
"""
Tests correctness against `set()`.
"""
explicit_threshold = 4096
canonical = set()
hll = new_hll(explicit_threshold)
seed = 1 # constant so results are reproducible
random.seed(seed)
max_java_long = 9223372036854775807
for i in range(0, explicit_threshold):
random_long = random.randint(1, max_java_long)
canonical.add(random_long)
hll.add_raw(random_long)
canonical_cardinality = len(canonical)
assert hll.cardinality() == canonical_cardinality
def test_promotion():
"""
Tests promotion to ``HLLType.SPARSE`` and ``HLLType.FULL``.
"""
explicit_threshold = 128
hll = HLL.create_for_testing(11, 5, explicit_threshold, 256, HLLType.EXPLICIT)
for i in range(0, explicit_threshold + 1):
hll.add_raw(i)
assert hll.get_type() == HLLType.SPARSE
hll = HLL(11, 5, 4, False, HLLType.EXPLICIT) # expthresh=4 => explicit_threshold=8
for i in range(0, 9):
hll.add_raw(i)
assert hll.get_type() == HLLType.FULL
# ------------------------------------------------------------
# assertion helpers
def assert_elements_equal(hll_a, hll_b):
"""
Asserts that values in both sets are exactly equal.
"""
assert hll_a._explicit_storage == hll_b._explicit_storage
def new_hll(explicit_threshold):
"""
Builds a ``HLLType.EXPLICIT`` ``HLL`` instance with the specified
explicit threshold.
:param explicit_threshold: explicit threshold to use for the constructed
``HLL``. This must be greater than zero.
:type explicit_threshold: int
:returns: A default-sized ``HLLType.EXPLICIT`` empty ``HLL`` instance. This
will never be ``None``.
:rtype: HLL
"""
return HLL.create_for_testing(11, 5, explicit_threshold, 256, HLLType.EXPLICIT)
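# --- Illustrative sketch (not from the original test module): a round-trip
# through the API exercised above, reusing this file's own helpers.
if __name__ == "__main__":
    demo = new_hll(128)  # arbitrary explicit threshold, as in the tests
    for value in (1, 2, 3):
        demo.add_raw(value)
    payload = demo.to_bytes(SerializationUtil.DEFAULT_SCHEMA_VERSION)
    restored = HLL.from_bytes(payload)
    assert restored.cardinality() == 3
    assert_elements_equal(demo, restored)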
| 27.337017
| 89
| 0.653597
| 703
| 4,948
| 4.391181
| 0.206259
| 0.036929
| 0.034985
| 0.058309
| 0.444121
| 0.389375
| 0.358925
| 0.330742
| 0.323291
| 0.269841
| 0
| 0.031904
| 0.227162
| 4,948
| 180
| 90
| 27.488889
| 0.775366
| 0.295069
| 0
| 0.463158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.084211
| false
| 0
| 0.042105
| 0
| 0.136842
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27c9faa515cbfcb516d2a78da11f8590793a0cac
| 6,912
|
py
|
Python
|
src/PyTorch2ONNX/PyTorch2ONNX_Run_in_ONNX_RUNTIME.py
|
Yulv-git/Model_Inference_Deployment
|
623f9955dfb60fe7af9d17415bfec58fc4c86c1b
|
[
"MIT"
] | 4
|
2022-02-05T14:16:05.000Z
|
2022-03-27T13:35:06.000Z
|
src/PyTorch2ONNX/PyTorch2ONNX_Run_in_ONNX_RUNTIME.py
|
Yulv-git/Model_Inference_Deployment
|
623f9955dfb60fe7af9d17415bfec58fc4c86c1b
|
[
"MIT"
] | null | null | null |
src/PyTorch2ONNX/PyTorch2ONNX_Run_in_ONNX_RUNTIME.py
|
Yulv-git/Model_Inference_Deployment
|
623f9955dfb60fe7af9d17415bfec58fc4c86c1b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
'''
Author: Shuangchi He / Yulv
Email: yulvchi@qq.com
Date: 2022-01-28 14:21:09
Motto: Entities should not be multiplied unnecessarily.
LastEditors: Shuangchi He
LastEditTime: 2022-04-06 11:40:23
FilePath: /Model_Inference_Deployment/src/PyTorch2ONNX/PyTorch2ONNX_Run_in_ONNX_RUNTIME.py
Description: Init from https://pytorch.org/tutorials/advanced/super_resolution_with_onnxruntime.html
Exporting a model from PyTorch to ONNX and running it using ONNX RUNTIME.
'''
import argparse
import os
import numpy as np
from PIL import Image
import torch.nn as nn
import torch.nn.init as init
import torch.utils.model_zoo as model_zoo
import torchvision.transforms as transforms
import onnx
import torch.onnx
import onnxruntime
from utils import check_dir, torchtensor2numpy
# Super Resolution model definition in PyTorch
class SuperResolutionNet(nn.Module):
def __init__(self, upscale_factor, inplace=False):
super(SuperResolutionNet, self).__init__()
self.relu = nn.ReLU(inplace=inplace)
self.conv1 = nn.Conv2d(1, 64, (5, 5), (1, 1), (2, 2))
self.conv2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))
self.conv3 = nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1))
self.conv4 = nn.Conv2d(32, upscale_factor ** 2, (3, 3), (1, 1), (1, 1))
self.pixel_shuffle = nn.PixelShuffle(upscale_factor)
self._initialize_weights()
def forward(self, x):
x = self.relu(self.conv1(x))
x = self.relu(self.conv2(x))
x = self.relu(self.conv3(x))
x = self.pixel_shuffle(self.conv4(x))
return x
def _initialize_weights(self):
init.orthogonal_(self.conv1.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv2.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv3.weight, init.calculate_gain('relu'))
init.orthogonal_(self.conv4.weight)
def PyTorch2ONNX(torch_model, dummy_input_to_model, onnx_save_dir, check_onnx_model=True):
''' Export the model. (PyTorch2ONNX) '''
torch.onnx.export(
torch_model, # model being run.
dummy_input_to_model, # model input (or a tuple for multiple inputs).
onnx_save_dir, # where to save the model (can be a file or file-like object).
export_params=True, # store the trained parameter weights inside the model file.
opset_version=10, # the ONNX version to export the model to.
do_constant_folding=True, # whether to execute constant folding for optimization.
input_names=['input'], # the model's input names.
output_names=['output'], # the model's output names.
dynamic_axes={ # variable length axes.
'input': {0: 'batch_size'},
'output': {0: 'batch_size'}})
if check_onnx_model: # Verify the model’s structure and confirm that the model has a valid schema.
onnx_model = onnx.load(onnx_save_dir)
onnx.checker.check_model(onnx_model)
def Verify_ONNX_in_ONNX_RUNTIME(onnx_dir, dummy_input_to_model, torch_out):
''' Verify ONNX Runtime and PyTorch are computing the same value for the model. '''
# Create an inference session.
ort_session = onnxruntime.InferenceSession(onnx_dir)
# Compute ONNX Runtime output prediction.
ort_inputs = {ort_session.get_inputs()[0].name: torchtensor2numpy(dummy_input_to_model)}
ort_outs = ort_session.run(None, ort_inputs)
# Compare ONNX Runtime and PyTorch results
np.testing.assert_allclose(torchtensor2numpy(torch_out), ort_outs[0], rtol=1e-03, atol=1e-05)
print("Exported model has been tested with ONNXRuntime, and the result looks good!")
def Run_ONNX_in_ONNX_RUNTIME(onnx_dir, img_path, img_save_path):
''' Running the model on an image using ONNX Runtime. '''
# Take the tensor representing the greyscale resized image.
img = Image.open(img_path)
resize = transforms.Resize([224, 224])
img = resize(img)
img_ycbcr = img.convert('YCbCr')
img_y, img_cb, img_cr = img_ycbcr.split()
to_tensor = transforms.ToTensor()
img_y = to_tensor(img_y)
img_y.unsqueeze_(0)
# Create an inference session.
ort_session = onnxruntime.InferenceSession(onnx_dir)
# Run the ONNX model in ONNX Runtime.
ort_inputs = {ort_session.get_inputs()[0].name: torchtensor2numpy(img_y)}
ort_outs = ort_session.run(None, ort_inputs)
img_out_y = ort_outs[0]
# Get the output image.
img_out_y = Image.fromarray(np.uint8((img_out_y[0] * 255.0).clip(0, 255)[0]), mode='L')
final_img = Image.merge(
"YCbCr", [
img_out_y,
img_cb.resize(img_out_y.size, Image.BICUBIC),
img_cr.resize(img_out_y.size, Image.BICUBIC),
]).convert("RGB")
# Save the image, compare this with the output image from mobile device.
final_img.save(img_save_path)
def main(args):
# Create the super-resolution model.
torch_model = SuperResolutionNet(upscale_factor=3)
# Initialize model with the pretrained weights.
def map_location(storage, loc): return storage
if torch.cuda.is_available():
map_location = None
torch_model.load_state_dict(model_zoo.load_url(
url='https://s3.amazonaws.com/pytorch/test_data/export/superres_epoch100-44c6958e.pth', map_location=map_location))
# Set the model to inference mode.
torch_model.eval()
# Input to the model.
batch_size = 1
dummy_input_to_model = torch.randn(batch_size, 1, 224, 224, requires_grad=True)
torch_out = torch_model(dummy_input_to_model)
# Export the model. (PyTorch2ONNX)
PyTorch2ONNX(torch_model, dummy_input_to_model, args.onnx_save_dir, args.check_onnx_model)
# Verify ONNX Runtime and PyTorch are computing the same value for the model.
Verify_ONNX_in_ONNX_RUNTIME(args.onnx_save_dir, dummy_input_to_model, torch_out)
# Running the model on an image using ONNX Runtime.
Run_ONNX_in_ONNX_RUNTIME(args.onnx_save_dir, args.img_path, args.img_save_path)
if __name__ == "__main__":
parse = argparse.ArgumentParser(description='PyTorch2ONNX_Run_in_ONNX_RUNTIME')
parse.add_argument('--img_path', type=str, default='{}/data/cat.jpg'.format(os.path.dirname(os.path.abspath(__file__))))
parse.add_argument('--check_onnx_model', type=lambda s: s.lower() in ('1', 'true', 'yes'), default=True)  # argparse's type=bool treats any non-empty string (even 'False') as True
parse.add_argument('--output_dir', type=str, default='{}/output'.format(os.path.dirname(os.path.abspath(__file__))))
args = parse.parse_args()
check_dir(args.output_dir)
args.onnx_save_dir = '{}/super_resolution.onnx'.format(args.output_dir)
args.img_save_path = '{}/cat_superres_with_ort.jpg'.format(args.output_dir)
main(args)
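# --- Illustrative sketch (not from the original script): because the export
# above declares dynamic_axes on 'input'/'output', the saved model should
# accept batch sizes other than the traced batch_size=1. A rough check,
# assuming the script has already written args.onnx_save_dir:
#
#   sess = onnxruntime.InferenceSession(args.onnx_save_dir)
#   batch4 = np.random.randn(4, 1, 224, 224).astype(np.float32)
#   outs = sess.run(None, {sess.get_inputs()[0].name: batch4})
#   assert outs[0].shape[0] == 4  # the batch dimension follows the input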
| 41.638554
| 124
| 0.684172
| 973
| 6,912
| 4.623844
| 0.280576
| 0.026673
| 0.021338
| 0.030229
| 0.273172
| 0.237386
| 0.221383
| 0.172705
| 0.0978
| 0.057791
| 0
| 0.026552
| 0.209925
| 6,912
| 165
| 125
| 41.890909
| 0.79729
| 0.252894
| 0
| 0.04
| 0
| 0
| 0.074154
| 0.016435
| 0
| 0
| 0
| 0
| 0.01
| 1
| 0.08
| false
| 0
| 0.12
| 0.01
| 0.22
| 0.01
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27cc788dc3d49e45198c96fa1cec36fea676e304
| 2,085
|
py
|
Python
|
scripts/dataset.py
|
MarcGroef/deeplearning
|
d1ef095fbe0f7e9b56017808d976efe7502e6b81
|
[
"MIT"
] | null | null | null |
scripts/dataset.py
|
MarcGroef/deeplearning
|
d1ef095fbe0f7e9b56017808d976efe7502e6b81
|
[
"MIT"
] | null | null | null |
scripts/dataset.py
|
MarcGroef/deeplearning
|
d1ef095fbe0f7e9b56017808d976efe7502e6b81
|
[
"MIT"
] | null | null | null |
import numpy as np
import tensorflow as tf
from sklearn.model_selection import StratifiedKFold
# Set dataset seed
np.random.seed(seed=842102)
class SingletonDecorator:
def __init__(self,klass):
self.klass = klass
self.instance = None
def __call__(self,*args,**kwds):
if self.instance is None:
self.instance = self.klass(*args,**kwds)
return self.instance
@SingletonDecorator
class Dataset(object):
def __init__(self, nSplits, split_index):
print("DATASET: You should only see this message once.")
(self._trainImages, self._trainLabels), (self._testImages, self._testLabels) = tf.keras.datasets.fashion_mnist.load_data()
# Cross validation
skf = StratifiedKFold(n_splits=nSplits)
indices_by_expIdx = []
for train_index, val_index in skf.split(self._trainImages, self._trainLabels):
indices_by_expIdx.append((train_index, val_index))
def convert_to_tf(data):
# reshape data to fit shape
data = data.astype('float32') / 255
return np.expand_dims(data, axis=-1)
def get_split(type, split_index):
# Get the training or validation data+labels, by given split
train, val = indices_by_expIdx[split_index]
indices = train
if type == 'validation':
indices = val
train_data = convert_to_tf(self._trainImages[indices])
train_labels = tf.keras.utils.to_categorical(self._trainLabels[indices])
return train_data, train_labels
self.trainImages = lambda : get_split('train', split_index)[0]
self.trainLabels = lambda : get_split('train', split_index)[1]
self.valImages = lambda : get_split('validation', split_index)[0]
self.valLabels = lambda : get_split('validation', split_index)[1]
self.testImages = lambda : convert_to_tf(self._testImages)
self.testLabels = lambda : tf.keras.utils.to_categorical(self._testLabels)
if __name__ == "__main__":
dataset = Dataset(nSplits=5, split_index=0)  # example arguments: 5-fold CV, first split
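# --- Illustrative sketch (not from the original script): SingletonDecorator
# makes every later constructor call return the first instance, so all
# consumers share one fold split, e.g.:
#
#   d1 = Dataset(nSplits=5, split_index=0)
#   d2 = Dataset(nSplits=5, split_index=3)  # ignored: same object as d1
#   assert d1 is d2
#   x_train, y_train = d1.trainImages(), d1.trainLabels()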
| 35.948276
| 130
| 0.659472
| 250
| 2,085
| 5.224
| 0.364
| 0.053599
| 0.042879
| 0.045942
| 0.140888
| 0.140888
| 0
| 0
| 0
| 0
| 0
| 0.010107
| 0.240767
| 2,085
| 57
| 131
| 36.578947
| 0.814908
| 0.056595
| 0
| 0
| 0
| 0
| 0.051988
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.075
| 0
| 0.325
| 0.025
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27cf1141da0cf1cbeff01d7fcd33d6536ff17b4d
| 1,962
|
py
|
Python
|
src/python/utils/image.py
|
Lamzigit/manifold_learning
|
f699fe4f25dbabdbc2dc9635c4e654b59806e17d
|
[
"MIT"
] | 10
|
2017-06-14T08:04:44.000Z
|
2021-07-06T07:13:16.000Z
|
src/python/utils/image.py
|
Lamzigit/manifold_learning
|
f699fe4f25dbabdbc2dc9635c4e654b59806e17d
|
[
"MIT"
] | 1
|
2020-11-18T13:08:43.000Z
|
2020-11-18T13:12:39.000Z
|
src/python/utils/image.py
|
Lamzigit/manifold_learning
|
f699fe4f25dbabdbc2dc9635c4e654b59806e17d
|
[
"MIT"
] | 3
|
2017-06-14T08:04:53.000Z
|
2019-11-18T13:21:15.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 15 14:03:52 2015
@author: jemanjohnson
"""
import numpy as np
import matplotlib.pyplot as plt
import os
import scipy.io
from sklearn import preprocessing
from time import time
from sklearn.preprocessing import MinMaxScaler
# Image Reshape Function
def img_as_array(img, gt=False):
"""Takes a N*M*D image
where:
* N - number of rows
* M - number of columns
* D - dimension of data
Returns:
--------
Image as an array with dimensions -
(N*M) by D
"""
if not gt:
img_array = img.reshape(
img.shape[0]*img.shape[1], img.shape[2])
else:
img_array = img.reshape(
img.shape[0]*img.shape[1])
return img_array
# Image Normalization function
def standardize(data):
"""
Quick function to standardize my data between 0 and 1
"""
return MinMaxScaler().fit_transform(data)
# Define HSI X and y Ground Truth pairing function
def img_gt_idx(img, img_gt, printinfo=False):
"""Takes a flattened image array and
extracts the image indices that correspond
to the ground truth that we have.
"""
# Find the non-zero entries
n_samples = (img_gt>0).sum()
# Find the classification labels
classlabels = np.unique(img_gt[img_gt>0])
# Create X matrix containing the features
X = img[img_gt>0,:]
# Create y matrix containing the labels
y = img_gt[img_gt>0]
# Print out useful information
if printinfo:
print('We have {n} ground-truth samples.'.format(
n=n_samples))
print('The training data includes {n} classes: {classes}'.format(
n=classlabels.size, classes=classlabels.T))
print('Dimensions of matrix X: {sizeX}'.format(sizeX=X.shape))
print('Dimensions of matrix y: {sizey}'.format(sizey=y.shape))
return X, y
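# --- Illustrative sketch (not from the original module): the typical flow for
# a hyperspectral cube `img` of shape (N, M, D) with ground truth `img_gt` of
# shape (N, M), matching the docstrings above.
if __name__ == '__main__':
    img = np.random.rand(4, 5, 3)             # toy 4x5 image with 3 bands
    img_gt = np.random.randint(0, 3, (4, 5))  # labels; 0 = unlabeled
    X_all = standardize(img_as_array(img))    # (20, 3), scaled to [0, 1]
    gt_flat = img_as_array(img_gt, gt=True)   # (20,)
    X, y = img_gt_idx(X_all, gt_flat, printinfo=True)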
| 25.480519
| 73
| 0.618756
| 270
| 1,962
| 4.433333
| 0.418519
| 0.037594
| 0.02005
| 0.030075
| 0.07853
| 0.06015
| 0.06015
| 0.06015
| 0.06015
| 0.06015
| 0
| 0.016961
| 0.278797
| 1,962
| 77
| 74
| 25.480519
| 0.828975
| 0.357798
| 0
| 0.066667
| 0
| 0
| 0.124138
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.233333
| 0
| 0.433333
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27d2e55ac297493daba610855afc860802f2e6c9
| 2,074
|
py
|
Python
|
tests/test_visualize_poll.py
|
UBC-MDS/tweepypoll
|
62ea4ea0ab381eecf8f24bd13da0a0cdfb18eaa6
|
[
"MIT"
] | null | null | null |
tests/test_visualize_poll.py
|
UBC-MDS/tweepypoll
|
62ea4ea0ab381eecf8f24bd13da0a0cdfb18eaa6
|
[
"MIT"
] | 30
|
2022-01-14T17:10:08.000Z
|
2022-02-02T21:17:05.000Z
|
tests/test_visualize_poll.py
|
UBC-MDS/tweepypoll
|
62ea4ea0ab381eecf8f24bd13da0a0cdfb18eaa6
|
[
"MIT"
] | 1
|
2022-01-14T16:10:11.000Z
|
2022-01-14T16:10:11.000Z
|
from tweepypoll.tweepypoll import visualize_poll
import pandas as pd
import altair as alt
def test_visualize_poll():
"""Test visualize_poll on a dictionary input"""
sample_poll_obj = [
{
"text": "Important research!!!",
"duration": 1440,
"date": "2022-01-22T04:01:08.000Z",
"poll options": [
{"position": 1, "label": "Cookies", "votes": 29},
{"position": 2, "label": "Cupcakes", "votes": 5},
{"position": 3, "label": "Donuts", "votes": 24},
{"position": 4, "label": "Ice Cream", "votes": 25},
],
"user": "GregShahade",
"total": 83,
}
]
test_plot = visualize_poll(sample_poll_obj)
# test settings on altair plot
assert isinstance(
test_plot[0], alt.Chart
), "The type of the output mush be a altair chart"
assert (
test_plot[0].encoding.x.shorthand == "votes"
), "The votes should be mapped to the x axis"
assert (
test_plot[0].encoding.y.shorthand == "label"
), "The label should be mapped to the y axis"
assert test_plot[0].mark == "bar", "mark should be a bar"
assert (
test_plot[0].encoding.color.title == "Options"
), "Option should be the legend title"
# check if show_user=True, correct user name is printed
assert sample_poll_obj[0]["user"] == "GregShahade", "The user name is not correct."
# check if show_date=True, correct date and time is printed
assert (
pd.Timestamp(sample_poll_obj[0]["date"]).strftime("%Y-%m-%d %H:%M:%S")
== "2022-01-22 04:01:08"
), "Date and time is not correct."
# check if show_duration=True, correct duration is printed
assert sample_poll_obj[0]["duration"] / 60 == 24.0, "Duration is not correct."
# check if calculated total votes is equal to the input dict
df = pd.DataFrame(sample_poll_obj[0]["poll options"])
assert (
df["votes"].sum() == sample_poll_obj[0]["total"]
), "Total response is not correct."
| 35.152542
| 87
| 0.590646
| 276
| 2,074
| 4.336957
| 0.373188
| 0.05848
| 0.076023
| 0.05848
| 0.211362
| 0.086884
| 0.048454
| 0
| 0
| 0
| 0
| 0.041557
| 0.269045
| 2,074
| 58
| 88
| 35.758621
| 0.748021
| 0.144166
| 0
| 0.116279
| 0
| 0
| 0.334278
| 0.013598
| 0
| 0
| 0
| 0
| 0.209302
| 1
| 0.023256
| false
| 0
| 0.093023
| 0
| 0.116279
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27d78b89ba7b997214a4c7166893ac8b3158ac3f
| 38,343
|
py
|
Python
|
sgan/models.py
|
peaceminusones/Group-GAN-GCN
|
ff0abf90bb830729d082d1fa46e41c749c738895
|
[
"MIT"
] | 2
|
2021-05-25T09:10:15.000Z
|
2021-09-25T07:53:35.000Z
|
sgan/models.py
|
peaceminusones/Group-GAN-GCN
|
ff0abf90bb830729d082d1fa46e41c749c738895
|
[
"MIT"
] | null | null | null |
sgan/models.py
|
peaceminusones/Group-GAN-GCN
|
ff0abf90bb830729d082d1fa46e41c749c738895
|
[
"MIT"
] | null | null | null |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def make_mlp(dim_list, activation='relu', batch_norm=True, dropout=0):
# make_mlp builds a multi-layer fully connected network, choosing the activation as requested; dim_list lists the dimension of each layer
layers = []
for dim_in, dim_out in zip(dim_list[:-1], dim_list[1:]):
layers.append(nn.Linear(dim_in, dim_out))
if batch_norm:
layers.append(nn.BatchNorm1d(dim_out))
if activation == 'relu':
layers.append(nn.ReLU())
elif activation == 'leakyrelu':
layers.append(nn.LeakyReLU())
if dropout > 0:
layers.append(nn.Dropout(p=dropout))
return nn.Sequential(*layers)
def get_noise(shape, noise_type):
# get_noise generates noise of the requested type
if noise_type == 'gaussian':
return torch.randn(*shape).cuda()
elif noise_type == 'uniform':
return torch.rand(*shape).sub_(0.5).mul_(2.0).cuda()
raise ValueError('Unrecognized noise type "%s"' % noise_type)
class Encoder(nn.Module):
"""
Encoder is part of both TrajectoryGenerator and
TrajectoryDiscriminator
The network consists of a fully connected embedding layer followed by an LSTM.
"""
def __init__(
self, embedding_dim=64, h_dim=64, mlp_dim=1024, num_layers=1, dropout=0.0
):
super(Encoder, self).__init__()
self.mlp_dim = mlp_dim
self.h_dim = h_dim
self.embedding_dim = embedding_dim
self.num_layers = num_layers
# 2*16
self.spatial_embedding = nn.Linear(2, embedding_dim)
# input_size: 16
# hidden_size: 32
# num_layers: 1
self.encoder = nn.LSTM(embedding_dim, h_dim, num_layers, dropout=dropout)
def init_hidden(self, batch):
return (
torch.zeros(self.num_layers, batch, self.h_dim).cuda(),
torch.zeros(self.num_layers, batch, self.h_dim).cuda()
)
def forward(self, obs_traj):
"""
Inputs:
- obs_traj: Tensor of shape (obs_len, batch, 2)
The raw input is the relative displacement of everyone in the batch, i.e. each person's coordinate change from the previous frame to the current one.
It is fed through a 2x16 fully connected layer whose input has shape [obs_len*batch, 2] and whose output has shape [obs_len*batch, 16].
Output:
- final_h: Tensor of shape (self.num_layers, batch, self.h_dim)
"""
# Encode observed trajectory (batch is the total number of people across the batch_size sequences)
batch = obs_traj.size(1)
# Pass through the 2x16 fully connected layer: input shape [obs_len*batch, 2], output [obs_len*batch, 16]
# shape:
# "obs_traj": [obs_len,batch,2]
# "obs_traj.contiguous().view(-1, 2)": [obs_len*batch,2]
# "obs_traj_embedding": [obs_len*batch,16]
obs_traj_embedding = self.spatial_embedding(obs_traj.reshape(-1, 2))
# Reshape to 3-D to match the LSTM input format
# "obs_traj_embedding": [obs_len,batch,16]
obs_traj_embedding = obs_traj_embedding.view(-1, batch, self.embedding_dim)
# Initialize the LSTM hidden state (h_0, c_0)
state_tuple = self.init_hidden(batch)
# Run the LSTM: its input has shape [seq_len, batch, input_size], with (h_0, c_0) passed as the initial state
# Outputs: output, (h_n, c_n)
# output.shape: [seq_length, batch_size, hidden_size]
# output[-1] is equal to h_n
output, state = self.encoder(obs_traj_embedding, state_tuple)
# Record the final hidden state h_t as final_h
final_h = state[0]
return final_h
class Decoder(nn.Module):
"""Decoder is part of TrajectoryGenerator"""
def __init__(
self, seq_len, embedding_dim=64, h_dim=128, mlp_dim=1024, num_layers=1,
pool_every_timestep=True, dropout=0.0, bottleneck_dim=1024,
activation='relu', batch_norm=True, pooling_type='pool_net',
neighborhood_size=2.0, grid_size=8
):
super(Decoder, self).__init__()
self.seq_len = seq_len
self.mlp_dim = mlp_dim
self.h_dim = h_dim
self.embedding_dim = embedding_dim
self.pool_every_timestep = pool_every_timestep
# mlp [2,16]
self.spatial_embedding = nn.Linear(2, embedding_dim)
# lstm
# input_size: 16
# hidden_size: 32
# num_layers: 1
self.decoder = nn.LSTM(embedding_dim, h_dim, num_layers, dropout=dropout)
# mlp [32,2]
self.hidden2pos = nn.Linear(h_dim, 2)
if pool_every_timestep:
if pooling_type == 'pool_net':
self.pool_net = PoolHiddenNet(
embedding_dim=self.embedding_dim,
h_dim=self.h_dim,
mlp_dim=mlp_dim,
bottleneck_dim=bottleneck_dim,
activation=activation,
batch_norm=batch_norm,
dropout=dropout
)
mlp_dims = [h_dim + bottleneck_dim, mlp_dim, h_dim]
self.mlp = make_mlp(
mlp_dims,
activation=activation,
batch_norm=batch_norm,
dropout=dropout
)
def forward(self, last_pos, last_pos_rel, state_tuple, seq_start_end):
"""
Inputs:
- last_pos: Tensor of shape (batch, 2)
- last_pos_rel: Tensor of shape (batch, 2)
- state_tuple: (hh, ch) each tensor of shape (num_layers, batch, h_dim)
- seq_start_end: A list of tuples which delimit sequences within batch
Output:
- pred_traj: tensor of shape (self.seq_len, batch, 2)
"""
batch = last_pos.size(0)
pred_traj_fake_rel = []
decoder_input = self.spatial_embedding(last_pos_rel)
decoder_input = decoder_input.view(1, batch, self.embedding_dim)
for _ in range(self.seq_len):
output, state_tuple = self.decoder(decoder_input, state_tuple)
rel_pos = self.hidden2pos(output.view(-1, self.h_dim))
curr_pos = rel_pos + last_pos
if self.pool_every_timestep:
decoder_h = state_tuple[0]
pool_h = self.pool_net(decoder_h, seq_start_end, curr_pos)
decoder_h = torch.cat([decoder_h.view(-1, self.h_dim), pool_h], dim=1)
decoder_h = self.mlp(decoder_h)
decoder_h = torch.unsqueeze(decoder_h, 0)
state_tuple = (decoder_h, state_tuple[1])
embedding_input = rel_pos
decoder_input = self.spatial_embedding(embedding_input)
decoder_input = decoder_input.view(1, batch, self.embedding_dim)
pred_traj_fake_rel.append(rel_pos.view(batch, -1))
last_pos = curr_pos
pred_traj_fake_rel = torch.stack(pred_traj_fake_rel, dim=0)
return pred_traj_fake_rel, state_tuple[0]
"""
modified by zyl 2021/3/2
"""
class GraphAttentionLayer(nn.Module):
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
super(GraphAttentionLayer, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.concat = concat
self.W = nn.Parameter(torch.empty(size=(in_features, out_features)))
nn.init.xavier_uniform_(self.W.data, gain=1.414)
self.a = nn.Parameter(torch.empty(size=(2*out_features, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
self.leakyrelu = nn.LeakyReLU(self.alpha)
def forward(self, h, adj):
Wh = torch.mm(h, self.W)
a_input = self._prepare_attentional_mechanism_input(Wh)
e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2))
zero_vec = -9e15*torch.ones_like(e)
attention = torch.where(adj > 0, e, zero_vec)
attention = F.softmax(attention, dim=1)
attention = F.dropout(attention, self.dropout, training=self.training)
h_prime = torch.matmul(attention, Wh)
if self.concat:
return F.elu(h_prime)
else:
return h_prime
def _prepare_attentional_mechanism_input(self, Wh):
N = Wh.size()[0]
# Repeat each row N times consecutively along dim 0 (R1, R1, ..., R2, R2, ...)
Wh_repeated_in_chunks = Wh.repeat_interleave(N, dim=0)
# Tile the full set of rows N times (R1, R2, ..., R1, R2, ...)
Wh_repeated_alternating = Wh.repeat(N, 1)
# Concatenate along dim 1, giving an (N * N, 2 * out_features) matrix that pairs every row with every other row
all_combinations_matrix = torch.cat([Wh_repeated_in_chunks, Wh_repeated_alternating], dim=1)
return all_combinations_matrix.view(N, N, 2 * self.out_features)
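# --- Illustrative note (not from the original model): for N = 2 rows a, b,
# repeat_interleave yields [a, a, b, b] while repeat yields [a, b, a, b], so
# concatenating them pairs every row with every other row. Compare:
#   torch.arange(2).repeat_interleave(2)  ->  tensor([0, 0, 1, 1])
#   torch.arange(2).repeat(2)             ->  tensor([0, 1, 0, 1])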
class GAT(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):
super(GAT, self).__init__()
self.dropout = dropout
self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)]
for i, attention in enumerate(self.attentions):
self.add_module('attention_{}'.format(i), attention)
self.out_att = GraphAttentionLayer(nhid * nheads, nclass, dropout=dropout, alpha=alpha, concat=False)
def forward(self, x, adj):
# Dropout does not change the shape of x
x = F.dropout(x, self.dropout, training=self.training)
x = torch.cat([att(x, adj) for att in self.attentions], dim=1)
x = F.dropout(x, self.dropout, training=self.training)
x = F.elu(self.out_att(x, adj))
return F.log_softmax(x, dim=1)
class GATEncoder(nn.Module):
def __init__(self, n_units, n_heads, dropout, alpha):
super(GATEncoder, self).__init__()
self.gat_intra = GAT(40, 72, 16, dropout, alpha, n_heads)
self.gat_inter = GAT(16, 72, 16, dropout, alpha, n_heads)
self.out_embedding = nn.Linear(16*2, 24)
def normalize(self, adj, dim):
N = adj.size()
adj2 = torch.sum(adj, dim)  # sum over each row
norm = adj2.unsqueeze(1).float()  # add a trailing dimension
norm = norm.pow(-1)  # take the reciprocal
norm_adj = adj.mul(norm)  # element-wise multiply
return norm_adj
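# --- Illustrative note (not from the original model): normalize() turns a
# boolean adjacency mask into a row-stochastic matrix, e.g.
#   adj = torch.tensor([[1, 1, 0],
#                       [0, 1, 1],
#                       [0, 0, 1]])
#   self.normalize(adj, dim=1)  ->  [[0.5, 0.5, 0.0],
#                                    [0.0, 0.5, 0.5],
#                                    [0.0, 0.0, 1.0]]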
def forward(self, h_states, seq_start_end, end_pos, end_group):
graph_embeded_data = []
for _, (start, end) in enumerate(seq_start_end):
start = start.item()
end = end.item()
curr_state = h_states[start:end]
curr_end_group = end_group[start:end]
num_ped = end - start
eye_mtx = torch.eye(num_ped, device=end_group.device).bool()
A_g = curr_end_group.repeat(1, num_ped)
B_g = curr_end_group.transpose(1, 0).repeat(num_ped, 1)
M_intra = (A_g == B_g) & (A_g != 0) | eye_mtx
A_intra = self.normalize(M_intra, dim=1).cuda()
curr_gat_state_intra = self.gat_intra(curr_state, A_intra)
R_intra_unique = torch.unique(M_intra, sorted=False, dim=0)
n_group = R_intra_unique.size()[0]
R_intra_unique.unsqueeze_(1)
R_intra = []
for i in range(n_group-1, -1, -1):
R_intra.append(R_intra_unique[i])
R_intra = torch.cat(R_intra, dim=0)
R_intra = self.normalize(R_intra, dim=1).cuda()
curr_gat_group_state_in = torch.matmul(R_intra, curr_gat_state_intra)
M_inter = torch.ones((n_group, n_group), device=end_group.device).bool()
A_inter = self.normalize(M_inter, dim=1).cuda()
curr_gat_group_state_out = self.gat_inter(curr_gat_group_state_in, A_inter)
curr_gat_state_inter = torch.matmul(R_intra.T, curr_gat_group_state_out)
curr_gat_state = torch.cat([curr_gat_state_intra, curr_gat_state_inter], dim=1)
curr_gat_state = self.out_embedding(curr_gat_state)
graph_embeded_data.append(curr_gat_state)
graph_embeded_data = torch.cat(graph_embeded_data, dim=0)
return graph_embeded_data
# class BatchMultiHeadGraphAttention(nn.Module):
# """
# graph attention layer (GAL)
# """
# def __init__(self, n_head, f_in, f_out, attn_dropout, bias=True):
# super(BatchMultiHeadGraphAttention, self).__init__()
# self.n_head = n_head
# self.f_in = f_in
# self.f_out = f_out
# self.w = nn.Parameter(torch.Tensor(n_head, f_in, f_out))
# self.a_src = nn.Parameter(torch.Tensor(n_head, f_out, 1))
# self.a_dst = nn.Parameter(torch.Tensor(n_head, f_out, 1))
# self.leaky_relu = nn.LeakyReLU(negative_slope=0.2)
# self.softmax = nn.Softmax(dim=-1)
# self.dropout = nn.Dropout(attn_dropout)
# if bias:
# self.bias = nn.Parameter(torch.Tensor(f_out))
# nn.init.constant_(self.bias, 0)
# else:
# self.register_parameter("bias", None)
# nn.init.xavier_uniform_(self.w, gain=1.414)
# nn.init.xavier_uniform_(self.a_src, gain=1.414)
# nn.init.xavier_uniform_(self.a_dst, gain=1.414)
# def forward(self, h, adj):
# bs, n = h.size()[:2]
# h_prime = torch.matmul(h.unsqueeze(1), self.w)
# attn_src = torch.matmul(h_prime, self.a_src)
# attn_dst = torch.matmul(h_prime, self.a_dst)
# attn = attn_src.expand(-1, -1, -1, n) + attn_dst.expand(-1, -1, -1, n).permute(0, 1, 3, 2)
# attn = self.leaky_relu(attn)
# attn = self.softmax(attn)
# attn = self.dropout(attn)
# attn = torch.matmul(torch.squeeze(attn, dim=0), adj)
# attn = torch.unsqueeze(attn, 0)
# output = torch.matmul(attn, h_prime)
# if self.bias is not None:
# return output + self.bias, attn
# else:
# return output, attn
# def __repr__(self):
# return (
# self.__class__.__name__
# + " ("
# + str(self.n_head)
# + " -> "
# + str(self.f_in)
# + " -> "
# + str(self.f_out)
# + ")"
# )
# """
# modified by zyl 2021/2/6 graph attention network
# """
# class GAT(nn.Module):
# def __init__(self, n_units, n_heads, dropout=0.2, alpha=0.2):
# super(GAT, self).__init__()
# self.n_layer = len(n_units) - 1
# self.dropout = dropout
# self.layer_stack = nn.ModuleList()
# for i in range(self.n_layer):
# f_in = n_units[i] * n_heads[i - 1] if i else n_units[i]
# self.layer_stack.append(
# BatchMultiHeadGraphAttention(
# n_heads[i], f_in=f_in, f_out=n_units[i + 1], attn_dropout=dropout
# )
# )
# self.norm_list = [
# torch.nn.InstanceNorm1d(32).cuda(),
# torch.nn.InstanceNorm1d(64).cuda(),
# ]
# def forward(self, x, adj):
# bs, n = x.size()[:2]
# for i, gat_layer in enumerate(self.layer_stack):
# # x = self.norm_list[i](x.permute(0, 2, 1)).permute(0, 2, 1)
# x, attn = gat_layer(x, adj)
# if i + 1 == self.n_layer:
# x = x.squeeze(dim=1)
# else:
# x = F.elu(x.contiguous().view(bs, n, -1))
# x = F.dropout(x, self.dropout, training=self.training)
# else:
# return x
# """
# modified by zyl 2021/2/6 graph attention network encoder
# """
# class GATEncoder(nn.Module):
# def __init__(self, n_units, n_heads, dropout, alpha):
# super(GATEncoder, self).__init__()
# self.gat_intra = GAT([40,72,16], n_heads, dropout, alpha)
# self.gat_inter = GAT([16,72,16], n_heads, dropout, alpha)
# self.out_embedding = nn.Linear(16*2, 24)
# def normalize(self, adj, dim):
# N = adj.size()
# adj2 = torch.sum(adj, dim) # sum over each row
# norm = adj2.unsqueeze(1).float() # add a trailing dimension
# norm = norm.pow(-1) # take the reciprocal
# norm_adj = adj.mul(norm) # element-wise multiply
# return norm_adj
# def forward(self, obs_traj_embedding, seq_start_end, end_pos, end_group):
# graph_embeded_data = []
# for start, end in seq_start_end.data:
# curr_seq_embedding_traj = obs_traj_embedding[:, start:end, :]
# h_states = torch.squeeze(obs_traj_embedding, dim=0)
# num_ped = end - start
# curr_end_group = end_group[start:end]
# eye_mtx = torch.eye(num_ped, device=end_group.device).bool()
# A_g = curr_end_group.repeat(1, num_ped)
# B_g = curr_end_group.transpose(1, 0).repeat(num_ped, 1)
# M_intra = (A_g == B_g) & (A_g != 0) | eye_mtx
# A_intra = self.normalize(M_intra, dim=1).cuda()
# curr_seq_graph_intra = self.gat_intra(curr_seq_embedding_traj, A_intra)
# # print("curr_seq_embedding_traj:", curr_seq_embedding_traj.size())
# # print("curr_seq_graph_intra:", curr_seq_graph_intra.size())
# R_intra_unique = torch.unique(M_intra, sorted=False, dim=0)
# n_group = R_intra_unique.size()[0]
# R_intra_unique.unsqueeze_(1)
# R_intra = []
# for i in range(n_group-1, -1, -1):
# R_intra.append(R_intra_unique[i])
# R_intra = torch.cat(R_intra, dim=0)
# R_intra = self.normalize(R_intra, dim=1).cuda()
# curr_seq_graph_state_in = torch.matmul(R_intra, torch.squeeze(curr_seq_graph_intra, dim=0))
# curr_seq_graph_state_in = torch.unsqueeze(curr_seq_graph_state_in, 0)
# M_inter = torch.ones((n_group, n_group), device=end_group.device).bool()
# A_inter = self.normalize(M_inter, dim=1).cuda()
# curr_seq_graph_out = self.gat_inter(curr_seq_graph_state_in, A_inter)
# curr_seq_graph_inter = torch.matmul(R_intra.T, torch.squeeze(curr_seq_graph_out, dim=0))
# curr_seq_graph_inter = torch.unsqueeze(curr_seq_graph_inter, 0)
# curr_gat_state = torch.cat([curr_seq_graph_intra, curr_seq_graph_inter],dim=2)
# curr_gat_state = torch.squeeze(curr_gat_state, dim=0)
# curr_gat_state = self.out_embedding(curr_gat_state)
# curr_gat_state = torch.unsqueeze(curr_gat_state, 0)
# graph_embeded_data.append(curr_gat_state)
# graph_embeded_data = torch.cat(graph_embeded_data, dim=1)
# return graph_embeded_data
class PoolHiddenNet(nn.Module):
"""Pooling module as proposed in our paper"""
def __init__(
self, embedding_dim=64, h_dim=64, mlp_dim=1024, bottleneck_dim=1024,
activation='relu', batch_norm=True, dropout=0.0
):
super(PoolHiddenNet, self).__init__()
self.mlp_dim = mlp_dim
self.h_dim = h_dim
self.bottleneck_dim = bottleneck_dim
self.embedding_dim = embedding_dim # 16
mlp_pre_dim = embedding_dim + h_dim
mlp_pre_pool_dims = [mlp_pre_dim, 512, bottleneck_dim] # mlp_pre_pool_dims: [48,512,8]
# mlp: 2*16
self.spatial_embedding = nn.Linear(2, embedding_dim)
# mlp: 48*512*8
self.mlp_pre_pool = make_mlp(
mlp_pre_pool_dims,
activation=activation,
batch_norm=batch_norm,
dropout=dropout)
def repeat(self, tensor, num_reps):
"""
Inputs:
-tensor: 2D tensor of any shape
-num_reps: Number of times to repeat each row
Outputs:
-repeat_tensor: Repeat each row such that: R1, R1, R2, R2
"""
col_len = tensor.size(1)
tensor = tensor.unsqueeze(dim=1).repeat(1, num_reps, 1)
tensor = tensor.view(-1, col_len)
return tensor
def forward(self, h_states, seq_start_end, end_pos):
"""
Inputs:
- h_states: Tensor of shape (num_layers, batch, h_dim), i.e. the encoder's return value final_h
- seq_start_end: A list of tuples which delimit sequences within batch
- end_pos: Tensor of shape (batch, 2)
Output:
- pool_h: Tensor of shape (batch, bottleneck_dim)
"""
pool_h = []
for _, (start, end) in enumerate(seq_start_end):
start = start.item()
end = end.item()
num_ped = end - start
# print("num_ped:", num_ped)
# print("h_states:", h_states.shape)
# h_states == final_h (i.e. h_states here is the LSTM output)
# h_states ([1,batch,32]) -> curr_hidden ([N,32])
curr_hidden = h_states.view(-1, self.h_dim)[start:end]
# print("curr_hidden: ", curr_hidden.shape)
# Repeat -> H1, H2, H1, H2
# curr_hidden([N,32]) -> curr_hidden_1([N*N,32])
curr_hidden_1 = curr_hidden.repeat(num_ped, 1)
# print("curr_hidden_1: ", curr_hidden_1.shape)
# Repeat position -> P1, P2, P1, P2
curr_end_pos = end_pos[start:end]
curr_end_pos_1 = curr_end_pos.repeat(num_ped, 1)
# Repeat position -> P1, P1, P2, P2
curr_end_pos_2 = self.repeat(curr_end_pos, num_ped)
# curr_rel_pos: [N*N,2]
curr_rel_pos = curr_end_pos_1 - curr_end_pos_2
# self.spatial_embedding(mlp): 2*16
# curr_rel_embedding: [N*N,16]
curr_rel_embedding = self.spatial_embedding(curr_rel_pos)
# mlp_h_input: [N*N,48]
mlp_h_input = torch.cat([curr_rel_embedding, curr_hidden_1], dim=1)
# curr_pool_h: [N*N,8]
curr_pool_h = self.mlp_pre_pool(mlp_h_input)
# curr_pool_h: [N,8]
# print(curr_pool_h.view(num_ped, num_ped, -1)[0])
curr_pool_h = curr_pool_h.view(num_ped, num_ped, -1).max(1)[0] # [N,N,8] -->[n,8]
# print(curr_pool_h)
# print("curr_pool_h:", curr_pool_h.shape)
pool_h.append(curr_pool_h)
# pool_h: [batch,8]: a pooled tensor Pi for each person
pool_h = torch.cat(pool_h, dim=0)
# print("pool_h:", pool_h.shape)
return pool_h
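# --- Illustrative note (not from the original model): the pairwise
# bookkeeping above, for N = 2 pedestrians at positions p0, p1:
#   curr_end_pos_1 = [p0, p1, p0, p1]   # .repeat(num_ped, 1)
#   curr_end_pos_2 = [p0, p0, p1, p1]   # self.repeat(...)
#   curr_rel_pos   = every pairwise difference pi - pj, shape [N*N, 2]
# after which max(1) takes, per pedestrian, a per-feature maximum over
# that pedestrian's N interaction embeddings.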
class GCN(nn.Module):
"""GCN module"""
def __init__(self, input_dim=48, hidden_dim=72, out_dim=8, gcn_layers=2):
super(GCN, self).__init__()
self.X_dim = input_dim
self.hidden_dim = hidden_dim
self.out_dim = out_dim
self.gcn_layers = gcn_layers
# graph convolution layer
self.W = torch.nn.ParameterList()
for i in range(self.gcn_layers):
if i == 0:
self.W.append(nn.Parameter(torch.randn(self.X_dim, self.hidden_dim)))
elif i == self.gcn_layers-1:
self.W.append(nn.Parameter(torch.randn(self.hidden_dim, self.out_dim)))
else:
self.W.append(nn.Parameter(torch.randn(self.hidden_dim, self.hidden_dim)))
def forward(self, A, X):
next_H = H = X
for i in range(self.gcn_layers):
next_H = F.relu(torch.matmul(torch.matmul(A, H), self.W[i]))
H = next_H
feat = H
return feat
class GCNModule(nn.Module):
"""group information aggregation with GCN layer"""
def __init__(
self, input_dim=40, hidden_dim=72, out_dim=16, gcn_layers=2, final_dim=24
):
super(GCNModule, self).__init__()
# GCN_intra: 40*72*16
self.gcn_intra = GCN(
input_dim=input_dim,
hidden_dim=hidden_dim,
out_dim=out_dim,
gcn_layers=gcn_layers)
# GCN_inter: 16*72*16
self.gcn_inter = GCN(
input_dim=16,
hidden_dim=hidden_dim,
out_dim=out_dim,
gcn_layers=gcn_layers)
# mlp:16*8
self.out_embedding = nn.Linear(out_dim*2, final_dim)
def normalize(self, adj, dim):
N = adj.size()
adj2 = torch.sum(adj, dim)  # sum over each row
norm = adj2.unsqueeze(1).float()  # add a trailing dimension
norm = norm.pow(-1)  # take the reciprocal
norm_adj = adj.mul(norm)  # element-wise multiply
return norm_adj
def repeat(self, tensor, num_reps):
"""
Inputs:
-tensor: 2D tensor of any shape
-num_reps: Number of times to repeat each row
Outputs:
-repeat_tensor: Repeat each row such that: R1, R1, R2, R2
"""
col_len = tensor.size(1)
tensor = tensor.unsqueeze(dim=1).repeat(1, num_reps, 1)
tensor = tensor.view(-1, col_len)
return tensor
def forward(self, h_states, seq_start_end, end_pos, end_group):
"""
Inputs:
- h_states: Tensor of shape (batch, h_dim), i.e. the output of the encoder plus the pooling net
- seq_start_end: A list of tuples which delimit sequences within batch
- end_pos: Tensor of shape (batch, 2)
- end_group: group labels at the last time step (t_obs); shape: (batch, 1)
Output:
- gcn_aggre: Tensor of shape (batch, bottleneck_dim)
"""
gcn_aggre = []
for _, (start, end) in enumerate(seq_start_end):
start = start.item()
end = end.item()
num_ped = end - start # num_ped: number of pedestrians in the scene
# curr_state: [N,40]
curr_state = h_states[start:end]
# get the modulated adjacency matrix arrays
# Generate masks from the group labels
# labels can only be used to distinguish groups at a timestep.
# var: end_group; def: group labels at the last time step (t_obs); shape: (batch, 1)
# clip one observation-prediction window out of multiple windows.
curr_end_group = end_group[start:end]
# get the coherency adjacency, dimension: (N, N)
# coherency mask is shared by all pedestrians in the scene
eye_mtx = torch.eye(num_ped, device=end_group.device).bool()
A_g = curr_end_group.repeat(1, num_ped)
B_g = curr_end_group.transpose(1, 0).repeat(num_ped, 1)
# M_intra: [N,N]
M_intra = (A_g == B_g) & (A_g != 0) | eye_mtx
# get the modulated normalized adjacency matrix arrays
# normalized M_intra: [N,N]
A_intra = self.normalize(M_intra, dim=1).cuda()
"""gcn_intra"""
# curr_gcn_state_intra: [N,16] (GCN:[40,72,16])
curr_gcn_state_intra = self.gcn_intra(A_intra, curr_state)
"""GPool =================================================================="""
# M_intra: [N,N]
# R_intra_unique: [M,N]
R_intra_unique = torch.unique(M_intra, sorted=False, dim=0)
# number of groups
n_group = R_intra_unique.size()[0]
R_intra_unique.unsqueeze_(1)  # add a dimension
# Reverse the rows of R_intra_unique (bottom to top)
R_intra = []
for i in range(n_group-1, -1, -1):
R_intra.append(R_intra_unique[i])
R_intra = torch.cat(R_intra, dim=0)
# Normalize
R_intra = self.normalize(R_intra, dim=1).cuda()
# Aggregate the per-group part: [M,N] x [N,16]
# curr_gcn_group_state: [M,16]
curr_gcn_group_state_in = torch.matmul(R_intra, curr_gcn_state_intra)
"""=========================================================================="""
"""gcn_inter"""
# M_inter: [M,M]
M_inter = torch.ones((n_group, n_group), device=end_group.device).bool()
# normalize
A_inter = self.normalize(M_inter, dim=1).cuda()
# M_inter_norm: [M,M]
# curr_gcn_group_state_in: [M,16] (GCN:[16,72,16])
# curr_gcn_group_state_out: [M,16]
curr_gcn_group_state_out = self.gcn_inter(A_inter, curr_gcn_group_state_in)
"""GUnpool================================================================="""
# [N,M]*[M,16]
# curr_gcn_state_inter: [N,16]
curr_gcn_state_inter = torch.matmul(R_intra.T, curr_gcn_group_state_out)
"""========================================================================="""
# curr_gcn_state: [N,32]
curr_gcn_state = torch.cat([curr_gcn_state_intra, curr_gcn_state_inter], dim=1)
# curr_gcn_state: [N,24]
curr_gcn_state = self.out_embedding(curr_gcn_state)
gcn_aggre.append(curr_gcn_state)
# gcn_aggre: [batch,24]:
gcn_aggre = torch.cat(gcn_aggre, dim=0)
return gcn_aggre
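# --- Illustrative note (not from the original model): how M_intra falls out
# of the group labels. For curr_end_group = [[1], [1], [2], [0]]:
#   A_g == B_g marks same-label pairs, (A_g != 0) removes the "no group"
#   label 0, and eye_mtx restores self-loops, giving
#     M_intra = [[1, 1, 0, 0],
#                [1, 1, 0, 0],
#                [0, 0, 1, 0],
#                [0, 0, 0, 1]]
#   so pedestrians 0 and 1 exchange intra-group messages while 2 and 3
#   only see themselves.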
class TrajectoryGenerator(nn.Module):
def __init__(
self, obs_len, pred_len, embedding_dim=64, encoder_h_dim=64,
decoder_h_dim=128, mlp_dim=1024, num_layers=1, noise_dim=(0, ),
noise_type='gaussian', noise_mix_type='ped', pooling_type=None,
pool_every_timestep=True, dropout=0.0, bottleneck_dim=1024,
activation='relu', batch_norm=True, neighborhood_size=2.0, grid_size=8,
n_units=[32,16,32], n_heads=4, dropout1=0, alpha=0.2,
):
super(TrajectoryGenerator, self).__init__()
if pooling_type and pooling_type.lower() == 'none':
pooling_type = None
self.obs_len = obs_len
self.pred_len = pred_len
self.mlp_dim = mlp_dim
self.encoder_h_dim = encoder_h_dim
self.decoder_h_dim = decoder_h_dim
self.embedding_dim = embedding_dim
self.noise_dim = noise_dim
self.num_layers = num_layers
self.noise_type = noise_type
self.noise_mix_type = noise_mix_type
self.pooling_type = pooling_type
self.noise_first_dim = 0
self.pool_every_timestep = pool_every_timestep
self.bottleneck_dim = bottleneck_dim
self.encoder = Encoder(
embedding_dim=embedding_dim,
h_dim=encoder_h_dim,
mlp_dim=mlp_dim,
num_layers=num_layers,
dropout=dropout
)
self.decoder = Decoder(
pred_len,
embedding_dim=embedding_dim,
h_dim=decoder_h_dim,
mlp_dim=mlp_dim,
num_layers=num_layers,
pool_every_timestep=pool_every_timestep,
dropout=dropout,
bottleneck_dim=bottleneck_dim,
activation=activation,
batch_norm=batch_norm,
pooling_type=pooling_type,
grid_size=grid_size,
neighborhood_size=neighborhood_size
)
if pooling_type == 'pool_net':
self.pool_net = PoolHiddenNet(
embedding_dim=self.embedding_dim,
h_dim=encoder_h_dim,
mlp_dim=mlp_dim,
bottleneck_dim=bottleneck_dim,
activation=activation,
batch_norm=batch_norm
)
if self.noise_dim is None:
self.noise_dim = None
elif self.noise_dim[0] == 0:
self.noise_dim = None
else:
self.noise_first_dim = noise_dim[0]
# gatencoder
self.gatencoder = GATEncoder(
n_units=n_units, n_heads=n_heads, dropout=dropout1, alpha=alpha
)
# Decoder Hidden
if pooling_type:
input_dim = encoder_h_dim + bottleneck_dim
else:
input_dim = encoder_h_dim
# if self.mlp_decoder_needed():
# mlp_decoder_context_dims = [input_dim, mlp_dim, decoder_h_dim - self.noise_first_dim]
# self.mlp_decoder_context = make_mlp(
# mlp_decoder_context_dims,
# activation=activation,
# batch_norm=batch_norm,
# dropout=dropout
# )
self.gcn_module = GCNModule(
input_dim=input_dim,
hidden_dim=72,
out_dim=16,
gcn_layers=2,
final_dim=decoder_h_dim - self.noise_first_dim
)
def add_noise(self, _input, seq_start_end, user_noise=None):
"""
Inputs:
- _input: Tensor of shape (_, decoder_h_dim - noise_first_dim)
- seq_start_end: A list of tuples which delimit sequences within batch.
- user_noise: Generally used for inference when you want to see
relation between different types of noise and outputs.
Outputs:
- decoder_h: Tensor of shape (_, decoder_h_dim)
"""
if not self.noise_dim:
return _input
if self.noise_mix_type == 'global':
noise_shape = (seq_start_end.size(0), ) + self.noise_dim
else:
noise_shape = (_input.size(0), ) + self.noise_dim
if user_noise is not None:
z_decoder = user_noise
else:
z_decoder = get_noise(noise_shape, self.noise_type)
if self.noise_mix_type == 'global':
_list = []
for idx, (start, end) in enumerate(seq_start_end):
start = start.item()
end = end.item()
_vec = z_decoder[idx].view(1, -1)
_to_cat = _vec.repeat(end - start, 1)
_list.append(torch.cat([_input[start:end], _to_cat], dim=1))
decoder_h = torch.cat(_list, dim=0)
return decoder_h
decoder_h = torch.cat([_input, z_decoder], dim=1)
return decoder_h
def mlp_decoder_needed(self):
if (
self.noise_dim or self.pooling_type or
self.encoder_h_dim != self.decoder_h_dim
):
return True
else:
return False
# modified by zyl 2021/1/12
def forward(self, obs_traj, obs_traj_rel, seq_start_end, obs_traj_g, user_noise=None):
"""
Inputs:
- obs_traj: Tensor of shape (obs_len, batch, 2)
- obs_traj_rel: Tensor of shape (obs_len, batch, 2)
- seq_start_end: A list of tuples which delimit sequences within batch.
- user_noise: Generally used for inference when you want to see
relation between different types of noise and outputs.
Output:
- pred_traj_rel: Tensor of shape (self.pred_len, batch, 2)
"""
batch = obs_traj_rel.size(1)
# Encode seq
final_encoder_h = self.encoder(obs_traj_rel)
# Pool States
if self.pooling_type:
end_pos = obs_traj[-1, :, :]
pool_h = self.pool_net(final_encoder_h, seq_start_end, end_pos)
# Construct input hidden states for decoder
# final_encoder_h: [batch, 32]
# pool_h: [batch, 8]
# mlp_decoder_context_input: [batch, 40]
mlp_decoder_context_input = torch.cat([final_encoder_h.view(-1, self.encoder_h_dim), pool_h], dim=1)
else:
mlp_decoder_context_input = final_encoder_h.view(-1, self.encoder_h_dim)
# end_pos = obs_traj[-1, :, :]
# end_group = obs_traj_g[-1, :, :]
# mlp_decoder_context_input = torch.unsqueeze(mlp_decoder_context_input, 0)
# mlp_decoder_context_input = self.gatencoder(mlp_decoder_context_input, seq_start_end, end_pos, end_group)
# mlp_decoder_context_input = torch.squeeze(mlp_decoder_context_input, dim=0)
# Add Noise
if self.mlp_decoder_needed():
# # noise_input = self.mlp_decoder_context(mlp_decoder_context_input)
# end_pos = obs_traj[-1, :, :]
# # modified by zyl 2021/1/12 9:56
# end_group = obs_traj_g[-1, :, :]
# noise_input = self.gcn_module(mlp_decoder_context_input, seq_start_end, end_pos, end_group)
end_pos = obs_traj[-1, :, :]
end_group = obs_traj_g[-1, :, :]
noise_input = self.gatencoder(mlp_decoder_context_input, seq_start_end, end_pos, end_group)
else:
noise_input = mlp_decoder_context_input
decoder_h = self.add_noise(noise_input, seq_start_end, user_noise=user_noise)
decoder_h = torch.unsqueeze(decoder_h, 0)
decoder_c = torch.zeros(self.num_layers, batch, self.decoder_h_dim).cuda()
state_tuple = (decoder_h, decoder_c)
last_pos = obs_traj[-1]
last_pos_rel = obs_traj_rel[-1]
# Predict Trajectory
decoder_out = self.decoder(
last_pos,
last_pos_rel,
state_tuple,
seq_start_end,
)
pred_traj_fake_rel, final_decoder_h = decoder_out
return pred_traj_fake_rel
class TrajectoryDiscriminator(nn.Module):
def __init__(
self, obs_len, pred_len, embedding_dim=64, h_dim=64, mlp_dim=1024,
num_layers=1, activation='relu', batch_norm=True, dropout=0.0,
d_type='local'
):
super(TrajectoryDiscriminator, self).__init__()
self.obs_len = obs_len
self.pred_len = pred_len
self.seq_len = obs_len + pred_len
# self.mlp_dim = mlp_dim
self.h_dim = h_dim
self.d_type = d_type
self.encoder = Encoder(
embedding_dim=embedding_dim, # 16
h_dim=h_dim, # 48
mlp_dim=mlp_dim, # 64
num_layers=num_layers,
dropout=dropout
)
if d_type == 'global':
mlp_pool_dims = [h_dim + embedding_dim, mlp_dim, h_dim]
self.pool_net = PoolHiddenNet(
embedding_dim=embedding_dim,
h_dim=h_dim,
mlp_dim=mlp_pool_dims,
bottleneck_dim=h_dim,
activation=activation,
batch_norm=batch_norm
)
real_classifier_dims = [h_dim, mlp_dim, 1]
self.real_classifier = make_mlp(
real_classifier_dims,
activation=activation,
batch_norm=batch_norm,
dropout=dropout
)
def forward(self, traj, traj_rel, seq_start_end=None):
"""
Inputs:
- traj: Tensor of shape (obs_len + pred_len, batch, 2)
- traj_rel: Tensor of shape (obs_len + pred_len, batch, 2)
- seq_start_end: A list of tuples which delimit sequences within batch
Output:
- scores: Tensor of shape (batch,) with real/fake scores
"""
final_h = self.encoder(traj_rel)
# Note: In case of 'global' option we are using start_pos as opposed to
# end_pos. The intuition being that the hidden state has the whole
# trajectory, and the relative position at the start, when combined with
# trajectory information, should help in discriminative behavior.
if self.d_type == 'local':
classifier_input = final_h.squeeze()
else:
classifier_input = self.pool_net(final_h.squeeze(), seq_start_end, traj[0])
scores = self.real_classifier(classifier_input)
return scores
| 38.652218
| 126
| 0.586235
| 5,098
| 38,343
| 4.096116
| 0.082581
| 0.012068
| 0.01475
| 0.013696
| 0.552916
| 0.466622
| 0.410066
| 0.366153
| 0.345896
| 0.314146
| 0
| 0.023116
| 0.300498
| 38,343
| 991
| 127
| 38.691221
| 0.755453
| 0.335002
| 0
| 0.39
| 0
| 0
| 0.006423
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06
| false
| 0
| 0.008
| 0.002
| 0.138
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27da1fb06b835a7c7c1c2845d17975f0ff1c9b74
| 2,940
|
py
|
Python
|
pylons-emlo/emlo/workspace/indexing/src/conversionhelper.py
|
culturesofknowledge/emlo-server
|
8a88ca98a5211086195793e4bed5960550638936
|
[
"MIT"
] | null | null | null |
pylons-emlo/emlo/workspace/indexing/src/conversionhelper.py
|
culturesofknowledge/emlo-server
|
8a88ca98a5211086195793e4bed5960550638936
|
[
"MIT"
] | null | null | null |
pylons-emlo/emlo/workspace/indexing/src/conversionhelper.py
|
culturesofknowledge/emlo-server
|
8a88ca98a5211086195793e4bed5960550638936
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Created on 24 Aug 2010
@author: Matthew Wilcoxson
functions convert from one value to another in the form:
def conversion(value):
#do something
return new_value
'''
import time
def convert_to_rdf_date(value):
date_check = value
# rdf uses format '1651-12-31T00:00:00Z' or '1651-12-31T00:00:00.999Z'
# Recognises dates in the formats:
# * 'YYYY-M-D' to 'YYYY-MM-DD'
# * 'YYYY-MM-DD HH:MM:SS'
# * 'YYYY-MM-DD HH:MM:SS.M' to 'YYYY-MM-DD HH:MM:SS.MMMMMMM'
d = None
date_length = len( date_check )
if 8 <= date_length <= 10 :
d = time.strptime( date_check, '%Y-%m-%d')
elif date_length == 19 :
d = time.strptime( date_check, '%Y-%m-%d %H:%M:%S')
elif 20 <= date_length <= 26 :
d = time.strptime( date_check[:23], '%Y-%m-%d %H:%M:%S.%f')
if d is None:
raise SyntaxError( "Value '" + value + "' cannot be converted to a date")
# Annoyingly time.strftime does not cope with years less than 1900, so I'm forced to use this:
new_value = "%(year)d-%(month)02d-%(day)02dT%(hour)02d:%(minute)02d:%(second)02dZ" % \
{ 'year':d.tm_year, 'month':d.tm_mon, 'day':d.tm_mday, 'hour':d.tm_hour, 'minute':d.tm_min, 'second':d.tm_sec }
return new_value
def convert_to_solr_date(value):
# Just use rdf one!
return convert_to_rdf_date(value)
def convert_to_rdf_boolean( value ):
value = value.lower()
if value == '1' or value == 'y' or value == 'true' :
new_value = 'true'
elif value == '0' or value == 'n' or value == 'false' :
new_value = 'false'
else:
raise SyntaxError( "Value '" + value + "' can not be converted to a boolean")
return new_value
def convert_to_solr_boolean(value):
# Just use rdf one!
return convert_to_rdf_boolean(value)
def convert_people_gender( value ):
valuelow = value.lower()
if valuelow == 'male' or valuelow == 'm' or valuelow == 'man' or valuelow == 'men':
new_value = "male"
elif valuelow == 'female' or valuelow == 'f' or valuelow == 'woman' or valuelow == 'women':
new_value = "female"
else:
raise SyntaxError( "Value '" + value + "' can not be converted to a gender" )
return new_value
def convert_to_local_url( value ) :
value = value.replace( 'http://sers018.sers.ox.ac.uk/history/cofk/union.php?iwork_id=', '/profile?iwork_id=' )
value = value.replace( 'http://sers018.sers.ox.ac.uk/history/cofk/selden_end.php?iwork_id=', '/profile?iwork_id=' )
return value
def convert_manifestation_type( value ):
if value == 'Scribal copy' :
return "Manuscript copy"
return value
def convert_manifestation_opened( value ):
if value == 'o' :
return "Opened"
elif value == 'p' :
return "Partially Opened"
elif value == 'u' :
return "Unopened"
return "Unknown:"+value
| 30
| 119
| 0.611905
| 430
| 2,940
| 4.053488
| 0.348837
| 0.041308
| 0.060241
| 0.039013
| 0.405622
| 0.316122
| 0.244406
| 0.209983
| 0.181297
| 0.139989
| 0
| 0.03181
| 0.240816
| 2,940
| 98
| 120
| 30
| 0.749104
| 0.182313
| 0
| 0.12963
| 0
| 0.055556
| 0.231896
| 0.028464
| 0
| 0
| 0
| 0
| 0
| 1
| 0.148148
| false
| 0
| 0.018519
| 0.037037
| 0.388889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27dfaf52615924607a73e76ca9bec8a17c8c3880
| 11,305
|
py
|
Python
|
estimate.py
|
DS3Lab/feebee
|
eb210d07a7f9956ca2d0681ccf446330c8427a8b
|
[
"Apache-2.0"
] | 1
|
2022-03-24T06:15:37.000Z
|
2022-03-24T06:15:37.000Z
|
estimate.py
|
DS3Lab/feebee
|
eb210d07a7f9956ca2d0681ccf446330c8427a8b
|
[
"Apache-2.0"
] | null | null | null |
estimate.py
|
DS3Lab/feebee
|
eb210d07a7f9956ca2d0681ccf446330c8427a8b
|
[
"Apache-2.0"
] | 1
|
2021-12-20T12:11:55.000Z
|
2021-12-20T12:11:55.000Z
|
from absl import app
from absl import flags
from absl import logging
import csv
import importlib
import numpy as np
import os.path as path
import random
from sklearn.model_selection import train_test_split
import time
from transformations.reader.matrix import test_argument_and_file, load_and_log
import transformations.label_noise as label_noise
import methods.knn as knn
import methods.knn_extrapolate as knn_extrapolate
import methods.ghp as ghp
import methods.kde as kde
import methods.onenn as onenn
import methods.lr_model as lr_model
FLAGS = flags.FLAGS
flags.DEFINE_string("path", ".", "Path to the matrices directory")
flags.DEFINE_string("features_train", None, "Name of the train features numpy matrix exported file (npy)")
flags.DEFINE_string("features_test", None, "Name of the test features numpy matrix exported file (npy)")
flags.DEFINE_string("labels_train", None, "Name of the train labels numpy matrix exported file (npy)")
flags.DEFINE_string("labels_test", None, "Name of the test labels numpy matrix exported file (npy)")
flags.DEFINE_list("noise_levels", [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], "Run at different noise levels")
flags.DEFINE_integer("noise_runs", 5, "Number of runs for different noise levels")
flags.DEFINE_string("output_file", None, "File to write the output in CSV format (including headers)")
flags.DEFINE_bool("output_overwrite", True, "Writes (if True) or appends (if False) to the specified output file if any")
flags.DEFINE_enum("method", None, ["knn", "knn_loo", "knn_extrapolate", "ghp", "kde_knn_loo", "kde", "onenn", "lr_model"], "Method to estimate the bayes error (results in either 1 value or a lower and upper bound)")
def _get_csv_row(variant, run, samples, noise, results, time):
return {'method': FLAGS.method,
'variant': variant,
'run': run,
'samples': samples,
'noise': noise,
'results': results,
'time': time}
def _write_result(rows):
writeheader = False
if FLAGS.output_overwrite or not path.exists(FLAGS.output_file):
writeheader = True
with open(FLAGS.output_file, mode='w+' if FLAGS.output_overwrite else 'a+') as f:
fieldnames = ['method', 'variant', 'run', 'samples', 'noise', 'results', 'time']
writer = csv.DictWriter(f, fieldnames=fieldnames)
if writeheader:
writer.writeheader()
for r in rows:
writer.writerow(r)
def estimate_from_split_matrices(eval_fn):
test_argument_and_file(FLAGS.path, "features_train")
test_argument_and_file(FLAGS.path, "features_test")
test_argument_and_file(FLAGS.path, "labels_train")
test_argument_and_file(FLAGS.path, "labels_test")
train_features, dim_train, samples_train = load_and_log(FLAGS.path, "features_train")
test_features, dim_test, samples_test = load_and_log(FLAGS.path, "features_test")
if dim_test != dim_train:
raise AttributeError("Train and test features do not have the same dimension!")
    train_labels, dim, samples_train_labels = load_and_log(FLAGS.path, "labels_train")
    if dim != 1:
        raise AttributeError("Train labels file does not point to a vector!")
    if samples_train_labels != samples_train:
        raise AttributeError("Train features and labels files do not have the same number of samples!")
    test_labels, dim_test_labels, samples_test_labels = load_and_log(FLAGS.path, "labels_test")
    if dim_test_labels != 1:  # validate the test labels' own dimension
        raise AttributeError("Test labels file does not point to a vector!")
    if samples_test_labels != samples_test:
        raise AttributeError("Test features and labels files do not have the same number of samples!")
logging.log(logging.DEBUG, "Start full estimation with method '{}'".format(FLAGS.method))
start = time.time()
result_full = eval_fn(train_features, test_features, train_labels, test_labels)
end = time.time()
logging.log(logging.DEBUG, "Method '{}' executed in {} seconds".format(FLAGS.method, end - start))
logging.log(logging.INFO, "Full train and test set: {}".format(result_full))
if FLAGS.noise_levels and FLAGS.noise_runs > 0:
result_rows = []
for run in range(FLAGS.noise_runs):
if FLAGS.output_file:
rows = [_get_csv_row(k, run, samples_train, 0.0, v, (end - start) / float(len(result_full))) for k, v in result_full.items()]
result_rows.extend(rows)
logging.log(logging.DEBUG, "Start noisy run {} out of {}".format(run+1, FLAGS.noise_runs))
run_start = time.time()
for noise_level in [float(x) for x in FLAGS.noise_levels]:
if noise_level > 1.0 or noise_level <= 0.0:
raise AttributeError("Noise level {} has to be bigger than 0 and not larger than 1!".format(noise_level))
logging.log(logging.DEBUG, "Start noise level {} for run {} out of {}".format(noise_level, run+1, FLAGS.noise_runs))
noise_start = time.time()
# flip labels test and train
flipped_train_labels = label_noise.random_flip(train_labels, samples_train, noise_level, copy=True)
flipped_test_labels = label_noise.random_flip(test_labels, samples_test, noise_level, copy=True)
# run method
logging.log(logging.DEBUG, "Start full estimation with method '{}', noise level {}, run {}/{}".format(FLAGS.method, noise_level, run+1, FLAGS.noise_runs))
start = time.time()
result = eval_fn(train_features, test_features, flipped_train_labels, flipped_test_labels)
end = time.time()
logging.log(logging.DEBUG, "Method '{}' executed in {} seconds".format(FLAGS.method, end - start))
logging.log(logging.INFO, "Run {}/{} - noise level {}: {}".format(run+1, FLAGS.noise_runs, noise_level, result))
if FLAGS.output_file:
rows = [_get_csv_row(k, run, samples_train, noise_level, v, (end - start) / float(len(result))) for k, v in result.items()]
result_rows.extend(rows)
noise_end = time.time()
logging.log(logging.DEBUG, "Noise level {} for run {}/{} executed in {} seconds".format(noise_level, run+1, FLAGS.noise_runs, noise_end - noise_start))
run_end = time.time()
logging.log(logging.DEBUG, "Run {}/{} executed in {} seconds".format(run+1, FLAGS.noise_runs, run_end - run_start))
if FLAGS.output_file:
_write_result(result_rows)
elif FLAGS.output_file:
rows = [_get_csv_row(k, 0, samples_train, 0.0, v, (end - start) / float(len(result_full))) for k, v in result_full.items()]
_write_result(rows)
def estimate_from_single_matrix(eval_fn):
test_argument_and_file(FLAGS.path, "features_train")
test_argument_and_file(FLAGS.path, "labels_train")
train_features, dim_train, samples_train = load_and_log(FLAGS.path, "features_train")
train_labels, dim, samples_train_labels = load_and_log(FLAGS.path, "labels_train")
if dim != 1:
raise AttributeError("Train labels file does not point to a vector!")
if samples_train_labels != samples_train:
raise AttributeError("Train features and labels files does not have the same amount of samples!")
logging.log(logging.DEBUG, "Start full estimation with method '{}'".format(FLAGS.method))
start = time.time()
result_full = eval_fn(train_features, train_labels)
end = time.time()
logging.log(logging.DEBUG, "Method '{}' executed in {} seconds".format(FLAGS.method, end - start))
logging.log(logging.INFO, "Full train set: {}".format(result_full))
if FLAGS.noise_levels and FLAGS.noise_runs > 0:
result_rows = []
for run in range(FLAGS.noise_runs):
if FLAGS.output_file:
rows = [_get_csv_row(k, run, samples_train, 0.0, v, (end - start) / float(len(result_full))) for k, v in result_full.items()]
result_rows.extend(rows)
logging.log(logging.DEBUG, "Start noisy run {} out of {}".format(run+1, FLAGS.noise_runs))
run_start = time.time()
for noise_level in [float(x) for x in FLAGS.noise_levels]:
if noise_level > 1.0 or noise_level <= 0.0:
raise AttributeError("Noise level {} has to be bigger than 0 and not larger than 1!".format(noise_level))
logging.log(logging.DEBUG, "Start noise level {} for run {} out of {}".format(noise_level, run+1, FLAGS.noise_runs))
noise_start = time.time()
# flip labels train
flipped_train_labels = label_noise.random_flip(train_labels, samples_train, noise_level, copy=True)
# run method
logging.log(logging.DEBUG, "Start full estimation with method '{}', noise level {}, run {}/{}".format(FLAGS.method, noise_level, run+1, FLAGS.noise_runs))
start = time.time()
result = eval_fn(train_features, flipped_train_labels)
end = time.time()
logging.log(logging.DEBUG, "Method '{}' executed in {} seconds".format(FLAGS.method, end - start))
logging.log(logging.INFO, "Run {}/{} - noise level {}: {}".format(run+1, FLAGS.noise_runs, noise_level, result))
if FLAGS.output_file:
rows = [_get_csv_row(k, run, samples_train, noise_level, v, (end - start) / float(len(result))) for k, v in result.items()]
result_rows.extend(rows)
noise_end = time.time()
logging.log(logging.DEBUG, "Noise level {} for run {}/{} executed in {} seconds".format(noise_level, run+1, FLAGS.noise_runs, noise_end - noise_start))
run_end = time.time()
logging.log(logging.DEBUG, "Run {}/{} executed in {} seconds".format(run+1, FLAGS.noise_runs, run_end - run_start))
if FLAGS.output_file:
_write_result(result_rows)
elif FLAGS.output_file:
rows = [_get_csv_row(k, 0, samples_train, 0.0, v, (end - start) / float(len(result_full))) for k, v in result_full.items()]
_write_result(rows)
def main(argv):
if FLAGS.method is None:
raise app.UsageError("You have to specify the method!")
if FLAGS.method == "knn":
estimate_from_split_matrices(knn.eval_from_matrices)
elif FLAGS.method == "knn_extrapolate":
estimate_from_split_matrices(knn_extrapolate.eval_from_matrices)
elif FLAGS.method == "lr_model":
estimate_from_split_matrices(lr_model.eval_from_matrices)
elif FLAGS.method == "knn_loo":
estimate_from_single_matrix(knn.eval_from_matrix_loo)
elif FLAGS.method == "ghp":
estimate_from_single_matrix(ghp.eval_from_matrix)
elif FLAGS.method == "kde_knn_loo":
estimate_from_single_matrix(kde.eval_from_matrix_knn_loo)
elif FLAGS.method == "onenn":
estimate_from_single_matrix(onenn.eval_from_matrix_onenn)
elif FLAGS.method == "kde":
estimate_from_single_matrix(kde.eval_from_matrix_kde)
else:
raise NotImplementedError("Method module for 'matrices' not yet implemented!")
if __name__ == "__main__":
app.run(main)
| 51.153846
| 215
| 0.665369
| 1,563
| 11,305
| 4.59501
| 0.111964
| 0.043163
| 0.047341
| 0.049011
| 0.720134
| 0.690198
| 0.652743
| 0.637845
| 0.607909
| 0.586048
| 0
| 0.00693
| 0.221406
| 11,305
| 220
| 216
| 51.386364
| 0.809021
| 0.005838
| 0
| 0.494318
| 0
| 0
| 0.211234
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028409
| false
| 0
| 0.102273
| 0.005682
| 0.136364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27dfb13b1540ca2ae940981337f040231ef6dd46
| 2,610
|
py
|
Python
|
allmodels_image.py
|
GustavZ/Tensorflow-Object-Detection
|
3aab434b20e510d3953b4265dd73a1c7c315067d
|
[
"MIT"
] | 187
|
2017-12-26T17:41:09.000Z
|
2019-03-06T04:44:25.000Z
|
allmodels_image.py
|
a554142589/realtime_object_detection
|
d2bd7e58df9af1848e473fa7627aa2433192903d
|
[
"MIT"
] | 38
|
2018-02-01T17:05:01.000Z
|
2019-02-15T21:58:25.000Z
|
allmodels_image.py
|
a554142589/realtime_object_detection
|
d2bd7e58df9af1848e473fa7627aa2433192903d
|
[
"MIT"
] | 65
|
2018-01-19T06:03:44.000Z
|
2019-03-06T04:58:31.000Z
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 10 09:45:23 2018
@author: www.github.com/GustavZ
"""
import os
import sys
import numpy as np
from rod.config import Config
from rod.helper import get_model_list, check_if_optimized_model
from rod.model import ObjectDetectionModel, DeepLabModel
ROOT_DIR = os.getcwd()
#MODELS_DIR = os.path.join(ROOT_DIR,'models')
MODELS_DIR = '/home/gustav/workspace/eetfm_automation/nmsspeed_test'
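# NOTE: MODELS_DIR is hardcoded to a local workspace; point it at your own
# models directory (or re-enable the ROOT_DIR-based default above) before running.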
INPUT_TYPE = 'image'
def create_test_config(type,model_name, optimized=False, single_class=False):
class TestConfig(Config):
OD_MODEL_PATH=MODELS_DIR+'/'+model_name+'/{}'
DL_MODEL_PATH=MODELS_DIR+'/'+model_name+'/{}'
OD_MODEL_NAME=model_name
DL_MODEL_NAME=model_name
VISUALIZE=False
SPLIT_MODEL = False
WRITE_TIMELINE = True
LIMIT_IMAGES = 11
if optimized:
USE_OPTIMIZED=True
else:
USE_OPTIMIZED=False
if single_class:
NUM_CLASSES=1
else:
NUM_CLASSES=90
def __init__(self):
super(TestConfig, self).__init__(type)
return TestConfig()
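# The factory above builds a throwaway Config subclass per model so each test
# run gets its own paths and flags; anything not overridden here is assumed to
# come from the rod.config.Config defaults.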
# Read sequential models from config or gather all models from models/
config = Config('od')
if config.SEQ_MODELS:
model_names = config.SEQ_MODELS
else:
model_names = get_model_list(MODELS_DIR)
# Sequential testing
for model_name in model_names:
print("> testing model: {}".format(model_name))
# conditionals
optimized=False
single_class=False
# Test Model
if 'hands' in model_name or 'person' in model_name:
single_class=True
if 'deeplab' in model_name:
config = create_test_config('dl',model_name,optimized,single_class)
model = DeepLabModel(config).prepare_model(INPUT_TYPE)
else:
config = create_test_config('od',model_name,optimized,single_class)
model = ObjectDetectionModel(config).prepare_model(INPUT_TYPE)
# Check if there is an optimized graph
model_dir = os.path.join(os.getcwd(),'models',model_name)
optimized = check_if_optimized_model(model_dir)
# Again for the optimized graph
if optimized:
if 'deeplab' in model_name:
config = create_test_config('dl',model_name,optimized,single_class)
model = DeepLabModel(config).prepare_model(INPUT_TYPE)
else:
config = create_test_config('od',model_name,optimized,single_class)
model = ObjectDetectionModel(config).prepare_model(INPUT_TYPE)
model.run()
| 32.222222
| 79
| 0.668966
| 331
| 2,610
| 4.996979
| 0.323263
| 0.097944
| 0.065296
| 0.053204
| 0.342201
| 0.305925
| 0.273277
| 0.273277
| 0.273277
| 0.273277
| 0
| 0.009572
| 0.239464
| 2,610
| 80
| 80
| 32.625
| 0.823678
| 0.123755
| 0
| 0.298246
| 0
| 0
| 0.055482
| 0.023338
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035088
| false
| 0
| 0.105263
| 0
| 0.315789
| 0.017544
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27e04f3e71ee9ae2490b13c55437303fba48ca2d
| 5,953
|
py
|
Python
|
train.py
|
Jing-lun/GPR_3D_Model_Reconstruction
|
24259bdbdf5e993e286e556ee1bae720892a16b9
|
[
"Unlicense"
] | 1
|
2021-09-30T10:22:54.000Z
|
2021-09-30T10:22:54.000Z
|
train.py
|
Jing-lun/GPR_3D_Model_Reconstruction
|
24259bdbdf5e993e286e556ee1bae720892a16b9
|
[
"Unlicense"
] | 1
|
2021-07-23T13:10:58.000Z
|
2021-07-23T13:10:58.000Z
|
train.py
|
Jing-lun/GPR_3D_Model_Reconstruction
|
24259bdbdf5e993e286e556ee1bae720892a16b9
|
[
"Unlicense"
] | null | null | null |
# Copyright 2021, Robotics Lab, City College of New York
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Originating Author: Jinglun Feng, (jfeng1@ccny.cuny.edu)
import argparse
import logging
import os
import sys
import numpy as np
import torch
import torch.nn as nn
from torch import optim
from tqdm import tqdm
from torch.autograd import Variable
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader, random_split
from torchvision.utils import save_image
from model import UNet3D
from utils.data_loader import BasicDataset
from utils.utils import PointLoss
from eval import eval_net
def train_net(net,
epochs,
batch_size,
lr,
device,
save_cp = True):
dset = BasicDataset(args.input, args.gt)
n_train = int(len(dset) * 0.85)
n_val = len(dset) - n_train
train, val = random_split(dset, [n_train, n_val])
dset_train = DataLoader(train, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True)
dset_valid = DataLoader(val, batch_size=batch_size, shuffle=False, num_workers=8, pin_memory=True)
    writer = SummaryWriter(comment=f'BS_{batch_size}')
logging.info(f'''Starting training:
Epochs: {epochs}
Batch size: {batch_size}
Learning rate: {lr}
Training size: {n_train}
Validation size: {n_val}
Device: {device.type}
''')
optimizer = optim.Adam(net.parameters(), lr=lr)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=40, gamma=0.2)
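    # StepLR multiplies the learning rate by gamma every step_size epochs,
    # i.e. lr -> lr * 0.2 after epochs 40, 80, ...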
L1_loss = nn.L1Loss()
L1_loss.to(device)
global_step = 0
for epoch in range(epochs):
net.train()
epoch_loss = 0
with tqdm(total=n_train, desc=f'Epoch {epoch+1}/{epochs}', unit='mat') as pbar:
for batch in dset_train:
mats = batch['mat_input']
pcds = batch['mat_gt']
mats = mats.to(device=device, dtype=torch.float32)
pcds = pcds.to(device=device, dtype=torch.float32)
test = pcds*6 + 1
optimizer.zero_grad()
mats_pred = net(mats)
new_predict = test * mats_pred
new_ground_truth = 7*pcds
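                # Weighted L1 (a reading of the code, not documented upstream):
                # with gt in [0, 1], loss = L1((6*gt + 1) * pred, 7 * gt)
                # up-weights errors on occupied voxels roughly 7x vs. empty ones.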
loss = L1_loss(new_predict, new_ground_truth)
epoch_loss += loss.item()
writer.add_scalar('Loss/train', loss.item(), global_step)
pbar.set_postfix(**{'loss (batch)': loss.item()})
loss.backward()
optimizer.step()
pbar.update(mats.shape[0])
global_step += 1
val_score = eval_net(net, dset_valid, device, n_val)
logging.info(f'Validation L1 Distance: {val_score}')
writer.add_scalar('Loss/test', val_score, global_step)
scheduler.step()
if epoch % 20 == 0:
torch.save(net.state_dict(),
'check_points/' + f'CP_epoch{epoch + 1}.pth')
logging.info(f'Checkpoint {epoch + 1} saved !')
writer.close()
def args_setting():
parser = argparse.ArgumentParser(description='Train the net on gpr data',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-e', '--epochs', metavar='E', type=int, default=101,
help='Number of epochs', dest='epochs')
parser.add_argument('-b', '--batch-size', metavar='B', type=int, nargs='?', default=4,
help='Batch size', dest='batchsize')
parser.add_argument('-l', '--learning-rate', metavar='LR', type=float, nargs='?', default=0.00001,
help='Learning rate', dest='lr')
parser.add_argument('-f', '--load', dest='load', type=str, default='check_points/good_627/CP_epoch101.pth',
help='Load model from a .pth file')
parser.add_argument('-i', '--input', default='../resnet_range/',
type=str, metavar='PATH', help='path to input dataset', dest='input')
parser.add_argument('-g', '--ground-truth', default='../new_mat_gt/',
type=str, metavar='PATH', help='path to gt dataset', dest='gt')
    parser.add_argument('-c', '--checkpoint', default='check_point/',
                        type=str, metavar='PATH', help='path to checkpoint directory', dest='cp')
return parser.parse_args()
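# Example run (using the hypothetical default paths above):
#   python train.py -e 101 -b 4 -l 0.00001 -i ../resnet_range/ -g ../new_mat_gt/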
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
args = args_setting()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logging.info(f'Let\'s use {torch.cuda.device_count()} GPUs!')
net = UNet3D(residual='conv')
net = torch.nn.DataParallel(net)
if args.load != '':
net.load_state_dict(
torch.load(args.load, map_location=device)
)
logging.info(f'Model loaded from {args.load}')
logging.info(f'Network Structure:\n'
f'\t{net}\n')
net.to(device=device)
try:
train_net(net=net,
epochs=args.epochs,
batch_size=args.batchsize,
lr=args.lr,
device=device)
except KeyboardInterrupt:
torch.save(net.state_dict(), 'INTERRUPTED.pth')
logging.info('Saved interrupt')
try:
sys.exit(0)
except SystemExit:
os._exit(0)
| 36.975155
| 111
| 0.607425
| 759
| 5,953
| 4.635046
| 0.346509
| 0.025583
| 0.033826
| 0.01535
| 0.088687
| 0.062536
| 0.031268
| 0.023309
| 0.023309
| 0.023309
| 0
| 0.014059
| 0.271124
| 5,953
| 160
| 112
| 37.20625
| 0.796727
| 0.106501
| 0
| 0.016667
| 0
| 0
| 0.171849
| 0.006972
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016667
| false
| 0
| 0.141667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27e33b028e6c906a2e346f640e4d67536b199914
| 23,817
|
py
|
Python
|
dxtbx/tests/model/experiment/test_experiment_list.py
|
jbeilstenedmands/cctbx_project
|
c228fb15ab10377f664c39553d866281358195aa
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
dxtbx/tests/model/experiment/test_experiment_list.py
|
jbeilstenedmands/cctbx_project
|
c228fb15ab10377f664c39553d866281358195aa
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
dxtbx/tests/model/experiment/test_experiment_list.py
|
jbeilstenedmands/cctbx_project
|
c228fb15ab10377f664c39553d866281358195aa
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
from __future__ import absolute_import, division, print_function
import six.moves.cPickle as pickle
from glob import glob
import os
import pytest
from dxtbx.model import Experiment, ExperimentList
from dxtbx.model.experiment_list import ExperimentListFactory, \
ExperimentListDumper, ExperimentListDict
def test_experiment_contains():
from dxtbx.model import Beam, Detector, Goniometer, Scan
from dxtbx.model import Crystal
# Create a load of models
b1 = Beam()
d1 = Detector()
g1 = Goniometer()
s1 = Scan()
c1 = Crystal((1, 0, 0), (0, 1, 0), (0, 0, 1), space_group_symbol="P1")
# Create an experiment
e = Experiment(
beam=b1, detector=d1, goniometer=g1,
scan=s1, crystal=c1, imageset=None)
# Check experiment contains model
assert b1 in e
assert d1 in e
assert g1 in e
assert s1 in e
assert c1 in e
# Create a load of models that look the same but aren't
b2 = Beam()
d2 = Detector()
g2 = Goniometer()
s2 = Scan()
c2 = Crystal((1, 0, 0), (0, 1, 0), (0, 0, 1), space_group_symbol="P1")
# Check experiment doesn't contain model
assert b2 not in e
assert d2 not in e
assert g2 not in e
assert s2 not in e
assert c2 not in e
def test_experiment_equality():
from dxtbx.model import Beam, Detector, Goniometer, Scan
from dxtbx.model import Crystal
# Create a load of models
b1 = Beam()
d1 = Detector()
g1 = Goniometer()
s1 = Scan()
c1 = Crystal((1, 0, 0), (0, 1, 0), (0, 0, 1), space_group_symbol="P1")
# Create a load of models that look the same but aren't
b2 = Beam()
d2 = Detector()
g2 = Goniometer()
s2 = Scan()
c2 = Crystal((1, 0, 0), (0, 1, 0), (0, 0, 1), space_group_symbol="P1")
# Create an experiment
e1 = Experiment(
beam=b1, detector=d1, goniometer=g1,
scan=s1, crystal=c1, imageset=None)
# Create an experiment
e2 = Experiment(
beam=b1, detector=d1, goniometer=g1,
scan=s1, crystal=c1, imageset=None)
# Create an experiment
e3 = Experiment(
beam=b2, detector=d2, goniometer=g2,
scan=s2, crystal=c2, imageset=None)
# Check e1 equals e2 but not e3
assert e1 == e2
assert e1 != e3
assert e2 != e3
def test_experiment_consistent(dials_regression):
from dxtbx.imageset import ImageSetFactory
from dxtbx.model import Scan
# Create a sweep
sweep_filenames = os.path.join(dials_regression, 'centroid_test_data', 'centroid*.cbf')
sweep = ImageSetFactory.new(sorted(glob(sweep_filenames)))[0]
# Create experiment with sweep and good scan
e = Experiment(imageset=sweep, scan=sweep.get_scan())
assert e.is_consistent()
# Create experiment with sweep and defective scan
scan = sweep.get_scan()
scan.set_image_range((1, 1))
e = Experiment(imageset=sweep, scan=scan)
    #assert not e.is_consistent()  # FIXME
## Create experiment with imageset and good scan
#assert e.is_consistent()
## Create experiment with imageset and non-still scan
#assert not e.is_consistent()
## Create experiment with imageset and scan with more than 1 image
#assert not e.is_consistent()
## Create experiment with imageset and defective scan
#assert not e.is_consistent()
def test_experimentlist_contains(experiment_list):
from dxtbx.model import Beam, Detector, Goniometer, Scan
# Check all the models are found
for e in experiment_list:
assert e.beam in experiment_list
assert e.detector in experiment_list
assert e.goniometer in experiment_list
assert e.scan in experiment_list
# Create some more models
b = Beam()
d = Detector()
g = Goniometer()
s = Scan()
# Check that models not in are not found
assert b not in experiment_list
assert d not in experiment_list
assert g not in experiment_list
assert s not in experiment_list
# def test_experimentlist_index(experiment_list):
# # Check the indices of existing experiments
# assert experiment_list.index(experiment_list[0]) is 0
# assert experiment_list.index(experiment_list[1]) is 1
# assert experiment_list.index(experiment_list[2]) is 2
# assert experiment_list.index(experiment_list[3]) is 1
# assert experiment_list.index(experiment_list[4]) is 0
# # Check index of a non-existing experiment
# try:
# experiment_list.index(Experiment())
# assert False
# except ValueError:
# pass
def test_experimentlist_replace(experiment_list):
# Get the models
b = [e.beam for e in experiment_list]
d = [e.detector for e in experiment_list]
g = [e.goniometer for e in experiment_list]
s = [e.scan for e in experiment_list]
# Replace some models
experiment_list.replace(b[0], b[1])
assert experiment_list[0].beam is b[1]
assert experiment_list[4].beam is b[1]
# Replace again
experiment_list[0].beam = b[0]
experiment_list[4].beam = b[4]
def test_experimentlist_indices(experiment_list):
from dxtbx.model import Beam, Detector, Goniometer, Scan
# Get the models
b = [e.beam for e in experiment_list]
d = [e.detector for e in experiment_list]
g = [e.goniometer for e in experiment_list]
s = [e.scan for e in experiment_list]
# Check indices of beams
assert list(experiment_list.indices(b[0])) == [0, 4]
assert list(experiment_list.indices(b[1])) == [1, 3]
assert list(experiment_list.indices(b[2])) == [2]
assert list(experiment_list.indices(b[3])) == [1, 3]
assert list(experiment_list.indices(b[4])) == [0, 4]
# Check indices of detectors
assert list(experiment_list.indices(d[0])) == [0, 4]
assert list(experiment_list.indices(d[1])) == [1, 3]
assert list(experiment_list.indices(d[2])) == [2]
assert list(experiment_list.indices(d[3])) == [1, 3]
assert list(experiment_list.indices(d[4])) == [0, 4]
# Check indices of goniometer
assert list(experiment_list.indices(g[0])) == [0, 4]
assert list(experiment_list.indices(g[1])) == [1, 3]
assert list(experiment_list.indices(g[2])) == [2]
assert list(experiment_list.indices(g[3])) == [1, 3]
assert list(experiment_list.indices(g[4])) == [0, 4]
# Check indices of scans
assert list(experiment_list.indices(s[0])) == [0, 4]
assert list(experiment_list.indices(s[1])) == [1, 3]
assert list(experiment_list.indices(s[2])) == [2]
assert list(experiment_list.indices(s[3])) == [1, 3]
assert list(experiment_list.indices(s[4])) == [0, 4]
# Check some models not in the list
assert len(experiment_list.indices(Beam())) == 0
assert len(experiment_list.indices(Detector())) == 0
assert len(experiment_list.indices(Goniometer())) == 0
assert len(experiment_list.indices(Scan())) == 0
def test_experimentlist_models(experiment_list):
# Get all the unique models
b = experiment_list.beams()
d = experiment_list.detectors()
g = experiment_list.goniometers()
s = experiment_list.scans()
# Check we have the expected number
assert len(b) == 3
assert len(d) == 3
assert len(g) == 3
assert len(s) == 3
# Check we have the expected order
assert b[0] == experiment_list[0].beam
assert b[1] == experiment_list[1].beam
assert b[2] == experiment_list[2].beam
assert d[0] == experiment_list[0].detector
assert d[1] == experiment_list[1].detector
assert d[2] == experiment_list[2].detector
    assert g[0] == experiment_list[0].goniometer
    assert g[1] == experiment_list[1].goniometer
    assert g[2] == experiment_list[2].goniometer
    assert s[0] == experiment_list[0].scan
    assert s[1] == experiment_list[1].scan
    assert s[2] == experiment_list[2].scan
def test_experimentlist_to_dict(experiment_list):
# Convert the list to a dictionary
obj = experiment_list.to_dict()
# Check this is the right object
assert obj['__id__'] == 'ExperimentList'
# Check length of items
assert len(obj['experiment']) == 5
assert len(obj['beam']) == 3
assert len(obj['detector']) == 3
assert len(obj['goniometer']) == 3
assert len(obj['scan']) == 3
# The expected models
b = [0, 1, 2, 1, 0]
d = [0, 1, 2, 1, 0]
g = [0, 1, 2, 1, 0]
s = [0, 1, 2, 1, 0]
# Check all the experiments
for i, eobj in enumerate(obj['experiment']):
assert eobj['__id__'] == 'Experiment'
assert eobj['beam'] == b[i]
assert eobj['detector'] == d[i]
assert eobj['goniometer'] == g[i]
assert eobj['scan'] == s[i]
def test_experimentlist_where(experiment_list):
for beam in experiment_list.beams():
assert beam is not None
for i in experiment_list.where(beam=beam):
assert experiment_list[i].beam is beam
for goniometer in experiment_list.goniometers():
assert goniometer is not None
for i in experiment_list.where(goniometer=goniometer):
assert experiment_list[i].goniometer is goniometer
for scan in experiment_list.scans():
assert scan is not None
for i in experiment_list.where(scan=scan):
assert experiment_list[i].scan is scan
for detector in experiment_list.detectors():
assert detector is not None
for i in experiment_list.where(detector=detector):
assert experiment_list[i].detector is detector
@pytest.fixture
def experiment_list():
from dxtbx.model import Beam, Detector, Goniometer, Scan
# Initialise a list of experiments
experiments = ExperimentList()
# Create a few beams
b1 = Beam()
b2 = Beam()
b3 = Beam()
# Create a few detectors
d1 = Detector()
d2 = Detector()
d3 = Detector()
# Create a few goniometers
g1 = Goniometer()
g2 = Goniometer()
g3 = Goniometer()
# Create a few scans
s1 = Scan()
s2 = Scan()
s3 = Scan()
# Create a list of models
b = [b1, b2, b3, b2, b1]
d = [d1, d2, d3, d2, d1]
g = [g1, g2, g3, g2, g1]
s = [s1, s2, s3, s2, s1]
ident = ["sausage", "eggs", "bacon", "toast", "beans"]
# Populate with various experiments
for i in range(5):
experiments.append(Experiment(
beam=b[i],
detector=d[i],
goniometer=g[i],
scan=s[i],
identifier=ident[i]))
# Return the list of experiments
return experiments
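    # Note the deliberate sharing pattern [0, 1, 2, 1, 0]: experiments 0/4 and
    # 1/3 reuse the same models, which the indices/models tests above depend on.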
def test_experimentlist_factory_from_json(dials_regression):
os.environ['DIALS_REGRESSION'] = dials_regression
# Get all the filenames
filename1 = os.path.join(dials_regression, 'experiment_test_data', 'experiment_1.json')
filename2 = os.path.join(dials_regression, 'experiment_test_data', 'experiment_2.json')
filename3 = os.path.join(dials_regression, 'experiment_test_data', 'experiment_3.json')
filename4 = os.path.join(dials_regression, 'experiment_test_data', 'experiment_4.json')
# Read all the experiment lists in
el1 = ExperimentListFactory.from_json_file(filename1)
#el2 = ExperimentListFactory.from_json_file(filename2)
el3 = ExperimentListFactory.from_json_file(filename3)
el4 = ExperimentListFactory.from_json_file(filename4)
# All the experiment lists should be the same length
assert len(el1) == 1
#assert len(el1) == len(el2)
assert len(el1) == len(el3)
assert len(el1) == len(el4)
# Check all the models are the same
for e in zip(el1, el3, el4):
e1 = e[0]
assert e1.imageset is not None
assert e1.beam is not None
assert e1.detector is not None
assert e1.goniometer is not None
assert e1.scan is not None
assert e1.crystal is not None
for ee in e[1:]:
assert e1.imageset == ee.imageset
assert e1.beam == ee.beam
assert e1.detector == ee.detector
assert e1.goniometer == ee.goniometer
assert e1.scan == ee.scan
assert e1.crystal == ee.crystal
def test_experimentlist_factory_from_pickle(dials_regression):
os.environ['DIALS_REGRESSION'] = dials_regression
# Get all the filenames
filename1 = os.path.join(dials_regression, 'experiment_test_data', 'experiment_1.json')
# Read all the experiment lists in
el1 = ExperimentListFactory.from_json_file(filename1)
# Pickle then load again
el2 = pickle.loads(pickle.dumps(el1))
# All the experiment lists should be the same length
assert len(el1) == 1
assert len(el1) == len(el2)
# Check all the models are the same
for e1, e2 in zip(el1, el2):
assert e1.imageset and e1.imageset == e2.imageset
assert e1.beam and e1.beam == e2.beam
assert e1.detector and e1.detector == e2.detector
assert e1.goniometer and e1.goniometer == e2.goniometer
assert e1.scan and e1.scan == e2.scan
assert e1.crystal and e1.crystal == e2.crystal
def test_experimentlist_factory_from_args(dials_regression):
pytest.importorskip('dials')
os.environ['DIALS_REGRESSION'] = dials_regression
# Get all the filenames
filenames = [
os.path.join(dials_regression, 'experiment_test_data', 'experiment_1.json'),
#os.path.join(dials_regression, 'experiment_test_data', 'experiment_2.json'),
os.path.join(dials_regression, 'experiment_test_data', 'experiment_3.json'),
os.path.join(dials_regression, 'experiment_test_data', 'experiment_4.json')]
# Get the experiments from a list of filenames
experiments = ExperimentListFactory.from_args(filenames, verbose=True)
    # Expect 3 experiments (experiment_2.json is commented out above)
assert len(experiments) == 3
for i in range(3):
assert experiments[i].imageset is not None
assert experiments[i].beam is not None
assert experiments[i].detector is not None
assert experiments[i].goniometer is not None
assert experiments[i].scan is not None
def test_experimentlist_factory_from_imageset():
from dxtbx.model import Beam, Detector, Goniometer, Scan
from dxtbx.model import Crystal
from dxtbx.format.Format import Format
imageset = Format.get_imageset(["filename.cbf"], as_imageset=True)
imageset.set_beam(Beam(), 0)
imageset.set_detector(Detector(), 0)
crystal = Crystal(
(1, 0, 0), (0, 1, 0), (0, 0, 1), space_group_symbol="P1")
experiments = ExperimentListFactory.from_imageset_and_crystal(
imageset, crystal)
assert len(experiments) == 1
assert experiments[0].imageset is not None
assert experiments[0].beam is not None
assert experiments[0].detector is not None
assert experiments[0].crystal is not None
def test_experimentlist_factory_from_sweep():
from dxtbx.model import Beam, Detector, Goniometer, Scan
from dxtbx.model import Crystal
from dxtbx.format.Format import Format
filenames = ["filename_%01d.cbf" % (i+1) for i in range(0, 2)]
imageset = Format.get_imageset(
filenames,
beam = Beam(),
detector = Detector(),
goniometer = Goniometer(),
scan = Scan((1,2), (0,1)),
as_sweep=True)
crystal = Crystal((1, 0, 0), (0, 1, 0), (0, 0, 1), space_group_symbol="P1")
experiments = ExperimentListFactory.from_imageset_and_crystal(
imageset, crystal)
assert len(experiments) == 1
assert experiments[0].imageset is not None
assert experiments[0].beam is not None
assert experiments[0].detector is not None
assert experiments[0].goniometer is not None
assert experiments[0].scan is not None
assert experiments[0].crystal is not None
def test_experimentlist_factory_from_datablock():
from dxtbx.model import Beam, Detector, Goniometer, Scan
from dxtbx.datablock import DataBlockFactory
from dxtbx.model import Crystal
from dxtbx.format.Format import Format
filenames = ["filename_%01d.cbf" % (i+1) for i in range(0, 2)]
imageset = Format.get_imageset(
filenames,
beam = Beam(),
detector = Detector(),
goniometer = Goniometer(),
scan = Scan((1,2), (0,1)),
as_sweep=True)
crystal = Crystal((1, 0, 0), (0, 1, 0), (0, 0, 1), space_group_symbol="P1")
datablock = DataBlockFactory.from_imageset(imageset)
experiments = ExperimentListFactory.from_datablock_and_crystal(
datablock, crystal)
assert len(experiments) == 1
assert experiments[0].imageset is not None
assert experiments[0].beam is not None
assert experiments[0].detector is not None
assert experiments[0].goniometer is not None
assert experiments[0].scan is not None
assert experiments[0].crystal is not None
def test_experimentlist_dumper_dump_formats(dials_regression, tmpdir):
tmpdir.chdir()
os.environ['DIALS_REGRESSION'] = dials_regression
# Get all the filenames
filename1 = os.path.join(dials_regression, 'experiment_test_data', 'experiment_1.json')
# Read all the experiment lists in
elist1 = ExperimentListFactory.from_json_file(filename1)
# Create the experiment list dumper
dump = ExperimentListDumper(elist1)
# Dump as JSON file and reload
filename = 'temp1.json'
dump.as_json(filename)
elist2 = ExperimentListFactory.from_json_file(filename)
check(elist1, elist2)
# Dump as split JSON file and reload
filename = 'temp2.json'
dump.as_json(filename, split=True)
elist2 = ExperimentListFactory.from_json_file(filename)
check(elist1, elist2)
# Dump as pickle and reload
filename = 'temp.pickle'
dump.as_pickle(filename)
elist2 = ExperimentListFactory.from_pickle_file(filename)
check(elist1, elist2)
def test_experimentlist_dumper_dump_scan_varying(dials_regression, tmpdir):
tmpdir.chdir()
os.environ['DIALS_REGRESSION'] = dials_regression
# Get all the filenames
filename1 = os.path.join(dials_regression, 'experiment_test_data', 'experiment_1.json')
# Read the experiment list in
elist1 = ExperimentListFactory.from_json_file(filename1)
# Make trivial scan-varying models
crystal = elist1[0].crystal
beam = elist1[0].beam
goniometer = elist1[0].goniometer
crystal.set_A_at_scan_points([crystal.get_A()] * 5)
from scitbx.array_family import flex
cov_B = flex.double([1e-5]*9*9)
crystal.set_B_covariance(cov_B)
cov_B.reshape(flex.grid(1, 9, 9))
cov_B_array = flex.double(flex.grid(5, 9, 9))
for i in range(5):
cov_B_array[i:(i+1), :, :] = cov_B
crystal.set_B_covariance_at_scan_points(cov_B_array)
beam.set_s0_at_scan_points([beam.get_s0()] * 5)
goniometer.set_setting_rotation_at_scan_points([goniometer.get_setting_rotation()] * 5)
# Create the experiment list dumper
dump = ExperimentListDumper(elist1)
# Dump as JSON file and reload
filename = 'temp.json'
dump.as_json(filename)
elist2 = ExperimentListFactory.from_json_file(filename)
check(elist1, elist2)
def test_experimentlist_dumper_dump_empty_sweep(tmpdir):
tmpdir.chdir()
from dxtbx.model import Beam, Detector, Goniometer, Scan
from dxtbx.model import Crystal
from dxtbx.format.Format import Format
filenames = ["filename_%01d.cbf" % (i+1) for i in range(0, 2)]
imageset = Format.get_imageset(
filenames,
beam = Beam((1, 0, 0)),
detector = Detector(),
goniometer = Goniometer(),
scan = Scan((1,2), (0.0, 1.0)),
as_sweep=True)
crystal = Crystal((1, 0, 0), (0, 1, 0), (0, 0, 1), space_group_symbol="P1")
experiments = ExperimentListFactory.from_imageset_and_crystal(
imageset, crystal)
dump = ExperimentListDumper(experiments)
filename = 'temp.json'
dump.as_json(filename)
experiments2 = ExperimentListFactory.from_json_file(filename,
check_format=False)
check(experiments, experiments2)
def test_experimentlist_dumper_dump_with_lookup(dials_regression, tmpdir):
tmpdir.chdir()
from dxtbx.model import Beam, Detector, Goniometer, Scan
from dxtbx.model import Crystal
filename = os.path.join(dials_regression, "centroid_test_data",
"experiments_with_lookup.json")
experiments = ExperimentListFactory.from_json_file(
filename, check_format=True)
imageset = experiments[0].imageset
assert not imageset.external_lookup.mask.data.empty()
assert not imageset.external_lookup.gain.data.empty()
assert not imageset.external_lookup.pedestal.data.empty()
assert imageset.external_lookup.mask.filename is not None
assert imageset.external_lookup.gain.filename is not None
assert imageset.external_lookup.pedestal.filename is not None
assert imageset.external_lookup.mask.data.tile(0).data().all_eq(True)
assert imageset.external_lookup.gain.data.tile(0).data().all_eq(1)
assert imageset.external_lookup.pedestal.data.tile(0).data().all_eq(0)
dump = ExperimentListDumper(experiments)
filename = 'temp.json'
dump.as_json(filename)
experiments = ExperimentListFactory.from_json_file(
filename,
check_format=True)
imageset = experiments[0].imageset
assert not imageset.external_lookup.mask.data.empty()
assert not imageset.external_lookup.gain.data.empty()
assert not imageset.external_lookup.pedestal.data.empty()
assert imageset.external_lookup.mask.filename is not None
assert imageset.external_lookup.gain.filename is not None
assert imageset.external_lookup.pedestal.filename is not None
assert imageset.external_lookup.mask.data.tile(0).data().all_eq(True)
assert imageset.external_lookup.gain.data.tile(0).data().all_eq(1)
assert imageset.external_lookup.pedestal.data.tile(0).data().all_eq(0)
def test_experimentlist_dumper_dump_with_bad_lookup(dials_regression, tmpdir):
tmpdir.chdir()
from dxtbx.model import Beam, Detector, Goniometer, Scan
from dxtbx.model import Crystal
filename = os.path.join(dials_regression, "centroid_test_data",
"experiments_with_bad_lookup.json")
experiments = ExperimentListFactory.from_json_file(
filename, check_format=False)
imageset = experiments[0].imageset
assert imageset.external_lookup.mask.data.empty()
assert imageset.external_lookup.gain.data.empty()
assert imageset.external_lookup.pedestal.data.empty()
assert imageset.external_lookup.mask.filename is not None
assert imageset.external_lookup.gain.filename is not None
assert imageset.external_lookup.pedestal.filename is not None
dump = ExperimentListDumper(experiments)
filename = 'temp.json'
dump.as_json(filename)
experiments = ExperimentListFactory.from_json_file(
filename, check_format=False)
imageset = experiments[0].imageset
assert imageset.external_lookup.mask.data.empty()
assert imageset.external_lookup.gain.data.empty()
assert imageset.external_lookup.pedestal.data.empty()
assert imageset.external_lookup.mask.filename is not None
assert imageset.external_lookup.gain.filename is not None
assert imageset.external_lookup.pedestal.filename is not None
def test_experimentlist_with_identifiers():
from dxtbx.model import Beam, Detector, Goniometer, Scan
# Initialise a list of experiments
experiments = ExperimentList()
experiments.append(Experiment(
beam=Beam(s0=(0,0,-1)),
detector=Detector(),
identifier="bacon"))
experiments.append(Experiment(
beam=Beam(s0=(0,0,-1)),
detector=Detector(),
identifier="sausage"))
with pytest.raises(Exception):
experiments.append(Experiment(
beam=Beam(),
detector=Detector(),
identifier="bacon"))
d = experiments.to_dict()
e2 = ExperimentListDict(d).decode()
assert experiments[0].identifier == e2[0].identifier
assert experiments[1].identifier == e2[1].identifier
assert tuple(experiments.identifiers()) == ("bacon", "sausage")
experiments[0].identifier = "spam"
assert tuple(experiments.identifiers()) == ("spam", "sausage")
experiments.append(Experiment(identifier="bacon"))
experiments.select_on_experiment_identifiers(["spam", "bacon"])
assert list(experiments.identifiers()) == ["spam", "bacon"]
experiments.append(Experiment(identifier="ham"))
experiments.append(Experiment(identifier="jam"))
experiments.remove_on_experiment_identifiers(["spam", "jam"])
assert list(experiments.identifiers()) == ["bacon", "ham"]
def check(el1, el2):
# All the experiment lists should be the same length
assert len(el1) == 1
assert len(el1) == len(el2)
# Check all the models are the same
for e1, e2 in zip(el1, el2):
assert e1.imageset and e1.imageset == e2.imageset
assert e1.beam and e1.beam == e2.beam
assert e1.detector is not None and e1.detector == e2.detector
assert e1.goniometer and e1.goniometer == e2.goniometer
assert e1.scan and e1.scan == e2.scan
assert e1.crystal and e1.crystal == e2.crystal
assert e1.identifier == e2.identifier
| 32.715659
| 89
| 0.719528
| 3,395
| 23,817
| 4.919293
| 0.073932
| 0.082151
| 0.023711
| 0.028741
| 0.671996
| 0.630441
| 0.582241
| 0.564996
| 0.519131
| 0.503802
| 0
| 0.028018
| 0.168283
| 23,817
| 727
| 90
| 32.76066
| 0.815084
| 0.127052
| 0
| 0.474645
| 0
| 0
| 0.045916
| 0.0029
| 0
| 0
| 0
| 0.001376
| 0.356998
| 1
| 0.046653
| false
| 0
| 0.073022
| 0
| 0.121704
| 0.002028
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27e7f431903fe9377416892525c526c246e0ed24
| 21,183
|
py
|
Python
|
_states/novav21.py
|
NDPF/salt-formula-nova
|
265d9e6c2cbd41d564ee389b210441d9f7378433
|
[
"Apache-2.0"
] | 4
|
2017-04-27T14:27:04.000Z
|
2017-11-04T18:23:09.000Z
|
_states/novav21.py
|
NDPF/salt-formula-nova
|
265d9e6c2cbd41d564ee389b210441d9f7378433
|
[
"Apache-2.0"
] | 22
|
2017-02-01T09:04:52.000Z
|
2019-05-10T09:04:01.000Z
|
_states/novav21.py
|
NDPF/salt-formula-nova
|
265d9e6c2cbd41d564ee389b210441d9f7378433
|
[
"Apache-2.0"
] | 35
|
2017-02-05T23:11:16.000Z
|
2019-04-04T17:21:36.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from six.moves import zip_longest
import time
import salt
from salt.exceptions import CommandExecutionError
LOG = logging.getLogger(__name__)
KEYSTONE_LOADED = False
def __virtual__():
"""Only load if the nova module is in __salt__"""
if 'keystonev3.project_get_details' in __salt__:
global KEYSTONE_LOADED
KEYSTONE_LOADED = True
return 'novav21'
class SaltModuleCallException(Exception):
def __init__(self, result_dict, *args, **kwargs):
super(SaltModuleCallException, self).__init__(*args, **kwargs)
self.result_dict = result_dict
def _get_failure_function_mapping():
return {
'create': _create_failed,
'update': _update_failed,
'find': _find_failed,
'delete': _delete_failed,
}
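# The wrapped salt functions are expected to expose _action_type,
# _resource_human_readable_name and _body_response_key attributes (presumably
# attached by decorators in the novav21 execution module); the helper below
# relies on them to build failure messages and unpack responses.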
def _call_nova_salt_module(call_string, name, module_name='novav21'):
def inner(*args, **kwargs):
func = __salt__['%s.%s' % (module_name, call_string)]
result = func(*args, **kwargs)
if not result['result']:
ret = _get_failure_function_mapping()[func._action_type](
name, func._resource_human_readable_name)
ret['comment'] += '\nStatus code: %s\n%s' % (result['status_code'],
result['comment'])
raise SaltModuleCallException(ret)
return result['body'].get(func._body_response_key)
return inner
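# _call_nova_salt_module is curried: it binds the salt function once and
# returns a wrapper, so call sites read e.g.
#   _call_nova_salt_module('flavor_list', name)(detail=True, cloud_name=cloud_name)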
def _error_handler(fun):
@six.wraps(fun)
def inner(*args, **kwargs):
try:
return fun(*args, **kwargs)
except SaltModuleCallException as e:
return e.result_dict
return inner
@_error_handler
def flavor_present(name, cloud_name, vcpus=1, ram=256, disk=0, flavor_id=None,
extra_specs=None):
"""Ensures that the flavor exists"""
extra_specs = extra_specs or {}
# There is no way to query flavors by name
flavors = _call_nova_salt_module('flavor_list', name)(
detail=True, cloud_name=cloud_name)
flavor = [flavor for flavor in flavors if flavor['name'] == name]
# Flavor names are unique, there is either 1 or 0 with requested name
if flavor:
flavor = flavor[0]
current_extra_specs = _call_nova_salt_module(
'flavor_get_extra_specs', name)(
flavor['id'], cloud_name=cloud_name)
to_delete = set(current_extra_specs) - set(extra_specs)
to_add = set(extra_specs) - set(current_extra_specs)
for spec in to_delete:
_call_nova_salt_module('flavor_delete_extra_spec', name)(
flavor['id'], spec, cloud_name=cloud_name)
_call_nova_salt_module('flavor_add_extra_specs', name)(
flavor['id'], cloud_name=cloud_name, **extra_specs)
if to_delete or to_add:
ret = _updated(name, 'Flavor', extra_specs)
else:
ret = _no_change(name, 'Flavor')
else:
flavor = _call_nova_salt_module('flavor_create', name)(
name, vcpus, ram, disk, id=flavor_id, cloud_name=cloud_name)
_call_nova_salt_module('flavor_add_extra_specs', name)(
flavor['id'], cloud_name=cloud_name, **extra_specs)
flavor['extra_specs'] = extra_specs
ret = _created(name, 'Flavor', flavor)
return ret
@_error_handler
def flavor_absent(name, cloud_name):
"""Ensure flavor is absent"""
# There is no way to query flavors by name
flavors = _call_nova_salt_module('flavor_list', name)(
detail=True, cloud_name=cloud_name)
flavor = [flavor for flavor in flavors if flavor['name'] == name]
# Flavor names are unique, there is either 1 or 0 with requested name
if flavor:
_call_nova_salt_module('flavor_delete', name)(
flavor[0]['id'], cloud_name=cloud_name)
return _deleted(name, 'Flavor')
return _non_existent(name, 'Flavor')
def _get_keystone_project_id_by_name(project_name, cloud_name):
if not KEYSTONE_LOADED:
LOG.error("Keystone module not found, can not look up project ID "
"by name")
return None
project = __salt__['keystonev3.project_get_details'](
project_name, cloud_name=cloud_name)
if not project:
return None
return project['project']['id']
@_error_handler
def quota_present(name, cloud_name, **kwargs):
"""Ensures that the nova quota exists
:param name: project name to ensure quota for.
"""
project_name = name
project_id = _get_keystone_project_id_by_name(project_name, cloud_name)
changes = {}
if not project_id:
ret = _update_failed(project_name, 'Project quota')
ret['comment'] += ('\nCould not retrieve keystone project %s' %
project_name)
return ret
quota = _call_nova_salt_module('quota_list', project_name)(
project_id, cloud_name=cloud_name)
for key, value in kwargs.items():
if quota.get(key) != value:
changes[key] = value
if changes:
_call_nova_salt_module('quota_update', project_name)(
project_id, cloud_name=cloud_name, **changes)
return _updated(project_name, 'Project quota', changes)
else:
return _no_change(project_name, 'Project quota')
@_error_handler
def quota_absent(name, cloud_name):
"""Ensures that the nova quota set to default
:param name: project name to reset quota for.
"""
project_name = name
project_id = _get_keystone_project_id_by_name(project_name, cloud_name)
if not project_id:
ret = _delete_failed(project_name, 'Project quota')
ret['comment'] += ('\nCould not retrieve keystone project %s' %
project_name)
return ret
_call_nova_salt_module('quota_delete', name)(
project_id, cloud_name=cloud_name)
return _deleted(name, 'Project quota')
@_error_handler
def aggregate_present(name, cloud_name, availability_zone_name=None,
hosts=None, metadata=None):
"""Ensures that the nova aggregate exists"""
aggregates = _call_nova_salt_module('aggregate_list', name)(
cloud_name=cloud_name)
aggregate_exists = [agg for agg in aggregates
if agg['name'] == name]
metadata = metadata or {}
hosts = hosts or []
if availability_zone_name:
metadata.update(availability_zone=availability_zone_name)
if not aggregate_exists:
aggregate = _call_nova_salt_module('aggregate_create', name)(
name, availability_zone_name, cloud_name=cloud_name)
if metadata:
            _call_nova_salt_module('aggregate_set_metadata', name)(
                name, cloud_name=cloud_name, **metadata)  # pass the aggregate name, as in the update branch below
aggregate['metadata'] = metadata
for host in hosts or []:
_call_nova_salt_module('aggregate_add_host', name)(
name, host, cloud_name=cloud_name)
aggregate['hosts'] = hosts
return _created(name, 'Host aggregate', aggregate)
else:
aggregate = aggregate_exists[0]
changes = {}
existing_meta = set(aggregate['metadata'].items())
requested_meta = set(metadata.items())
if existing_meta - requested_meta or requested_meta - existing_meta:
_call_nova_salt_module('aggregate_set_metadata', name)(
name, cloud_name=cloud_name, **metadata)
changes['metadata'] = metadata
hosts_to_add = set(hosts) - set(aggregate['hosts'])
hosts_to_remove = set(aggregate['hosts']) - set(hosts)
if hosts_to_remove or hosts_to_add:
for host in hosts_to_add:
_call_nova_salt_module('aggregate_add_host', name)(
name, host, cloud_name=cloud_name)
for host in hosts_to_remove:
_call_nova_salt_module('aggregate_remove_host', name)(
name, host, cloud_name=cloud_name)
changes['hosts'] = hosts
if changes:
return _updated(name, 'Host aggregate', changes)
else:
return _no_change(name, 'Host aggregate')
@_error_handler
def aggregate_absent(name, cloud_name):
"""Ensure aggregate is absent"""
existing_aggregates = _call_nova_salt_module('aggregate_list', name)(
cloud_name=cloud_name)
matching_aggs = [agg for agg in existing_aggregates
if agg['name'] == name]
if matching_aggs:
_call_nova_salt_module('aggregate_delete', name)(
name, cloud_name=cloud_name)
return _deleted(name, 'Host Aggregate')
return _non_existent(name, 'Host Aggregate')
@_error_handler
def keypair_present(name, cloud_name, public_key_file=None, public_key=None):
"""Ensures that the Nova key-pair exists"""
existing_keypairs = _call_nova_salt_module('keypair_list', name)(
cloud_name=cloud_name)
matching_kps = [kp for kp in existing_keypairs
if kp['keypair']['name'] == name]
if public_key_file and not public_key:
with salt.utils.fopen(public_key_file, 'r') as f:
public_key = f.read()
if not public_key:
ret = _create_failed(name, 'Keypair')
ret['comment'] += '\nPlease specify public key for keypair creation.'
return ret
if matching_kps:
# Keypair names are unique, there is either 1 or 0 with requested name
kp = matching_kps[0]['keypair']
if kp['public_key'] != public_key:
_call_nova_salt_module('keypair_delete', name)(
name, cloud_name=cloud_name)
else:
return _no_change(name, 'Keypair')
res = _call_nova_salt_module('keypair_create', name)(
name, cloud_name=cloud_name, public_key=public_key)
return _created(name, 'Keypair', res)
@_error_handler
def keypair_absent(name, cloud_name):
"""Ensure keypair is absent"""
existing_keypairs = _call_nova_salt_module('keypair_list', name)(
cloud_name=cloud_name)
matching_kps = [kp for kp in existing_keypairs
if kp['keypair']['name'] == name]
if matching_kps:
_call_nova_salt_module('keypair_delete', name)(
name, cloud_name=cloud_name)
return _deleted(name, 'Keypair')
return _non_existent(name, 'Keypair')
def cell_present(name='cell1', transport_url='none:///', db_engine='mysql',
db_name='nova_upgrade', db_user='nova', db_password=None,
db_address='0.0.0.0'):
"""Ensure nova cell is present
For newly created cells this state also runs discover_hosts and
map_instances."""
cell_info = __salt__['cmd.shell'](
"nova-manage cell_v2 list_cells --verbose | "
"awk '/%s/ {print $4,$6,$8}'" % name).split()
db_connection = (
'%(db_engine)s+pymysql://%(db_user)s:%(db_password)s@'
'%(db_address)s/%(db_name)s?charset=utf8' % {
'db_engine': db_engine, 'db_user': db_user,
'db_password': db_password, 'db_address': db_address,
'db_name': db_name})
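    # Yields e.g. 'mysql+pymysql://nova:<password>@0.0.0.0/nova_upgrade?charset=utf8'
    # (values shown are just the function defaults).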
args = {'transport_url': transport_url, 'db_connection': db_connection}
# There should be at least 1 component printed to cell_info
if len(cell_info) >= 1:
cell_info = dict(zip_longest(
('cell_uuid', 'existing_transport_url', 'existing_db_connection'),
cell_info))
cell_uuid, existing_transport_url, existing_db_connection = cell_info
command_string = ''
if existing_transport_url != transport_url:
command_string = (
'%s --transport-url %%(transport_url)s' % command_string)
if existing_db_connection != db_connection:
command_string = (
'%s --database_connection %%(db_connection)s' % command_string)
if not command_string:
return _no_change(name, 'Nova cell')
try:
__salt__['cmd.shell'](
('nova-manage cell_v2 update_cell --cell_uuid %s %s' % (
cell_uuid, command_string)) % args)
LOG.warning("Updating the transport_url or database_connection "
"fields on a running system will NOT result in all "
"nodes immediately using the new values. Use caution "
"when changing these values.")
ret = _updated(name, 'Nova cell', args)
except Exception as e:
ret = _update_failed(name, 'Nova cell')
ret['comment'] += '\nException: %s' % e
return ret
args.update(name=name)
try:
cell_uuid = __salt__['cmd.shell'](
'nova-manage cell_v2 create_cell --name %(name)s '
'--transport-url %(transport_url)s '
'--database_connection %(db_connection)s --verbose' % args)
__salt__['cmd.shell']('nova-manage cell_v2 discover_hosts '
'--cell_uuid %s --verbose' % cell_uuid)
__salt__['cmd.shell']('nova-manage cell_v2 map_instances '
'--cell_uuid %s' % cell_uuid)
ret = _created(name, 'Nova cell', args)
except Exception as e:
ret = _create_failed(name, 'Nova cell')
ret['comment'] += '\nException: %s' % e
return ret
def cell_absent(name, force=False):
"""Ensure cell is absent"""
cell_uuid = __salt__['cmd.shell'](
"nova-manage cell_v2 list_cells | awk '/%s/ {print $4}'" % name)
if not cell_uuid:
return _non_existent(name, 'Nova cell')
try:
__salt__['cmd.shell'](
'nova-manage cell_v2 delete_cell --cell_uuid %s %s' % (
cell_uuid, '--force' if force else ''))
ret = _deleted(name, 'Nova cell')
except Exception as e:
ret = _delete_failed(name, 'Nova cell')
ret['comment'] += '\nException: %s' % e
return ret
def _db_version_update(db, version, human_readable_resource_name):
existing_version = __salt__['cmd.shell'](
'nova-manage %s version 2>/dev/null' % db)
try:
existing_version = int(existing_version)
version = int(version)
except Exception as e:
ret = _update_failed(existing_version,
human_readable_resource_name)
        ret['comment'] += ('\nCannot convert existing or requested version '
                           'to integer, exception: %s' % e)
LOG.error(ret['comment'])
return ret
if existing_version < version:
try:
__salt__['cmd.shell'](
'nova-manage %s sync --version %s' % (db, version))
ret = _updated(existing_version, human_readable_resource_name,
{db: '%s sync --version %s' % (db, version)})
except Exception as e:
ret = _update_failed(existing_version,
human_readable_resource_name)
ret['comment'] += '\nException: %s' % e
return ret
return _no_change(existing_version, human_readable_resource_name)
def api_db_version_present(name=None, version="20"):
"""Ensures that specific api_db version is present"""
return _db_version_update('api_db', version, 'Nova API database version')
def db_version_present(name=None, version="334"):
"""Ensures that specific db version is present"""
return _db_version_update('db', version, 'Nova database version')
def online_data_migrations_present(name=None, api_db_version="20",
db_version="334"):
"""Runs online_data_migrations if databases are of specific versions"""
ret = {'name': 'online_data_migrations', 'changes': {}, 'result': False,
'comment': 'Current nova api_db version != {0} or nova db version '
'!= {1}.'.format(api_db_version, db_version)}
cur_api_db_version = __salt__['cmd.shell'](
'nova-manage api_db version 2>/dev/null')
cur_db_version = __salt__['cmd.shell'](
'nova-manage db version 2>/dev/null')
try:
cur_api_db_version = int(cur_api_db_version)
cur_db_version = int(cur_db_version)
api_db_version = int(api_db_version)
db_version = int(db_version)
except Exception as e:
LOG.error(ret['comment'])
        ret['comment'] = ('\nCannot convert existing or requested database '
                          'versions to integer, exception: %s' % e)
return ret
if cur_api_db_version == api_db_version and cur_db_version == db_version:
try:
__salt__['cmd.shell']('nova-manage db online_data_migrations')
ret['result'] = True
            ret['comment'] = ('nova-manage db online_data_migrations was '
                              'executed successfully')
ret['changes']['online_data_migrations'] = (
'online_data_migrations run on nova api_db version {0} and '
'nova db version {1}'.format(api_db_version, db_version))
except Exception as e:
ret['comment'] = (
'Failed to execute online_data_migrations on nova api_db '
'version %s and nova db version %s, exception: %s' % (
api_db_version, db_version, e))
return ret
@_error_handler
def service_enabled(name, cloud_name, binary="nova-compute"):
"""Ensures that the service is enabled on the host
    :param name: name of the host where the service is running
    :param binary: name of the service that has to be enabled
"""
changes = {}
services = _call_nova_salt_module('services_list', name)(
name, service=binary, cloud_name=cloud_name)
enabled_service = [s for s in services if s['binary'] == binary
and s['status'] == 'enabled' and s['host'] == name]
if len(enabled_service) > 0:
ret = _no_change(name, 'Compute services')
else:
changes = _call_nova_salt_module('services_update', name)(
name, binary, 'enable', cloud_name=cloud_name)
ret = _updated(name, 'Compute services', changes)
return ret
@_error_handler
def service_disabled(name, cloud_name, binary="nova-compute", disabled_reason=None):
"""Ensures that the service is disabled on the host
    :param name: name of the host where the service is running
    :param binary: name of the service that has to be disabled
"""
changes = {}
kwargs = {}
if disabled_reason is not None:
kwargs['disabled_reason'] = disabled_reason
services = _call_nova_salt_module('services_list', name)(
name, service=binary, cloud_name=cloud_name)
disabled_service = [s for s in services if s['binary'] == binary
and s['status'] == 'disabled' and s['host'] == name]
if len(disabled_service) > 0:
ret = _no_change(name, 'Compute services')
else:
changes = _call_nova_salt_module('services_update', name)(
name, binary, 'disable', cloud_name=cloud_name, **kwargs)
ret = _updated(name, 'Compute services', changes)
return ret
def _find_failed(name, resource):
return {
'name': name, 'changes': {}, 'result': False,
'comment': 'Failed to find {0}s with name {1}'.format(resource, name)}
def _created(name, resource, changes):
return {
'name': name, 'changes': changes, 'result': True,
'comment': '{0} {1} created'.format(resource, name)}
def _create_failed(name, resource):
return {
'name': name, 'changes': {}, 'result': False,
'comment': '{0} {1} creation failed'.format(resource, name)}
def _no_change(name, resource):
return {
'name': name, 'changes': {}, 'result': True,
'comment': '{0} {1} already is in the desired state'.format(
resource, name)}
def _updated(name, resource, changes):
return {
'name': name, 'changes': changes, 'result': True,
'comment': '{0} {1} was updated'.format(resource, name)}
def _update_failed(name, resource):
return {
'name': name, 'changes': {}, 'result': False,
'comment': '{0} {1} update failed'.format(resource, name)}
def _deleted(name, resource):
return {
'name': name, 'changes': {}, 'result': True,
'comment': '{0} {1} deleted'.format(resource, name)}
def _delete_failed(name, resource):
return {
'name': name, 'changes': {}, 'result': False,
'comment': '{0} {1} deletion failed'.format(resource, name)}
def _non_existent(name, resource):
return {
'name': name, 'changes': {}, 'result': True,
'comment': '{0} {1} does not exist'.format(resource, name)}
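# These helpers all emit the standard Salt state return dictionary ('name',
# 'changes', 'result', 'comment') that the states above build on. A minimal
# sketch of that shared contract; it runs outside of Salt because the helpers
# themselves never touch the __salt__ dunder.
def _demo_return_contract():
    ok = _updated('db', 'Nova database version',
                  {'db': 'db sync --version 334'})
    assert ok['result'] is True and ok['changes']
    same = _no_change('db', 'Nova database version')
    assert same['result'] is True and not same['changes']
    bad = _update_failed('db', 'Nova database version')
    assert bad['result'] is False and not bad['changes']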
| 39.155268
| 84
| 0.626729
| 2,623
| 21,183
| 4.762867
| 0.120092
| 0.052589
| 0.057232
| 0.043224
| 0.525895
| 0.435524
| 0.361963
| 0.340591
| 0.299608
| 0.278156
| 0
| 0.005187
| 0.262805
| 21,183
| 540
| 85
| 39.227778
| 0.794826
| 0.089789
| 0
| 0.367542
| 0
| 0
| 0.200345
| 0.027389
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078759
| false
| 0.00716
| 0.01432
| 0.023866
| 0.217184
| 0.004773
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27ed7774eba9356593529c7a047bb6eafaebca6b
| 6,891
|
py
|
Python
|
src/pyff/fetch.py
|
rhoerbe/pyFF
|
85933ed9cc9f720c9432d5e4c3114895cefd3579
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
src/pyff/fetch.py
|
rhoerbe/pyFF
|
85933ed9cc9f720c9432d5e4c3114895cefd3579
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
src/pyff/fetch.py
|
rhoerbe/pyFF
|
85933ed9cc9f720c9432d5e4c3114895cefd3579
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
"""
An abstraction layer for metadata fetchers. Supports both synchronous and asynchronous fetchers with cache.
"""
from .logs import get_log
import os
import requests
from .constants import config
from datetime import datetime
from collections import deque
import six
from concurrent import futures
import traceback
from .parse import parse_resource
from itertools import chain
from .exceptions import ResourceException
from .utils import url_get
from copy import deepcopy, copy
if six.PY2:
from UserDict import DictMixin as ResourceManagerBase
elif six.PY3:
    from collections.abc import MutableMapping as ResourceManagerBase
requests.packages.urllib3.disable_warnings()
log = get_log(__name__)
class ResourceManager(ResourceManagerBase):
def __init__(self):
self._resources = dict()
self.shutdown = False
def __setitem__(self, key, value):
if not isinstance(value, Resource):
raise ValueError("I can only store Resources")
self._resources[key] = value
def __getitem__(self, key):
return self._resources[key]
def __delitem__(self, key):
if key in self:
del self._resources[key]
def keys(self):
return list(self._resources.keys())
def values(self):
return list(self._resources.values())
def walk(self, url=None):
if url is not None:
return self[url].walk()
else:
i = [r.walk() for r in list(self.values())]
return chain(*i)
def add(self, r):
if not isinstance(r, Resource):
raise ValueError("I can only store Resources")
self[r.name] = r
def __contains__(self, item):
return item in self._resources
def __len__(self):
return len(list(self.values()))
def __iter__(self):
return self.walk()
def reload(self, url=None, fail_on_error=False, store=None):
        # type: (object, bool, object) -> None
if url is not None:
resources = deque([self[url]])
else:
resources = deque(list(self.values()))
with futures.ThreadPoolExecutor(max_workers=config.worker_pool_size) as executor:
while resources:
tasks = dict((executor.submit(r.fetch, store=store), r) for r in resources)
new_resources = deque()
for future in futures.as_completed(tasks):
r = tasks[future]
try:
res = future.result()
if res is not None:
for nr in res:
new_resources.append(nr)
except Exception as ex:
log.debug(traceback.format_exc())
log.error(ex)
if fail_on_error:
raise ex
resources = new_resources
class Resource(object):
def __init__(self, url, **kwargs):
self.url = url
self.opts = kwargs
self.t = None
self.type = "text/plain"
self.expire_time = None
self.last_seen = None
self._infos = deque(maxlen=config.info_buffer_size)
self.children = deque()
def _null(t):
return t
self.opts.setdefault('cleanup', [])
self.opts.setdefault('via', [])
self.opts.setdefault('fail_on_error', False)
self.opts.setdefault('as', None)
self.opts.setdefault('verify', None)
self.opts.setdefault('filter_invalid', True)
self.opts.setdefault('validate', True)
if "://" not in self.url:
if os.path.isfile(self.url):
self.url = "file://{}".format(os.path.abspath(self.url))
@property
def post(self):
return self.opts['via']
def add_via(self, callback):
self.opts['via'].append(callback)
@property
def cleanup(self):
return self.opts['cleanup']
def __str__(self):
return "Resource {} expires at {} using ".format(self.url, self.expire_time) + \
",".join(["{}={}".format(k, v) for k, v in list(self.opts.items())])
def walk(self):
yield self
for c in self.children:
for cn in c.walk():
yield cn
def is_expired(self):
now = datetime.now()
return self.expire_time is not None and self.expire_time < now
def is_valid(self):
return self.t is not None and not self.is_expired()
def add_info(self, info):
self._infos.append(info)
def add_child(self, url, **kwargs):
opts = deepcopy(self.opts)
del opts['as']
opts.update(kwargs)
r = Resource(url, **opts)
self.children.append(r)
return r
@property
def name(self):
        # opts always contains an 'as' key (setdefault in __init__), so test
        # its value rather than its presence
        if self.opts.get('as') is not None:
            return self.opts['as']
        else:
            return self.url
@property
def info(self):
if self._infos is None or not self._infos:
return dict()
else:
return self._infos[-1]
def fetch(self, store=None):
info = dict()
info['Resource'] = self.url
self.add_info(info)
data = None
if os.path.isdir(self.url):
data = self.url
info['Directory'] = self.url
elif '://' in self.url:
r = url_get(self.url)
info['HTTP Response Headers'] = r.headers
log.debug("got status_code={:d}, encoding={} from_cache={} from {}".
format(r.status_code, r.encoding, getattr(r, "from_cache", False), self.url))
info['Status Code'] = str(r.status_code)
info['Reason'] = r.reason
if r.ok:
data = r.text
else:
raise ResourceException("Got status={:d} while fetching {}".format(r.status_code, self.url))
else:
raise ResourceException("Unknown resource type {}".format(self.url))
parse_info = parse_resource(self, data)
if parse_info is not None and isinstance(parse_info, dict):
info.update(parse_info)
if self.t is not None:
self.last_seen = datetime.now()
if self.post and isinstance(self.post, list):
for cb in self.post:
if self.t is not None:
self.t = cb(self.t, **self.opts)
if self.is_expired():
info['Expired'] = True
raise ResourceException("Resource at {} expired on {}".format(self.url, self.expire_time))
else:
info['Expired'] = False
        # 'Validation Errors' may be absent when the parser reported none
        for (eid, error) in list(info.get('Validation Errors', {}).items()):
log.error(error)
if store is not None:
store.update(self.t, tid=self.name)
return self.children
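# A minimal usage sketch for the classes above; the metadata URL is a
# placeholder, and reload() will issue real HTTP requests through url_get.
def _demo_fetch():
    rm = ResourceManager()
    rm.add(Resource("https://example.org/metadata.xml"))
    rm.reload(fail_on_error=False)  # fetch everything via the thread pool
    for r in rm.walk():             # walk() yields each resource and its children
        log.info("{} {}".format(r.name, "valid" if r.is_valid() else "invalid"))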
| 29.702586
| 108
| 0.565085
| 831
| 6,891
| 4.563177
| 0.231047
| 0.042458
| 0.021361
| 0.009494
| 0.078059
| 0.060127
| 0.036392
| 0.025844
| 0.025844
| 0
| 0
| 0.000863
| 0.327674
| 6,891
| 231
| 109
| 29.831169
| 0.817613
| 0.020461
| 0
| 0.094972
| 0
| 0
| 0.06276
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.145251
| false
| 0
| 0.089385
| 0.061453
| 0.357542
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27f693df0e7ea237223f8c2bc9de9a57a4f98dac
| 838
|
py
|
Python
|
tests/test_report.py
|
whalebot-helmsman/pykt-64
|
ee5e0413cd850876d3abc438480fffea4f7b7517
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_report.py
|
whalebot-helmsman/pykt-64
|
ee5e0413cd850876d3abc438480fffea4f7b7517
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_report.py
|
whalebot-helmsman/pykt-64
|
ee5e0413cd850876d3abc438480fffea4f7b7517
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from setup_teardown import start_db, stop_db
from nose.tools import *
from pykt import KyotoTycoon, KTException
@raises(IOError)
def test_err_report():
db = KyotoTycoon()
db.report()
@with_setup(setup=start_db,teardown=stop_db)
def test_report():
db = KyotoTycoon()
db = db.open()
ret = db.report()
ok_(ret)
ok_(isinstance(ret, dict))
db.close()
@with_setup(setup=start_db,teardown=stop_db)
def test_report_with_db():
db = KyotoTycoon("test")
db = db.open()
ret = db.report()
ok_(ret)
ok_(isinstance(ret, dict))
db.close()
@with_setup(setup=start_db,teardown=stop_db)
def test_report_loop():
db = KyotoTycoon()
db = db.open()
for i in xrange(100):
ret = db.report()
ok_(ret)
ok_(isinstance(ret, dict))
db.close()
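# @with_setup is nose-specific; a pytest equivalent of the same per-test
# database lifecycle, sketched with the start_db/stop_db imported above:
import pytest

@pytest.fixture
def kt_db():
    start_db()
    yield
    stop_db()

def test_report_fixture(kt_db):
    db = KyotoTycoon().open()
    assert isinstance(db.report(), dict)
    db.close()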
| 21.487179
| 44
| 0.643198
| 120
| 838
| 4.275
| 0.283333
| 0.054581
| 0.087719
| 0.111111
| 0.623782
| 0.557505
| 0.557505
| 0.557505
| 0.557505
| 0.557505
| 0
| 0.00607
| 0.213604
| 838
| 38
| 45
| 22.052632
| 0.772382
| 0.02506
| 0
| 0.65625
| 0
| 0
| 0.004914
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.09375
| 0
| 0.21875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27f931503927cf87b2047c06d44bfc6dbb23b7c2
| 5,416
|
py
|
Python
|
manga_db/extractor/toonily.py
|
nilfoer/mangadb
|
860d7de310002735631ea26810b4df5b6bc08d7b
|
[
"MIT"
] | 3
|
2021-01-14T16:22:41.000Z
|
2022-02-21T03:31:22.000Z
|
manga_db/extractor/toonily.py
|
nilfoer/mangadb
|
860d7de310002735631ea26810b4df5b6bc08d7b
|
[
"MIT"
] | 13
|
2021-01-14T10:34:19.000Z
|
2021-05-20T08:47:54.000Z
|
manga_db/extractor/toonily.py
|
nilfoer/mangadb
|
860d7de310002735631ea26810b4df5b6bc08d7b
|
[
"MIT"
] | 1
|
2022-02-24T03:10:04.000Z
|
2022-02-24T03:10:04.000Z
|
import re
import datetime
import bs4
from typing import Dict, Tuple, Optional, TYPE_CHECKING, ClassVar, Pattern, cast, Match, Any
from .base import BaseMangaExtractor, MangaExtractorData
from ..constants import STATUS_IDS, CENSOR_IDS
if TYPE_CHECKING:
from ..ext_info import ExternalInfo
class ToonilyExtractor(BaseMangaExtractor):
site_name: ClassVar[str] = "Toonily"
site_id: ClassVar[int] = 5
URL_PATTERN_RE: ClassVar[Pattern] = re.compile(
r"(?:https?://)?toonily\.com/webtoon/([-A-Za-z0-9]+)")
BASE_URL = "https://toonily.com"
MANGA_URL = "https://toonily.com/webtoon/{id_onpage}"
def __init__(self, url: str):
super().__init__(url)
self.id_onpage: str = self.book_id_from_url(url)
self.cover_url: Optional[str] = None
self.export_data: Optional[MangaExtractorData] = None
@classmethod
def match(cls, url: str) -> bool:
"""
Returns True on URLs the extractor is compatible with
"""
return bool(cls.URL_PATTERN_RE.match(url))
def extract(self) -> Optional[MangaExtractorData]:
if self.export_data is None:
html = self.get_html(self.url)
if html is None:
return None
data_dict = self._extract_info(html)
self.export_data = MangaExtractorData(
pages=0,
# seem to only be in english
language='English',
collection=[],
groups=[],
parody=[],
character=[],
url=self.url,
id_onpage=self.id_onpage,
imported_from=ToonilyExtractor.site_id,
uploader=None,
upload_date=datetime.date.min,
**data_dict)
return self.export_data
def _extract_info(self, html: str) -> Dict[str, Any]:
res: Dict[str, Any] = {}
soup = bs4.BeautifulSoup(html, "html.parser")
cover_url = soup.select_one("div.summary_image img")
self.cover_url = cover_url.attrs['data-src']
res['title_eng'] = soup.select_one("div.post-title h1").text.strip()
book_data = soup.select_one("div.summary_content")
label_to_idx = {x.get_text().strip(): i for i, x in enumerate(book_data.select("div.summary-heading"))}
content = book_data.select("div.summary-content")
# assumes order stays the same
rating_idx = label_to_idx["Rating"]
res['rating'] = float(content[rating_idx].select_one("#averagerate").text.strip())
res['ratings'] = int(content[rating_idx].select_one("#countrate").text.strip())
# sep is ','
alt_title_idx = label_to_idx["Alt Name(s)"]
alt_titles = [s.strip() for s in content[alt_title_idx].text.split(",")]
if alt_titles[0] == 'N/A':
res['title_foreign'] = None
else:
# @Incomplete take first non-latin title; alnum() supports unicode and thus returns
# true for """"alphanumeric"""" japanese symbols !?!?
non_latin = [s for s in alt_titles if ord(s[0]) > 128]
if non_latin:
res['title_foreign'] = non_latin[0]
else:
res['title_foreign'] = alt_titles[0]
authors = [s.text.strip() for s in content[label_to_idx["Author(s)"]].select("a")]
artists = [s.text.strip() for s in content[label_to_idx["Artist(s)"]].select("a")]
res['artist'] = [n for n in authors if n not in artists] + artists
tags = [a.text.strip() for a in book_data.select('div.genres-content a')]
res['tag'] = tags
res['nsfw'] = 'Mature' in tags
uncensored = 'Uncensored' in tags
res['censor_id'] = (
CENSOR_IDS['Uncensored'] if uncensored else CENSOR_IDS['Censored'])
# type
res['category'] = [content[label_to_idx["Type"]].text.strip()]
# OnGoing or Completed
status_str = content[label_to_idx["Status"]].text.strip().capitalize()
res['status_id'] = STATUS_IDS['Hiatus'] if status_str == 'On Hiatus' else STATUS_IDS[status_str]
# e.g.: 128 Users bookmarked this
# e.g.: 128K Users bookmarked this
favorites_str = book_data.select_one("div.add-bookmark span").text.split()[0].strip().lower()
if 'k' in favorites_str:
res['favorites'] = int(float(favorites_str[:-1]) * 1000)
else:
res['favorites'] = int(favorites_str)
summary = soup.select_one("div.description-summary div.summary__content").text.strip()
# @CleanUp
res['note'] = f"{'Summary: ' if not uncensored else ''}{summary}"
return res
def get_cover(self) -> Optional[str]:
if self.export_data is None:
self.extract()
return self.cover_url
@classmethod
def book_id_from_url(cls, url: str) -> str:
# guaranteed match since we only get passed matching urls
match = cast(Match, cls.URL_PATTERN_RE.match(url))
return match.group(1)
@classmethod
def url_from_ext_info(cls, ext_info: 'ExternalInfo') -> str:
return cls.MANGA_URL.format(id_onpage=ext_info.id_onpage)
@classmethod
def read_url_from_ext_info(cls, ext_info: 'ExternalInfo') -> str:
# @CleanUp just uses first chapter
return f"{cls.url_from_ext_info(ext_info)}/chapter-1"
| 37.351724
| 111
| 0.605613
| 694
| 5,416
| 4.534582
| 0.282421
| 0.028599
| 0.022243
| 0.02701
| 0.125834
| 0.074357
| 0.045758
| 0.045758
| 0.045758
| 0.020972
| 0
| 0.007035
| 0.26514
| 5,416
| 144
| 112
| 37.611111
| 0.783668
| 0.081979
| 0
| 0.090909
| 0
| 0
| 0.137938
| 0.023496
| 0
| 0
| 0
| 0
| 0
| 1
| 0.080808
| false
| 0
| 0.080808
| 0.020202
| 0.30303
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27fb6ab9dc39790c3dcbcf43be391bd869cc5d49
| 10,965
|
py
|
Python
|
blindbackup/providers/blindfs.py
|
nagylzs/blindbackup
|
fa0c7a6ef42bb5aefec99eff69a3227c8695fdd9
|
[
"Apache-2.0"
] | 1
|
2020-01-26T05:46:14.000Z
|
2020-01-26T05:46:14.000Z
|
blindbackup/providers/blindfs.py
|
nagylzs/blindbackup
|
fa0c7a6ef42bb5aefec99eff69a3227c8695fdd9
|
[
"Apache-2.0"
] | null | null | null |
blindbackup/providers/blindfs.py
|
nagylzs/blindbackup
|
fa0c7a6ef42bb5aefec99eff69a3227c8695fdd9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import os.path
import threading
from .. import cryptfile
from ..util import *
from ..client import create_client
from ..syncdir import FsProvider, FsListener
class BlindFsListener(threading.Thread, FsListener):
def is_stopping(self):
return self.stop_requested.isSet()
def __init__(self, client, sender, relpath, onchange):
self.client = client
self.sender = sender
self.relpath = relpath
self.onchange = onchange
self.stop_requested = threading.Event()
self.stopped = threading.Event()
self.uid = None
threading.Thread.__init__(self)
FsListener.__init__(self) # This will create a dummy uid but we will overwrite it later in run().
def request_stop(self):
"""Request a stop on the listening thread."""
self.stop_requested.set()
def is_stopped(self):
"""Tells if the listening thread has stopped."""
return self.stopped.is_set()
def run(self):
self.stopped.clear()
self.stop_requested.clear()
self.uid = self.client("listenchanges", root=self.relpath)
while not self.stop_requested.is_set():
changes = self.client("pollchanges", uid=self.uid)
if changes:
for eventPath, eventType, eventUid in changes:
self.onchange(self.sender, eventPath, eventType, eventUid)
self.stopped.set()
def get_uid(self):
"""Get unique identifier for the listener.
This can be used to send notification messages that are not to be sent back to this listener."""
return self.uid
class BlindFsProvider(FsProvider):
"""FsProvider that is provided by a backup server.
@param client: A Client instance
@param root: The root parameter must be a list of path elements.
It represents the relative path on the server that will be
    synchronized.
"""
@classmethod
def get_name(cls):
return "blindfs"
def __init__(self, path: str, can_create: bool, settings: dict, client=None, root=None):
if root is None:
# Normal construction
if client is None:
self.client = create_client(settings)
else:
self.client = client
if path:
root = path.split("/")
else:
root = []
if root and not root[0]:
raise Exception("BlindFsProvider: root cannot be [''], it must be []. Hint: use :// instead of :///")
            # use self.client: the local `client` argument may be None here
            if not self.client.directory_exists(path):
                if can_create:
                    self.client("mkdir", relpath=path)
# else:
# parser.error("Remote path does not exist: %s" % loc)
else:
# cloned
assert client
assert path is None
assert root
self.client = client
self.root = root
self.settings = settings
self._is_case_sensitive = None
self.tmp_dir = settings.get("tmp_dir", None)
super().__init__()
def clone(self):
res = BlindFsProvider(None, False, self.settings, self.client, self.root)
res.uid = self.get_uid()
return res
def drill(self, relpath):
"""Change root of the FsProvider to a new subdir.
@param relpath: a list of path items
Should only use it on a clone."""
assert (isinstance(relpath, list))
self.root = self.root + relpath
def get_event_relpath(self, event_path):
"""Convert the full path of an event into a path relative to this provider.
@return: a list of path items"""
myroot = "/".join(self.root)
assert (event_path.startswith(myroot))
return event_path[len(myroot) + 1:].split("/")
def _remotepath(self, relpath):
return self.root + relpath
def iscasesensitive(self):
if self._is_case_sensitive is None:
self._is_case_sensitive = self.client("iscasesensitive")
return self._is_case_sensitive
def listdir(self, relpath):
# print("listdir",relpath,self._remotepath(relpath))
return self.client("listdir", relpath=self._remotepath(relpath))
def getinfo(self, items, encrypted):
root = "/".join(self.root)
# map object cannot be serialized, need to convert items to a list.
return self.client(
"getinfo", root=root, items=list(items), encrypted=encrypted)
def sendchanges(self, delet, dcopy, fcopy):
# Delete unwanted first
for dpath in delet:
yield (self.DELETE, "/".join(dpath))
# Then create new directories
infos = self.getinfo(dcopy, bool(self.decryptionkey))
for idx, dpath in enumerate(dcopy):
# use getinfo here, but need to have some buffering?
atime, mtime, fsize = infos[idx]
yield (
self.DIRECTORY,
"/".join(dpath),
atime, mtime)
subdnames, subfnames = self.listdir(dpath)
for change in self.sendchanges(
[],
self._prefixed(dpath, subdnames),
self._prefixed(dpath, subfnames)):
yield change
# Finally send file data
# TODO: make this much more efficient. Do not want to create one request per file, especially if files are small.
infos = self.getinfo(fcopy, bool(self.decryptionkey))
for idx, relpath in enumerate(fcopy):
atime, mtime, fsize = infos[idx]
file_data = self.client.recv_backup(
"/".join(self._remotepath(relpath)))
localpath = create_tmp_file_for(self.tmp_dir)
fout = open(localpath, "wb+")
try:
fout.write(file_data)
fout.close()
yield (
self.FILE, "/".join(relpath),
atime, mtime, fsize, localpath, self.RECEIVER)
finally:
if os.path.isfile(localpath):
os.unlink(localpath)
def receivechanges(self, sender):
# Unfortunately, we have to make our own schedule here.
# Small files should be sent at once to minimize the number
# of requests on the server.
# TODO: store changes in a tmp file because there can be many.
root = "/".join(self.root)
delet, dcopy, fcopy = [], [], []
files, encfiles = [], []
ownedfiles = []
cnt, totalsize = 0, 0
try:
while True:
change = next(sender)
op, *args = change
if op == self.DELETE:
# (self.DELETE, converted_path)
change = (self.DELETE, "/".join(
self.recrypt_path_items(change[1].split("/")))
)
delet.append(change)
cnt += 1
elif op == self.DIRECTORY:
# (self.DIRECTORY,converted_path,atime,mtime)
change = list(change)
change[1] = "/".join(self.recrypt_path_items(change[1].split("/")))
dcopy.append(tuple(change))
cnt += 1
elif op == self.FILE:
# (self.FILE,converted_path,atime,mtime,fsize,fpath,owner)
selpath, atime, mtime, fsize, fpath, owner = args
selpath = "/".join(
self.recrypt_path_items(selpath.split("/")))
if owner == self.RECEIVER:
ownedfiles.append(fpath)
# Hide original full path from the server.
# The owner parameter is meaningless on the server side
# (server cannot own a file on the client side) so it is
                    # omitted.
change = (self.FILE, selpath, atime, mtime, fsize, "")
fcopy.append(change)
cnt += 1
                    totalsize += fsize  # args[3] is the file size
if self.encryptionkey and self.decryptionkey:
encpath = create_tmp_file_for(fpath)
cryptfile.recrypt_file(
cryptfile.hashkey(self.decryptionkey),
cryptfile.hashkey(self.encryptionkey),
fpath, encpath)
encfiles.append(encpath)
files.append([selpath, encpath])
elif self.encryptionkey:
encpath = create_tmp_file_for(fpath)
cryptfile.encrypt_file(
self.encryptionkey, fpath, encpath)
encfiles.append(encpath)
files.append([selpath, encpath])
elif self.decryptionkey:
encpath = create_tmp_file_for(fpath)
cryptfile.decrypt_file(
self.decryptionkey, fpath, encpath)
encfiles.append(encpath)
files.append([selpath, encpath])
else:
files.append([selpath, fpath])
else:
raise Exception("Protocol error")
if cnt > 1000 or totalsize > 1024 * 1024:
self.client(
"receivechanges",
root=root, uid=self.get_uid(),
delet=delet, dcopy=dcopy, fcopy=fcopy,
files=files
)
for encpath in encfiles:
os.unlink(encpath)
encfiles.clear()
for ownedpath in ownedfiles:
os.unlink(ownedpath)
ownedfiles.clear()
delet.clear()
dcopy.clear()
fcopy.clear()
files.clear()
except StopIteration:
pass
if cnt:
self.client(
"receivechanges",
root=root, uid=self.get_uid(),
delet=delet, dcopy=dcopy, fcopy=fcopy,
files=files
)
for encpath in encfiles:
os.unlink(encpath)
encfiles.clear()
for ownedpath in ownedfiles:
os.unlink(ownedpath)
ownedfiles.clear()
def listenchanges(self, onchange) -> FsListener:
"""Listen for changes in the filesystem."""
        # Note: listenchanges always uses relative paths on the server.
# So instead of self.root, we pass "" here!
listener = BlindFsListener(self.client, self, "", onchange)
listener.start()
return listener
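# sendchanges() is a generator and receivechanges() consumes one, so a sync
# is a pump between two providers. A sketch; `local_provider` stands in for a
# hypothetical local FsProvider and `settings` for a loaded config dict.
def _demo_sync(local_provider, settings):
    remote = BlindFsProvider("backups/host1", can_create=True,
                             settings=settings)
    changes = local_provider.sendchanges([], [], [["etc", "hosts"]])
    remote.receivechanges(changes)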
| 38.882979
| 121
| 0.529503
| 1,124
| 10,965
| 5.08274
| 0.241993
| 0.026256
| 0.015754
| 0.013303
| 0.19797
| 0.149659
| 0.142657
| 0.136181
| 0.123578
| 0.095221
| 0
| 0.003533
| 0.380483
| 10,965
| 281
| 122
| 39.021352
| 0.83748
| 0.167077
| 0
| 0.238095
| 0
| 0
| 0.023728
| 0
| 0
| 0
| 0
| 0.003559
| 0.02381
| 1
| 0.085714
| false
| 0.004762
| 0.028571
| 0.019048
| 0.17619
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27fd6c6f828a7e94f81f249d959e7e48fffdae85
| 3,587
|
py
|
Python
|
examples/computer_vision/harris.py
|
parag-hub/arrayfire-python
|
65040c10833506f212f13e5bcc0e49cb20645e6e
|
[
"BSD-3-Clause"
] | 420
|
2015-07-30T00:02:21.000Z
|
2022-03-28T16:52:28.000Z
|
examples/computer_vision/harris.py
|
parag-hub/arrayfire-python
|
65040c10833506f212f13e5bcc0e49cb20645e6e
|
[
"BSD-3-Clause"
] | 198
|
2015-07-29T17:17:36.000Z
|
2022-01-20T18:31:28.000Z
|
examples/computer_vision/harris.py
|
parag-hub/arrayfire-python
|
65040c10833506f212f13e5bcc0e49cb20645e6e
|
[
"BSD-3-Clause"
] | 75
|
2015-07-29T15:17:54.000Z
|
2022-02-24T06:50:23.000Z
|
#!/usr/bin/env python
#######################################################
# Copyright (c) 2018, ArrayFire
# All rights reserved.
#
# This file is distributed under 3-clause BSD license.
# The complete license agreement can be obtained at:
# http://arrayfire.com/licenses/BSD-3-Clause
########################################################
from time import time
import arrayfire as af
import os
import sys
def draw_corners(img, x, y, draw_len):
    # Draw horizontal line of (draw_len * 2 + 1) pixels centered on the corner
    # Set only the green (second) channel to 1
xmin = max(0, x - draw_len)
xmax = min(img.dims()[1], x + draw_len)
img[y, xmin : xmax, 0] = 0.0
img[y, xmin : xmax, 1] = 1.0
img[y, xmin : xmax, 2] = 0.0
# Draw vertical line of (draw_len * 2 + 1) pixels centered on the corner
    # Set only the green (second) channel to 1
ymin = max(0, y - draw_len)
ymax = min(img.dims()[0], y + draw_len)
img[ymin : ymax, x, 0] = 0.0
img[ymin : ymax, x, 1] = 1.0
img[ymin : ymax, x, 2] = 0.0
return img
def harris_demo(console):
root_path = os.path.dirname(os.path.abspath(__file__))
file_path = root_path
if console:
file_path += "/../../assets/examples/images/square.png"
else:
file_path += "/../../assets/examples/images/man.jpg"
    img_color = af.load_image(file_path, True)
img = af.color_space(img_color, af.CSPACE.GRAY, af.CSPACE.RGB)
img_color /= 255.0
ix, iy = af.gradient(img)
ixx = ix * ix
ixy = ix * iy
iyy = iy * iy
# Compute a Gaussian kernel with standard deviation of 1.0 and length of 5 pixels
# These values can be changed to use a smaller or larger window
gauss_filt = af.gaussian_kernel(5, 5, 1.0, 1.0)
# Filter second order derivatives
ixx = af.convolve(ixx, gauss_filt)
ixy = af.convolve(ixy, gauss_filt)
iyy = af.convolve(iyy, gauss_filt)
# Calculate trace
itr = ixx + iyy
# Calculate determinant
idet = ixx * iyy - ixy * ixy
# Calculate Harris response
response = idet - 0.04 * (itr * itr)
# Get maximum response for each 3x3 neighborhood
mask = af.constant(1, 3, 3)
max_resp = af.dilate(response, mask)
# Discard responses that are not greater than threshold
corners = response > 1e5
corners = corners * response
# Discard responses that are not equal to maximum neighborhood response,
# scale them to original value
corners = (corners == max_resp) * corners
# Copy device array to python list on host
corners_list = corners.to_list()
draw_len = 3
good_corners = 0
for x in range(img_color.dims()[1]):
for y in range(img_color.dims()[0]):
if corners_list[x][y] > 1e5:
img_color = draw_corners(img_color, x, y, draw_len)
good_corners += 1
print("Corners found: {}".format(good_corners))
if not console:
# Previews color image with green crosshairs
wnd = af.Window(512, 512, "Harris Feature Detector")
while not wnd.close():
wnd.image(img_color)
else:
idx = af.where(corners)
corners_x = idx / float(corners.dims()[0])
corners_y = idx % float(corners.dims()[0])
print(corners_x)
print(corners_y)
if __name__ == "__main__":
if (len(sys.argv) > 1):
af.set_device(int(sys.argv[1]))
console = (sys.argv[2] == '-') if len(sys.argv) > 2 else False
af.info()
print("** ArrayFire Harris Corner Detector Demo **\n")
harris_demo(console)
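# The response computed in harris_demo() is the standard Harris measure
# R = det(M) - k * trace(M)**2 with k = 0.04, where M is the smoothed
# structure tensor [[Ixx, Ixy], [Ixy, Iyy]]. The same formula on scalars,
# for reference:
def harris_response(ixx, ixy, iyy, k=0.04):
    det = ixx * iyy - ixy * ixy  # determinant of the structure tensor
    tr = ixx + iyy               # trace of the structure tensor
    return det - k * tr * tr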
| 28.927419
| 85
| 0.606078
| 525
| 3,587
| 4.028571
| 0.352381
| 0.029787
| 0.01513
| 0.017021
| 0.194799
| 0.08227
| 0.08227
| 0.08227
| 0.08227
| 0.08227
| 0
| 0.028253
| 0.25007
| 3,587
| 123
| 86
| 29.162602
| 0.757993
| 0.272372
| 0
| 0.029412
| 0
| 0
| 0.069091
| 0.031111
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.058824
| 0
| 0.102941
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7e0022ad51ef52a75fd8fa97ecb5ea7bdfaf493d
| 4,376
|
py
|
Python
|
tests/generate_data.py
|
ngounou92/py-glm
|
83081444e2cbba4d94f9e6b85b6be23e0ff600b8
|
[
"BSD-3-Clause"
] | 127
|
2017-09-01T13:54:43.000Z
|
2022-03-12T11:43:32.000Z
|
tests/generate_data.py
|
cscherrer/py-glm
|
d719d29fb5cc71c2cb5e728db36c6230a69292d8
|
[
"BSD-3-Clause"
] | 8
|
2017-09-01T14:00:55.000Z
|
2020-11-09T14:42:50.000Z
|
tests/generate_data.py
|
cscherrer/py-glm
|
d719d29fb5cc71c2cb5e728db36c6230a69292d8
|
[
"BSD-3-Clause"
] | 35
|
2017-09-01T19:23:04.000Z
|
2022-03-22T13:45:10.000Z
|
import numpy as np
from scipy.linalg import sqrtm
from sklearn.preprocessing import StandardScaler
def make_linear_regression(n_samples=10000,
n_uncorr_features=10, n_corr_features=10,
n_drop_features=4,
include_intercept=True,
coef_range=(-1, 1),
resid_sd=0.25):
X = make_correlated_data(
n_samples, n_uncorr_features, n_corr_features, include_intercept)
parameters = make_regression_coeffs(
X, n_drop_features=n_drop_features, coef_range=coef_range)
y = make_linear_regression_y(X, parameters, resid_sd)
return (X, y, parameters)
def make_logistic_regression(n_samples=10000,
n_uncorr_features=10, n_corr_features=10,
n_drop_features=4,
include_intercept=True,
coef_range=(-1, 1)):
X = make_correlated_data(
n_samples, n_uncorr_features, n_corr_features, include_intercept)
parameters = make_regression_coeffs(
X, n_drop_features=n_drop_features, coef_range=coef_range)
y = make_logistic_regression_y(X, parameters)
return (X, y, parameters)
def make_poisson_regression(n_samples=10000,
n_uncorr_features=10, n_corr_features=10,
n_drop_features=4,
include_intercept=True,
coef_range=(-1, 1)):
X = make_correlated_data(
n_samples, n_uncorr_features, n_corr_features, include_intercept)
parameters = make_regression_coeffs(
X, n_drop_features=n_drop_features, coef_range=coef_range)
y = make_poisson_regression_y(X, parameters)
return (X, y, parameters)
def make_gamma_regression(n_samples=10000,
n_uncorr_features=10, n_corr_features=10,
n_drop_features=4,
include_intercept=True,
coef_range=(-1, 1)):
X = make_correlated_data(
n_samples, n_uncorr_features, n_corr_features, include_intercept)
parameters = make_regression_coeffs(
X, n_drop_features=n_drop_features, coef_range=coef_range)
y = make_gamma_regression_y(X, parameters)
return (X, y, parameters)
def make_uncorrelated_data(n_samples=10000, n_features=25):
X = np.random.normal(size=(n_samples, n_features))
return X
def make_correlated_data(n_samples=10000,
n_uncorr_features=10, n_corr_features=15,
include_intercept=True):
X_uncorr = make_uncorrelated_data(n_samples, n_uncorr_features)
X_corr_base = make_uncorrelated_data(n_samples, n_corr_features)
cov_matrix = make_covariance_matrix(n_corr_features)
X_corr = StandardScaler().fit_transform(np.dot(X_corr_base, cov_matrix))
X = np.column_stack((X_uncorr, X_corr))
if include_intercept:
intercept = np.ones(n_samples).reshape(-1, 1)
return np.column_stack((intercept, X))
return X
def make_covariance_matrix(n_features=15):
A = np.random.normal(size=(n_features, n_features))
A_sq = np.dot(A.T, A)
return sqrtm(A_sq)
def make_regression_coeffs(X, n_drop_features=None, coef_range=(-1, 1)):
n_features = X.shape[1]
parameters = np.random.uniform(coef_range[0], coef_range[1], size=n_features)
if n_drop_features is not None:
drop_idxs = np.random.choice(
list(range(len(parameters))), size=n_drop_features, replace=False)
parameters[drop_idxs] = 0.0
return parameters
def make_linear_regression_y(X, parameters, resid_sd=0.25):
y_systematic = np.dot(X, parameters)
y = y_systematic + np.random.normal(scale=resid_sd, size=X.shape[0])
return y
def make_logistic_regression_y(X, parameters):
y_systematic = np.dot(X, parameters)
p = 1 / (1 + np.exp(-y_systematic))
return np.random.binomial(1, p=p, size=X.shape[0])
def make_poisson_regression_y(X, parameters):
y_systematic = np.dot(X, parameters)
mu = np.exp(y_systematic)
return np.random.poisson(lam=mu, size=X.shape[0])
def make_gamma_regression_y(X, parameters):
y_systematic = np.dot(X, parameters)
mu = np.exp(y_systematic)
return np.random.exponential(scale=mu, size=X.shape[0])
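# A quick sanity-check sketch using scikit-learn (already imported above for
# StandardScaler); fit_intercept=False because make_correlated_data prepends
# an explicit column of ones when include_intercept=True.
def _demo_recovery():
    from sklearn.linear_model import LinearRegression
    X, y, true_params = make_linear_regression(n_samples=5000)
    model = LinearRegression(fit_intercept=False).fit(X, y)
    # With resid_sd=0.25 the fitted coefficients should land close to
    # true_params (up to sampling noise).
    return model.coef_, true_params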
| 41.67619
| 81
| 0.658364
| 597
| 4,376
| 4.475712
| 0.139028
| 0.028069
| 0.072979
| 0.065868
| 0.689371
| 0.651572
| 0.575225
| 0.551272
| 0.523578
| 0.523578
| 0
| 0.026837
| 0.250686
| 4,376
| 104
| 82
| 42.076923
| 0.788045
| 0
| 0
| 0.472527
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.131868
| false
| 0
| 0.032967
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7e0285965f79d7e3cf86a7275a5d19452f38b750
| 1,735
|
py
|
Python
|
scripts/http-server.py
|
jrbenito/SonoffDIY-tasmotizer
|
1fe9eb9b3b5630102feaf941bd02173d916e81a5
|
[
"MIT"
] | null | null | null |
scripts/http-server.py
|
jrbenito/SonoffDIY-tasmotizer
|
1fe9eb9b3b5630102feaf941bd02173d916e81a5
|
[
"MIT"
] | 3
|
2020-03-30T14:07:54.000Z
|
2020-03-30T22:59:29.000Z
|
scripts/http-server.py
|
jrbenito/SonoffDIY-tasmotizer
|
1fe9eb9b3b5630102feaf941bd02173d916e81a5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# encoding: utf-8
"""
fake-registration-server.py
Created by nano on 2018-11-22.
Copyright (c) 2018 VTRUST. All rights reserved.
"""
import tornado.web
import tornado.locks
from tornado.options import define, options, parse_command_line
define("port", default=80, help="run on the given port", type=int)
define("addr", default="192.168.254.1", help="run on the given ip", type=str)
define("debug", default=True, help="run in debug mode")
import os
import signal
def exit_cleanly(signal, frame):
print("Received SIGINT, exiting...")
exit(0)
signal.signal(signal.SIGINT, exit_cleanly)
from base64 import b64encode
import hashlib
import hmac
import binascii
from time import time
timestamp = lambda : int(time())
class FilesHandler(tornado.web.StaticFileHandler):
def parse_url_path(self, url_path):
if not url_path or url_path.endswith('/'):
url_path = url_path + str('index.html')
return url_path
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("You are connected to vtrust-flash")
def main():
parse_command_line()
app = tornado.web.Application(
[
(r"/", MainHandler),
('/files/(.*)', FilesHandler, {'path': str('../files/')}),
(r".*", tornado.web.RedirectHandler, {"url": "http://" + options.addr + "/", "permanent": False}),
],
debug=options.debug,
)
try:
app.listen(options.port, options.addr)
print("Listening on " + options.addr + ":" + str(options.port))
tornado.ioloop.IOLoop.current().start()
except OSError as err:
print("Could not start server on port " + str(options.port))
if err.errno == 98: # EADDRINUSE
print("Close the process on this port and try again")
else:
print(err)
if __name__ == "__main__":
main()
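# Sketch only: exercise the running server from another host on the AP's
# network, assuming the defaults above (addr 192.168.254.1, port 80) are
# reachable.
def _demo_client():
    import requests
    resp = requests.get("http://192.168.254.1/")
    assert "vtrust-flash" in resp.text  # MainHandler's fixed response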
| 25.144928
| 101
| 0.702594
| 246
| 1,735
| 4.865854
| 0.50813
| 0.040936
| 0.026734
| 0.02005
| 0.028404
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022207
| 0.143516
| 1,735
| 68
| 102
| 25.514706
| 0.783311
| 0.089337
| 0
| 0
| 0
| 0
| 0.189809
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.208333
| 0
| 0.354167
| 0.104167
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7e0480f047709048b68affbe1e229fbea8aaa94b
| 4,122
|
py
|
Python
|
Set_ADT/linearset.py
|
jaeheeLee17/DS_and_Algorithms_summary
|
917500dd768eae8cfbb02cf2838d494cb720f1c0
|
[
"MIT"
] | null | null | null |
Set_ADT/linearset.py
|
jaeheeLee17/DS_and_Algorithms_summary
|
917500dd768eae8cfbb02cf2838d494cb720f1c0
|
[
"MIT"
] | null | null | null |
Set_ADT/linearset.py
|
jaeheeLee17/DS_and_Algorithms_summary
|
917500dd768eae8cfbb02cf2838d494cb720f1c0
|
[
"MIT"
] | null | null | null |
# Implementation of the Set ADT container using a Python list.
class Set:
# Creates an empty set instance.
def __init__(self):
self._theElements = list()
# Returns the number of items in the set
def __len__(self):
return len(self._theElements)
# Determines if an element is in the set.
def __contains__(self, element):
return element in self._theElements
# Adds a new unique element to the set.
def add(self, element):
if element not in self:
self._theElements.append(element)
# Removes an element from the set.
def remove(self, element):
assert element in self, "The element must be in the set."
        self._theElements.remove(element)
# Determines if two sets are equal
def __eq__(self, setB):
if len(self) != len(setB):
return False
else:
# return self.isSubsetOf(setB)
for i in range(len(self)):
if self._theElements[i] != setB._theElements[i]:
return False
return True
# Determines if this set is a subset of setB
def isSubsetOf(self, setB):
for element in self:
if element not in setB:
return False
return True
# Creates a new set from the union of this set and setB
def union(self, setB):
'''
newSet = Set()
newSet._theElements.extend(self._theElements)
for element in setB:
if element not in self:
newSet._theElements.append(element)
return newSet
'''
newSet = Set()
a, b = 0, 0
# Merge the two lists together until one is empty.
while a < len(self) and b < len(setB):
valueA = self._theElements[a]
valueB = self._theElements[b]
if valueA < valueB:
newSet._theElements.append(valueA)
a += 1
elif valueA > valueB:
newSet._theElements.append(valueB)
b += 1
else: # Only one of the two duplicates are appended.
newSet._theElements.append(valueA)
a += 1
b += 1
# If listA contains more items, append them to newList
while a < len(self):
newSet._theElements.append(self._theElements[a])
a += 1
# Or if listB contains more items, append them to newList
while b < len(setB):
newSet._theElements.append(setB._theElements[b])
b += 1
return newSet
# TODO: Creates a new set from the intersection: self set and setB.
def intersect(self, setB):
newSet = Set()
for element in setB:
if element in self:
newSet._theElements.append(element)
return newSet
# TODO: Creates a new set from the difference: self set and setB.
def difference(self, setB):
newSet = Set()
newSet._theElements.extend(self._theElements)
for element in setB:
if element in self:
newSet._theElements.remove(element)
return newSet
# Returns an iterator for traversing the list of items.
def __iter__(self):
return _SetIterator(self._theElements)
    # Finds the position of the element within the ordered list.
def _findPosition(self, element):
start = 0
end = len(self) - 1
while start <= end:
mid = (start + end) // 2
            if self._theElements[mid] == element:
                return mid
            elif element < self._theElements[mid]:
end = mid - 1
else:
start = mid + 1
return start
# An iterator for the Set ADT.
class _SetIterator:
def __init__(self, theElements):
self._SetRef = theElements
self._curidx = 0
def __iter__(self):
return self
def __next__(self):
if self._curidx < len(self._SetRef):
entry = self._SetRef[self._curidx]
self._curidx += 1
return entry
else:
raise StopIteration
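# Usage sketch. Note that union()'s merge and _findPosition() assume the
# element list is kept sorted, while add() simply appends; inserting items in
# ascending order keeps that assumption intact here.
def _demo_set_ops():
    a, b = Set(), Set()
    for x in (1, 3, 5):
        a.add(x)
    for x in (1, 2, 3):
        b.add(x)
    print(sorted(a.union(b)))       # [1, 2, 3, 5]
    print(sorted(a.intersect(b)))   # [1, 3]
    print(sorted(a.difference(b)))  # [5]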
| 31.707692
| 71
| 0.563561
| 491
| 4,122
| 4.596741
| 0.217923
| 0.086398
| 0.071334
| 0.018609
| 0.280461
| 0.235268
| 0.198494
| 0.198494
| 0.135135
| 0.102348
| 0
| 0.005701
| 0.361718
| 4,122
| 129
| 72
| 31.953488
| 0.852147
| 0.253518
| 0
| 0.325843
| 0
| 0
| 0.010351
| 0
| 0
| 0
| 0
| 0.007752
| 0.011236
| 1
| 0.168539
| false
| 0
| 0
| 0.044944
| 0.359551
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7e0a3148033e56abb61f66e7e257ace62456c980
| 2,932
|
py
|
Python
|
app/billing/views.py
|
flaviogf/finance
|
86a74e1eea6b19d7fe8c311eb77394a267e26432
|
[
"MIT"
] | null | null | null |
app/billing/views.py
|
flaviogf/finance
|
86a74e1eea6b19d7fe8c311eb77394a267e26432
|
[
"MIT"
] | null | null | null |
app/billing/views.py
|
flaviogf/finance
|
86a74e1eea6b19d7fe8c311eb77394a267e26432
|
[
"MIT"
] | null | null | null |
from flask import (Blueprint, abort, flash, redirect, render_template, request,
url_for)
from flask_login import current_user, login_required
from app import db
from app.billing.forms import CreateBillingForm
from app.models import Billing
from sqlalchemy import desc
billing = Blueprint('billing', __name__)
@billing.route('/billing/create', methods=['GET', 'POST'])
@login_required
def create():
form = CreateBillingForm()
if form.validate_on_submit():
billing = Billing(title=form.title.data,
description=form.description.data,
value=form.value.data,
work_date=form.work_date.data,
user_id=current_user.get_id())
db.session.add(billing)
db.session.commit()
return redirect(url_for('billing.pagination'))
return render_template('create_billing.html', title='Create Billing', form=form)
@billing.route('/billing')
@login_required
def pagination():
page = request.args.get('page', 1, type=int)
billings = (Billing.query
.filter_by(user_id=current_user.get_id())
.order_by(desc(Billing.id))
.paginate(page=page, per_page=5))
return render_template('pagination_billing.html', title='Search Billing', billings=billings)
@billing.route('/billing/<int:id>', methods=['GET', 'POST'])
@login_required
def update(id):
billing = Billing.query.get_or_404(id)
form = CreateBillingForm()
if form.validate_on_submit():
billing.title = form.title.data
billing.description = form.description.data
billing.value = form.value.data
billing.work_date = form.work_date.data
db.session.commit()
flash('Billing updated with successfully.')
return redirect(url_for('billing.update', id=id))
elif request.method == 'GET':
form.title.data = billing.title
form.description.data = billing.description
form.value.data = billing.value
form.work_date.data = billing.work_date
return render_template('create_billing.html', title='Update Billing', form=form)
@billing.route('/billing/<int:id>/confirm-receive')
@login_required
def confirm_receive(id):
billing = Billing.query.get_or_404(id)
if current_user.get_id() != billing.user_id:
abort(403)
billing.confirm_receive()
db.session.commit()
page = request.args.get('page', 1, type=int)
return redirect(url_for('billing.pagination', page=page))
@billing.route('/billing/<int:id>/cancel-receive')
@login_required
def cancel_receive(id):
billing = Billing.query.get_or_404(id)
if current_user.get_id() != billing.user_id:
abort(403)
billing.cancel_receive()
db.session.commit()
page = request.args.get('page', 1, type=int)
return redirect(url_for('billing.pagination', page=page))
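# A sketch for hitting these routes with Flask's test client. The factory
# name `create_app` is an assumption; this module only defines the blueprint.
def _demo_routes():
    from app import create_app  # hypothetical factory
    app = create_app()
    with app.test_client() as client:
        resp = client.get('/billing')
        assert resp.status_code in (200, 302)  # 302 when not logged in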
| 27.660377
| 96
| 0.664734
| 365
| 2,932
| 5.178082
| 0.205479
| 0.040741
| 0.050265
| 0.033862
| 0.522222
| 0.457143
| 0.321164
| 0.27672
| 0.191534
| 0.191534
| 0
| 0.008204
| 0.210096
| 2,932
| 105
| 97
| 27.92381
| 0.807858
| 0
| 0
| 0.357143
| 0
| 0
| 0.118008
| 0.030014
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.085714
| 0
| 0.257143
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7e0b8779363fd91f6026918cffc7f561df56bcf8
| 9,120
|
py
|
Python
|
flickipedia/mashup.py
|
rfaulkner/Flickipedia
|
1b53f30be4027901748a09c411d568c7148f4e4b
|
[
"BSD-2-Clause"
] | 1
|
2016-03-11T09:40:19.000Z
|
2016-03-11T09:40:19.000Z
|
flickipedia/mashup.py
|
rfaulkner/Flickipedia
|
1b53f30be4027901748a09c411d568c7148f4e4b
|
[
"BSD-2-Clause"
] | 1
|
2015-02-27T02:23:19.000Z
|
2015-02-27T02:23:19.000Z
|
flickipedia/mashup.py
|
rfaulkner/Flickipedia
|
1b53f30be4027901748a09c411d568c7148f4e4b
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Author: Ryan Faulkner
Date: October 19th, 2014
Container for mashup logic.
"""
import json
import random
from sqlalchemy.orm.exc import UnmappedInstanceError
from flickipedia.redisio import DataIORedis
from flickipedia.model.articles import ArticleModel, ArticleContentModel
from flickipedia.config import log, settings
from flickipedia.model.likes import LikeModel
from flickipedia.model.exclude import ExcludeModel
from flickipedia.model.photos import PhotoModel
from flickipedia.parse import parse_strip_elements, parse_convert_links, \
handle_photo_integrate, format_title_link, add_formatting_generic
def get_article_count():
"""
Fetch total article count
:return: int; total count of articles
"""
DataIORedis().connect()
# Fetch article count from redis (query from DB if not present)
# Refresh according to config for rate
article_count = DataIORedis().read(settings.ARTICLE_COUNT_KEY)
if not article_count \
or random.randint(1, settings.ARTICLE_COUNT_REFRESH_RATE) == 1 \
or article_count < settings.MYSQL_MAX_ROWS:
with ArticleModel() as am:
article_count = am.get_article_count()
DataIORedis().write(settings.ARTICLE_COUNT_KEY, article_count)
return int(article_count)
def get_max_article_id():
"""
Fetch the maximum article ID
:return: int; maximum id from article meta
"""
max_aid = DataIORedis().read(settings.MAX_ARTICLE_ID_KEY)
if not max_aid \
or random.randint(1, settings.ARTICLE_MAXID_REFRESH_RATE) == 1:
with ArticleModel() as am:
max_aid = am.get_max_id()
DataIORedis().write(settings.MAX_ARTICLE_ID_KEY, max_aid)
return max_aid
def get_article_stored_body(article):
"""
Fetch corresponding article object
:param article: str; article name
:return: json, Article; stored page content, corresponding
article model object
"""
with ArticleModel() as am:
article_obj = am.get_article_by_name(article)
try:
with ArticleContentModel() as acm:
body = acm.get_article_content(article_obj._id).markup
except Exception as e:
log.info('Article markup not found: "%s"' % e.message)
body = ''
return body
def get_wiki_content(article):
"""
Retrieve the wiki content from the mediawiki API
:param article: str; article name
:return: Wikipedia; mediawiki api response object
"""
pass
def get_flickr_photos(flickr_json):
"""
    Retrieve Flickr photo content from the Flickr API
    :param flickr_json: dict; raw Flickr API response JSON
    :return: list; list of Flickr photo json
"""
photos = []
for i in xrange(settings.NUM_PHOTOS_TO_FETCH):
try:
photos.append(
{
'owner': flickr_json['photos']['photo'][i]['owner'],
'photo_id': flickr_json['photos']['photo'][i]['id'],
'farm': flickr_json['photos']['photo'][i]['farm'],
'server': flickr_json['photos']['photo'][i]['server'],
'title': flickr_json['photos']['photo'][i]['title'],
'secret': flickr_json['photos']['photo'][i]['secret'],
},
)
except (IndexError, KeyError) as e:
log.error('No more photos to process for: - "%s"' % (e.message))
log.debug('Photo info: %s' % (str(photos)))
return photos
def manage_article_storage(max_article_id, article_count):
"""
Handle the storage of new articles
:param max_article_id: int; article id
:param article_count: int; total count of articles
:return: bool; success
"""
if article_count >= settings.MYSQL_MAX_ROWS:
if max_article_id:
# TODO - CHANGE THIS be careful, could iterate many times
article_removed = False
attempts = 0
while not article_removed \
or attempts > settings.MAX_RETRIES_FOR_REMOVE:
attempts += 1
article_id = random.randint(0, int(max_article_id))
with ArticleModel() as am:
log.info('Removing article id: ' + str(article_id))
try:
am.delete_article(article_id)
article_removed = True
except UnmappedInstanceError:
continue
else:
log.error('Could not determine a max article id.')
return True
def handle_article_insert(article, wiki_page_id):
"""
    Handle insertion of article meta data
    :param article: str; article name
    :param wiki_page_id: int; wiki page id
    :return: int, bool; article id and success flag
"""
with ArticleModel() as am:
if am.insert_article(article, wiki_page_id):
article_obj = am.get_article_by_name(article)
article_id = article_obj._id
success = True
else:
log.error('Couldn\'t insert article: "%s"' % article)
article_id = -1
success = False
return article_id, success
def handle_article_content_insert(article_id, page_content, is_new_article):
"""
Handle the insertion of article content
:param article_id: int; article id
:param page_content: json; page content
:param is_new_article: bool; a new article?
:return: bool; success
"""
with ArticleContentModel() as acm:
if is_new_article:
acm.insert_article(article_id, json.dumps(page_content))
else:
acm.update_article(article_id, json.dumps(page_content))
def prep_page_content(article_id, article, wiki, photos, user_obj):
"""
Prepare the formatted article content
:param article_id: int; article id
:param article: str; article name
:param wiki_resp: wikipedia; mediawiki api response
:param photos: list; list of photo json
:param user_obj: User; user object for request
:return: dict; formatted page response passed to jinja template
"""
html = parse_strip_elements(wiki.html())
html = parse_convert_links(html)
html = add_formatting_generic(html)
photo_ids = process_photos(article_id, photos, user_obj)
html = handle_photo_integrate(photos, html, article)
page_content = {
'title': format_title_link(wiki.title, article),
'content': html,
'section_img_class': settings.SECTION_IMG_CLASS,
'num_photos': len(photos),
'article_id': article_id,
'user_id': user_obj.get_id(),
'photo_ids': photo_ids
}
return page_content
def update_last_access(article_id):
"""
Update article last access
:param article_id: int; article id
:return: bool; success
"""
pass
def order_photos_by_rank(article_id, photos):
""" Reorders photos by score """
# Compute scores
for i in xrange(len(photos)):
# Get Exclusions & Endorsements
with ExcludeModel() as em:
exclusions = em.get_excludes_article_photo(article_id,
photos[i]['photo_id'])
with LikeModel() as lm:
endorsements = lm.get_likes_article_photo(article_id,
photos[i]['photo_id'])
photos[i]['score'] = len(endorsements) - len(exclusions)
    # sort by score descending (key-based; equivalent to the old cmp form)
    return sorted(photos, key=lambda x: -x['score'])
def process_photos(article_id, photos, user_obj):
"""
Handles linking photo results with the model and returns a list of
Flickr photo ids to pass to templating
:param article_id: int; article id
:param photos: list of photos
:param user_obj: User; user object for request
:return: List of Flickr photo ids
"""
photo_ids = []
for photo in photos:
# Ensure that each photo is modeled
with PhotoModel() as pm:
photo_obj = pm.get_photo(photo['photo_id'], article_id)
if not photo_obj:
log.info('Processing photo: "%s"' % str(photo))
if pm.insert_photo(photo['photo_id'], article_id):
photo_obj = pm.get_photo(
photo['photo_id'], article_id)
if not photo_obj:
log.error('DB Error: Could not retrieve or '
'insert: "%s"' % str(photo))
continue
else:
log.error('Couldn\'t insert photo: "%s"' % (
photo['photo_id']))
photo['id'] = photo_obj._id
photo['votes'] = photo_obj.votes
# Retrieve like data
with LikeModel() as lm:
if lm.get_like(article_id, photo_obj._id,
user_obj.get_id()):
photo['like'] = True
else:
photo['like'] = False
photo_ids.append(photo['photo_id'])
return photo_ids
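# order_photos_by_rank() scores each photo as endorsements minus exclusions
# and sorts highest-first; a toy illustration with pre-computed scores:
def _demo_ranking():
    photos = [{'photo_id': 'a', 'score': 1},
              {'photo_id': 'b', 'score': 3}]
    ranked = sorted(photos, key=lambda x: -x['score'])
    assert [p['photo_id'] for p in ranked] == ['b', 'a']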
| 35.076923
| 78
| 0.607346
| 1,086
| 9,120
| 4.89779
| 0.20442
| 0.072758
| 0.018049
| 0.023689
| 0.257191
| 0.196089
| 0.144952
| 0.079714
| 0.053394
| 0.020681
| 0
| 0.002191
| 0.299232
| 9,120
| 260
| 79
| 35.076923
| 0.830074
| 0.239474
| 0
| 0.178808
| 0
| 0
| 0.075888
| 0
| 0
| 0
| 0
| 0.003846
| 0
| 1
| 0.07947
| false
| 0.013245
| 0.066225
| 0
| 0.205298
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7e0be21835c15a9296a6ae0c119d0388d9169b45
| 240
|
py
|
Python
|
docs/examples/slider_dimmer.py
|
SatoshiIwasada/BlueDot
|
e93bc242593d3a3cbfd0ff97f98fcffb0fcd961a
|
[
"MIT"
] | 112
|
2017-03-27T17:23:17.000Z
|
2022-03-13T09:51:43.000Z
|
docs/examples/slider_dimmer.py
|
SatoshiIwasada/BlueDot
|
e93bc242593d3a3cbfd0ff97f98fcffb0fcd961a
|
[
"MIT"
] | 109
|
2017-03-29T11:19:54.000Z
|
2022-02-03T14:18:15.000Z
|
docs/examples/slider_dimmer.py
|
SatoshiIwasada/BlueDot
|
e93bc242593d3a3cbfd0ff97f98fcffb0fcd961a
|
[
"MIT"
] | 40
|
2017-03-30T23:23:27.000Z
|
2022-01-21T17:09:11.000Z
|
from bluedot import BlueDot
from gpiozero import PWMLED
from signal import pause
def set_brightness(pos):
brightness = (pos.y + 1) / 2
led.value = brightness
led = PWMLED(27)
bd = BlueDot()
bd.when_moved = set_brightness
pause()
| 17.142857
| 32
| 0.725
| 35
| 240
| 4.885714
| 0.571429
| 0.152047
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020513
| 0.1875
| 240
| 13
| 33
| 18.461538
| 0.85641
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.3
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd665b1231aab43a664a3eab839a54a833e10f79
| 3,144
|
py
|
Python
|
web/env/lib/python3.6/site-packages/test/file/test_includer.py
|
Conbrown100/webfortune
|
779026d064498d36ddeba07e06cc744fb335ceb6
|
[
"Apache-2.0"
] | 8
|
2015-07-30T16:19:18.000Z
|
2021-08-10T21:00:47.000Z
|
web/env/lib/python3.6/site-packages/test/file/test_includer.py
|
Conbrown100/webfortune
|
779026d064498d36ddeba07e06cc744fb335ceb6
|
[
"Apache-2.0"
] | 3
|
2015-01-09T13:53:55.000Z
|
2017-06-05T17:39:46.000Z
|
web/env/lib/python3.6/site-packages/test/file/test_includer.py
|
Conbrown100/webfortune
|
779026d064498d36ddeba07e06cc744fb335ceb6
|
[
"Apache-2.0"
] | 6
|
2015-01-09T13:47:15.000Z
|
2020-12-25T14:09:41.000Z
|
import os
from tempfile import TemporaryDirectory
import codecs
import logging
from grizzled.file.includer import *
from grizzled.os import working_directory
from grizzled.text import strip_margin
import pytest
@pytest.fixture
def log():
return logging.getLogger('test')
def test_simple(log):
outer = '''|First non-blank line.
|Second non-blank line.
|%include "inner.txt"
|Last line.
|'''
inner = '''|Inner line 1
|Inner line 2
|'''
expected = strip_margin(
'''|First non-blank line.
|Second non-blank line.
|Inner line 1
|Inner line 2
|Last line.
|'''
)
with TemporaryDirectory() as dir:
outer_path = os.path.join(dir, "outer.txt")
all = (
(outer, outer_path),
(inner, os.path.join(dir, "inner.txt")),
)
for text, path in all:
log.debug(f'writing "{path}"')
with codecs.open(path, mode='w', encoding='utf-8') as f:
f.write(strip_margin(text))
inc = Includer(outer_path)
lines = [line for line in inc]
res = ''.join(lines)
assert res == expected
def test_nested(log):
outer = '''|First non-blank line.
|Second non-blank line.
|%include "nested1.txt"
|Last line.
|'''
nested1 = '''|Nested 1 line 1
|%include "nested2.txt"
|Nested 1 line 3
|'''
nested2 = '''|Nested 2 line 1
|Nested 2 line 2
|'''
expected = strip_margin(
'''|First non-blank line.
|Second non-blank line.
|Nested 1 line 1
|Nested 2 line 1
|Nested 2 line 2
|Nested 1 line 3
|Last line.
|'''
)
with TemporaryDirectory() as dir:
outer_path = os.path.join(dir, "outer.txt")
all = (
(outer, outer_path),
(nested1, os.path.join(dir, "nested1.txt")),
(nested2, os.path.join(dir, "nested2.txt")),
)
for text, path in all:
with codecs.open(path, mode='w', encoding='utf-8') as f:
f.write(strip_margin(text))
inc = Includer(outer_path)
lines = [line for line in inc]
res = ''.join(lines)
assert res == expected
def test_overflow(log):
outer = '''|First non-blank line.
|Second non-blank line.
|%include "outer.txt"
|Last line.
|'''
with TemporaryDirectory() as dir:
outer_path = os.path.join(dir, "outer.txt")
with codecs.open(outer_path, mode='w', encoding='utf-8') as f:
f.write(strip_margin(outer))
try:
Includer(outer_path, max_nest_level=10)
assert False, "Expected max-nesting exception"
except MaxNestingExceededError as e:
print(e)
def _log_text_file(log, prefix: str, text: str) -> None:
log.debug(f'{prefix}:\n---\n{text}\n---')
| 29.383178
| 70
| 0.515585
| 366
| 3,144
| 4.363388
| 0.215847
| 0.050094
| 0.075141
| 0.048842
| 0.582968
| 0.579837
| 0.534126
| 0.534126
| 0.507201
| 0.507201
| 0
| 0.016451
| 0.361959
| 3,144
| 106
| 71
| 29.660377
| 0.779661
| 0
| 0
| 0.5
| 0
| 0
| 0.294287
| 0.009702
| 0
| 0
| 0
| 0
| 0.036585
| 1
| 0.060976
| false
| 0
| 0.097561
| 0.012195
| 0.170732
| 0.012195
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd6a627b6084b5a56d9fe3161a2d00c62052ed2a
| 8,850
|
py
|
Python
|
tbconnect/tests/test_views.py
|
praekeltfoundation/healthcheck
|
3f8b3722ea41c2d81c706e0f9a3473ba2cb2f2ba
|
[
"BSD-3-Clause"
] | null | null | null |
tbconnect/tests/test_views.py
|
praekeltfoundation/healthcheck
|
3f8b3722ea41c2d81c706e0f9a3473ba2cb2f2ba
|
[
"BSD-3-Clause"
] | 23
|
2020-07-16T15:40:35.000Z
|
2021-12-13T13:59:30.000Z
|
tbconnect/tests/test_views.py
|
praekeltfoundation/healthcheck
|
3f8b3722ea41c2d81c706e0f9a3473ba2cb2f2ba
|
[
"BSD-3-Clause"
] | 1
|
2021-02-24T04:58:40.000Z
|
2021-02-24T04:58:40.000Z
|
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from tbconnect.models import TBCheck, TBTest
from userprofile.models import HealthCheckUserProfile
from userprofile.tests.test_views import BaseEventTestCase
from tbconnect.serializers import TBCheckSerializer
class TBCheckViewSetTests(APITestCase, BaseEventTestCase):
url = reverse("tbcheck-list")
def test_data_validation(self):
"""
The supplied data must be validated, and any errors returned
"""
user = get_user_model().objects.create_user("test")
user.user_permissions.add(Permission.objects.get(codename="add_tbcheck"))
self.client.force_authenticate(user)
response = self.client.post(self.url)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_successful_request(self):
"""
Should create a new TBCheck object in the database
"""
user = get_user_model().objects.create_user("test")
user.user_permissions.add(Permission.objects.get(codename="add_tbcheck"))
self.client.force_authenticate(user)
response = self.client.post(
self.url,
{
"msisdn": "27856454612",
"source": "USSD",
"province": "ZA-WC",
"city": "Cape Town",
"age": TBCheck.AGE_18T40,
"gender": TBCheck.GENDER_FEMALE,
"cough": True,
"fever": True,
"sweat": False,
"weight": True,
"exposure": "yes",
"tracing": True,
"risk": TBCheck.RISK_LOW,
"location": "+40.20361+40.20361",
"follow_up_optin": True,
"language": "eng",
},
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
[tbcheck] = TBCheck.objects.all()
self.assertEqual(tbcheck.msisdn, "27856454612")
self.assertEqual(tbcheck.source, "USSD")
self.assertEqual(tbcheck.province, "ZA-WC")
self.assertEqual(tbcheck.city, "Cape Town")
self.assertEqual(tbcheck.age, TBCheck.AGE_18T40)
self.assertEqual(tbcheck.gender, TBCheck.GENDER_FEMALE)
self.assertTrue(tbcheck.cough)
self.assertTrue(tbcheck.fever)
self.assertFalse(tbcheck.sweat)
self.assertTrue(tbcheck.weight)
self.assertEqual(tbcheck.exposure, "yes")
self.assertTrue(tbcheck.tracing)
self.assertEqual(tbcheck.risk, TBCheck.RISK_LOW)
self.assertEqual(tbcheck.location, "+40.20361+40.20361")
self.assertTrue(tbcheck.follow_up_optin)
self.assertEqual(tbcheck.language, "eng")
def test_location_validation(self):
"""
Should return a validation error when both location and city_location are missing
"""
user = get_user_model().objects.create_user("test")
user.user_permissions.add(Permission.objects.get(codename="add_tbcheck"))
self.client.force_authenticate(user)
response = self.client.post(
self.url,
{
"msisdn": "27856454612",
"source": "USSD",
"province": "ZA-WC",
"city": "Cape Town",
"age": TBCheck.AGE_18T40,
"gender": TBCheck.GENDER_FEMALE,
"cough": True,
"fever": True,
"sweat": False,
"weight": True,
"exposure": "yes",
"tracing": True,
"risk": TBCheck.RISK_LOW,
},
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json(),
{"non_field_errors": ["location and city_location are both None"]},
)
def test_creates_user_profile(self):
"""
The user profile should be created when the TB Check is saved
"""
user = get_user_model().objects.create_user("test")
user.user_permissions.add(Permission.objects.get(codename="add_tbcheck"))
self.client.force_authenticate(user)
response = self.client.post(
self.url,
{
"msisdn": "+27856454612",
"source": "USSD",
"province": "ZA-WC",
"city": "Cape Town",
"age": TBCheck.AGE_18T40,
"gender": TBCheck.GENDER_FEMALE,
"cough": True,
"fever": True,
"sweat": False,
"weight": True,
"exposure": "yes",
"tracing": True,
"risk": TBCheck.RISK_LOW,
"location": "+40.20361+40.20361",
},
format="json",
)
profile = HealthCheckUserProfile.objects.get(msisdn="+27856454612")
self.assertEqual(profile.province, "ZA-WC")
self.assertEqual(profile.city, "Cape Town")
self.assertEqual(profile.age, TBCheck.AGE_18T40)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
class TBTestViewSetTests(APITestCase, BaseEventTestCase):
url = reverse("tbtest-list")
def test_data_validation(self):
"""
The supplied data must be validated, and any errors returned
"""
user = get_user_model().objects.create_user("test")
user.user_permissions.add(Permission.objects.get(codename="add_tbtest"))
self.client.force_authenticate(user)
response = self.client.post(self.url)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_successful_create_request(self):
"""
Should create a new TBTest object in the database
"""
user = get_user_model().objects.create_user("test")
user.user_permissions.add(Permission.objects.get(codename="add_tbtest"))
self.client.force_authenticate(user)
response = self.client.post(
self.url,
{
"msisdn": "27856454612",
"source": "WhatsApp",
"result": TBTest.RESULT_PENDING,
},
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
[tbtest] = TBTest.objects.all()
self.assertEqual(tbtest.msisdn, "27856454612")
self.assertEqual(tbtest.source, "WhatsApp")
self.assertEqual(tbtest.result, TBTest.RESULT_PENDING)
def test_successful_update_request(self):
"""
Should update an existing TBTest object in the database
"""
tbtest = TBTest.objects.create(
**{
"msisdn": "27856454612",
"source": "WhatsApp",
"result": TBTest.RESULT_PENDING,
}
)
user = get_user_model().objects.create_user("test")
user.user_permissions.add(Permission.objects.get(codename="change_tbtest"))
self.client.force_authenticate(user)
update_url = reverse("tbtest-detail", args=(tbtest.id,))
response = self.client.patch(update_url, {"result": TBTest.RESULT_POSITIVE})
self.assertEqual(response.status_code, status.HTTP_200_OK)
tbtest.refresh_from_db()
self.assertEqual(tbtest.msisdn, "27856454612")
self.assertEqual(tbtest.source, "WhatsApp")
self.assertEqual(tbtest.result, TBTest.RESULT_POSITIVE)
class TBCheckSerializerTests(TestCase):
def test_valid_tbcheck(self):
"""
If age is <18, skip the location and city_location validation
"""
data = {
"msisdn": "+2349039756628",
"source": "WhatsApp",
"province": "ZA-GT",
"city": "<not collected>",
"age": "<18",
"gender": "male",
"cough": "True",
"fever": "False",
"sweat": "False",
"weight": "False",
"exposure": "no",
"tracing": "False",
"risk": "low",
}
serializer = TBCheckSerializer(data=data)
self.assertTrue(serializer.is_valid())
self.assertEqual(
dict(serializer.validated_data),
{
"age": "<18",
"city": "<not collected>",
"cough": True,
"exposure": "no",
"fever": False,
"gender": "male",
"msisdn": "+2349039756628",
"province": "ZA-GT",
"risk": "low",
"source": "WhatsApp",
"sweat": False,
"tracing": False,
"weight": False,
},
)
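The three-step setup repeated in each test above (create a user, grant one model permission, force-authenticate the client) could be factored into a helper. A minimal sketch, assuming the same Django/DRF test client; the helper name is invented for illustration:

def authenticate_with_permission(client, codename):
    # hypothetical helper: creates a throwaway user, grants a single model
    # permission by codename, and authenticates the DRF test client with it
    user = get_user_model().objects.create_user("test")
    user.user_permissions.add(Permission.objects.get(codename=codename))
    client.force_authenticate(user)
    return user

Each test body would then start with authenticate_with_permission(self.client, "add_tbcheck") instead of the three repeated lines.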
| 37.184874
| 84
| 0.568701
| 862
| 8,850
| 5.697216
| 0.171694
| 0.085522
| 0.044797
| 0.022806
| 0.598452
| 0.571981
| 0.545917
| 0.537161
| 0.519039
| 0.486663
| 0
| 0.035163
| 0.312316
| 8,850
| 237
| 85
| 37.341772
| 0.771771
| 0.048249
| 0
| 0.548223
| 0
| 0
| 0.129886
| 0
| 0
| 0
| 0
| 0
| 0.177665
| 1
| 0.040609
| false
| 0
| 0.050761
| 0
| 0.116751
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd6abf4d61e22150256649650adbe262b09e0720
| 1,350
|
py
|
Python
|
code/runibm1.py
|
jrod2699/CS159-NLP-Final-Project-
|
76eea6149ab01d5e72232874398458ec9f35227f
|
[
"MIT"
] | null | null | null |
code/runibm1.py
|
jrod2699/CS159-NLP-Final-Project-
|
76eea6149ab01d5e72232874398458ec9f35227f
|
[
"MIT"
] | null | null | null |
code/runibm1.py
|
jrod2699/CS159-NLP-Final-Project-
|
76eea6149ab01d5e72232874398458ec9f35227f
|
[
"MIT"
] | null | null | null |
import nltk
import random
from preprocess import compile_corpus
from nltk.translate import IBMModel1, AlignedSent, Alignment
def run(filename, iterations):
# global variables utilized in the assessment of the IBM Model
global ibm1
global corpus
# construct and modify corpus by adding the system alignments to every sentence pair
corpus = compile_corpus(filename)
ibm1 = IBMModel1(corpus, iterations)
# produce random sentences for testing purposes
get_rand_sent()
def get_rand_sent():
'''
Redirect the standard output of the program -- i.e. the random sentences --
and transfer it over to the appropriate file. From there we will take a
look at the sentence pair and include the hand alignment (gold standard)
to proceed with evaluating the IBM model.
'''
    i = 0
    while i < 20:
        # randrange excludes len(corpus); the original randint(0, len(corpus))
        # could return an out-of-range index
        index = random.randrange(len(corpus))
        try:
            # only print out "valid" sentence pairs
            # valid = sentence pairs with system-created alignments
            print(" ".join(corpus[index].mots), "\t", " ".join(corpus[index].words), "\t", corpus[index].alignment)
            i += 1
        except Exception:
            pass
def main():
    # change the file based on the language being tested
run("data/languages/vie-eng.txt", 5)
if __name__ == "__main__":
main()
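For reference, a minimal self-contained sketch of the same NLTK IBM Model 1 workflow on a toy bitext, without the preprocessing step; the sentence pairs are invented for illustration:

from nltk.translate import AlignedSent, IBMModel1

# toy parallel corpus: AlignedSent(words, mots) pairs target words with source words
bitext = [
    AlignedSent(['klein', 'ist', 'das', 'haus'], ['the', 'house', 'is', 'small']),
    AlignedSent(['das', 'haus', 'ist', 'ja', 'gross'], ['the', 'house', 'is', 'big']),
    AlignedSent(['das', 'buch', 'ist', 'ja', 'klein'], ['the', 'book', 'is', 'small']),
]
ibm1 = IBMModel1(bitext, 5)  # 5 EM iterations, as in run() above
# training fills in a translation table and a best alignment per sentence pair
print(ibm1.translation_table['haus']['house'])
print(bitext[0].alignment)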
| 31.395349
| 115
| 0.665926
| 178
| 1,350
| 4.97191
| 0.578652
| 0.037288
| 0.024859
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009921
| 0.253333
| 1,350
| 42
| 116
| 32.142857
| 0.868056
| 0.442222
| 0
| 0
| 0
| 0
| 0.055633
| 0.036161
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130435
| false
| 0.043478
| 0.173913
| 0
| 0.304348
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd6ea7420f474f3252a16e6bcdeebb2e566cf6e9
| 3,619
|
py
|
Python
|
tests/test_models.py
|
DynamicGravitySystems/DGP
|
5c0b566b846eb25f1e5ede64b2caaaa6a3352a29
|
[
"Apache-2.0"
] | 7
|
2017-08-15T21:51:40.000Z
|
2020-10-28T00:40:23.000Z
|
tests/test_models.py
|
DynamicGravitySystems/DGP
|
5c0b566b846eb25f1e5ede64b2caaaa6a3352a29
|
[
"Apache-2.0"
] | 63
|
2017-08-11T15:12:03.000Z
|
2020-05-23T19:03:46.000Z
|
tests/test_models.py
|
cbertinato/DGP
|
5bb8a30895365eccdd452970c45e248903fca8af
|
[
"Apache-2.0"
] | 4
|
2018-03-29T21:30:26.000Z
|
2020-10-27T20:15:23.000Z
|
# -*- coding: utf-8 -*-
"""
Unit tests for new Project/Flight data classes, including JSON
serialization/de-serialization
"""
import time
from datetime import datetime
from typing import Tuple
from uuid import uuid4
from pathlib import Path
import pytest
import pandas as pd
from dgp.core import DataType
from dgp.core.models.project import AirborneProject
from dgp.core.hdf5_manager import HDF5Manager
from dgp.core.models.datafile import DataFile
from dgp.core.models.dataset import DataSet
from dgp.core.models import flight
from dgp.core.models.meter import Gravimeter
@pytest.fixture()
def make_flight():
def _factory() -> Tuple[str, flight.Flight]:
name = str(uuid4().hex)[:12]
return name, flight.Flight(name)
return _factory
def test_flight_actions(make_flight):
# TODO: Test adding/setting gravimeter
flt = flight.Flight('test_flight')
assert 'test_flight' == flt.name
f1_name, f1 = make_flight() # type: flight.Flight
f2_name, f2 = make_flight() # type: flight.Flight
assert f1_name == f1.name
assert f2_name == f2.name
assert not f1.uid == f2.uid
assert '<Flight %s :: %s>' % (f1_name, f1.uid) == repr(f1)
def test_project_path(project: AirborneProject, tmpdir):
assert isinstance(project.path, Path)
new_path = Path(tmpdir).joinpath("new_prj_path")
project.path = new_path
assert new_path == project.path
def test_project_add_child(project: AirborneProject):
with pytest.raises(TypeError):
project.add_child(None)
def test_project_get_child(make_flight):
prj = AirborneProject(name="Project-2", path=Path('.'))
f1_name, f1 = make_flight()
f2_name, f2 = make_flight()
f3_name, f3 = make_flight()
prj.add_child(f1)
prj.add_child(f2)
prj.add_child(f3)
assert f1 == prj.get_child(f1.uid)
assert f3 == prj.get_child(f3.uid)
assert not f2 == prj.get_child(f1.uid)
with pytest.raises(IndexError):
fx = prj.get_child(str(uuid4().hex))
def test_project_remove_child(make_flight):
prj = AirborneProject(name="Project-3", path=Path('.'))
f1_name, f1 = make_flight()
f2_name, f2 = make_flight()
f3_name, f3 = make_flight()
prj.add_child(f1)
prj.add_child(f2)
assert 2 == len(prj.flights)
assert f1 in prj.flights
assert f2 in prj.flights
assert f3 not in prj.flights
assert not prj.remove_child(f3.uid)
assert prj.remove_child(f1.uid)
assert f1 not in prj.flights
assert 1 == len(prj.flights)
def test_gravimeter():
meter = Gravimeter("AT1A-13")
assert "AT1A" == meter.type
assert "AT1A-13" == meter.name
assert meter.config is None
config = meter.read_config(Path("tests/at1m.ini"))
assert isinstance(config, dict)
with pytest.raises(FileNotFoundError):
config = meter.read_config(Path("tests/at1a-fake.ini"))
assert {} == meter.read_config(Path("tests/sample_gravity.csv"))
def test_dataset(tmpdir):
path = Path(tmpdir).joinpath("test.hdf5")
df_grav = DataFile(DataType.GRAVITY, datetime.utcnow(), Path('gravity.dat'))
df_traj = DataFile(DataType.TRAJECTORY, datetime.utcnow(), Path('gps.dat'))
dataset = DataSet(df_grav, df_traj)
assert df_grav == dataset.gravity
assert df_traj == dataset.trajectory
frame_grav = pd.DataFrame([0, 1, 2])
frame_traj = pd.DataFrame([7, 8, 9])
HDF5Manager.save_data(frame_grav, df_grav, path)
HDF5Manager.save_data(frame_traj, df_traj, path)
expected_concat: pd.DataFrame = pd.concat([frame_grav, frame_traj])
# assert expected_concat.equals(dataset.dataframe)
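The make_flight fixture above is an instance of pytest's factory-as-fixture pattern. A generic sketch of the same idea with optional teardown; the Widget class and its close() method are invented for illustration:

@pytest.fixture()
def make_widget():
    created = []

    def _factory(name=None):
        # each call returns a fresh object; the fixture also tracks
        # everything it created so it can be cleaned up afterwards
        obj = Widget(name or uuid4().hex[:12])
        created.append(obj)
        return obj

    yield _factory
    for obj in created:
        obj.close()  # hypothetical cleanup hook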
| 27.210526
| 80
| 0.698812
| 519
| 3,619
| 4.714836
| 0.219653
| 0.04904
| 0.031467
| 0.034736
| 0.200245
| 0.142215
| 0.107887
| 0.071925
| 0.071925
| 0.071925
| 0
| 0.0243
| 0.181266
| 3,619
| 132
| 81
| 27.416667
| 0.801552
| 0.066869
| 0
| 0.137931
| 0
| 0
| 0.051442
| 0.007136
| 0
| 0
| 0
| 0.007576
| 0.287356
| 1
| 0.103448
| false
| 0
| 0.16092
| 0
| 0.287356
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd6f1c1a3069baecfcb5b723cf12a8c76710a022
| 1,312
|
py
|
Python
|
tests/contract/test_concept.py
|
Informasjonsforvaltning/fdk-harvester-bff
|
21f5d41bbe2506d9c23f0e670e6dee1bfe9f0742
|
[
"Apache-2.0"
] | null | null | null |
tests/contract/test_concept.py
|
Informasjonsforvaltning/fdk-harvester-bff
|
21f5d41bbe2506d9c23f0e670e6dee1bfe9f0742
|
[
"Apache-2.0"
] | 20
|
2020-09-23T10:04:48.000Z
|
2022-03-14T07:47:45.000Z
|
tests/contract/test_concept.py
|
Informasjonsforvaltning/fdk-harvester-bff
|
21f5d41bbe2506d9c23f0e670e6dee1bfe9f0742
|
[
"Apache-2.0"
] | null | null | null |
"""Test cases for concepts."""
from typing import Any
import pytest
import requests
@pytest.mark.contract
def test_get_concept_with_id(http_service: Any) -> None:
test_id = "a683bc63-2961-46af-9956-8a4a3f991cc6"
url = f"{http_service}/concepts/{test_id}"
result = requests.get(url=url, headers={"accept": "application/json"})
assert result.headers["Cache-Control"] == "max-age=86400"
parsed_result = result.json()
assert parsed_result["id"] == "a683bc63-2961-46af-9956-8a4a3f991cc6"
assert (
parsed_result["identifier"]
== "http://begrepskatalogen/begrep/88804c36-ff43-11e6-9d97-005056825ca0"
)
assert parsed_result["prefLabel"] == {"nb": "norsk etternavn"}
assert parsed_result["altLabel"] == [{"nb": "etternavn"}]
assert parsed_result["definition"]["text"] == {
"nb": "navn som i rekkefølge er etter fornavn og eventuelt mellomnavn som skal være i henhold til Lov om personnavn"
}
assert parsed_result["definition"]["remark"] == {
"nb": "Kan være bygget opp av to etternavn satt sammen med bindestrek - såkalt dobbelt etternavn. For at et navn skal anses som navn etter navneloven, må det i utgangspunktet være uttrykt med bokstavene i det norske alfabetet med de diakritiske tegn som støttes av folkeregisteret"
}
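Contract tests like this extend naturally to more records with pytest.mark.parametrize. A sketch checking only the caching header across several ids; the second id is invented for illustration:

@pytest.mark.contract
@pytest.mark.parametrize(
    "test_id",
    [
        "a683bc63-2961-46af-9956-8a4a3f991cc6",
        "00000000-0000-0000-0000-000000000000",  # hypothetical second id
    ],
)
def test_concepts_are_cached(http_service: Any, test_id: str) -> None:
    url = f"{http_service}/concepts/{test_id}"
    result = requests.get(url=url, headers={"accept": "application/json"})
    assert result.headers["Cache-Control"] == "max-age=86400"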
| 42.322581
| 289
| 0.70503
| 169
| 1,312
| 5.384615
| 0.585799
| 0.092308
| 0.118681
| 0.03956
| 0.074725
| 0.074725
| 0
| 0
| 0
| 0
| 0
| 0.068266
| 0.17378
| 1,312
| 30
| 290
| 43.733333
| 0.771218
| 0.018293
| 0
| 0
| 0
| 0.043478
| 0.540562
| 0.081903
| 0
| 0
| 0
| 0
| 0.304348
| 1
| 0.043478
| false
| 0
| 0.130435
| 0
| 0.173913
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd71a2a6d5b1e71ced9722bf68301238887fd3c8
| 95,557
|
py
|
Python
|
DexParse.py
|
liumengdeqq/DexParse
|
769899e26f01700c690ed82c48790d1000efb5f1
|
[
"Apache-2.0"
] | 16
|
2015-11-19T01:51:52.000Z
|
2020-03-10T06:24:28.000Z
|
DexParse.py
|
CvvT/DexParse
|
80c3f4a27e7163536f98584c5e7f7ec35a9451b8
|
[
"Apache-2.0"
] | null | null | null |
DexParse.py
|
CvvT/DexParse
|
80c3f4a27e7163536f98584c5e7f7ec35a9451b8
|
[
"Apache-2.0"
] | 22
|
2015-09-15T02:20:48.000Z
|
2021-06-24T02:55:09.000Z
|
#! /usr/bin/python
# coding=utf-8
import struct
import os
import hashlib
import Instruction
Access_Flag = {'public': 1, 'private': 2, 'protected': 4, 'static': 8, 'final': 0x10,
'synchronized': 0x20, 'volatile': 0x40, 'bridge': 0x40, 'transient': 0x80,
'varargs': 0x80, 'native': 0x100, 'interface': 0x200, 'abstract': 0x400,
'strictfp': 0x800, 'synthetic': 0x1000, 'annotation': 0x2000, 'enum': 0x4000,
'constructor': 0x10000, 'declared_synchronized': 0x20000}
TypeDescriptor = {'void': 'V', 'boolean': 'Z', 'byte': 'B', 'short': 'S', 'char': 'C',
'int': 'I', 'long': 'J', 'float': 'F', 'double': 'D', 'boolean[]': '[Z',
                  'byte[]': '[B', 'short[]': '[S', 'char[]': '[C', 'int[]': '[I',
                  'long[]': '[J', 'float[]': '[F', 'double[]': '[D'}
ShortyDescriptor = {'void': 'V', 'boolean': 'Z', 'byte': 'B', 'short': 'S', 'char': 'C',
'int': 'I', 'long': 'J', 'float': 'F', 'double': 'D'}
ACSII = {'1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, '0': 0,
'a': 10, 'b': 11, 'c': 12, 'd': 13, 'e': 14, 'f': 15}
def checksum(f, len):
a = 1
b = 0
f.seek(12)
print("file size is :", len)
for i in range(12, len):
onebyte = struct.unpack("B", f.read(1))[0]
a = (a + onebyte) % 65521
b = (b + a) % 65521
return b << 16 | a
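The byte-at-a-time loop above is Adler-32, which the standard library already provides. An equivalent sketch using zlib, with the same skip of the 12-byte magic-plus-checksum prefix:

import zlib

def checksum_fast(f, length):
    # Adler-32 over everything after the first 12 bytes, matching checksum()
    f.seek(12)
    return zlib.adler32(f.read(length - 12)) & 0xFFFFFFFF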
def get_file_sha1(f):
f.seek(32) # skip magic, checksum, sha
sha = hashlib.sha1()
while True:
data = f.read(1024)
if not data:
break
sha.update(data)
return sha.hexdigest()
def rightshift(value, n):
mask = 0x80000000
check = value & mask
if check != mask:
return value >> n
else:
submask = mask
for loop in range(0, n):
submask = (submask | (mask >> loop))
strdata = struct.pack("I", submask | (value >> n))
ret = struct.unpack("i", strdata)[0]
return ret
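rightshift emulates a 32-bit arithmetic right shift. The same effect can be had by reinterpreting the bit pattern as a signed int first, since Python's >> is already arithmetic on signed ints; a sketch:

def arithmetic_shift_right_32(value, n):
    # reinterpret the low 32 bits as signed, then let Python's >> sign-extend
    signed = struct.unpack("i", struct.pack("I", value & 0xFFFFFFFF))[0]
    return signed >> n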
def readunsignedleb128(file):
res = struct.unpack("B", file.read(1))[0]
if res > 0x7f:
cur = struct.unpack("B", file.read(1))[0]
res = (res & 0x7f) | ((cur & 0x7f) << 7)
if cur > 0x7f:
cur = struct.unpack("B", file.read(1))[0]
res |= (cur & 0x7f) << 14
if cur > 0x7f:
cur = struct.unpack("B", file.read(1))[0]
res |= (cur & 0x7f) << 21
if cur > 0x7f:
cur = struct.unpack("B", file.read(1))[0]
res |= cur << 28
return res
def readsignedleb128(file):
res = struct.unpack("B", file.read(1))[0]
if res <= 0x7f:
res = rightshift((res << 25), 25)
else:
cur = struct.unpack("B", file.read(1))[0]
res = (res & 0x7f) | ((cur & 0x7f) << 7)
if cur <= 0x7f:
res = rightshift((res << 18), 18)
else:
cur = struct.unpack("B", file.read(1))[0]
res |= (cur & 0x7f) << 14
if cur <= 0x7f:
res = rightshift((res << 11), 11)
else:
cur = struct.unpack("B", file.read(1))[0]
res |= (cur & 0x7f) << 21
if cur <= 0x7f:
res = rightshift((res << 4), 4)
else:
cur = struct.unpack("B", file.read(1))[0]
res |= cur << 28
return res
def writesignedleb128(num, file):
if num >= 0:
writeunsignedleb128(num, file)
else:
mask = 0x80000000
for i in range(0, 32):
tmp = num & mask
mask >>= 1
if tmp == 0:
break
loop = 32 - i + 1
while loop > 7:
cur = num & 0x7f | 0x80
num >>= 7
file.write(struct.pack("B", cur))
loop -= 7
cur = num & 0x7f
file.write(struct.pack("B", cur))
def signedleb128forlen(num):
if num >= 0:
return unsignedleb128forlen(num)
else:
mask = 0x80000000
for i in range(0, 32):
tmp = num & mask
mask >>= 1
if tmp == 0:
break
loop = 32 - i + 1
        if loop % 7 == 0:
            return loop // 7
        else:
            return loop // 7 + 1
def writeunsignedleb128(num, file):
    # emit 7 bits per byte, setting the continuation bit on all but the last
    while num > 0x7f:
        file.write(struct.pack("B", (num & 0x7f) | 0x80))
        num >>= 7
    file.write(struct.pack("B", num))
def unsignedleb128forlen(num):
len = 1
temp = num
while num > 0x7f:
len += 1
num >>= 7
if len > 5:
print("error for unsignedleb128forlen", temp)
os._exit(num)
return len
def writeunsignedleb128p1alignshort(num, file):
    # write (num + 1) as ULEB128, padding the final byte so the write
    # ends on a 2-byte (ushort) boundary
    num += 1
    while num > 0x7f:
        file.write(struct.pack("B", (num & 0x7f) | 0x80))
        num >>= 7
    if file.tell() % 2 == 1:
        file.write(struct.pack("B", num))
    else:
        file.write(struct.pack("B", num | 0x80))
        file.write(struct.pack("B", 0))
def readunsignedleb128p1(file):
res = readunsignedleb128(file)
return res - 1
def writeunsignedleb128p1(num, file):
writeunsignedleb128(num+1, file)
def unsignedleb128p1forlen(num):
return unsignedleb128forlen(num+1)
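A quick round-trip check of the ULEB128 helpers, using io.BytesIO as an in-memory stand-in for the dex file:

import io

def _uleb128_roundtrip(num):
    buf = io.BytesIO()
    writeunsignedleb128(num, buf)
    assert buf.tell() == unsignedleb128forlen(num)
    buf.seek(0)
    assert readunsignedleb128(buf) == num

for n in (0, 0x7f, 0x80, 0x3fff, 0x10000, 0x0FFFFFFF):
    _uleb128_roundtrip(n)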
def getutf8str(file):
string = []
while 1:
onebyte = struct.unpack("B", file.read(1))[0]
if onebyte == 0:
break
string.append(onebyte)
return bytearray(string).decode("utf-8")
def getstr(bytes):
return bytearray(bytes).decode("utf-8")
class EncodedArray:
def __init__(self, file):
self.size = readunsignedleb128(file)
self.values = []
for i in range(0, self.size):
self.values.append(EncodedValue(file))
def copytofile(self, file):
writeunsignedleb128(self.size, file)
for i in range(0, self.size):
self.values[i].copytofile(file)
def makeoffset(self, off):
off += unsignedleb128forlen(self.size)
for i in range(0, self.size):
off = self.values[i].makeoffset(off)
return off
def printf(self):
print("encoded array size", self.size)
class EncodedValue:
def __init__(self, file):
self.onebyte = struct.unpack("B", file.read(1))[0]
self.type = self.onebyte & 0x1F
self.arg = (self.onebyte >> 5) & 0x7
self.value = []
if self.type == 0x00:
# print 'here 0x00 VALUE_BYTE in class : ' + str(curClass_idx)
if self.arg != 0:
print ("[-] Ca ,get error in VALUE_BYTE")
os._exit(1)
self.value.append(struct.unpack("B", file.read(1))[0])
elif self.type == 0x02:
# print 'here 0x02 VALUE_SHORT in class : ' + str(curClass_idx)
if self.arg >= 2:
print ("[-] Ca ,get error in VALUE_SHORT at class : ")
os._exit(1)
for i in range(0, self.arg+1):
self.value.append(struct.unpack("B", file.read(1))[0])
elif self.type == 0x03:
# print 'here 0x03 VALUE_CHAR in class : ' + str(curClass_idx)
for i in range(0, self.arg+1):
self.value.append(struct.unpack("B", file.read(1))[0])
elif self.type == 0x04:
# print 'here 0x04 VALUE_INT in class : ' + str(curClass_idx)
if self.arg >= 4:
print ("[-] Ca ,get error in VALUE_INT at class : ")
os._exit(1)
for i in range(0, self.arg+1):
self.value.append(struct.unpack("B", file.read(1))[0])
elif self.type == 0x06:
# print 'here 0x06 VALUE_LONG in class : ' + str(curClass_idx)
if self.arg >= 8:
print ("[-] Ca ,get error in VALUE_LONG at class : ")
os._exit(1)
for i in range(0, self.arg+1):
self.value.append(struct.unpack("B", file.read(1))[0])
elif self.type == 0x10:
# print 'here 0x10 VALUE_FLOAT in class : ' + str(curClass_idx)
if self.arg >= 4:
print ("[-] Ca ,get error in VALUE_FLOAT at class : ")
os._exit(1)
for i in range(0, self.arg+1):
self.value.append(struct.unpack("B", file.read(1))[0])
elif self.type == 0x11:
# print 'here 0x11 VALUE_DOUBLE in class : ' + str(curClass_idx)
if self.arg >= 8:
print ("[-] Ca ,get error in VALUE_DOUBLE at class : ")
os._exit(1)
for i in range(0, self.arg+1):
self.value.append(struct.unpack("B", file.read(1))[0])
elif self.type == 0x17:
# print 'here 0x17 VALUE_STRING in class : ' + str(curClass_idx)
if self.arg >= 4:
print ("[-] Ca ,get error in VALUE_STRING at class : ")
os._exit(1)
for i in range(0, self.arg+1):
self.value.append(struct.unpack("B", file.read(1))[0])
elif self.type == 0x18:
# print 'here 0x18 VALUE_TYPE in class : ' + str(curClass_idx)
if self.arg >= 4:
print ("[-] Ca ,get error in VALUE_TYPE at class : ")
os._exit(1)
for i in range(0, self.arg+1):
self.value.append(struct.unpack("B", file.read(1))[0])
elif self.type == 0x19:
# print 'here 0x19 VALUE_FIELD in class : ' + str(curClass_idx)
if self.arg >= 4:
print ("[-] Ca ,get error in VALUE_FIELD at class : ")
os._exit(1)
for i in range(0, self.arg+1):
self.value.append(struct.unpack("B", file.read(1))[0])
elif self.type == 0x1a:
# print 'here 0x1a VALUE_METHOD in class : ' + str(curClass_idx)
if self.arg >= 4:
print ("[-] Ca ,get error in VALUE_METHOD at class : ")
os._exit(1)
for i in range(0, self.arg+1):
self.value.append(struct.unpack("B", file.read(1))[0])
elif self.type == 0x1b:
# print 'here 0x1b VALUE_ENUM in class : ' + str(curClass_idx)
if self.arg >= 4:
print ("[-] Ca ,get error in VALUE_ENUM at class : ")
os._exit(1)
for i in range(0, self.arg+1):
self.value.append(struct.unpack("B", file.read(1))[0])
elif self.type == 0x1c:
# print 'here 0x1c VALUE_ARRAY in class : ' + str(curClass_idx)
if self.arg != 0x00:
print ("[-] Ca ,get error in VALUE_ARRAY")
os._exit(1)
self.value.append(EncodedArray(file))
elif self.type == 0x1d:
# print 'here 0x1d VALUE_ANNOTATION in class : ' + str(curClass_idx)
if self.arg != 0:
                os._exit(1)
self.value.append(EncodedAnnotation(file))
        # types 0x1e (VALUE_NULL) and 0x1f (VALUE_BOOLEAN) carry no payload:
        # null has no value bytes, and the boolean is stored in the arg bits
def copytofile(self, file):
file.write(struct.pack("B", self.onebyte))
if self.type <= 0x1b:
for i in range(0, self.arg+1):
file.write(struct.pack("B", self.value[i]))
elif self.type == 0x1c:
self.value[0].copytofile(file)
elif self.type == 0x1d:
self.value[0].copytofile(file)
def makeoffset(self, off):
off += 1
if self.type <= 0x1b:
off += self.arg+1
elif self.type == 0x1c:
off = self.value[0].makeoffset(off)
elif self.type == 0x1d:
off = self.value[0].makeoffset(off)
return off
def printf(self):
print("encoded value :", self.type, self.arg)
# ----------------------------------------------------------------------------------------
class AnnotationElement:
def __init__(self, file):
self.name_idx = readunsignedleb128(file)
self.value = EncodedValue(file)
def copytofile(self, file):
writeunsignedleb128(self.name_idx, file)
self.value.copytofile(file)
def makeoffset(self, off):
off += unsignedleb128forlen(self.name_idx)
off = self.value.makeoffset(off)
return off
class EncodedAnnotation:
def __init__(self, file):
self.type_idx = readunsignedleb128(file)
self.size = readunsignedleb128(file)
self.elements = [] # annotation_element[size]
for i in range(0, self.size):
self.elements.append(AnnotationElement(file))
def copytofile(self, file):
writeunsignedleb128(self.type_idx, file)
writeunsignedleb128(self.size, file)
for i in range(0, self.size):
self.elements[i].copytofile(file)
def makeoffset(self, off):
off += unsignedleb128forlen(self.type_idx)
off += unsignedleb128forlen(self.size)
for i in range(0, self.size):
off = self.elements[i].makeoffset(off)
return off
class DexHeader:
def __init__(self, file, mode=0):
if mode == 0:
self.start = file.tell()
self.magic = []
self.magic.append(chr(struct.unpack("B", file.read(1))[0]))
self.magic.append(chr(struct.unpack("B", file.read(1))[0]))
self.magic.append(chr(struct.unpack("B", file.read(1))[0]))
self.magic.append(chr(struct.unpack("B", file.read(1))[0]))
self.version = []
self.version.append(chr(struct.unpack("B", file.read(1))[0]))
self.version.append(chr(struct.unpack("B", file.read(1))[0]))
self.version.append(chr(struct.unpack("B", file.read(1))[0]))
self.version.append(chr(struct.unpack("B", file.read(1))[0]))
self.checksum = struct.unpack("I", file.read(4))[0]
self.signature = file.read(20)
self.file_size = struct.unpack("I", file.read(4))[0]
self.header_size = struct.unpack("I", file.read(4))[0]
self.endian_tag = hex(struct.unpack("I", file.read(4))[0])
self.link_size = struct.unpack("I", file.read(4))[0]
self.link_off = struct.unpack("I", file.read(4))[0]
self.map_off = struct.unpack("I", file.read(4))[0]
self.string_ids_size = struct.unpack("I", file.read(4))[0]
self.string_ids_off = struct.unpack("I", file.read(4))[0]
self.type_ids_size = struct.unpack("I", file.read(4))[0]
self.type_ids_off = struct.unpack("I", file.read(4))[0]
self.proto_ids_size = struct.unpack("I", file.read(4))[0]
self.proto_ids_off = struct.unpack("I", file.read(4))[0]
self.field_ids_size = struct.unpack("I", file.read(4))[0]
self.field_ids_off = struct.unpack("I", file.read(4))[0]
self.method_ids_size = struct.unpack("I", file.read(4))[0]
self.method_ids_off = struct.unpack("I", file.read(4))[0]
self.class_defs_size = struct.unpack("I", file.read(4))[0]
self.class_defs_off = struct.unpack("I", file.read(4))[0]
self.data_size = struct.unpack("I", file.read(4))[0]
self.data_off = struct.unpack("I", file.read(4))[0]
self.len = file.tell() - self.start
def create(self, dexfile):
        self.magic = []
        self.magic.append('d')
        self.magic.append('e')
        self.magic.append('x')
        self.magic.append('\n')  # 0x0A, kept as a char so copytofile's ord() works
        self.version = []
        self.version.append('0')
        self.version.append('3')
        self.version.append('5')
        self.version.append('\0')  # NUL terminator, also kept as a char
self.checksum = 1234
self.signature = "idontknow"
self.file_size = 1234
self.header_size = 112
self.endian_tag = 0x12345678
self.link_size = 0
self.link_off = 0
# self.map_off = dexfile.dexmaplist
def copytofile(self, file):
file.seek(self.start, 0)
file.write(struct.pack("B", ord(self.magic[0])))
file.write(struct.pack("B", ord(self.magic[1])))
file.write(struct.pack("B", ord(self.magic[2])))
file.write(struct.pack("B", ord(self.magic[3])))
file.write(struct.pack("B", ord(self.version[0])))
file.write(struct.pack("B", ord(self.version[1])))
file.write(struct.pack("B", ord(self.version[2])))
file.write(struct.pack("B", ord(self.version[3])))
file.write(struct.pack("I", self.checksum))
file.write(self.signature)
file.write(struct.pack("I", self.file_size))
file.write(struct.pack("I", self.header_size))
file.write(struct.pack("I", int(self.endian_tag, 16)))
file.write(struct.pack("I", self.link_size))
file.write(struct.pack("I", self.link_off))
file.write(struct.pack("I", self.map_off))
file.write(struct.pack("I", self.string_ids_size))
file.write(struct.pack("I", self.string_ids_off))
file.write(struct.pack("I", self.type_ids_size))
file.write(struct.pack("I", self.type_ids_off))
file.write(struct.pack("I", self.proto_ids_size))
file.write(struct.pack("I", self.proto_ids_off))
file.write(struct.pack("I", self.field_ids_size))
file.write(struct.pack("I", self.field_ids_off))
file.write(struct.pack("I", self.method_ids_size))
file.write(struct.pack("I", self.method_ids_off))
file.write(struct.pack("I", self.class_defs_size))
file.write(struct.pack("I", self.class_defs_off))
file.write(struct.pack("I", self.data_size))
file.write(struct.pack("I", self.data_off))
def makeoffset(self, dexmaplist):
self.string_ids_size = dexmaplist[1].size
self.string_ids_off = dexmaplist[1].offset
self.type_ids_size = dexmaplist[2].size
self.type_ids_off = dexmaplist[2].offset
self.proto_ids_size = dexmaplist[3].size
self.proto_ids_off = dexmaplist[3].offset
self.field_ids_size = dexmaplist[4].size
self.field_ids_off = dexmaplist[4].offset
self.method_ids_size = dexmaplist[5].size
self.method_ids_off = dexmaplist[5].offset
self.class_defs_size = dexmaplist[6].size
self.class_defs_off = dexmaplist[6].offset
self.data_off = dexmaplist[0x1000].offset
self.data_size = 0
self.map_off = dexmaplist[0x1000].offset
self.file_size = 0
def printf(self):
print ("DEX FILE HEADER:")
print ("magic: ", self.magic)
print ("version: ", self.version)
print ("checksum: ", self.checksum)
print ("signature: ", self.signature)
print ("file_size: ", self.file_size)
print ("header_size: ", self.header_size)
print ("endian_tag: ", self.endian_tag)
print ("link_size: ", self.link_size)
print ("link_off: ", self.link_off)
print ("map_off: ", self.map_off)
print ("string_ids_size: ", self.string_ids_size)
print ("string_ids_off: ", self.string_ids_off)
print ("type_ids_size: ", self.type_ids_size)
print ("type_ids_off: ", self.type_ids_off)
print ("proto_ids_size: ", self.proto_ids_size)
print ("proto_ids_off: ", self.proto_ids_off)
print ("field_ids_size: ", self.field_ids_size)
print ("field_ids_off: ", self.field_ids_off)
print ("method_ids_size: ", self.method_ids_size)
print ("method_ids_off: ", self.method_ids_off)
print ("class_defs_size: ", self.class_defs_size)
print ("class_defs_off: ", self.class_defs_off)
print ("data_size: ", self.data_size)
print ("data_off: ", self.data_off)
class DexStringID:
def __init__(self, file, mode=1):
if mode == 1:
self.stringDataoff = struct.unpack("I", file.read(4))[0] # in file
file.seek(self.stringDataoff, 0)
self.size = readunsignedleb128(file)
self.str = getutf8str(file)
self.ref = None
else:
self.stringDataoff = 0
self.size = 0
self.str = ""
self.ref = None
def addstrID(self, str):
self.ref = str
self.str = getstr(str.str)
def copytofile(self, file):
# self.stringDataoff = self.ref.start
file.write(struct.pack("I", self.ref.start))
def getreference(self, dexmaplist):
self.ref = dexmaplist[0x2002].getreference(self.stringDataoff)
def printf(self):
print ("size: ", self.size, " str: ", self.str, "dataof: ", self.stringDataoff)
class DexTypeID:
def __init__(self, file, str_table, mode=1):
if mode == 1:
self.descriptorIdx = struct.unpack("I", file.read(4))[0] # in file
self.str = str_table[self.descriptorIdx].str
else:
self.descriptorIdx = 0
self.str = ""
def addtype(self, index, string):
self.descriptorIdx = index
self.str = string
def copytofile(self, file):
file.write(struct.pack("I", self.descriptorIdx))
def printf(self):
print ("type id: ", self.str)
class DexProtoId:
def __init__(self, file, str_table, type_table, mode=1):
if mode == 1:
self.shortyIdx = struct.unpack("I", file.read(4))[0] # in file
self.returnTypeIdx = struct.unpack("I", file.read(4))[0] # in file
self.parametersOff = struct.unpack("I", file.read(4))[0] # in file
self.name = str_table[self.shortyIdx].str
self.returnstr = type_table[self.returnTypeIdx].str
self.ref = None
else:
self.shortyIdx = 0
self.returnTypeIdx = 0
self.parametersOff = 0
self.ref = None
def addproto(self, idx, typeidx, reference):
self.shortyIdx = idx
self.returnTypeIdx = typeidx
self.ref = reference
def copytofile(self, file):
file.write(struct.pack("I", self.shortyIdx))
file.write(struct.pack("I", self.returnTypeIdx))
if self.ref is not None:
file.write(struct.pack("I", self.ref.start))
else:
file.write(struct.pack("I", 0))
def getreference(self, dexmaplist):
self.ref = dexmaplist[0x1001].getreference(self.parametersOff)
def printf(self):
print ("return Type:", self.returnstr)
print ("methodname:", self.name)
if self.ref is not None:
self.ref.printf()
class DexFieldId:
def __init__(self, file, str_table, type_table, mode=1):
if mode == 1:
self.classIdx = struct.unpack("H", file.read(2))[0] # in file
self.typeIdx = struct.unpack("H", file.read(2))[0] # in file
self.nameIdx = struct.unpack("I", file.read(4))[0] # in file
self.classstr = type_table[self.classIdx].str
self.typestr = type_table[self.typeIdx].str
self.name = str_table[self.nameIdx].str
def addfield(self, classidx, typeidx, nameidx):
self.classIdx = classidx
self.typeIdx = typeidx
self.nameIdx = nameidx
def copytofile(self, file):
file.write(struct.pack("H", self.classIdx))
file.write(struct.pack("H", self.typeIdx))
file.write(struct.pack("I", self.nameIdx))
def printf(self):
print ("classstr:", self.classstr)
print ("typestr:", self.typestr)
print ("name:", self.name)
print ()
class DexMethodId:
def __init__(self, file, str_table, type_table, proto_table, mode=1):
if mode == 1:
self.classIdx = struct.unpack("H", file.read(2))[0] # in file
self.protoIdx = struct.unpack("H", file.read(2))[0] # in file
self.nameIdx = struct.unpack("I", file.read(4))[0] # in file
self.classstr = type_table[self.classIdx].str
self.name = str_table[self.nameIdx].str
else:
self.classIdx = 0
self.protoIdx = 0
self.nameIdx = 0
def addmethod(self, class_idx, proto_idx, name_idx):
self.classIdx = class_idx
self.protoIdx = proto_idx
self.nameIdx = name_idx
def copytofile(self, file):
file.write(struct.pack("H", self.classIdx))
file.write(struct.pack("H", self.protoIdx))
file.write(struct.pack("I", self.nameIdx))
def printf(self):
print ("classstr:", self.classstr)
print ("name:", self.name)
print ()
class DexClassDef:
def __init__(self, file, str_table, type_table, mode=1):
if mode == 1:
self.classIdx = struct.unpack("I", file.read(4))[0] # in file
self.accessFlags = struct.unpack("I", file.read(4))[0] # in file
self.superclassIdx = struct.unpack("I", file.read(4))[0] # in file
self.interfacesOff = struct.unpack("I", file.read(4))[0] # in file
self.sourceFileIdx = struct.unpack("I", file.read(4))[0] # in file
self.annotationsOff = struct.unpack("I", file.read(4))[0] # in file
self.classDataOff = struct.unpack("I", file.read(4))[0] # in file
self.staticValuesOff = struct.unpack("I", file.read(4))[0] # in file
self.classstr = type_table[self.classIdx].str
self.superclassstr = type_table[self.superclassIdx].str
if self.sourceFileIdx == 0xFFFFFFFF:
self.sourceFilestr = "NO_INDEX"
else:
self.sourceFilestr = str_table[self.sourceFileIdx].str
else:
self.classIdx = 0
self.accessFlags = 0
self.superclassIdx = 0
self.interfacesOff = 0
self.sourceFileIdx = 0
self.annotationsOff = 0
self.classDataOff = 0
self.staticValuesOff = 0
self.interfacesRef = None
self.annotationsRef = None
self.classDataRef = None
self.staticValuesRef = None
def addclassdef(self, classidx, access, superclass, source):
self.classIdx = classidx
self.accessFlags = access
self.superclassIdx = superclass
self.sourceFileIdx = source
def addclassdefref(self, interref, annoref, classref, staticref):
self.interfacesRef = interref
self.annotationsRef = annoref
self.classDataRef = classref
self.staticValuesRef = staticref
    # get a class's data reference by its type name, e.g. Lcom/cc/test/MainActivity;
def getclassdefref(self, str):
if self.classstr == str and self.classDataOff > 0:
return self.classDataRef
return None
def copytofile(self, file):
file.write(struct.pack("I", self.classIdx))
file.write(struct.pack("I", self.accessFlags))
file.write(struct.pack("I", self.superclassIdx))
if self.interfacesRef is not None:
file.write(struct.pack("I", self.interfacesRef.start))
# print(self.interfacesRef.start)
else:
file.write(struct.pack("I", 0))
file.write(struct.pack("I", self.sourceFileIdx))
if self.annotationsRef is not None:
file.write(struct.pack("I", self.annotationsRef.start))
# print(self.annotationsRef.start)
else:
file.write(struct.pack("I", 0))
if self.classDataRef is not None:
file.write(struct.pack("I", self.classDataRef.start))
else:
file.write(struct.pack("I", 0))
if self.staticValuesRef is not None:
file.write(struct.pack("I", self.staticValuesRef.start))
else:
file.write(struct.pack("I", 0))
def getreference(self, dexmaplist):
self.interfacesRef = dexmaplist[0x1001].getreference(self.interfacesOff)
if 0x2006 in dexmaplist.keys():
self.annotationsRef = dexmaplist[0x2006].getreference(self.annotationsOff)
self.classDataRef = dexmaplist[0x2000].getreference(self.classDataOff)
if 0x2005 in dexmaplist.keys():
self.staticValuesRef = dexmaplist[0x2005].getreference(self.staticValuesOff)
def printf(self):
print ("classtype:", self.classIdx, self.classstr)
print("access flag:", self.accessFlags)
print ("superclasstype:", self.superclassIdx, self.superclassstr)
print ("iterface off", self.interfacesOff)
print("source file index", self.sourceFilestr)
print("annotations off", self.annotationsOff)
print("class data off", self.classDataOff)
print("static values off", self.staticValuesOff)
if self.interfacesRef is not None:
self.interfacesRef.printf()
if self.annotationsRef is not None:
self.annotationsRef.printf()
if self.classDataRef is not None:
self.classDataRef.printf()
if self.staticValuesRef is not None:
self.staticValuesRef.printf()
class StringData:
def __init__(self, file, mode = 1):
if mode == 1:
self.start = file.tell()
self.len = 0
self.size = readunsignedleb128(file) # in file
self.str = [] # getutf8str(file) # in file
while 1:
onebyte = struct.unpack("B", file.read(1))[0]
if onebyte == 0:
break
self.str.append(onebyte)
else:
self.start = 0
self.len = 0
self.size = 0
self.str = []
def addstr(self, str):
self.size = len(str)
self.str = bytearray(str)
def copytofile(self, file):
writeunsignedleb128(self.size, file)
for i in range(0, len(self.str)):
file.write(struct.pack("B", self.str[i]))
file.write(struct.pack("B", 0))
def makeoffset(self, off):
self.start = off
self.len = len(self.str) + unsignedleb128forlen(self.size)
return off + self.len + 1 # 1 byte for '\0'
def modify(self, str):
self.size = len(str)
self.str = bytearray(str)
def printf(self):
print (getstr(self.str))
class TypeItem: # alignment: 4 bytes
def __init__(self, file, type_table, mode=1):
if mode == 1:
self.start = file.tell()
self.size = struct.unpack("I", file.read(4))[0] # in file
self.list = []
self.str = []
self.len = 0
for i in range(0, self.size):
self.list.append(struct.unpack("H", file.read(2))[0]) # in file
self.str.append(type_table[self.list[i]].str)
if self.size % 2 == 1:
struct.unpack("H", file.read(2)) # for alignment
else:
self.start = 0
self.size = 0
self.list = None
self.str = None
self.len = 0
def addtypeItem(self, type_list, str_list):
self.size = len(type_list)
self.list = type_list
self.str = str_list
def copytofile(self, file):
file.write(struct.pack("I", self.size))
for i in range(0, self.size):
file.write(struct.pack("H", self.list[i]))
if self.size % 2 == 1:
file.write(struct.pack("H", 0))
def equal(self, param_list, length):
if length != self.size:
return False
for i in range(0, self.size):
if param_list[i] != self.str[i]:
return False
return True
def makeoffset(self, off):
align = off % 4
if align != 0:
off += (4 - align)
self.len = 4 + 2 * self.size
self.start = off
return off + self.len
def printf(self):
for i in range(0, self.size):
print (self.list[i], self.str[i])
# alignment: 4bytes
class AnnotationsetItem:
def __init__(self, file):
self.start = file.tell()
self.len = 0
self.size = struct.unpack("I", file.read(4))[0] # in file
self.entries = [] # annotation_off, offset of annotation_item
self.ref = []
for i in range(0, self.size):
self.entries.append(struct.unpack("I", file.read(4))[0])
def copytofile(self, file):
file.write(struct.pack("I", self.size))
for i in range(0, self.size):
file.write(struct.pack("I", self.ref[i].start))
def makeoffset(self, off):
align = off % 4
if align != 0:
off += (4 - align)
self.start = off
self.len = 4 + 4 * self.size
return off + self.len
def getreference(self, dexmaplist):
for i in range(0, self.size):
self.ref.append(dexmaplist[0x2004].getreference(self.entries[i]))
def printf(self):
print ("size: ", self.size)
# alignment: 4bytes
class AnnotationsetrefList:
def __init__(self, file):
self.start = file.tell()
self.size = struct.unpack("I", file.read(4))[0] # in file
self.list = [] # annotaions_off, offset of annotation_set_item
self.ref = []
self.len = 0
for i in range(0, self.size):
self.list.append(struct.unpack("I", file.read(4))[0])
def copytofile(self, file):
file.write(struct.pack("I", self.size))
for i in range(0, self.size):
if self.ref[i] is not None:
file.write(struct.pack("I", self.ref[i].start))
else:
file.write(struct.pack("I", 0))
def makeoffset(self, off):
align = off % 4
if align != 0:
off += (4 - align)
self.start = off
self.len = 4 + 4 * self.size
return off + self.len
def getreference(self, dexmaplist):
for i in range(0, self.size):
self.ref.append(dexmaplist[0x1003].getreference(self.list[i]))
def printf(self):
print ("size: ", self.size)
class Encodedfield:
def __init__(self, file, mode=1):
if mode == 1:
self.start = file.tell()
self.len = 0
self.field_idx_diff = readunsignedleb128(file)
self.access_flags = readunsignedleb128(file)
else:
self.len = 0
self.field_idx_diff = 0
self.access_flags = 1
self.field_idx = 0 # need to set later
def __lt__(self, other): # for sort
return self.field_idx_diff < other.field_idx_diff
def addfield(self, idx, flag):
self.field_idx = idx
self.access_flags = int(flag)
def copytofile(self, file):
writeunsignedleb128(self.field_idx_diff, file)
writeunsignedleb128(self.access_flags, file)
def makeoffset(self, off):
self.start = off
self.len += unsignedleb128forlen(self.field_idx_diff)
self.len += unsignedleb128forlen(self.access_flags)
return off + self.len
def printf(self):
print ("diff: ", self.field_idx_diff)
print ("access: ", self.access_flags)
class Encodedmethod:
def __init__(self, file, mode=1):
if mode == 1:
self.start = file.tell()
self.len = 0
self.method_idx_diff = readunsignedleb128(file)
self.access_flags = readunsignedleb128(file)
self.code_off = readunsignedleb128(file)
self.coderef = None
else:
self.len = 0
self.method_idx_diff = 0
self.access_flags = 0
            self.coderef = None
self.method_idx = 0 # need to set later
        self.modified = 0  # when set, code_off is written out as zero
def addmethod(self, method_idx, access, ref):
self.method_idx = method_idx
self.access_flags = int(access)
self.coderef = ref
def copytofile(self, file):
writeunsignedleb128(self.method_idx_diff, file)
writeunsignedleb128(self.access_flags, file)
if self.modified == 1:
writeunsignedleb128(0, file)
elif self.coderef is not None:
writeunsignedleb128(self.coderef.start, file)
else:
writeunsignedleb128(0, file)
def makeoffset(self, off):
self.start = off
self.len += unsignedleb128forlen(self.method_idx_diff)
self.len += unsignedleb128forlen(self.access_flags)
if self.modified == 1:
self.len += unsignedleb128forlen(0)
elif self.coderef is not None:
self.len += unsignedleb128forlen(self.coderef.start)
else:
self.len += unsignedleb128forlen(0)
return off + self.len
def getreference(self, dexmaplist):
self.coderef = dexmaplist[0x2001].getreference(self.code_off)
def printf(self):
print ("method_idx_diff: ", self.method_idx_diff)
print("method idx:", self.method_idx)
print ("access: ", self.access_flags)
print ("code off: ", self.code_off)
# alignment:none
class ClassdataItem:
def __init__(self, file, mode=1):
if mode == 1:
self.start = file.tell()
self.len = 0
self.static_field_size = readunsignedleb128(file)
self.instance_fields_size = readunsignedleb128(file)
self.direct_methods_size = readunsignedleb128(file)
self.virtual_methods_size = readunsignedleb128(file)
self.static_fields = []
self.instance_fields = []
self.direct_methods = []
self.virtual_methods = []
for i in range(0, self.static_field_size):
self.static_fields.append(Encodedfield(file))
for i in range(0, self.instance_fields_size):
self.instance_fields.append(Encodedfield(file))
for i in range(0, self.direct_methods_size):
self.direct_methods.append(Encodedmethod(file))
for i in range(0, self.virtual_methods_size):
self.virtual_methods.append(Encodedmethod(file))
else:
self.static_field_size = 0
self.instance_fields_size = 0
self.direct_methods_size = 0
self.virtual_methods_size = 0
self.static_fields = []
self.instance_fields = []
self.direct_methods = []
self.virtual_methods = []
def addstaticfield(self, field_idx, accessflag):
self.static_field_size += 1
field = Encodedfield(None, 2)
field.addfield(field_idx, accessflag)
self.static_fields.append(field)
def addinstancefield(self, field_idx, accessflag):
self.instance_fields_size += 1
field = Encodedfield(None, 2)
field.addfield(field_idx, accessflag)
self.instance_fields.append(field)
def adddirectmethod(self, method_idx, accessflag, code_ref):
method = Encodedmethod(None, 2)
method.addmethod(method_idx, accessflag, code_ref)
self.direct_methods_size += 1
self.direct_methods.append(method)
def addvirtualmethod(self, method_idx, accessflag, code_ref):
method = Encodedmethod(None, 2)
method.addmethod(method_idx, accessflag, code_ref)
self.virtual_methods_size += 1
self.virtual_methods.append(method)
    def commit(self):  # call once all members are added, to recompute the index deltas
if self.static_field_size > 0:
# self.static_fields.sort() # since each field added has the largest index
# there is no need to sort the list
last = 0
for i in range(0, self.static_field_size):
self.static_fields[i].field_idx_diff = self.static_fields[i].field_idx - last
last = self.static_fields[i].field_idx
if self.instance_fields_size > 0:
last = 0
for i in range(0, self.instance_fields_size):
self.instance_fields[i].field_idx_diff = self.instance_fields[i].field_idx - last
last = self.instance_fields[i].field_idx
if self.direct_methods_size > 0:
last = 0
for i in range(0, self.direct_methods_size):
self.direct_methods[i].method_idx_diff = self.direct_methods[i].method_idx - last
last = self.direct_methods[i].method_idx
if self.virtual_methods_size > 0:
last = 0
for i in range(0, self.virtual_methods_size):
self.virtual_methods[i].method_idx_diff = self.virtual_methods[i].method_idx - last
last = self.virtual_methods[i].method_idx
def copytofile(self, file):
writeunsignedleb128(self.static_field_size, file)
writeunsignedleb128(self.instance_fields_size, file)
writeunsignedleb128(self.direct_methods_size, file)
writeunsignedleb128(self.virtual_methods_size, file)
for i in range(0, self.static_field_size):
self.static_fields[i].copytofile(file)
for i in range(0, self.instance_fields_size):
self.instance_fields[i].copytofile(file)
for i in range(0, self.direct_methods_size):
self.direct_methods[i].copytofile(file)
for i in range(0, self.virtual_methods_size):
self.virtual_methods[i].copytofile(file)
    # besides adding references, also need to set the correct indices
def getreference(self, dexmaplist):
last = 0
for i in range(0, self.static_field_size):
self.static_fields[i].field_idx = last + self.static_fields[i].field_idx_diff
last = self.static_fields[i].field_idx
last = 0
for i in range(0, self.instance_fields_size):
self.instance_fields[i].field_idx = last + self.instance_fields[i].field_idx_diff
last = self.instance_fields[i].field_idx
last = 0
for i in range(0, self.direct_methods_size):
self.direct_methods[i].getreference(dexmaplist)
self.direct_methods[i].method_idx = last + self.direct_methods[i].method_idx_diff
last = self.direct_methods[i].method_idx
last = 0
for i in range(0, self.virtual_methods_size):
self.virtual_methods[i].getreference(dexmaplist)
self.virtual_methods[i].method_idx = last + self.virtual_methods[i].method_idx_diff
last = self.virtual_methods[i].method_idx
def makeoffset(self, off):
self.start = off
off += unsignedleb128forlen(self.static_field_size)
off += unsignedleb128forlen(self.instance_fields_size)
off += unsignedleb128forlen(self.direct_methods_size)
off += unsignedleb128forlen(self.virtual_methods_size)
for i in range(0, self.static_field_size):
off = self.static_fields[i].makeoffset(off)
for i in range(0, self.instance_fields_size):
off = self.instance_fields[i].makeoffset(off)
for i in range(0, self.direct_methods_size):
off = self.direct_methods[i].makeoffset(off)
for i in range(0, self.virtual_methods_size):
off = self.virtual_methods[i].makeoffset(off)
self.len = off - self.start
return off
def printf(self):
print ("static field size: ", self.static_field_size)
print ("instance fields size: ", self.instance_fields_size)
print ("direct methods size: ", self.direct_methods_size)
print ("virtual methods size: ", self.virtual_methods_size)
for i in range(0, self.static_field_size):
self.static_fields[i].printf()
for i in range(0, self.instance_fields_size):
self.instance_fields[i].printf()
for i in range(0, self.direct_methods_size):
self.direct_methods[i].printf()
for i in range(0, self.virtual_methods_size):
self.virtual_methods[i].printf()
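commit() and getreference() are mirror images: one delta-encodes the member indices, the other decodes them. The pair in isolation, as a sketch:

def encode_diffs(indices):
    # field_idx/method_idx -> *_idx_diff, as done in commit()
    last, diffs = 0, []
    for idx in indices:
        diffs.append(idx - last)
        last = idx
    return diffs

def decode_diffs(diffs):
    # *_idx_diff -> absolute indices, as done in getreference()
    last, indices = 0, []
    for d in diffs:
        last += d
        indices.append(last)
    return indices

assert decode_diffs(encode_diffs([3, 7, 9])) == [3, 7, 9]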
class TryItem:
def __init__(self, file):
self.start = file.tell()
self.start_addr = struct.unpack("I", file.read(4))[0] # in file
self.insn_count = struct.unpack("H", file.read(2))[0] # in file
self.handler_off = struct.unpack("H", file.read(2))[0] # in file
self.len = 0
def copytofile(self, file):
file.write(struct.pack("I", self.start_addr))
file.write(struct.pack("H", self.insn_count))
file.write(struct.pack("H", self.handler_off))
def makeoffset(self, off):
self.start = off
self.len = 4 + 2 + 2
return off + self.len
def printf(self):
print ("start_Addr: ", self.start_addr)
print ("insn_count: ", self.insn_count)
print ("handler_off: ", self.handler_off)
print ()
class EncodedTypeAddrPair:
def __init__(self, file):
self.type_idx = readunsignedleb128(file)
self.addr = readunsignedleb128(file)
def copytofile(self, file):
writeunsignedleb128(self.type_idx, file)
writeunsignedleb128(self.addr, file)
def makeoffset(self, off):
off += unsignedleb128forlen(self.type_idx)
off += unsignedleb128forlen(self.addr)
return off
def printf(self):
print ("type idx: ", self.type_idx)
print ("addr: ", self.addr)
print ()
class EncodedhandlerItem:
def __init__(self, file):
self.start = file.tell()
self.len = 0
self.size = readsignedleb128(file)
self.handlers = []
# print("start handler item", abs(self.size))
for i in range(0, abs(self.size)):
self.handlers.append(EncodedTypeAddrPair(file))
if self.size <= 0:
self.catch_all_addr = readunsignedleb128(file)
def copytofile(self, file):
writesignedleb128(self.size, file)
for i in range(0, abs(self.size)):
self.handlers[i].copytofile(file)
if self.size <= 0:
writeunsignedleb128(self.catch_all_addr, file)
def makeoffset(self, off):
self.start = off
off += signedleb128forlen(self.size)
for i in range(0, abs(self.size)):
off = self.handlers[i].makeoffset(off)
if self.size <= 0:
off += unsignedleb128forlen(self.catch_all_addr)
self.len = off - self.start
return off
class EncodedhandlerList:
def __init__(self, file):
self.start = file.tell()
self.len = 0
self.size = readunsignedleb128(file)
self.list = []
for i in range(0, self.size):
self.list.append(EncodedhandlerItem(file))
def copytofile(self, file):
file.seek(self.start, 0)
writeunsignedleb128(self.size, file)
for i in range(0, self.size):
self.list[i].copytofile(file)
def makeoffset(self, off):
self.start = off
off += unsignedleb128forlen(self.size)
for i in range(0, self.size):
off = self.list[i].makeoffset(off)
return off
# alignment: 4bytes
class CodeItem:
def __init__(self, file, mode=1):
if mode == 1:
self.start = file.tell()
self.len = 0
self.register_size = struct.unpack("H", file.read(2))[0] # in file
self.ins_size = struct.unpack("H", file.read(2))[0] # in file
self.outs_size = struct.unpack("H", file.read(2))[0] # in file
self.tries_size = struct.unpack("H", file.read(2))[0] # in file
self.debug_info_off = struct.unpack("I", file.read(4))[0] # in file
self.insns_size = struct.unpack("I", file.read(4))[0] # in file
self.insns = []
self.debugRef = None
for i in range(0, self.insns_size):
self.insns.append(struct.unpack("H", file.read(2))[0])
if self.tries_size != 0 and self.insns_size % 2 == 1:
self.padding = struct.unpack("H", file.read(2))[0]
self.tries = []
for i in range(0, self.tries_size):
self.tries.append(TryItem(file))
if self.tries_size != 0:
self.handler = EncodedhandlerList(file)
align = file.tell() % 4 # for alignment
if align != 0:
file.read(4-align)
else:
self.start = 0
self.len = 0
self.register_size = 0
self.ins_size = 0
self.outs_size = 0
self.tries_size = 0
self.debug_info_off = 0
self.insns_size = 0
self.insns = []
self.debugRef = None
self.padding = 0
self.tries = []
self.handler = None
def addcode(self, reg_size, insize, outsize, triessize, debugoff, inssize, insnslist, debugref, trieslist, handlerref):
self.register_size = reg_size
self.ins_size = insize
self.outs_size = outsize
self.tries_size = triessize
self.debug_info_off = debugoff
self.insns_size = inssize
self.insns = insnslist
self.debugRef = debugref
self.tries = trieslist
self.handler = handlerref
def copytofile(self, file):
file.seek(self.start, 0)
file.write(struct.pack("H", self.register_size))
file.write(struct.pack("H", self.ins_size))
file.write(struct.pack("H", self.outs_size))
file.write(struct.pack("H", self.tries_size))
if self.debugRef is not None:
file.write(struct.pack("I", self.debugRef.start))
else:
file.write(struct.pack("I", 0))
file.write(struct.pack("I", self.insns_size))
for i in range(0, self.insns_size):
file.write(struct.pack("H", self.insns[i]))
if self.tries_size != 0 and self.insns_size % 2 == 1:
file.write(struct.pack("H", self.padding))
for i in range(0, self.tries_size):
self.tries[i].copytofile(file)
if self.tries_size != 0:
self.handler.copytofile(file)
align = file.tell() % 4 # for alignment
if align != 0:
for i in range(0, 4-align):
file.write(struct.pack("B", 0))
# print("code item addr:", file.tell())
def makeoffset(self, off):
align = off % 4
if align != 0:
off += (4 - align)
self.start = off
off += (4 * 2 + 2 * 4) # 4 ushort and 2 uint
off += (2 * self.insns_size)
if self.tries_size != 0 and self.insns_size % 2 == 1: # for padding
off += 2
for i in range(0, self.tries_size):
off = self.tries[i].makeoffset(off)
if self.tries_size != 0:
off = self.handler.makeoffset(off)
self.len = off - self.start
return off
def getreference(self, dexmaplist):
self.debugRef = dexmaplist[0x2003].getreference(self.debug_info_off)
    def printf(self):
        print("registers_size:", self.register_size)
        print("ins_size, outs_size, tries_size:", self.ins_size, self.outs_size, self.tries_size)
        print("debug_info_off:", self.debug_info_off)
        print("insns_size:", self.insns_size)
for i in range(0, self.insns_size):
print(self.insns[i])
tmp = Instruction.InstructionSet(self.insns)
tmp.printf()
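# code_item structures must start on a 4-byte boundary, which is why makeoffset
# above rounds `off` up before laying the item out. A minimal sketch of that
# rounding (the class inlines it rather than calling a helper):
def _align4_sketch(off):
    return off if off % 4 == 0 else off + (4 - off % 4)  # e.g. _align4_sketch(21) == 24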
# alignment: none
class AnnotationItem:
    Visibity = {0: 'VISIBILITY_BUILD', 1: 'VISIBILITY_RUNTIME', 2: 'VISIBILITY_SYSTEM'}
def __init__(self, file):
self.start = file.tell()
self.len = 0
self.visibility = struct.unpack("B", file.read(1))[0] # infile
self.annotation = EncodedAnnotation(file)
def copytofile(self, file):
file.write(struct.pack("B", self.visibility))
self.annotation.copytofile(file)
def makeoffset(self, off):
self.start = off
off += 1
off = self.annotation.makeoffset(off)
self.len = off - self.start
return off
# alignment: none
class EncodedArrayItem:
def __init__(self, file):
self.start = file.tell()
self.len = 0
self.value = EncodedArray(file)
def copytofile(self, file):
self.value.copytofile(file)
def makeoffset(self, off):
# if self.start == 1096008:
self.start = off
off = self.value.makeoffset(off)
self.len = off - self.start
return off
def printf(self):
print("None for EncodedArrayItem by now")
class FieldAnnotation:
def __init__(self, file):
self.field_idx = struct.unpack("I", file.read(4))[0] # in file
self.annotations_off = struct.unpack("I", file.read(4))[0] # in file, offset of annotation_set_item
self.annotations_off_ref = None
def copytofile(self, file):
file.write(struct.pack("I", self.field_idx))
file.write(struct.pack("I", self.annotations_off_ref.start))
def makeoffset(self, off):
off += 4 * 2
return off
def getreference(self, dexmaplist):
self.annotations_off_ref = dexmaplist[0x1003].getreference(self.annotations_off)
class MethodAnnotation:
def __init__(self, file):
self.method_idx = struct.unpack("I", file.read(4))[0] # in file
self.annotations_off = struct.unpack("I", file.read(4))[0] # in file
self.annotations_off_ref = None
def copytofile(self, file):
file.write(struct.pack("I", self.method_idx))
file.write(struct.pack("I", self.annotations_off_ref.start))
def makeoffset(self, off):
off += 4 * 2
return off
def getreference(self, dexmaplist):
self.annotations_off_ref = dexmaplist[0x1003].getreference(self.annotations_off)
class ParamterAnnotation:
def __init__(self, file):
self.method_idx = struct.unpack("I", file.read(4))[0] # in file
self.annotations_off = struct.unpack("I", file.read(4))[0] # in file. offset of "annotation_set_ref_list"
self.annotations_off_ref = None
def copytofile(self, file):
file.write(struct.pack("I", self.method_idx))
file.write(struct.pack("I", self.annotations_off_ref.start))
def makeoffset(self, off):
off += 4 * 2
return off
def getreference(self, dexmaplist):
self.annotations_off_ref = dexmaplist[0x1002].getreference(self.annotations_off)
# alignment: 4 bytes
class AnnotationsDirItem:
def __init__(self, file):
self.start = file.tell()
self.len = 0
self.class_annotations_off = struct.unpack("I", file.read(4))[0] # in file
self.fields_size = struct.unpack("I", file.read(4))[0] # in file
self.annotated_methods_size = struct.unpack("I", file.read(4))[0] # in file
self.annotate_parameters_size = struct.unpack("I", file.read(4))[0] # in file
self.field_annotations = [] # field_annotation[size]
self.method_annotations = []
self.parameter_annotations = []
self.class_annotations_ref = None
for i in range(0, self.fields_size):
self.field_annotations.append(FieldAnnotation(file))
for i in range(0, self.annotated_methods_size):
self.method_annotations.append(MethodAnnotation(file))
for i in range(0, self.annotate_parameters_size):
self.parameter_annotations.append(ParamterAnnotation(file))
def copytofile(self, file):
if self.class_annotations_ref is not None:
file.write(struct.pack("I", self.class_annotations_ref.start))
else:
file.write(struct.pack("I", self.class_annotations_off))
file.write(struct.pack("I", self.fields_size))
file.write(struct.pack("I", self.annotated_methods_size))
file.write(struct.pack("I", self.annotate_parameters_size))
for i in range(0, self.fields_size):
self.field_annotations[i].copytofile(file)
for i in range(0, self.annotated_methods_size):
self.method_annotations[i].copytofile(file)
for i in range(0, self.annotate_parameters_size):
self.parameter_annotations[i].copytofile(file)
def makeoffset(self, off):
self.start = off
off += 4 * 4
for i in range(0, self.fields_size):
off = self.field_annotations[i].makeoffset(off)
for i in range(0, self.annotated_methods_size):
off = self.method_annotations[i].makeoffset(off)
for i in range(0, self.annotate_parameters_size):
off = self.parameter_annotations[i].makeoffset(off)
self.len = off - self.start
return off
def getreference(self, dexmaplist):
self.class_annotations_ref = dexmaplist[0x1003].getreference(self.class_annotations_off)
for i in range(0, self.fields_size):
self.field_annotations[i].getreference(dexmaplist)
for i in range(0, self.annotated_methods_size):
self.method_annotations[i].getreference(dexmaplist)
for i in range(0, self.annotate_parameters_size):
self.parameter_annotations[i].getreference(dexmaplist)
def printf(self):
print("None for AnnotationDirItem by now")
# alignment: none
class DebugInfo:
def __init__(self, file, mode=1):
if mode == 1:
self.start = file.tell()
self.len = 0
self.line_start = readunsignedleb128(file)
self.parameters_size = readunsignedleb128(file)
self.parameter_names = []
for i in range(0, self.parameters_size):
self.parameter_names.append(readunsignedleb128p1(file))
self.debug = []
while 1:
onebyte = struct.unpack("B", file.read(1))[0]
self.debug.append(onebyte)
if onebyte == 0:
break
elif onebyte == 1:
self.debug.append(readunsignedleb128(file))
elif onebyte == 2:
self.debug.append(readsignedleb128(file))
elif onebyte == 3:
self.debug.append(readunsignedleb128(file))
self.debug.append(readunsignedleb128p1(file))
self.debug.append(readunsignedleb128p1(file))
elif onebyte == 4:
self.debug.append(readunsignedleb128(file))
self.debug.append(readunsignedleb128p1(file))
self.debug.append(readunsignedleb128p1(file))
self.debug.append(readunsignedleb128p1(file))
elif onebyte == 5:
self.debug.append(readunsignedleb128(file))
elif onebyte == 6:
self.debug.append(readunsignedleb128(file))
elif onebyte == 9:
self.debug.append(readunsignedleb128p1(file))
else:
self.start = 0
self.len = 0
self.line_start = 0
self.parameters_size = 0
self.parameter_names = []
self.debug = []
def adddebugitem(self, linestart, paramsize, names_list, debug_list):
self.line_start = linestart
self.parameters_size = paramsize
self.parameter_names = names_list
self.debug = debug_list
def copytofile(self, file):
file.seek(self.start, 0)
writeunsignedleb128(self.line_start, file)
writeunsignedleb128(self.parameters_size, file)
for i in range(0, self.parameters_size):
# print(self.parameter_names[i])
# if i == self.parameters_size-1:
# writeunsignedleb128p1alignshort(self.parameter_names[i], file)
# else:
writeunsignedleb128p1(self.parameter_names[i], file)
index = 0
while 1:
onebyte = self.debug[index]
file.write(struct.pack("B", onebyte))
index += 1
if onebyte == 0:
break
elif onebyte == 1:
writeunsignedleb128(self.debug[index], file)
index += 1
elif onebyte == 2:
writesignedleb128(self.debug[index], file)
index += 1
elif onebyte == 3:
writeunsignedleb128(self.debug[index], file)
writeunsignedleb128p1(self.debug[index+1], file)
writeunsignedleb128p1(self.debug[index+2], file)
index += 3
elif onebyte == 4:
writeunsignedleb128(self.debug[index], file)
writeunsignedleb128p1(self.debug[index+1], file)
writeunsignedleb128p1(self.debug[index+2], file)
writeunsignedleb128p1(self.debug[index+3], file)
index += 4
elif onebyte == 5:
writeunsignedleb128(self.debug[index], file)
index += 1
elif onebyte == 6:
writeunsignedleb128(self.debug[index], file)
index += 1
elif onebyte == 9:
writeunsignedleb128p1(self.debug[index], file)
index += 1
def printf(self):
print(self.line_start, self.parameters_size)
def makeoffset(self, off):
self.start = off
off += unsignedleb128forlen(self.line_start)
off += unsignedleb128forlen(self.parameters_size)
for i in range(0, self.parameters_size):
off += unsignedleb128p1forlen(self.parameter_names[i])
index = 0
while 1:
onebyte = self.debug[index]
off += 1
index += 1
if onebyte == 0:
break
elif onebyte == 1:
off += unsignedleb128forlen(self.debug[index])
index += 1
elif onebyte == 2:
off += signedleb128forlen(self.debug[index])
index += 1
elif onebyte == 3:
off += unsignedleb128forlen(self.debug[index])
off += unsignedleb128p1forlen(self.debug[index+1])
off += unsignedleb128p1forlen(self.debug[index+2])
index += 3
elif onebyte == 4:
off += unsignedleb128forlen(self.debug[index])
off += unsignedleb128p1forlen(self.debug[index+1])
off += unsignedleb128p1forlen(self.debug[index+2])
off += unsignedleb128p1forlen(self.debug[index+3])
index += 4
elif onebyte == 5:
off += unsignedleb128forlen(self.debug[index])
index += 1
elif onebyte == 6:
off += unsignedleb128forlen(self.debug[index])
index += 1
elif onebyte == 9:
off += unsignedleb128p1forlen(self.debug[index])
index += 1
self.len = off - self.start
return off
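# For reference, the opcode switch in DebugInfo above follows the DEX
# debug_info_item state machine:
#   0x00 DBG_END_SEQUENCE                (no operands)
#   0x01 DBG_ADVANCE_PC                  uleb128
#   0x02 DBG_ADVANCE_LINE                sleb128
#   0x03 DBG_START_LOCAL                 uleb128 reg, uleb128p1 name, uleb128p1 type
#   0x04 DBG_START_LOCAL_EXTENDED        as 0x03, plus uleb128p1 signature
#   0x05 DBG_END_LOCAL                   uleb128 reg
#   0x06 DBG_RESTART_LOCAL               uleb128 reg
#   0x09 DBG_SET_FILE                    uleb128p1
# Opcodes 0x07/0x08 and the special opcodes >= 0x0a carry no operands, which is
# why the loops above read nothing for them.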
class DexMapItem:
Constant = {0: 'TYPE_HEADER_ITEM', 1: 'TYPE_STRING_ID_ITEM', 2: 'TYPE_TYPE_ID_ITEM',
3: 'TYPE_PROTO_ID_ITEM', 4: 'TYPE_FIELD_ID_ITEM', 5: 'TYPE_METHOD_ID_ITEM',
6: 'TYPE_CLASS_DEF_ITEM', 0x1000: 'TYPE_MAP_LIST', 0x1001: 'TYPE_TYPE_LIST',
0x1002: 'TYPE_ANNOTATION_SET_REF_LIST', 0x1003: 'TYPE_ANNOTATION_SET_ITEM',
0x2000: 'TYPE_CLASS_DATA_ITEM', 0x2001: 'TYPE_CODE_ITEM', 0x2002: 'TYPE_STRING_DATA_ITEM',
0x2003: 'TYPE_DEBUG_INFO_ITEM', 0x2004: 'TYPE_ANNOTATION_ITEM', 0x2005: 'TYPE_ENCODED_ARRAY_ITEM',
0x2006: 'TYPE_ANNOTATIONS_DIRECTORY_ITEM'}
def __init__(self, file):
self.type = struct.unpack("H", file.read(2))[0]
self.unused = struct.unpack("H", file.read(2))[0]
self.size = struct.unpack("I", file.read(4))[0]
self.offset = struct.unpack("I", file.read(4))[0]
self.item = []
self.len = 0 # the length of the item
    def addstr(self, str):  # append the string as a new StringData at the end and return it
        if self.type == 0x2002:
            strdata = StringData(None, 2)  # create an empty instance
strdata.addstr(str)
self.item.append(strdata)
self.size += 1
return strdata
else:
print("error in add string")
return None
def addstrID(self, strdata):
if self.type == 1:
stringid = DexStringID(None, 2)
stringid.addstrID(strdata)
self.item.append(stringid)
self.size += 1
else:
print("error in add string id")
def addtypeID(self, field):
if self.type == 4:
self.item.append(field)
self.size += 1
else:
print("error in add type id")
def addclassdata(self, classdata):
if self.type == 0x2000:
self.item.append(classdata)
self.size += 1
else:
print("error in add class data")
def addtypeid(self, index, str):
if self.type == 2:
type = DexTypeID(None, None, 2)
type.addtype(index, str)
self.item.append(type)
self.size += 1
else:
print("error in add type id")
def addmethodid(self, class_idx, proto_idx, name_idx):
method = DexMethodId(None, None, None, None, 2)
method.addmethod(class_idx, proto_idx, name_idx)
print("add method id", proto_idx)
self.item.append(method)
self.size += 1
def addclassdef(self, classdef):
if self.type == 6:
self.item.append(classdef)
self.size += 1
else:
print("error in add class def")
def addprotoid(self, short_idx, type_idx, paramref):
if self.type == 3:
proto = DexProtoId(None, None, None, 2)
proto.addproto(short_idx, type_idx, paramref)
self.item.append(proto)
self.size += 1
else:
print("error in add proto id")
def addtypelist(self, typeitem):
if self.type == 0x1001:
self.item.append(typeitem)
self.size += 1
else:
print("error in add type list")
def addcodeitem(self, codeitem):
if self.type == 0x2001:
self.item.append(codeitem)
self.size += 1
else:
print("error in add code item")
def adddebugitem(self, debugitem):
if self.type == 0x2003:
self.item.append(debugitem)
self.size += 1
else:
print("error in add debug item")
def copytofile(self, file):
file.seek(self.offset, 0)
if self.type <= 0x2006:
align = file.tell() % 4
if align != 0:
for i in range(0, 4-align):
file.write(struct.pack("B", 0))
print("copytofile:", DexMapItem.Constant[self.type], file.tell())
for i in range(0, self.size):
self.item[i].copytofile(file)
# if self.type == 0x2002:
# print("for debug", i, getstr(self.item[i].str))
    def printf(self, index):
        print("type: ", DexMapItem.Constant[self.type])
        print("size: ", self.size)
        print("offset: ", self.offset)
        if self.type == index:
            for i in range(0, self.size):
                self.item[i].printf()
        print()
def setitem(self, file, dexmapitem):
file.seek(self.offset)
for i in range(0, self.size):
if self.type == 1: # string
file.seek(self.offset+i*4, 0)
self.item.append(DexStringID(file))
elif self.type == 2:
file.seek(self.offset+i*4, 0)
self.item.append(DexTypeID(file, dexmapitem[1].item)) # make sure has already build string table
elif self.type == 3:
file.seek(self.offset+i*12, 0)
self.item.append(DexProtoId(file, dexmapitem[1].item, dexmapitem[2].item))
elif self.type == 4:
file.seek(self.offset+i*8, 0)
self.item.append(DexFieldId(file, dexmapitem[1].item, dexmapitem[2].item))
elif self.type == 5:
file.seek(self.offset+i*8, 0)
self.item.append(DexMethodId(file, dexmapitem[1].item, dexmapitem[2].item, dexmapitem[3].item))
elif self.type == 6:
file.seek(self.offset+i*32, 0)
self.item.append(DexClassDef(file, dexmapitem[1].item, dexmapitem[2].item))
elif self.type == 0x1001: # TYPE_TYPE_LIST
self.item.append(TypeItem(file, dexmapitem[2].item))
elif self.type == 0x1002: # TYPE_ANNOTATION_SET_REF_LIST
self.item.append(AnnotationsetrefList(file))
elif self.type == 0x1003: # TYPE_ANNOTATION_SET_ITEM
self.item.append(AnnotationsetItem(file))
elif self.type == 0x2000: # TYPE_CLASS_DATA_ITEM
self.item.append(ClassdataItem(file))
elif self.type == 0x2001: # TYPE_CODE_ITEM
self.item.append(CodeItem(file))
elif self.type == 0x2002: # TYPE_STRING_DATA_ITEM
self.item.append(StringData(file))
elif self.type == 0x2003: # TYPE_DEBUG_INFO_ITEM
self.item.append(DebugInfo(file))
elif self.type == 0x2004: # TYPE_ANNOTATION_ITEM
self.item.append(AnnotationItem(file))
elif self.type == 0x2005: # TYPE_ENCODED_ARRAY_ITEM
self.item.append(EncodedArrayItem(file))
elif self.type == 0x2006: # TYPE_ANNOTATIONS_DIRECTORY_ITEM
self.item.append(AnnotationsDirItem(file))
def makeoffset(self, off):
if self.type < 0x2000 or self.type == 0x2001 or self.type == 0x2006:
align = off % 4
if align != 0:
off += (4 - align)
self.offset = off
if self.type == 0: # header
self.len = 112
elif self.type == 1: # string id
self.len = 4 * self.size
elif self.type == 2: # type id
self.len = 4 * self.size
elif self.type == 3: # proto id
self.len = 12 * self.size
elif self.type == 4: # field id
self.len = 8 * self.size
elif self.type == 5: # method id
self.len = 8 * self.size
elif self.type == 6: # class def
self.len = 32 * self.size
        elif self.type == 0x1000:  # map list, handled specially by the DexMapList class
pass
elif 0x1001 <= self.type <= 0x2006: # type list, annotation ref set list, annotation set item...
for i in range(0, self.size):
off = self.item[i].makeoffset(off)
# if self.type == 0x2002:
# print("for debug", i, off)
self.len = off - self.offset
if self.type == 0x2000:
print("the off is:", off)
if self.type <= 6:
return off + self.len
else:
return off
def getref(self, dexmaplist):
for i in range(0, self.size):
self.item[i].getreference(dexmaplist)
    def getreference(self, addr):
        if addr == 0:
            return None
        for i in range(0, self.size):
            if self.item[i].start == addr:
                return self.item[i]
        # not found: abort, surfacing the unresolved address as the exit status
        os._exit(addr)
        return None
    def getrefbystr(self, str):  # look up a StringData item for modification
if self.type == 0x2002:
for i in range(0, self.size):
if getstr(self.item[i].str) == str:
return self.item[i]
else:
print("error occur here", self.type)
return None
def getindexbyname(self, str): # search for type id item
for i in range(0, self.size):
if self.item[i].str == str:
print("find index of", DexMapItem.Constant[self.type], str)
return i
print("did not find it in", DexMapItem.Constant[self.type])
return -1
    def getindexbyproto(self, short_idx, return_type_idx, param_list, length):  # for the proto id section (type 3)
for i in range(0, self.size):
if short_idx == self.item[i].shortyIdx and return_type_idx == self.item[i].returnTypeIdx:
if self.item[i].ref is not None:
if self.item[i].ref.equal(param_list, length):
return i
return -1
class DexMapList:
Seq = (0, 1, 2, 3, 4, 5, 6, 0x1000, 0x1001, 0x1002, 0x1003, 0x2001, 0x2000, 0x2002,
0x2003, 0x2004, 0x2005, 0x2006)
def __init__(self, file, offset):
file.seek(offset, 0)
self.start = offset
self.size = struct.unpack("I", file.read(4))[0]
mapitem = []
self.dexmapitem = {}
for i in range(0, self.size):
mapitem.append(DexMapItem(file))
for i in range(0, self.size):
mapitem[i].setitem(file, self.dexmapitem)
self.dexmapitem[mapitem[i].type] = mapitem[i]
def copy(self, file):
for i in range(0, len(DexMapList.Seq)):
index = DexMapList.Seq[i]
if index in self.dexmapitem.keys():
print(index, "start at:", file.tell())
if index != 0x1000:
self.dexmapitem[index].copytofile(file)
else:
self.copytofile(file)
def copytofile(self, file):
print("output map list", file.tell())
file.seek(self.start, 0)
file.write(struct.pack("I", self.size))
for i in range(0, len(DexMapList.Seq)):
index = DexMapList.Seq[i]
if index in self.dexmapitem.keys():
# print(self.dexmapitem[index].type)
file.write(struct.pack("H", self.dexmapitem[index].type))
file.write(struct.pack("H", self.dexmapitem[index].unused))
file.write(struct.pack("I", self.dexmapitem[index].size))
file.write(struct.pack("I", self.dexmapitem[index].offset))
def makeoff(self):
off = 0
for i in range(0, len(DexMapList.Seq)):
index = DexMapList.Seq[i]
if index in self.dexmapitem.keys():
align = off % 4
if align != 0:
off += (4 - align)
if index != 0x1000:
off = self.dexmapitem[index].makeoffset(off)
else:
off = self.makeoffset(off)
return off
def makeoffset(self, off):
self.start = off
off += (4 + self.size * 12)
self.dexmapitem[0x1000].offset = self.start
return off
def getreference(self):
self.dexmapitem[1].getref(self.dexmapitem)
self.dexmapitem[3].getref(self.dexmapitem)
self.dexmapitem[6].getref(self.dexmapitem)
if 0x1002 in self.dexmapitem.keys():
self.dexmapitem[0x1002].getref(self.dexmapitem)
if 0x1003 in self.dexmapitem.keys():
self.dexmapitem[0x1003].getref(self.dexmapitem)
self.dexmapitem[0x2000].getref(self.dexmapitem)
self.dexmapitem[0x2001].getref(self.dexmapitem)
if 0x2006 in self.dexmapitem.keys():
self.dexmapitem[0x2006].getref(self.dexmapitem)
def getrefbystr(self, str):
return self.dexmapitem[0x2002].getrefbystr(str)
def printf(self, index):
print ("DexMapList:")
print ("size: ", self.size)
for i in self.dexmapitem:
self.dexmapitem[i].printf(index)
# mode: 0 = create from file (default), 1 = create from memory
class DexFile:
def __init__(self, filename, mode=0):
if mode == 0:
file = open(filename, 'rb')
self.dexheader = DexHeader(file)
self.dexmaplist = DexMapList(file, self.dexheader.map_off)
self.dexmaplist.dexmapitem[0].item.append(self.dexheader)
self.dexmaplist.getreference()
file.close()
def copytofile(self, filename):
if os.path.exists(filename):
os.remove(filename)
file = open(filename, 'wb+')
file.seek(0, 0)
self.makeoffset()
self.dexmaplist.copy(file)
        rest = self.dexheader.file_size - file.tell()
for i in range(0, rest):
file.write(struct.pack("B", 0))
file_sha = get_file_sha1(file)
tmp = bytes(file_sha)
i = 0
file.seek(12)
while i < 40:
num = (ACSII[tmp[i]] << 4) + ACSII[tmp[i+1]]
file.write(struct.pack("B", num))
i += 2
csum = checksum(file, self.dexheader.file_size)
print("checksum:", hex(csum), "file size:", self.dexheader.file_size)
file.seek(8)
file.write(struct.pack("I", csum))
file.close()
def printf(self, index):
if index == 0:
self.dexheader.printf()
else:
self.dexmaplist.printf(index)
def printclasscode(self, class_name, method_name):
index = self.dexmaplist.dexmapitem[2].getindexbyname(class_name)
if index < 0:
print("did not find the class", class_name)
return
count = self.dexmaplist.dexmapitem[6].size
classcoderef = None
for i in range(0, count):
if self.dexmaplist.dexmapitem[6].item[i].classIdx == index:
print("the class def index is :", i)
self.dexmaplist.dexmapitem[6].item[i].printf()
classdataref = self.dexmaplist.dexmapitem[6].item[i].classDataRef
flag = False
if classdataref is not None:
for i in range(0, classdataref.direct_methods_size):
methodref = self.dexmaplist.dexmapitem[5].item[classdataref.direct_methods[i].method_idx]
print(methodref.name, classdataref.direct_methods[i].method_idx)
if methodref.name == method_name:
print("find the direct method:", methodref.classstr, methodref.name,
classdataref.direct_methods[i].access_flags, classdataref.direct_methods[i].code_off)
classcoderef = classdataref.direct_methods[i].coderef
if classcoderef is not None:
classcoderef.printf()
else:
print("the code item is None")
flag = True
break
if flag:
break
print("did not find the direct method")
for j in range(0, classdataref.virtual_methods_size):
methodref = self.dexmaplist.dexmapitem[5].item[classdataref.virtual_methods[j].method_idx]
print(methodref.name)
if methodref.name == method_name:
print("find the virtual method:", methodref.classstr, methodref.name,
classdataref.virtual_methods[j].access_flags, classdataref.virtual_methods[j].code_off)
classcoderef = classdataref.virtual_methods[j].coderef
classcoderef.printf()
flag = True
break
if flag is False:
print("did not find the virtual method")
# if flag: # find the class data item, now get and print the code item
# classcoderef.printf()
# print("print done")
# else:
                #     print("something wrong here")
# with open(method_name, "wb") as file:
# classcoderef.copytofile(file)
# file.close()
break
if classcoderef is not None:
classcoderef.printf()
def makeoffset(self):
off = self.dexmaplist.makeoff()
align = off % 4
if align != 0:
off += (4 - align)
self.dexheader.makeoffset(self.dexmaplist.dexmapitem)
self.dexheader.file_size = off
self.dexheader.data_size = off - self.dexheader.map_off
def modifystr(self, src, dst):
strData = self.dexmaplist.getrefbystr(src)
if strData is not None:
print("find string", src)
strData.modify(dst)
def addstr(self, str):
strdata = self.dexmaplist.dexmapitem[0x2002].addstr(str)
strdata.printf()
self.dexmaplist.dexmapitem[1].addstrID(strdata)
return self.dexmaplist.dexmapitem[1].size-1 # return the index of the str
def addtype(self, str):
index = self.addstr(str)
self.dexmaplist.dexmapitem[2].addtypeid(index, str)
return self.dexmaplist.dexmapitem[2].size-1
def addfield(self, classidx, type_str, name_str):
field = DexFieldId(None, None, None, 2)
str_idx = self.dexmaplist.dexmapitem[1].getindexbyname(name_str)
if str_idx < 0:
str_idx = self.addstr(name_str)
if type_str in TypeDescriptor.keys(): # transform the type str to type descriptor
type_str = TypeDescriptor[type_str]
type_idx = self.dexmaplist.dexmapitem[2].getindexbyname(type_str)
if type_idx < 0:
print("did not find this type in type ids", type_str)
type_idx = self.addtype(type_str)
field.addfield(classidx, type_idx, str_idx)
self.dexmaplist.dexmapitem[4].addtypeID(field)
return self.dexmaplist.dexmapitem[4].size-1
# classtype: Lcom/cc/test/Dexparse;
def addclass(self, classtype, accessflag, superclass, sourcefile):
item = DexClassDef(None, None, None, 2)
strdata = self.dexmaplist.getrefbystr(classtype)
if strdata is not None:
print("This class is existing", classtype)
return
type_index = self.addtype(classtype)
super_index = self.dexmaplist.dexmapitem[2].getindexbyname(superclass)
if super_index < 0: # did not find it
print("This super class is not exiting", superclass)
return
source_index = self.dexmaplist.dexmapitem[1].getindexbyname(sourcefile)
if source_index < 0:
source_index = self.addstr(sourcefile)
item.addclassdef(type_index, accessflag, super_index, source_index)
self.dexmaplist.dexmapitem[6].addclassdef(item)
return item
def addclassData(self, classdataref):
self.dexmaplist.dexmapitem[0x2000].addclassdata(classdataref)
    # add a proto id and return its index;
    # if it already exists, just return the existing index
def addproto(self, proto_list, return_str):
size = len(proto_list)
proto = ""
if return_str in ShortyDescriptor.keys():
proto += ShortyDescriptor[return_str]
else:
proto += "L"
for i in range(0, size):
str = proto_list[i]
if str in ShortyDescriptor.keys():
proto += ShortyDescriptor[str]
else:
proto += 'L' # for reference of class or array
short_idx = self.dexmaplist.dexmapitem[1].getindexbyname(proto)
if short_idx < 0:
print("did not find this string in string ids", proto)
short_idx = self.addstr(proto)
if return_str in TypeDescriptor.keys(): # transform to type descriptor
return_str = TypeDescriptor[return_str]
type_idx = self.dexmaplist.dexmapitem[2].getindexbyname(return_str)
if type_idx < 0:
print("did not find this type in type ids", return_str)
type_idx = self.addtype(return_str)
proto_idx = self.dexmaplist.dexmapitem[3].getindexbyproto(short_idx, type_idx, proto_list, size)
if proto_idx >= 0:
return proto_idx
typeItem = TypeItem(None, None, 2)
type_list = []
str_list = []
for i in range(0, size):
type_str = proto_list[i]
if type_str in TypeDescriptor.keys():
type_str = TypeDescriptor[type_str]
type_index = self.dexmaplist.dexmapitem[2].getindexbyname(type_str)
if type_index < 0:
print("did not find this param in type ids", type_str)
type_index = self.addtype(type_str)
type_list.append(type_index)
str_list.append(type_str)
typeItem.addtypeItem(type_list, str_list)
self.dexmaplist.dexmapitem[0x1001].addtypelist(typeItem)
self.dexmaplist.dexmapitem[3].addprotoid(short_idx, type_idx, typeItem)
return self.dexmaplist.dexmapitem[3].size-1
def addmethod(self, class_idx, proto_list, return_str, name):
name_idx = self.dexmaplist.dexmapitem[1].getindexbyname(name)
if name_idx < 0:
name_idx = self.addstr(name)
self.dexmaplist.dexmapitem[5].addmethodid(class_idx, self.addproto(proto_list, return_str), name_idx)
return self.dexmaplist.dexmapitem[5].size-1
def addcode(self, ref):
self.dexmaplist.dexmapitem[0x2001].addcodeitem(ref)
def adddebug(self, debugitem):
self.dexmaplist.dexmapitem[0x2003].adddebugitem(debugitem)
def getmethodItem(self, class_name, method_name):
index = self.dexmaplist.dexmapitem[2].getindexbyname(class_name)
if index < 0:
print("did not find the class", class_name)
return
else:
print("find the class, index is :", index)
count = self.dexmaplist.dexmapitem[6].size
encoded_method = None
method_idx = 0
def_idx = 0
for i in range(0, count):
if self.dexmaplist.dexmapitem[6].item[i].classIdx == index:
def_idx = i
self.dexmaplist.dexmapitem[6].item[i].printf()
classdataref = self.dexmaplist.dexmapitem[6].item[i].classDataRef
flag = False
if classdataref is not None:
for i in range(0, classdataref.direct_methods_size):
methodref = self.dexmaplist.dexmapitem[5].item[classdataref.direct_methods[i].method_idx]
print(methodref.name, classdataref.direct_methods[i].method_idx)
if methodref.name == method_name:
print("find the direct method:", methodref.classstr, methodref.name,
classdataref.direct_methods[i].access_flags, classdataref.direct_methods[i].code_off)
encoded_method = classdataref.direct_methods[i]
method_idx = classdataref.direct_methods[i].method_idx
flag = True
break
if flag:
break
print("did not find the direct method")
for j in range(0, classdataref.virtual_methods_size):
methodref = self.dexmaplist.dexmapitem[5].item[classdataref.virtual_methods[j].method_idx]
print(methodref.name)
if methodref.name == method_name:
print("find the virtual method:", methodref.classstr, methodref.name,
classdataref.virtual_methods[j].access_flags, classdataref.virtual_methods[j].code_off)
encoded_method = classdataref.virtual_methods[j]
method_idx = classdataref.virtual_methods[j].method_idx
flag = True
break
if flag is False:
print("did not find the virtual method")
break
return {"method": encoded_method, "classidx": index, "methodidx": method_idx, "defidx": def_idx}
def verifyclass(self, def_idx):
classdef = self.dexmaplist.dexmapitem[6].item[def_idx]
classdef.accessFlags |= 0x00010000
def gettypeid(self, type):
return self.dexmaplist.dexmapitem[2].getindexbyname(type)
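# jiaguAll ("jiagu" roughly means packing/hardening an app): for every method to
# protect it flips the method's access flags to native, then appends one
# 16-byte record per method (class idx, method idx, original access flags,
# code_item offset) after the original file data -- presumably so a companion
# native loader can restore the original bytecode at runtime.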
def jiaguAll(dexfile, outfile):
method_list = [] # record all method need to protect
tmp_method = dexfile.getmethodItem("Lcom/cc/test/MainActivity;", "onCreate")
method_list.append({"access": tmp_method["method"].access_flags, "ref": tmp_method["method"].coderef,
"classidx": tmp_method["classidx"], "methodidx": tmp_method["methodidx"]})
tmp_method["method"].access_flags = int(Access_Flag['native'] | Access_Flag['public'])
tmp_method["method"].modified = 1
# change the access flag, make it native
dexfile.makeoffset() # make offset
if os.path.exists(outfile): # if exists, delete it
print("the file is exist, just replace it")
os.remove(outfile)
file = open(outfile, 'wb+')
file.seek(0, 0)
size = len(method_list)
filesize = dexfile.dexheader.file_size # in order to adjust the dex file
dexfile.dexheader.file_size += 16 * size # each injected data need 16 bytes
dexfile.dexmaplist.copy(file)
file.seek(filesize, 0)
print("file size :", filesize, " size : ", size)
for i in range(0, size):
file.write(struct.pack("I", method_list[i]["classidx"]))
file.write(struct.pack("I", method_list[i]["methodidx"]))
file.write(struct.pack("I", method_list[i]["access"]))
file.write(struct.pack("I", method_list[i]["ref"].start))
print("inject data :", method_list[i]["classidx"], method_list[i]["methodidx"])
    # assume the code ref is not None; otherwise there is nothing to protect
file_sha = get_file_sha1(file)
tmp = bytes(file_sha)
i = 0
file.seek(12)
while i < 40:
num = (ACSII[tmp[i]] << 4) + ACSII[tmp[i+1]]
file.write(struct.pack("B", num))
i += 2
csum = checksum(file, dexfile.dexheader.file_size)
print("checksum:", hex(csum), "file size:", dexfile.dexheader.file_size)
file.seek(8)
file.write(struct.pack("I", csum))
file.close()
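# Both copytofile and jiaguAll finish by rewriting the header digests: the
# 20-byte SHA-1 signature at offset 12 covers everything after the signature
# field, and the checksum at offset 8 is an adler32 over everything from offset
# 12 to the end of the file. The checksum() helper is defined earlier in this
# module; as a reference, an equivalent standalone computation would be:
def _dex_checksum_sketch(path):
    import zlib
    with open(path, "rb") as f:
        data = f.read()
    return zlib.adler32(data[12:]) & 0xFFFFFFFF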
if __name__ == '__main__':
dexfile = DexFile("classes.dex")
# jiaguAll(dexfile, "classescp.dex")
# dexfile.printclasscode("Lcom/cc/test/MainActivity;", "onCreate")
# dexfile.printf(3)
# dexfile.addstr("DexParse.java")
# dexfile.addstr("Lcom/cc/test/DexParse.java")
# dexfile.modifystr("A Text From CwT", "A Text From DexParse")
# dexfile.printf()
    # note: you need to delete classescp.dex first, otherwise the new
    # dex data will be appended to the old file
# dexfile.copytofile("classescp.dex")
| 40.958851
| 124
| 0.551744
| 11,335
| 95,557
| 4.548655
| 0.04914
| 0.019298
| 0.039566
| 0.050117
| 0.612347
| 0.546985
| 0.494424
| 0.455827
| 0.402335
| 0.355728
| 0
| 0.034288
| 0.329154
| 95,557
| 2,332
| 125
| 40.976415
| 0.770014
| 0.050891
| 0
| 0.509554
| 0
| 0
| 0.041912
| 0.001973
| 0
| 0
| 0.010353
| 0
| 0
| 1
| 0.099951
| false
| 0.00049
| 0.00196
| 0.00245
| 0.154826
| 0.092602
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd7312c0409e17edc8a594caad14c3eebd8edb1f
| 5,344
|
py
|
Python
|
cookie.py
|
cppchriscpp/fortune-cookie
|
46e433e1ae06a8ad742b252d642f8620bde9e38b
|
[
"MIT"
] | null | null | null |
cookie.py
|
cppchriscpp/fortune-cookie
|
46e433e1ae06a8ad742b252d642f8620bde9e38b
|
[
"MIT"
] | null | null | null |
cookie.py
|
cppchriscpp/fortune-cookie
|
46e433e1ae06a8ad742b252d642f8620bde9e38b
|
[
"MIT"
] | null | null | null |
import markovify
import re
import nltk
import os
import urllib.request
from shutil import copyfile
# We need a temporary(ish) place to store the data we retrieve.
# If you are running this in a docker container you may want to mount a volume and use it.
# Also be sure to make a symlink between it and the assets directory. See our dockerfile for an example!
datadir = "./web/assets/data"
if 'DATA_DIR' in os.environ:
datadir = os.environ['DATA_DIR']
if not os.path.exists(datadir):
os.mkdir(datadir)
# Basically the example from the markovify documentation that uses parts of speech and stuff to make better sentences
class POSifiedText(markovify.Text):
def word_split(self, sentence):
words = re.split(self.word_split_pattern, sentence)
words = [ "::".join(tag) for tag in nltk.pos_tag(words) ]
return words
def word_join(self, words):
sentence = " ".join(word.split("::")[0] for word in words)
return sentence
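# NOTE (environment assumption): nltk.pos_tag needs the perceptron tagger model
# to be available locally; fetch it once up front if it isn't already installed.
nltk.download('averaged_perceptron_tagger', quiet=True)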
# Grab a list of fortunes from Github
if not os.path.exists(datadir+"/cookie.txt"):
urllib.request.urlretrieve("https://raw.githubusercontent.com/ianli/fortune-cookies-galore/master/fortunes.txt", datadir+"/cookie.txt")
# Grab the US constitution raw text
if not os.path.exists(datadir+'/const.txt'):
urllib.request.urlretrieve("https://www.usconstitution.net/const.txt", datadir+"/const.txt")
if not os.path.exists(datadir+'/tweeter.txt'):
urllib.request.urlretrieve("https://raw.githubusercontent.com/ElDeveloper/tweets/master/tweets_text.txt", datadir+"/tweeter.txt")
# Read both files into variables
with open(datadir+"/cookie.txt") as f:
text = f.read()
with open(datadir+'/const.txt') as f:
    const_text = f.read()
with open(datadir+"/tweeter.txt") as f:
tweetext = f.read()
# Break up the text to make it more workable
cookie_text_split = text.split("\n")
const_text_split = const_text.split("\n")
tweet_text_split = tweetext.split("\n")
# Some cleanup to remove things in the fortune cookie file that aren't really fortunes.
# (There are some odd facts and quotes in here. This is a bit barbaric, but this is a fun project anyway! No need for perfection...)
def excluded(string):
if string.startswith("Q:"):
return False
if "\"" in string:
return False
if "--" in string:
return False
return True
# Same thing for the constitution text - this just removes the comment at the top.
def excluded_const(string):
    if "[" in string:
        return False
    return True
# Apply the cleanups from above
cookie_text_split[:] = [x for x in cookie_text_split if excluded(x)]
const_text_split[:] = [x for x in const_text_split if excluded_const(x)]
# Merge the text back into one big blob like markovify expects. (There's probably a better way to do this, but again, fun project. Efficiency's not that important...)
cookie_text_model = POSifiedText("\n".join(cookie_text_split))
const_text_model = POSifiedText("\n".join(const_text_split))
tweet_text_model = POSifiedText("\n".join(tweet_text_split))
# Combine them into a terrifying structure
const_and_cookie_model = markovify.combine([cookie_text_model, const_text_model])
tweet_and_cookie_model = markovify.combine([cookie_text_model, tweet_text_model], [4, 1])
everything_model = markovify.combine([cookie_text_model, const_text_model, tweet_text_model], [4, 1, 1])
# Print a couple lines to the terminal to show that everything's working...
print("Examples:")
for i in range(5):
print(const_and_cookie_model.make_short_sentence(240, tries=25))
# Now, open a temporary file and write some javascript surrounding our story.
with open(datadir+"/cookie.js.new", "w+") as file:
    # NOTE: I don't escape anything here... with bad seed text it'd be quite possible to inject weird js, etc.
    # Write 250 lines of junk into each js array. Note that leaving the trailing comma is ok, as javascript doesn't care.
    # make_short_sentence can fail and return None; skip those instead of crashing on string concat.
    models = [
        ("fortuneCookies", cookie_text_model),
        ("constCookies", const_and_cookie_model),
        ("constLines", const_text_model),
        ("tweetLines", tweet_text_model),
        ("tweetCookie", tweet_and_cookie_model),
        ("everythingCookie", everything_model),
    ]
    for name, model in models:
        print("Running " + name)
        file.write("window." + name + "=[\n")
        for i in range(250):
            sentence = model.make_short_sentence(240, tries=25)
            if sentence is not None:
                file.write("\"" + sentence + "\",\n")
        # Close it up!
        file.write("];")
# Finally, copy our temp file over the old one, so clients can start seeing it.
copyfile(datadir+"/cookie.js.new", datadir+"/cookie.js")
| 36.60274
| 165
| 0.698915
| 804
| 5,344
| 4.539801
| 0.309701
| 0.044384
| 0.011507
| 0.021096
| 0.340822
| 0.291233
| 0.256164
| 0.216712
| 0.165205
| 0.124658
| 0
| 0.014218
| 0.170846
| 5,344
| 145
| 166
| 36.855172
| 0.809524
| 0.281811
| 0
| 0.202247
| 0
| 0.011236
| 0.272727
| 0.10427
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044944
| false
| 0
| 0.067416
| 0
| 0.213483
| 0.089888
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd78ccdbc7f44ee790bb4e0e5bb66afdadb94039
| 3,329
|
py
|
Python
|
2021/05_2/solution.py
|
budavariam/advent_of_code
|
0903bcbb0df46371b6a340ca2be007dce6470c66
|
[
"MIT"
] | null | null | null |
2021/05_2/solution.py
|
budavariam/advent_of_code
|
0903bcbb0df46371b6a340ca2be007dce6470c66
|
[
"MIT"
] | null | null | null |
2021/05_2/solution.py
|
budavariam/advent_of_code
|
0903bcbb0df46371b6a340ca2be007dce6470c66
|
[
"MIT"
] | 1
|
2022-02-11T13:14:50.000Z
|
2022-02-11T13:14:50.000Z
|
""" Advent of code 2021 day 05 / 2 """
from os import path
import re
class Code(object):
def __init__(self, lines):
self.lines = lines
def printmap(self, dim, minx, miny, maxx, maxy):
for i in range(miny, maxy + 1):
ln = ""
for j in range(minx, maxx+1):
pos = f"{i}-{j}"
ln += str(dim.get(pos)) if dim.get(pos) is not None else '.'
print(ln)
print(dim)
def solve(self):
# print(self.lines)
minx, miny, maxx, maxy = 0, 0, 0, 0
dim = {}
cnt = 0
xa, xb, ya, yb = -1, -1, -1, -1
for line in self.lines:
x1, y1, x2, y2 = line
xa, xb = sorted([x1, x2])
ya, yb = sorted([y1, y2])
minx = min(minx, xa)
miny = min(miny, ya)
maxx = max(maxx, xb)
maxy = max(maxy, yb)
            if x1 == x2:
                # print("hor", y1, x1, y2, x2, ya, xa, yb, xb)
                for i in range(ya, yb+1):
                    pos = f"{i}-{x1}"
                    dim[pos] = dim.get(pos, 0) + 1
            elif y1 == y2:
                # print("vert", y1, x1, y2, x2, ya, xa, yb, xb)
                for i in range(xa, xb+1):
                    pos = f"{y1}-{i}"
                    dim[pos] = dim.get(pos, 0) + 1
            else:
                # print("diag", y1, x1, y2, x2, ya, xa, yb, xb)
                # walk the diagonal from the left-most endpoint, stepping y up or down
                if x1 < x2:
                    for i, x in enumerate(range(x1, x2+1)):
                        pos = f"{y1+i}-{x}" if y1 < y2 else f"{y1-i}-{x}"
                        dim[pos] = dim.get(pos, 0) + 1
                else:
                    for i, x in enumerate(range(x2, x1+1)):
                        pos = f"{y2-i}-{x}" if y1 < y2 else f"{y2+i}-{x}"
                        dim[pos] = dim.get(pos, 0) + 1
# self.printmap(dim, minx, miny, maxx, maxy)
for i in dim.values():
if i > 1:
cnt += 1
return cnt
def preprocess(raw_data):
pattern = re.compile(r'(\d+),(\d+) -> (\d+),(\d+)')
processed_data = []
for line in raw_data.split("\n"):
match = re.match(pattern, line)
data = [int(match.group(1)), int(match.group(2)),
int(match.group(3)), int(match.group(4))]
# data = line
processed_data.append(data)
return processed_data
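# Quick sanity check of the parser: each "x1,y1 -> x2,y2" line yields four ints.
assert preprocess("0,9 -> 5,9") == [[0, 9, 5, 9]]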
def solution(data):
""" Solution to the problem """
lines = preprocess(data)
solver = Code(lines)
return solver.solve()
if __name__ == "__main__":
with(open(path.join(path.dirname(__file__), 'input.txt'), 'r')) as input_file:
print(solution(input_file.read()))
| 31.40566
| 82
| 0.393812
| 407
| 3,329
| 3.164619
| 0.243243
| 0.037267
| 0.043478
| 0.051242
| 0.307453
| 0.285714
| 0.236025
| 0.220497
| 0.169255
| 0.169255
| 0
| 0.043231
| 0.471913
| 3,329
| 105
| 83
| 31.704762
| 0.68942
| 0.080204
| 0
| 0.26506
| 0
| 0
| 0.036137
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060241
| false
| 0
| 0.048193
| 0
| 0.156627
| 0.048193
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd7b422625225dcfe35545919a8429eaaa584545
| 378
|
py
|
Python
|
Qualification/1-ForegoneSolution/Solution.py
|
n1try/codejam-2019
|
3cedc74915eca7384adaf8f6a68eeb21ada1beaf
|
[
"MIT"
] | null | null | null |
Qualification/1-ForegoneSolution/Solution.py
|
n1try/codejam-2019
|
3cedc74915eca7384adaf8f6a68eeb21ada1beaf
|
[
"MIT"
] | null | null | null |
Qualification/1-ForegoneSolution/Solution.py
|
n1try/codejam-2019
|
3cedc74915eca7384adaf8f6a68eeb21ada1beaf
|
[
"MIT"
] | null | null | null |
import re
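# Foregone Solution: split N into A + B where neither part contains the digit 4.
# Digit-wise, every 4 in N becomes a 3 in A and a 1 in B; every other digit goes
# to A unchanged while B gets a 0 (leading zeros are stripped from B at the end).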
t = int(input())
for i in range(0, t):
chars = input()
m1, m2 = [None] * len(chars), [None] * len(chars)
for j in range(0, len(chars)):
m1[j] = "3" if chars[j] == "4" else chars[j]
m2[j] = "1" if chars[j] == "4" else "0"
s1 = ''.join(m1)
s2 = ''.join(m2)
print("Case #{}: {} {}".format(i + 1, s1, re.sub(r'^0*', '', s2)))
| 29.076923
| 70
| 0.457672
| 64
| 378
| 2.703125
| 0.453125
| 0.138728
| 0.092486
| 0.104046
| 0.150289
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070111
| 0.283069
| 378
| 13
| 70
| 29.076923
| 0.568266
| 0
| 0
| 0
| 0
| 0
| 0.060686
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd7bd590362f7ad441cb4aaacc481be5a9c4d64c
| 1,645
|
py
|
Python
|
1.imdbData.py
|
batucimenn/imdbScraperOnWaybackMachine2
|
e6d92b5c794a2603a05e986b587a796d2a80fd8d
|
[
"MIT"
] | null | null | null |
1.imdbData.py
|
batucimenn/imdbScraperOnWaybackMachine2
|
e6d92b5c794a2603a05e986b587a796d2a80fd8d
|
[
"MIT"
] | null | null | null |
1.imdbData.py
|
batucimenn/imdbScraperOnWaybackMachine2
|
e6d92b5c794a2603a05e986b587a796d2a80fd8d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# Scraper movies data from Imdb
# In[ ]:
import csv
# Year range to collect data.
# In[ ]:
startYear=int(input("startYear: "))
finishYear=int(input("finishYear: "))
# File path to save. Ex: C:\Users\User\Desktop\newFile
# In[ ]:
filePath = input("File path: "+"r'")+("/")
# Create csv and set the titles.
# In[ ]:
with open(filePath+str(startYear)+"_"+str(finishYear)+".csv", mode='w', newline='') as yeni_dosya:
    yeni_yazici = csv.writer(yeni_dosya, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    yeni_yazici.writerow(['Title'+";"+'Film'+";"+'Year'])
# Download title.basics.tsv.gz from https://datasets.imdbws.com/. Extract data.tsv, print it into csv.
# In[ ]:
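# Column layout of IMDb's title.basics.tsv (tab-separated):
#   0 tconst, 1 titleType, 2 primaryTitle, 3 originalTitle, 4 isAdult,
#   5 startYear, 6 endYear, 7 runtimeMinutes, 8 genres
# The loop below keeps movies/TV movies whose startYear falls in the range.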
with open("data.tsv",encoding="utf8") as tsvfile:
tsvreader = csv.reader(tsvfile, delimiter="\t")
for line in tsvreader:
try:
ceviri=int(line[5])
if(ceviri>=startYear and ceviri<=finishYear and (line[1]=="movie" or line[1]=="tvMovie")):
print(line[0]+";"+line[3]+";"+line[5]+";"+line[1])
line0=line[0].replace("\"","")
line5=line[5].replace("\"","")
                with open(filePath+str(startYear)+"_"+str(finishYear)+".csv", mode='a', newline='') as yeni_dosya:
                    yeni_yazici = csv.writer(yeni_dosya, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                    yeni_yazici.writerow([line0+";"+line[3]+";"+line5])
        except (ValueError, IndexError):
            # the header row and entries whose startYear is "\N" fail int(); skip them
            pass
| 26.532258
| 117
| 0.570821
| 201
| 1,645
| 4.60199
| 0.482587
| 0.058378
| 0.021622
| 0.041081
| 0.328649
| 0.328649
| 0.328649
| 0.328649
| 0.328649
| 0.224865
| 0
| 0.01279
| 0.239514
| 1,645
| 61
| 118
| 26.967213
| 0.726619
| 0.189666
| 0
| 0.166667
| 0
| 0
| 0.07803
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.041667
| 0.083333
| 0
| 0.083333
| 0.041667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd7c26cf48ae51b52e75c459ca5537852b6f4936
| 2,680
|
py
|
Python
|
effective_python/metaclass_property/descriptor_demo.py
|
ftconan/python3
|
eb63ba33960072f792ecce6db809866b38c402f8
|
[
"MIT"
] | 1
|
2018-12-19T22:07:56.000Z
|
2018-12-19T22:07:56.000Z
|
effective_python/metaclass_property/descriptor_demo.py
|
ftconan/python3
|
eb63ba33960072f792ecce6db809866b38c402f8
|
[
"MIT"
] | 12
|
2020-03-14T05:32:26.000Z
|
2022-03-12T00:08:49.000Z
|
effective_python/metaclass_property/descriptor_demo.py
|
ftconan/python3
|
eb63ba33960072f792ecce6db809866b38c402f8
|
[
"MIT"
] | 1
|
2018-12-19T22:08:00.000Z
|
2018-12-19T22:08:00.000Z
|
"""
@author: magician
@file: descriptor_demo.py
@date: 2020/1/14
"""
from weakref import WeakKeyDictionary
class Homework(object):
"""
Homework
"""
def __init__(self):
self._grade = 0
@property
def grade(self):
return self._grade
@grade.setter
def grade(self, value):
if not(0 <= value <= 100):
            raise ValueError('Grade must be between 0 and 100')
self._grade = value
# class Exam(object):
# """
# Exam
# """
# def __init__(self):
# self._writing_grade = 0
# self._math_grade = 0
#
# @staticmethod
# def _check_grade(value):
# if not(0 <= value <= 100):
# raise ValueError('Grade must be between 0 and 100')
#
# @property
# def writing_grade(self):
# return self._writing_grade
#
# @writing_grade.setter
# def writing_grade(self, value):
# self._check_grade(value)
# self._writing_grade = value
#
# @property
# def math_grade(self):
# return self._math_grade
#
# @math_grade.setter
# def math_grade(self, value):
# self._check_grade(value)
# self._math_grade = value
class Grade(object):
"""
Grade
"""
def __init__(self):
# self._value = 0
# keep instance status
# self._values = {}
# preventing memory leaks
self._values = WeakKeyDictionary()
def __get__(self, instance, instance_type):
# return self._value
if instance is None:
return self
return self._values.get(instance, 0)
def __set__(self, instance, value):
if not (0 <= value <= 100):
raise ValueError('Grade must be between 0 and 100')
# self._value = value
self._values[instance] = value
class Exam(object):
"""
Exam
"""
math_grade = Grade()
writing_grade = Grade()
science_grade = Grade()
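# A minimal sketch of why Grade uses WeakKeyDictionary: with a plain dict the
# descriptor would hold a strong reference to every Exam instance ever graded,
# so none of them could be garbage collected. Weak keys vanish with their exams:
def _weakref_demo():
    import gc
    values = Exam.__dict__['writing_grade']._values  # the WeakKeyDictionary
    before = len(values)
    exam = Exam()
    exam.writing_grade = 50
    assert len(values) == before + 1
    del exam
    gc.collect()
    assert len(values) == before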
if __name__ == '__main__':
galileo = Homework()
galileo.grade = 95
# first_exam = Exam()
# first_exam.writing_grade = 82
# first_exam.science_grade = 99
# print('Writing', first_exam.writing_grade)
# print('Science', first_exam.science_grade)
#
# second_exam = Exam()
# second_exam.writing_grade = 75
# second_exam.science_grade = 99
# print('Second', second_exam.writing_grade, 'is right')
# print('First', first_exam.writing_grade, 'is wrong')
first_exam = Exam()
first_exam.writing_grade = 82
second_exam = Exam()
second_exam.writing_grade = 75
print('First ', first_exam.writing_grade, 'is right')
print('Second ', second_exam.writing_grade, 'is right')
| 23.304348
| 65
| 0.596642
| 313
| 2,680
| 4.805112
| 0.210863
| 0.12766
| 0.095745
| 0.069814
| 0.422872
| 0.363697
| 0.360372
| 0.316489
| 0.115691
| 0.115691
| 0
| 0.026069
| 0.284328
| 2,680
| 114
| 66
| 23.508772
| 0.758081
| 0.462313
| 0
| 0.111111
| 0
| 0
| 0.07377
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.027778
| 0.027778
| 0.444444
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd7c5d171d30796fbb3b1df9d4223d6476d4d998
| 3,584
|
py
|
Python
|
afk-q-babyai/babyai/layers/aggrerator.py
|
IouJenLiu/AFK
|
db2b47bb3a5614b61766114b87f143e4a61a4a8d
|
[
"MIT"
] | 1
|
2022-03-12T03:10:29.000Z
|
2022-03-12T03:10:29.000Z
|
afk-q-babyai/babyai/layers/aggrerator.py
|
IouJenLiu/AFK
|
db2b47bb3a5614b61766114b87f143e4a61a4a8d
|
[
"MIT"
] | null | null | null |
afk-q-babyai/babyai/layers/aggrerator.py
|
IouJenLiu/AFK
|
db2b47bb3a5614b61766114b87f143e4a61a4a8d
|
[
"MIT"
] | null | null | null |
import torch
import numpy as np
def masked_softmax(x, m=None, axis=-1):
'''
x: batch x time x hid
m: batch x time (optional)
'''
x = torch.clamp(x, min=-15.0, max=15.0)
if m is not None:
m = m.float()
x = x * m
e_x = torch.exp(x - torch.max(x, dim=axis, keepdim=True)[0])
if m is not None:
e_x = e_x * m
softmax = e_x / (torch.sum(e_x, dim=axis, keepdim=True) + 1e-6)
return softmax
class ScaledDotProductAttention(torch.nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = torch.nn.Dropout(attn_dropout)
def forward(self, q, k, v, mask):
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn / self.temperature
attn = masked_softmax(attn, mask, 2)
__attn = self.dropout(attn)
output = torch.bmm(__attn, v)
return output, attn
class MultiHeadAttention(torch.nn.Module):
''' From Multi-Head Attention module
https://github.com/jadore801120/attention-is-all-you-need-pytorch'''
def __init__(self, block_hidden_dim, n_head, dropout=0.1, q_dim=128):
super().__init__()
self.q_dim = q_dim
self.n_head = n_head
self.block_hidden_dim = block_hidden_dim
self.w_qs = torch.nn.Linear(q_dim, n_head * block_hidden_dim, bias=False)
self.w_ks = torch.nn.Linear(block_hidden_dim, n_head * block_hidden_dim, bias=False)
self.w_vs = torch.nn.Linear(block_hidden_dim, n_head * block_hidden_dim, bias=False)
torch.nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (q_dim * 2)))
torch.nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (block_hidden_dim * 2)))
torch.nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (block_hidden_dim * 2)))
self.attention = ScaledDotProductAttention(temperature=np.power(block_hidden_dim, 0.5))
self.fc = torch.nn.Linear(n_head * block_hidden_dim, block_hidden_dim)
self.layer_norm = torch.nn.LayerNorm(self.block_hidden_dim)
torch.nn.init.xavier_normal_(self.fc.weight)
self.dropout = torch.nn.Dropout(dropout)
def forward(self, q, mask, k, v):
# q: batch x len_q x hid
# k: batch x len_k x hid
# v: batch x len_v x hid
# mask: batch x len_q x len_k
# output: batch x len_q x hid
# attn: batch x len_q x len_k
batch_size, len_q = q.size(0), q.size(1)
len_k, len_v = k.size(1), v.size(1)
assert mask.size(1) == len_q
assert mask.size(2) == len_k
residual = q
q = self.w_qs(q).view(batch_size, len_q, self.n_head, self.block_hidden_dim)
k = self.w_ks(k).view(batch_size, len_k, self.n_head, self.block_hidden_dim)
v = self.w_vs(v).view(batch_size, len_v, self.n_head, self.block_hidden_dim)
q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, self.block_hidden_dim) # (n*b) x lq x dk
k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, self.block_hidden_dim) # (n*b) x lk x dk
v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, self.block_hidden_dim) # (n*b) x lv x dv
mask = mask.repeat(self.n_head, 1, 1) # (n*b) x .. x ..
output, attn = self.attention(q, k, v, mask=mask)
attn = attn.view(self.n_head, batch_size, len_q, -1)
attn = torch.mean(attn, 0) # batch x lq x lk
        output = None  # NOTE: the attended output is discarded here; fc/layer_norm/residual above go unused
return output, attn
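# A minimal usage sketch (not part of the original module), with shapes taken
# from the comments in forward() above; the dimensions here are illustrative:
def _demo_multi_head_attention():
    mha = MultiHeadAttention(block_hidden_dim=64, n_head=4, q_dim=128)
    q = torch.randn(2, 5, 128)      # batch x len_q x q_dim
    kv = torch.randn(2, 7, 64)      # batch x len_k x block_hidden_dim
    mask = torch.ones(2, 5, 7)      # batch x len_q x len_k
    output, attn = mha(q, mask, kv, kv)
    assert attn.shape == (2, 5, 7)  # note: this forward() returns output=None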
| 41.195402
| 103
| 0.624163
| 609
| 3,584
| 3.466338
| 0.180624
| 0.104216
| 0.132639
| 0.076741
| 0.409758
| 0.333965
| 0.2937
| 0.180957
| 0.147797
| 0.106585
| 0
| 0.023827
| 0.238839
| 3,584
| 87
| 104
| 41.195402
| 0.75
| 0.114676
| 0
| 0.1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033333
| 1
| 0.083333
| false
| 0
| 0.05
| 0
| 0.216667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd8175ceff7997ec372ad498a63c3ba3b5e8e259
| 1,066
|
py
|
Python
|
tests/test_oas_cache.py
|
maykinmedia/zgw-consumers
|
9b0759d9b7c3590b245004afd4c5e5474785bf91
|
[
"MIT"
] | 2
|
2021-04-25T11:29:33.000Z
|
2022-03-08T14:06:58.000Z
|
tests/test_oas_cache.py
|
maykinmedia/zgw-consumers
|
9b0759d9b7c3590b245004afd4c5e5474785bf91
|
[
"MIT"
] | 27
|
2020-04-01T07:33:02.000Z
|
2022-03-14T09:11:05.000Z
|
tests/test_oas_cache.py
|
maykinmedia/zgw-consumers
|
9b0759d9b7c3590b245004afd4c5e5474785bf91
|
[
"MIT"
] | 2
|
2020-07-30T15:40:47.000Z
|
2020-11-30T10:56:29.000Z
|
import threading
from zds_client.oas import schema_fetcher
def test_schema_fetch_twice(oas):
schema = oas.fetch()
assert isinstance(schema, dict)
assert oas.mocker.call_count == 1
oas.fetch()
# check that the cache is used
assert oas.mocker.call_count == 1
def test_clear_caches_in_between(oas):
schema = oas.fetch()
assert isinstance(schema, dict)
assert oas.mocker.call_count == 1
schema_fetcher.cache.clear()
oas.fetch()
assert oas.mocker.call_count == 2
def test_cache_across_threads(oas):
def _target():
# disable the local python cache
schema_fetcher.cache._local_cache = {}
oas.fetch()
thread1 = threading.Thread(target=_target)
thread2 = threading.Thread(target=_target)
# start thread 1 and let it complete, this ensures the schema is stored in the
# cache
thread1.start()
thread1.join()
# start thread 2 and let it complete, we can now verify the call count
thread2.start()
thread2.join()
assert oas.mocker.call_count == 1
| 21.755102
| 82
| 0.684803
| 148
| 1,066
| 4.77027
| 0.351351
| 0.076487
| 0.106232
| 0.134561
| 0.29745
| 0.263456
| 0.192635
| 0.192635
| 0.192635
| 0.192635
| 0
| 0.015777
| 0.227017
| 1,066
| 48
| 83
| 22.208333
| 0.841019
| 0.197936
| 0
| 0.423077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.269231
| 1
| 0.153846
| false
| 0
| 0.076923
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd81f57132ba4b8e36862c9d9eb8179dcba9623a
| 4,165
|
py
|
Python
|
src/uproot_browser/tree.py
|
amangoel185/uproot-browser
|
8181913ac04d0318b05256923d8980d6d3acaa7f
|
[
"BSD-3-Clause"
] | 12
|
2022-03-18T11:47:26.000Z
|
2022-03-25T13:57:08.000Z
|
src/uproot_browser/tree.py
|
amangoel185/uproot-browser
|
8181913ac04d0318b05256923d8980d6d3acaa7f
|
[
"BSD-3-Clause"
] | 7
|
2022-03-18T11:40:36.000Z
|
2022-03-29T22:15:01.000Z
|
src/uproot_browser/tree.py
|
amangoel185/uproot-browser
|
8181913ac04d0318b05256923d8980d6d3acaa7f
|
[
"BSD-3-Clause"
] | 1
|
2022-03-21T14:37:07.000Z
|
2022-03-21T14:37:07.000Z
|
"""
Display tools for TTrees.
"""
from __future__ import annotations
import dataclasses
import functools
from pathlib import Path
from typing import Any, Dict
import uproot
from rich.console import Console
from rich.markup import escape
from rich.text import Text
from rich.tree import Tree
console = Console()
__all__ = ("make_tree", "process_item", "print_tree", "UprootItem", "console")
def __dir__() -> tuple[str, ...]:
return __all__
@dataclasses.dataclass
class UprootItem:
path: str
item: Any
@property
def is_dir(self) -> bool:
return isinstance(self.item, (uproot.reading.ReadOnlyDirectory, uproot.TTree))
def meta(self) -> dict[str, Any]:
return process_item(self.item)
def label(self) -> Text:
return process_item(self.item)["label"] # type: ignore[no-any-return]
@property
def children(self) -> list[UprootItem]:
if not self.is_dir:
return []
items = {key.split(";")[0] for key in self.item.keys()}
return [
UprootItem(f"{self.path}/{key}", self.item[key]) for key in sorted(items)
]
def make_tree(node: UprootItem, *, tree: Tree | None = None) -> Tree:
"""
Given an object, build a rich.tree.Tree output.
"""
if tree is None:
tree = Tree(**node.meta())
else:
tree = tree.add(**node.meta())
for child in node.children:
make_tree(child, tree=tree)
return tree
@functools.singledispatch
def process_item(uproot_object: Any) -> Dict[str, Any]:
"""
Given an unknown object, return a rich.tree.Tree output. Specialize for known objects.
"""
name = getattr(uproot_object, "name", "<unnamed>")
classname = getattr(uproot_object, "classname", uproot_object.__class__.__name__)
label = Text.assemble(
"❓ ",
(f"{name} ", "bold"),
(classname, "italic"),
)
return {"label": label}
@process_item.register
def _process_item_tfile(
uproot_object: uproot.reading.ReadOnlyDirectory,
) -> Dict[str, Any]:
"""
Given a TFile, return a rich.tree.Tree output.
"""
path = Path(uproot_object.file_path)
result = {
"label": Text.from_markup(
f":file_folder: [link file://{path}]{escape(path.name)}"
),
"guide_style": "bold bright_blue",
}
return result
@process_item.register
def _process_item_ttree(uproot_object: uproot.TTree) -> Dict[str, Any]:
"""
Given a TTree, return a rich.tree.Tree output.
"""
label = Text.assemble(
"🌴 ",
(f"{uproot_object.name} ", "bold"),
f"({uproot_object.num_entries:g})",
)
result = {
"label": label,
"guide_style": "bold bright_green",
}
return result
@process_item.register
def _process_item_tbranch(uproot_object: uproot.TBranch) -> Dict[str, Any]:
"""
Given a branch, return a rich.tree.Tree output.
"""
jagged = isinstance(
uproot_object.interpretation, uproot.interpretation.jagged.AsJagged
)
icon = "🍃 " if jagged else "🍁 "
label = Text.assemble(
icon,
(f"{uproot_object.name} ", "bold"),
(f"{uproot_object.typename}", "italic"),
)
result = {"label": label}
return result
@process_item.register
def _process_item_th(uproot_object: uproot.behaviors.TH1.Histogram) -> Dict[str, Any]:
"""
Given a histogram, return a rich.tree.Tree output.
"""
icon = "📊 " if uproot_object.kind == "COUNT" else "📈 "
sizes = " × ".join(f"{len(ax)}" for ax in uproot_object.axes)
label = Text.assemble(
icon,
(f"{uproot_object.name} ", "bold"),
(f"{uproot_object.classname} ", "italic"),
f"({sizes})",
)
result = {"label": label}
return result
# pylint: disable-next=redefined-outer-name
def print_tree(entry: str, *, console: Console = console) -> None:
"""
Prints a tree given a specification string. Currently this must be a
single filename; colons are not allowed in the filename.
"""
upfile = uproot.open(entry)
tree = make_tree(UprootItem("/", upfile))
console.print(tree)
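# --- Added usage sketch (not part of the original file) ---
# Minimal example of the public API defined above; "example.root" is a
# placeholder for any readable ROOT file.
if __name__ == "__main__":
    print_tree("example.root")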
| 25.090361
| 90
| 0.623049
| 514
| 4,165
| 4.910506
| 0.27821
| 0.085578
| 0.023772
| 0.030903
| 0.251585
| 0.173138
| 0.110539
| 0.110539
| 0.043582
| 0.043582
| 0
| 0.00063
| 0.237215
| 4,165
| 165
| 91
| 25.242424
| 0.791627
| 0.135414
| 0
| 0.221154
| 0
| 0
| 0.129199
| 0.032443
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115385
| false
| 0
| 0.096154
| 0.038462
| 0.355769
| 0.028846
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd8681cae85c92327aba29d9f6d3628698abb698
| 1,811
|
py
|
Python
|
frootspi_examples/launch/conductor.launch.py
|
SSL-Roots/FrootsPi
|
3aff59342a9d3254d8b089b66aeeed59bcb66c7b
|
[
"Apache-2.0"
] | 2
|
2021-11-27T10:57:01.000Z
|
2021-11-27T11:25:52.000Z
|
frootspi_examples/launch/conductor.launch.py
|
SSL-Roots/FrootsPi
|
3aff59342a9d3254d8b089b66aeeed59bcb66c7b
|
[
"Apache-2.0"
] | 1
|
2018-07-31T13:29:57.000Z
|
2018-07-31T13:36:50.000Z
|
frootspi_examples/launch/conductor.launch.py
|
SSL-Roots/FrootsPi
|
3aff59342a9d3254d8b089b66aeeed59bcb66c7b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Roots
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import ComposableNodeContainer
from launch_ros.actions import PushRosNamespace
from launch_ros.descriptions import ComposableNode
def generate_launch_description():
declare_arg_robot_id = DeclareLaunchArgument(
'id', default_value='0',
description=('Set own ID.')
)
push_ns = PushRosNamespace(['robot', LaunchConfiguration('id')])
# robot_id = LaunchConfiguration('robot_id')
container = ComposableNodeContainer(
name='frootspi_container',
namespace='',
package='rclcpp_components',
executable='component_container',  # component_container_mt is the multi-threaded variant
composable_node_descriptions=[
ComposableNode(
package='frootspi_conductor',
plugin='frootspi_conductor::Conductor',
name='frootspi_conductor',
extra_arguments=[{'use_intra_process_comms': True}],
),
],
output='screen',
)
return LaunchDescription([
declare_arg_robot_id,
push_ns,
container
])
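# --- Added usage sketch (not part of the original file) ---
# One way to run this description outside the ros2 CLI, assuming a sourced
# ROS 2 environment; ordinarily it would be started with
# `ros2 launch frootspi_examples conductor.launch.py id:=1`.
if __name__ == '__main__':
    from launch import LaunchService
    ls = LaunchService()
    ls.include_launch_description(generate_launch_description())
    ls.run()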
| 34.826923
| 81
| 0.703479
| 197
| 1,811
| 6.314721
| 0.568528
| 0.048232
| 0.03135
| 0.025723
| 0.041801
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006369
| 0.219768
| 1,811
| 51
| 82
| 35.509804
| 0.874027
| 0.343457
| 0
| 0
| 0
| 0
| 0.144075
| 0.044331
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.1875
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd876869a60981b01094fa1c90ddae1cb851c885
| 1,639
|
py
|
Python
|
src/vnf/l23filter/controllers/InitializeDbController.py
|
shield-h2020/vnsfs
|
864bdd418d3910b86783044be94d2bdb07e95aec
|
[
"Apache-2.0"
] | 2
|
2018-11-06T17:55:56.000Z
|
2021-02-09T07:40:17.000Z
|
src/vnf/l23filter/controllers/InitializeDbController.py
|
shield-h2020/vnsfs
|
864bdd418d3910b86783044be94d2bdb07e95aec
|
[
"Apache-2.0"
] | null | null | null |
src/vnf/l23filter/controllers/InitializeDbController.py
|
shield-h2020/vnsfs
|
864bdd418d3910b86783044be94d2bdb07e95aec
|
[
"Apache-2.0"
] | 4
|
2018-03-28T18:06:26.000Z
|
2021-07-17T00:33:55.000Z
|
import logging
from sqlalchemy import create_engine, event
from configuration import config as cnf
from helpers.DbHelper import on_connect, db_session, assert_database_type
from models import Base, Flow
# from models.depreciated import Metric
logging.basicConfig()
logging.getLogger('sqlalchemy.engine').setLevel(logging.ERROR)
class InitializeDbController:
def create_DB(self):
mysqldbType = "mysql"
# build the full connection string from the database type and configured DSN
connection_string = mysqldbType + cnf.DATABASE_CONN_STRING
print(connection_string)
# if connection_string.startswith('sqlite'):
# db_file = re.sub("sqlite.*:///", "", connection_string)
# os.makedirs(os.path.dirname(db_file))
engine = create_engine(connection_string, echo=False)
# event.listen(engine, 'connect', on_connect)
conn = engine.connect()
conn.execute("commit")
conn.execute("CREATE DATABASE IF NOT EXISTS test;")
conn.close()
def init_DB(self):
# if connection_string.startswith('sqlite'):
# db_file = re.sub("sqlite.*:///", "", connection_string)
# os.makedirs(os.path.dirname(db_file))
# 3 commands for creating database
base = Base.Base()
Flow.Flow()
engine = assert_database_type()
base.metadata.create_all(engine)
response = "OK"
return response
def delete_DB(self):
engine = assert_database_type()
base = Base.Base()
for tbl in reversed(base.metadata.sorted_tables):
tbl.drop(engine, checkfirst=True)
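# --- Added usage sketch (not part of the original file) ---
# Hypothetical driver showing the intended call order, assuming
# cnf.DATABASE_CONN_STRING points at a reachable MySQL server.
if __name__ == '__main__':
    controller = InitializeDbController()
    controller.create_DB()       # issues CREATE DATABASE IF NOT EXISTS test;
    print(controller.init_DB())  # creates all declared tables, returns "OK"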
| 26.015873
| 73
| 0.649786
| 185
| 1,639
| 5.594595
| 0.427027
| 0.123672
| 0.052174
| 0.054106
| 0.239614
| 0.185507
| 0.185507
| 0.185507
| 0.185507
| 0.185507
| 0
| 0.00081
| 0.246492
| 1,639
| 62
| 74
| 26.435484
| 0.837247
| 0.254423
| 0
| 0.133333
| 0
| 0
| 0.053719
| 0
| 0
| 0
| 0
| 0
| 0.1
| 1
| 0.1
| false
| 0
| 0.166667
| 0
| 0.333333
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd8a5381cdea04589d3919c507d39969d9014954
| 3,533
|
py
|
Python
|
cdist/plugin.py
|
acerv/pytest-cdist
|
24a3f0987c3bc2821b91374c93d6b1303a7aca81
|
[
"MIT"
] | null | null | null |
cdist/plugin.py
|
acerv/pytest-cdist
|
24a3f0987c3bc2821b91374c93d6b1303a7aca81
|
[
"MIT"
] | null | null | null |
cdist/plugin.py
|
acerv/pytest-cdist
|
24a3f0987c3bc2821b91374c93d6b1303a7aca81
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
cdist-plugin implementation.
Author:
Andrea Cervesato <andrea.cervesato@mailbox.org>
"""
import pytest
from cdist import __version__
from cdist.redis import RedisResource
from cdist.resource import ResourceError
def pytest_addoption(parser):
"""
Plugin configurations.
"""
parser.addini(
"cdist_hostname",
"cdist resource hostname (default: localhost)",
default="localhost"
)
parser.addini(
"cdist_port",
"cdist resource port (default: 6379)",
default="6379"
)
parser.addini(
"cdist_autolock",
"Enable/Disable configuration automatic lock (default: True)",
default="True"
)
group = parser.getgroup("cdist")
group.addoption(
"--cdist-config",
action="store",
dest="cdist_config",
default="",
help="configuration key name"
)
class Plugin:
"""
cdist plugin definition, handling client and pytest hooks.
"""
def __init__(self):
self._client = None
@staticmethod
def _get_autolock(config):
"""
Return autolock parameter.
"""
autolock = config.getini("cdist_autolock").lower() == "true"
return autolock
def pytest_report_header(self, config):
"""
Create the plugin report to be shown during the session.
"""
config_name = config.option.cdist_config
if not config_name:
return None
# fetch configuration data
hostname = config.getini("cdist_hostname")
port = config.getini("cdist_port")
autolock = self._get_autolock(config)
# create report lines
lines = list()
lines.append("cdist %s -- resource: %s:%s, configuration: %s, autolock: %s" %
(__version__, hostname, port, config_name, autolock))
return lines
def pytest_sessionstart(self, session):
"""
Initialize client, fetch data and update pytest configuration.
"""
config_name = session.config.option.cdist_config
if not config_name:
return None
# fetch data
hostname = session.config.getini("cdist_hostname")
port = session.config.getini("cdist_port")
autolock = self._get_autolock(session.config)
# create client
try:
self._client = RedisResource(hostname=hostname, port=int(port))
if autolock:
self._client.lock(config_name)
# pull configuration
config = self._client.pull(config_name)
except ResourceError as err:
raise pytest.UsageError(err)
# update pytest configuration
for key, value in config.items():
try:
# check if key is available inside pytest configuration
session.config.getini(key)
except ValueError:
continue
session.config._inicache[key] = value
def pytest_sessionfinish(self, session, exitstatus):
"""
Unlock the configuration when the session finishes.
"""
config_name = session.config.option.cdist_config
if not config_name:
return None
autolock = self._get_autolock(session.config)
if autolock:
self._client.unlock(config_name)
def pytest_configure(config):
"""
Print out some session information.
"""
config.pluginmanager.register(Plugin(), "plugin.cdist")
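# --- Added usage sketch (not part of the original file) ---
# Hypothetical invocation: with cdist_hostname/cdist_port/cdist_autolock set
# in pytest.ini and a Redis resource holding a configuration named "ci"
# (the key name is illustrative), the plugin pulls that configuration and
# patches pytest's ini cache before the session starts.
if __name__ == "__main__":
    pytest.main(["--cdist-config", "ci"])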
| 26.765152
| 85
| 0.601472
| 353
| 3,533
| 5.866856
| 0.31728
| 0.048286
| 0.041043
| 0.033317
| 0.184935
| 0.156929
| 0.133269
| 0.133269
| 0.090777
| 0.090777
| 0
| 0.003654
| 0.302859
| 3,533
| 131
| 86
| 26.969466
| 0.83719
| 0.16728
| 0
| 0.226667
| 0
| 0
| 0.139427
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.093333
| false
| 0
| 0.053333
| 0
| 0.226667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd8a85c0cecb1f0067a9c558a9299006820a9bf0
| 2,851
|
py
|
Python
|
superironic/utils.py
|
jimrollenhagen/superironic
|
45f8c50a881a0728c3d86e0783f9ee6baa47559d
|
[
"Apache-2.0"
] | null | null | null |
superironic/utils.py
|
jimrollenhagen/superironic
|
45f8c50a881a0728c3d86e0783f9ee6baa47559d
|
[
"Apache-2.0"
] | null | null | null |
superironic/utils.py
|
jimrollenhagen/superironic
|
45f8c50a881a0728c3d86e0783f9ee6baa47559d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from superironic import colors
from superironic import config
def get_envs_in_group(group_name):
"""
Takes a group_name and finds any environments that have a SUPERIRONIC_GROUP
configuration line that matches the group_name.
"""
envs = []
for section in config.ironic_creds.sections():
if (config.ironic_creds.has_option(section, 'SUPERIRONIC_GROUP') and
config.ironic_creds.get(section,
'SUPERIRONIC_GROUP') == group_name):
envs.append(section)
return envs
def is_valid_environment(env):
"""Check if config file contains `env`."""
valid_envs = config.ironic_creds.sections()
return env in valid_envs
def is_valid_group(group_name):
"""
Checks to see if the configuration file contains a SUPERIRONIC_GROUP
configuration option.
"""
valid_groups = []
for section in config.ironic_creds.sections():
if config.ironic_creds.has_option(section, 'SUPERIRONIC_GROUP'):
valid_groups.append(config.ironic_creds.get(section,
'SUPERIRONIC_GROUP'))
valid_groups = list(set(valid_groups))
return group_name in valid_groups
def print_valid_envs(valid_envs):
"""Prints the available environments."""
print("[%s] Your valid environments are:" %
(colors.gwrap('Found environments')))
print("%r" % valid_envs)
def warn_missing_ironic_args():
"""Warn user about missing Ironic arguments."""
msg = """
[%s] No arguments were provided to pass along to ironic.
The superironic script expects to get commands structured like this:
superironic [environment] [command]
Here are some example commands that may help you get started:
superironic prod node-list
superironic prod node-show
superironic prod port-list
"""
print(msg % colors.rwrap('Missing arguments'))
def rm_prefix(name):
"""
Removes the ironic_, os_, or ironicclient_ prefix from a string.
"""
if name.startswith('ironic_'):
return name[7:]
elif name.startswith('ironicclient_'):
return name[13:]
elif name.startswith('os_'):
return name[3:]
else:
return name
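# --- Added illustration (not part of the original file) ---
# Quick demonstration of rm_prefix on representative inputs:
assert rm_prefix('ironic_url') == 'url'
assert rm_prefix('ironicclient_insecure') == 'insecure'
assert rm_prefix('os_username') == 'username'
assert rm_prefix('password') == 'password'  # no known prefix: returned as-is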
| 30.98913
| 79
| 0.681866
| 366
| 2,851
| 5.177596
| 0.409836
| 0.044327
| 0.062797
| 0.039578
| 0.149868
| 0.138259
| 0.138259
| 0.092876
| 0.092876
| 0.092876
| 0
| 0.005477
| 0.231498
| 2,851
| 91
| 80
| 31.32967
| 0.859425
| 0.327955
| 0
| 0.083333
| 0
| 0
| 0.26049
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0.020833
| 0.041667
| 0
| 0.333333
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd8bcd859196def3cab1defab94ee20606249351
| 22,340
|
py
|
Python
|
torchreid/engine/image/classmemoryloss_QA.py
|
Arindam-1991/deep_reid
|
ab68d95c2229ef5b832a6a6b614a9b91e4984bd5
|
[
"MIT"
] | 1
|
2021-03-27T17:27:47.000Z
|
2021-03-27T17:27:47.000Z
|
torchreid/engine/image/classmemoryloss_QA.py
|
Arindam-1991/deep_reid
|
ab68d95c2229ef5b832a6a6b614a9b91e4984bd5
|
[
"MIT"
] | null | null | null |
torchreid/engine/image/classmemoryloss_QA.py
|
Arindam-1991/deep_reid
|
ab68d95c2229ef5b832a6a6b614a9b91e4984bd5
|
[
"MIT"
] | null | null | null |
from __future__ import division, print_function, absolute_import
import numpy as np
import torch, sys
import torch.nn.functional as F  # needed by F.normalize() in _evaluate()
import os.path as osp
from torchreid import metrics
from torchreid.losses import TripletLoss, CrossEntropyLoss
from torchreid.losses import ClassMemoryLoss
from ..engine import Engine
from ..pretrainer import PreTrainer
# Required for the new engine run definition
import time, datetime
from torch import nn
from torchreid.utils import (
MetricMeter, AverageMeter, re_ranking, open_all_layers, Logger,
open_specified_layers, visualize_ranked_results
)
from torch.utils.tensorboard import SummaryWriter
from torchreid.utils.serialization import load_checkpoint, save_checkpoint
class ImageQAConvEngine(Engine):
r"""Triplet-loss engine for image-reid.
Args:
datamanager (DataManager): an instance of ``torchreid.data.ImageDataManager``
or ``torchreid.data.VideoDataManager``.
model (nn.Module): model instance.
optimizer (Optimizer): an Optimizer.
margin (float, optional): margin for triplet loss. Default is 0.3.
weight_t (float, optional): weight for triplet loss. Default is 1.
weight_x (float, optional): weight for softmax loss. Default is 1.
scheduler (LRScheduler, optional): if None, no learning rate decay will be performed.
use_gpu (bool, optional): use gpu. Default is True.
label_smooth (bool, optional): use label smoothing regularizer. Default is True.
Examples::
import torchreid
datamanager = torchreid.data.ImageDataManager(
root='path/to/reid-data',
sources='market1501',
height=256,
width=128,
combineall=False,
batch_size=32,
num_instances=4,
train_sampler='RandomIdentitySampler' # this is important
)
model = torchreid.models.build_model(
name='resnet50',
num_classes=datamanager.num_train_pids,
loss='triplet'
)
model = model.cuda()
optimizer = torchreid.optim.build_optimizer(
model, optim='adam', lr=0.0003
)
scheduler = torchreid.optim.build_lr_scheduler(
optimizer,
lr_scheduler='single_step',
stepsize=20
)
engine = torchreid.engine.ImageTripletEngine(
datamanager, model, optimizer, margin=0.3,
weight_t=0.7, weight_x=1, scheduler=scheduler
)
engine.run(
max_epoch=60,
save_dir='log/resnet50-triplet-market1501',
print_freq=10
)
"""
def __init__(
self,
datamanager,
model,
optimizer,
matcher,
margin = 0.3,
weight_t=1,
weight_clsm=1,
scheduler=None,
use_gpu=True,
label_smooth=True,
mem_batch_size = 16,
):
super(ImageQAConvEngine, self).__init__(datamanager, use_gpu)
self.datamanager = datamanager
self.model = model
self.matcher = matcher
self.optimizer = optimizer
self.scheduler = scheduler
self.register_model('model', model, optimizer, scheduler)
assert weight_t >= 0 and weight_clsm >= 0
assert weight_t + weight_clsm > 0
self.weight_t = weight_t
self.weight_clsm = weight_clsm
self.criterion_t = TripletLoss(margin=margin)
self.criterion_clsmloss = ClassMemoryLoss(self.matcher, datamanager.num_train_pids, mem_batch_size = mem_batch_size)
if self.use_gpu:
self.criterion_clsmloss = self.criterion_clsmloss.cuda()
def save_model(self, epoch, rank1, save_dir):
save_checkpoint(
{
'model': self.model.module.state_dict(),
'criterion': self.criterion_clsmloss.module.state_dict(),
'optim': self.optimizer.state_dict(),
'epoch': epoch + 1,
'rank1': rank1
},
fpath = osp.join(save_dir, self.method_name, self.sub_method_name, 'checkpoint.pth.tar')
)
def pretrain(self, test_only, output_dir):
"""
This function either loads an already trained model or pre-trains a model before actual
training for a better starting point.
"""
if self.resume or test_only:
print('Loading checkpoint...')
if self.resume and (self.resume != 'ori'):
checkpoint = load_checkpoint(self.resume)
else:
checkpoint = load_checkpoint(osp.join(output_dir, self.method_name, self.sub_method_name, 'checkpoint.pth.tar'))
self.model.load_state_dict(checkpoint['model'])
self.criterion_clsmloss.load_state_dict(checkpoint['criterion'])
self.optimizer.load_state_dict(checkpoint['optim'])
start_epoch = checkpoint['epoch']
print("=> Start epoch {} ".format(start_epoch))
elif self.pre_epochs > 0:
pre_tr = PreTrainer(
self.model,
self.criterion_clsmloss,
self.optimizer,
self.datamanager,
self.pre_epochs,
self.pmax_steps,
self.pnum_trials)
result_file = osp.join(output_dir, self.method_name, 'pretrain_metric.txt')
self.model, self.criterion_clsmloss, self.optimizer = pre_tr.train(result_file, self.method_name, self.sub_method_name)
def train(self, print_freq=10, print_epoch=False, fixbase_epoch=0, open_layers=None):
print(".... Calling train defination from new engine run ... !")
losses = MetricMeter()
batch_time = AverageMeter()
data_time = AverageMeter()
info_dict = {} # Dictionary containing all the information (loss, accuracy, lr, etc.)
if self.weight_t > 0:
losses_t = AverageMeter()
if self.weight_clsm > 0:
losses_clsm = AverageMeter()
precisions = AverageMeter()
self.set_model_mode('train')
self.two_stepped_transfer_learning(
self.epoch, fixbase_epoch, open_layers
)
self.num_batches = len(self.train_loader)
end = time.time()
for self.batch_idx, data in enumerate(self.train_loader):
data_time.update(time.time() - end)
loss_summary = self.forward_backward(data)
batch_time.update(time.time() - end)
losses.update(loss_summary)
if self.weight_t > 0:
losses_t.update(loss_summary['loss_t'], self.targets_sz)
if self.weight_clsm > 0:
losses_clsm.update(loss_summary['loss_clsm'], self.targets_sz)
precisions.update(loss_summary['acc'], self.targets_sz)
if (self.batch_idx + 1) % print_freq == 0:
nb_this_epoch = self.num_batches - (self.batch_idx + 1)
nb_future_epochs = (
self.max_epoch - (self.epoch + 1)
) * self.num_batches
eta_seconds = batch_time.avg * (nb_this_epoch+nb_future_epochs)
eta_str = str(datetime.timedelta(seconds=int(eta_seconds)))
print(
'epoch: [{0}/{1}][{2}/{3}]\t'
'time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'eta {eta}\t'
'{losses}\t'
'lr {lr:.6f}'.format(
self.epoch + 1,
self.max_epoch,
self.batch_idx + 1,
self.num_batches,
batch_time=batch_time,
data_time=data_time,
eta=eta_str,
losses=losses,
lr=self.get_current_lr()
),
end='\r'
)
if self.writer is not None:
n_iter = self.epoch * self.num_batches + self.batch_idx
self.writer.add_scalar('Train/time', batch_time.avg, n_iter)
self.writer.add_scalar('Train/data', data_time.avg, n_iter)
for name, meter in losses.meters.items():
self.writer.add_scalar('Train/' + name, meter.avg, n_iter)
self.writer.add_scalar(
'Train/lr', self.get_current_lr(), n_iter
)
end = time.time()
info_dict['lr'] = list(map(lambda group: group['lr'], self.optimizer.param_groups))
self.update_lr()
# Returning the relevant info in a dictionary
if self.weight_t > 0:
info_dict['loss_t_avg'] = losses_t.avg
if self.weight_clsm > 0:
info_dict['loss_clsm_avg'] = losses_clsm.avg
info_dict['prec_avg'] = precisions.avg
return info_dict
def run(
self,
save_dir='log',
max_epoch=0,
start_epoch=0,
print_freq=10,  # print_freq has no effect if print_epoch is set to True
print_epoch=False,
fixbase_epoch=0,
open_layers=None,
start_eval=0,
eval_freq=-1,
test_only=False,
dist_metric='euclidean',
train_resume = False,
pre_epochs = 1,
pmax_steps = 2000,
pnum_trials = 10,
acc_thr = 0.6,
enhance_data_aug = False,
method_name = 'QAConv',
sub_method_name = 'res50_layer3',
qbatch_sz = None,
gbatch_sz = None,
normalize_feature=False,
visrank=False,
visrank_topk=10,
use_metric_cuhk03=False,
ranks=[1, 5, 10, 20],
rerank=False
):
r"""A unified pipeline for training and evaluating a model.
Args:
save_dir (str): directory to save model.
max_epoch (int): maximum epoch.
start_epoch (int, optional): starting epoch. Default is 0.
print_freq (int, optional): print_frequency. Default is 10.
fixbase_epoch (int, optional): number of epochs to train ``open_layers`` (new layers)
while keeping base layers frozen. Default is 0. ``fixbase_epoch`` is counted
in ``max_epoch``.
open_layers (str or list, optional): layers (attribute names) open for training.
start_eval (int, optional): from which epoch to start evaluation. Default is 0.
eval_freq (int, optional): evaluation frequency. Default is -1 (meaning evaluation
is only performed at the end of training).
test_only (bool, optional): if True, only runs evaluation on test datasets.
Default is False.
dist_metric (str, optional): distance metric used to compute distance matrix
between query and gallery. Default is "euclidean".
normalize_feature (bool, optional): performs L2 normalization on feature vectors before
computing feature distance. Default is False.
visrank (bool, optional): visualizes ranked results. Default is False. It is recommended to
enable ``visrank`` when ``test_only`` is True. The ranked images will be saved to
"save_dir/visrank_dataset", e.g. "save_dir/visrank_market1501".
visrank_topk (int, optional): top-k ranked images to be visualized. Default is 10.
use_metric_cuhk03 (bool, optional): use single-gallery-shot setting for cuhk03.
Default is False. This should be enabled when using cuhk03 classic split.
ranks (list, optional): cmc ranks to be computed. Default is [1, 5, 10, 20].
rerank (bool, optional): uses person re-ranking (by Zhong et al. CVPR'17).
Default is False. This is only enabled when test_only=True.
"""
self.resume = train_resume
self.pre_epochs = pre_epochs
self.pmax_steps = pmax_steps
self.pnum_trials = pnum_trials
self.acc_thr = acc_thr
self.enhance_data_aug = enhance_data_aug
self.method_name = method_name
self.sub_method_name = sub_method_name
self.qbatch_sz = qbatch_sz
self.gbatch_sz = gbatch_sz
if visrank and not test_only:
raise ValueError(
'visrank can be set to True only if test_only=True'
)
print(".... Running from new engine run defination ... !")
# Building log file and location to save model checkpoint
log_file = osp.join(save_dir, self.method_name, self.sub_method_name, 'pretrain_metric.txt')
sys.stdout = Logger(log_file)
# Pre-training the network for warm-start
self.pretrain(test_only, save_dir) # test_only automatically loads model from checkpoint
self.criterion_clsmloss = nn.DataParallel(self.criterion_clsmloss)
self.model = nn.DataParallel(self.model)
if test_only:
self.test(
dist_metric=dist_metric,
normalize_feature=normalize_feature,
visrank=visrank,
visrank_topk=visrank_topk,
save_dir=save_dir,
use_metric_cuhk03=use_metric_cuhk03,
ranks=ranks,
rerank=rerank
)
return
if self.writer is None:
self.writer = SummaryWriter(log_dir=save_dir)
time_start = time.time()
self.start_epoch = start_epoch
self.max_epoch = max_epoch
print('=> Start training')
for self.epoch in range(self.start_epoch, self.max_epoch):
info_dict = self.train(
print_freq=print_freq,
print_epoch = print_epoch,
fixbase_epoch=fixbase_epoch,
open_layers=open_layers
)
train_time = time.time() - time_start
lr = info_dict['lr']
if print_epoch:
if self.weight_t > 0 and self.weight_clsm > 0:
print(
'* Finished epoch %d at lr=[%g, %g, %g]. Loss_t: %.3f. Loss_clsm: %.3f. Acc: %.2f%%. Training time: %.0f seconds. \n'
% (self.epoch + 1, lr[0], lr[1], lr[2],
info_dict['loss_t_avg'], info_dict['loss_clsm_avg'], info_dict['prec_avg'] * 100, train_time))
elif self.weight_t > 0:
print(
'* Finished epoch %d at lr=[%g, %g, %g]. Loss_t: %.3f. Training time: %.0f seconds. \n'
% (self.epoch + 1, lr[0], lr[1], lr[2], info_dict['loss_t_avg'], train_time))
elif self.weight_clsm > 0:
print(
'* Finished epoch %d at lr=[%g, %g, %g]. Loss_clsm: %.3f. Acc: %.2f%%. Training time: %.0f seconds. \n'
% (self.epoch + 1, lr[0], lr[1], lr[2], info_dict['loss_clsm_avg'], info_dict['prec_avg'] * 100, train_time))
if (self.epoch + 1) >= start_eval \
and eval_freq > 0 \
and (self.epoch+1) % eval_freq == 0 \
and (self.epoch + 1) != self.max_epoch:
rank1 = self.test(
dist_metric=dist_metric,
normalize_feature=normalize_feature,
visrank=visrank,
visrank_topk=visrank_topk,
save_dir=save_dir,
use_metric_cuhk03=use_metric_cuhk03,
ranks=ranks
)
self.save_model(self.epoch, rank1, save_dir)
# Modify transforms and re-initialize the train dataloader
if not self.enhance_data_aug and self.epoch < self.max_epoch - 1:
if 'prec_avg' not in info_dict.keys():
self.enhance_data_aug = True
print('Start to Flip and Block only for triplet loss')
self.datamanager.QAConv_train_loader()
elif info_dict['prec_avg'] > self.acc_thr:
self.enhance_data_aug = True
print('\nAcc = %.2f%% > %.2f%%. Start to Flip and Block.\n' % (info_dict['prec_avg']* 100, self.acc_thr *100))
self.datamanager.QAConv_train_loader()
if self.max_epoch > 0:
print('=> Final test')
rank1 = self.test(
dist_metric=dist_metric,
normalize_feature=normalize_feature,
visrank=visrank,
visrank_topk=visrank_topk,
save_dir=save_dir,
use_metric_cuhk03=use_metric_cuhk03,
ranks=ranks
)
self.save_model(self.epoch, rank1, save_dir)
elapsed = round(time.time() - time_start)
elapsed = str(datetime.timedelta(seconds=elapsed))
print('Elapsed {}'.format(elapsed))
if self.writer is not None:
self.writer.close()
# Defining evaluation mechanism
@torch.no_grad()
def _evaluate(
self,
dataset_name='',
query_loader=None,
gallery_loader=None,
dist_metric='euclidean',
normalize_feature=False,
visrank=False,
visrank_topk=10,
save_dir='',
use_metric_cuhk03=False,
ranks=[1, 5, 10, 20],
rerank=False,
):
batch_time = AverageMeter()
def _feature_extraction(data_loader):
f_, pids_, camids_ = [], [], []
for batch_idx, data in enumerate(data_loader):
imgs, pids, camids = self.parse_data_for_eval(data)
if self.use_gpu:
imgs = imgs.cuda()
end = time.time()
features = self.extract_features(imgs)
batch_time.update(time.time() - end)
features = features.data.cpu()
f_.append(features)
pids_.extend(pids)
camids_.extend(camids)
f_ = torch.cat(f_, 0)
pids_ = np.asarray(pids_)
camids_ = np.asarray(camids_)
return f_, pids_, camids_
print('Extracting features from query set ...')
qf, q_pids, q_camids = _feature_extraction(query_loader)
print('Done, obtained {}-by-{} matrix'.format(qf.size(0), qf.size(1)))
print('Extracting features from gallery set ...')
gf, g_pids, g_camids = _feature_extraction(gallery_loader)
print('Done, obtained {}-by-{} matrix'.format(gf.size(0), gf.size(1)))
print('Speed: {:.4f} sec/batch'.format(batch_time.avg))
if normalize_feature:
print('Normalizing features with L2 norm ...')
qf = F.normalize(qf, p=2, dim=1)
gf = F.normalize(gf, p=2, dim=1)
print(
'Computing distance matrix is with metric={} ...'.format('QAConv_kernel')
)
distmat = metrics.pairwise_distance_using_QAmatcher(
self.matcher, qf, gf,
prob_batch_size = self.qbatch_sz,
gal_batch_size = self.gbatch_sz)
distmat = distmat.numpy()
if rerank:
print('Applying person re-ranking ...')
distmat_qq = metrics.pairwise_distance_using_QAmatcher(
self.matcher, qf, qf,
prob_batch_size = self.qbatch_sz,
gal_batch_size = self.qbatch_sz)
distmat_gg = metrics.pairwise_distance_using_QAmatcher(
self.matcher, gf, gf,
prob_batch_size = self.gbatch_sz,
gal_batch_size = self.gbatch_sz)
distmat = re_ranking(distmat, distmat_qq, distmat_gg)
print('Computing CMC and mAP ...')
cmc, mAP = metrics.evaluate_rank(
distmat,
q_pids,
g_pids,
q_camids,
g_camids,
use_metric_cuhk03=use_metric_cuhk03
)
print('** Results **')
print('mAP: {:.1%}'.format(mAP))
print('CMC curve')
for r in ranks:
print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))
if visrank:
visualize_ranked_results(
distmat,
self.datamanager.fetch_test_loaders(dataset_name),
self.datamanager.data_type,
width=self.datamanager.width,
height=self.datamanager.height,
save_dir=osp.join(save_dir, 'visrank_' + dataset_name),
topk=visrank_topk
)
return cmc[0], mAP
def forward_backward(self, data):
imgs, pids, camids, dsetids = self.parse_data_for_train_DG(data)
if self.use_gpu:
imgs = imgs.cuda()
pids = pids.cuda()
self.targets_sz = pids.size(0)
features = self.model(imgs)
loss = 0
loss_summary = {}
# print("Algorithm is at epoch : {}".format(self.epoch))
if self.weight_t > 0:
loss_t = self.compute_loss(self.criterion_t, features, pids)
loss += self.weight_t * loss_t
loss_summary['loss_t'] = loss_t.item()
if self.weight_clsm > 0:
loss_clsm, acc = self.compute_loss(self.criterion_clsmloss, features, pids)
loss += self.weight_clsm * loss_clsm
loss_summary['loss_clsm'] = loss_clsm.item()
loss_summary['acc'] = acc.item() #metrics.accuracy(outputs, pids)[0].item()
assert loss_summary
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
return loss_summary
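# --- Added usage sketch (not part of the original file) ---
# Hypothetical construction of this engine, mirroring the Examples section in
# the class docstring; `matcher` is assumed to be a QAConv matcher module and
# the other objects are built exactly as in that docstring example.
#
#   engine = ImageQAConvEngine(
#       datamanager, model, optimizer, matcher,
#       margin=0.3, weight_t=1, weight_clsm=1,
#       scheduler=scheduler, mem_batch_size=16,
#   )
#   engine.run(max_epoch=15, save_dir='log/qaconv-market1501', print_freq=10)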
| 40.32491
| 159
| 0.555237
| 2,520
| 22,340
| 4.70119
| 0.169841
| 0.012408
| 0.013928
| 0.006077
| 0.287077
| 0.214147
| 0.186376
| 0.14299
| 0.112602
| 0.105512
| 0
| 0.016002
| 0.35103
| 22,340
| 553
| 160
| 40.39783
| 0.801145
| 0.191092
| 0
| 0.199495
| 0
| 0.007576
| 0.092797
| 0.002757
| 0
| 0
| 0
| 0
| 0.007576
| 1
| 0.020202
| false
| 0
| 0.035354
| 0
| 0.070707
| 0.085859
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd8d82fd51795634599d592a12414f82293ec386
| 2,623
|
py
|
Python
|
api/app/tests/weather_models/endpoints/test_models_endpoints.py
|
bcgov/wps
|
71df0de72de9cd656dc9ebf8461ffe47cfb155f6
|
[
"Apache-2.0"
] | 19
|
2020-01-31T21:51:31.000Z
|
2022-01-07T14:40:03.000Z
|
api/app/tests/weather_models/endpoints/test_models_endpoints.py
|
bcgov/wps
|
71df0de72de9cd656dc9ebf8461ffe47cfb155f6
|
[
"Apache-2.0"
] | 1,680
|
2020-01-24T23:25:08.000Z
|
2022-03-31T23:50:27.000Z
|
api/app/tests/weather_models/endpoints/test_models_endpoints.py
|
bcgov/wps
|
71df0de72de9cd656dc9ebf8461ffe47cfb155f6
|
[
"Apache-2.0"
] | 6
|
2020-04-28T22:41:08.000Z
|
2021-05-05T18:16:06.000Z
|
""" Functional testing for /models/* endpoints.
"""
import os
import json
import importlib
import logging
import pytest
from pytest_bdd import scenario, given, then, when
from fastapi.testclient import TestClient
import app.main
from app.tests import load_sqlalchemy_response_from_json
from app.tests import load_json_file
logger = logging.getLogger(__name__)
@pytest.mark.usefixtures("mock_jwt_decode")
@scenario("test_models_endpoints.feature", "Generic model endpoint testing",
example_converters=dict(
codes=json.loads, endpoint=str, crud_mapping=load_json_file(__file__), expected_status_code=int,
expected_response=load_json_file(__file__), notes=str))
def test_model_predictions_summaries_scenario():
""" BDD Scenario for prediction summaries """
def _patch_function(monkeypatch, module_name: str, function_name: str, json_filename: str):
""" Patch module_name.function_name to return de-serialized json_filename """
def mock_get_data(*_):
dirname = os.path.dirname(os.path.realpath(__file__))
filename = os.path.join(dirname, json_filename)
return load_sqlalchemy_response_from_json(filename)
monkeypatch.setattr(importlib.import_module(module_name), function_name, mock_get_data)
@given("some explanatory <notes>")
def given_some_notes(notes: str):
""" Send notes to the logger. """
logger.info(notes)
@given("A <crud_mapping>", target_fixture='database')
def given_a_database(monkeypatch, crud_mapping: dict):
""" Mock the sql response """
for item in crud_mapping:
_patch_function(monkeypatch, item['module'], item['function'], item['json'])
return {}
@when("I call <endpoint> with <codes>")
def when_prediction(database: dict, codes: str, endpoint: str):
""" Make call to endpoint """
client = TestClient(app.main.app)
response = client.post(
endpoint, headers={'Authorization': 'Bearer token'}, json={'stations': codes})
if response.status_code == 200:
database['response_json'] = response.json()
database['status_code'] = response.status_code
@then('The <expected_status_code> is matched')
def assert_status_code(database: dict, expected_status_code: str):
""" Assert that the status code is as expected
"""
assert database['status_code'] == int(expected_status_code)
@then('The <expected_response> is matched')
def assert_response(database: dict, expected_response: dict):
""" "Catch all" test that blindly checks the actual json response against an expected response. """
assert database['response_json'] == expected_response
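# --- Added illustration (not part of the original file) ---
# For reference, a scenario outline matching the step definitions above would
# look roughly like this in test_models_endpoints.feature (illustrative only;
# the real feature file supplies example tables for each placeholder):
#
#   Scenario Outline: Generic model endpoint testing
#       Given some explanatory <notes>
#       And A <crud_mapping>
#       When I call <endpoint> with <codes>
#       Then The <expected_status_code> is matched
#       And The <expected_response> is matched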
| 34.973333
| 110
| 0.733511
| 335
| 2,623
| 5.474627
| 0.325373
| 0.054526
| 0.039258
| 0.019629
| 0.081788
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001351
| 0.153641
| 2,623
| 74
| 111
| 35.445946
| 0.824775
| 0.139916
| 0
| 0
| 0
| 0
| 0.145966
| 0.023119
| 0
| 0
| 0
| 0
| 0.088889
| 1
| 0.177778
| false
| 0
| 0.244444
| 0
| 0.466667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd944074d8e83c269f766de8fed07825db678f3d
| 8,123
|
py
|
Python
|
COT/helpers/tests/test_fatdisk.py
|
morneaup/cot
|
3d4dc7079a33aa0c09216ec339b44f84ab69ff4b
|
[
"MIT"
] | 81
|
2015-01-18T22:31:42.000Z
|
2022-03-14T12:34:33.000Z
|
COT/helpers/tests/test_fatdisk.py
|
morneaup/cot
|
3d4dc7079a33aa0c09216ec339b44f84ab69ff4b
|
[
"MIT"
] | 67
|
2015-01-05T15:24:39.000Z
|
2021-08-16T12:44:58.000Z
|
COT/helpers/tests/test_fatdisk.py
|
morneaup/cot
|
3d4dc7079a33aa0c09216ec339b44f84ab69ff4b
|
[
"MIT"
] | 20
|
2015-07-09T14:20:25.000Z
|
2021-09-18T17:59:57.000Z
|
#!/usr/bin/env python
#
# fatdisk.py - Unit test cases for COT.helpers.fatdisk submodule.
#
# March 2015, Glenn F. Matthews
# Copyright (c) 2014-2017 the COT project developers.
# See the COPYRIGHT.txt file at the top-level directory of this distribution
# and at https://github.com/glennmatthews/cot/blob/master/COPYRIGHT.txt.
#
# This file is part of the Common OVF Tool (COT) project.
# It is subject to the license terms in the LICENSE.txt file found in the
# top-level directory of this distribution and at
# https://github.com/glennmatthews/cot/blob/master/LICENSE.txt. No part
# of COT, including this file, may be copied, modified, propagated, or
# distributed except according to the terms contained in the LICENSE.txt file.
"""Unit test cases for the COT.helpers.fatdisk module."""
import os
import re
from distutils.version import StrictVersion
import mock
from COT.helpers.tests.test_helper import HelperTestCase
from COT.helpers.fatdisk import FatDisk
from COT.helpers import helpers
# pylint: disable=missing-type-doc,missing-param-doc,protected-access
@mock.patch('COT.helpers.fatdisk.FatDisk.download_and_expand_tgz',
side_effect=HelperTestCase.stub_download_and_expand_tgz)
class TestFatDisk(HelperTestCase):
"""Test cases for FatDisk helper class."""
def setUp(self):
"""Test case setup function called automatically prior to each test."""
self.helper = FatDisk()
self.maxDiff = None
super(TestFatDisk, self).setUp()
@mock.patch('COT.helpers.helper.check_output',
return_value="fatdisk, version 1.0.0-beta")
def test_get_version(self, *_):
"""Validate .version getter."""
self.helper._installed = True
self.assertEqual(StrictVersion("1.0.0"), self.helper.version)
@mock.patch('COT.helpers.helper.check_output')
@mock.patch('subprocess.check_call')
def test_install_already_present(self,
mock_check_call,
mock_check_output,
*_):
"""Trying to re-install is a no-op."""
self.helper._installed = True
self.helper.install()
mock_check_output.assert_not_called()
mock_check_call.assert_not_called()
@mock.patch('platform.system', return_value='Linux')
@mock.patch('os.path.isdir', return_value=False)
@mock.patch('os.path.exists', return_value=False)
@mock.patch('os.makedirs', side_effect=OSError)
@mock.patch('distutils.spawn.find_executable', return_value="/foo")
@mock.patch('shutil.copy', return_value=True)
@mock.patch('COT.helpers.helper.check_output', return_value="")
@mock.patch('subprocess.check_call')
def test_install_apt_get(self,
mock_check_call,
mock_check_output,
mock_copy,
*_):
"""Test installation via 'apt-get'."""
self.enable_apt_install()
helpers['dpkg']._installed = True
for name in ['make', 'clang', 'gcc', 'g++']:
helpers[name]._installed = False
self.helper.install()
self.assertSubprocessCalls(
mock_check_output,
[
['dpkg', '-s', 'make'],
['dpkg', '-s', 'gcc'],
])
self.assertSubprocessCalls(
mock_check_call,
[
['apt-get', '-q', 'update'],
['apt-get', '-q', 'install', 'make'],
['apt-get', '-q', 'install', 'gcc'],
['./RUNME'],
['sudo', 'mkdir', '-p', '-m', '755', '/usr/local/bin'],
])
self.assertTrue(re.search("/fatdisk$", mock_copy.call_args[0][0]))
self.assertEqual('/usr/local/bin', mock_copy.call_args[0][1])
self.assertAptUpdated()
# Make sure we don't call apt-get update/install again unnecessarily.
mock_check_output.reset_mock()
mock_check_call.reset_mock()
mock_check_output.return_value = 'install ok installed'
# fakeout!
helpers['make']._installed = False
self.helper._installed = False
os.environ['PREFIX'] = '/opt/local'
os.environ['DESTDIR'] = '/home/cot'
self.helper.install()
self.assertSubprocessCalls(
mock_check_output,
[
['dpkg', '-s', 'make'],
])
self.assertSubprocessCalls(
mock_check_call,
[
['./RUNME'],
['sudo', 'mkdir', '-p', '-m', '755',
'/home/cot/opt/local/bin'],
])
self.assertTrue(re.search("/fatdisk$", mock_copy.call_args[0][0]))
self.assertEqual('/home/cot/opt/local/bin', mock_copy.call_args[0][1])
def test_install_brew(self, *_):
"""Test installation via 'brew'."""
self.brew_install_test(['glennmatthews/fatdisk/fatdisk', '--devel'])
def test_install_port(self, *_):
"""Test installation via 'port'."""
self.port_install_test('fatdisk')
@mock.patch('platform.system', return_value='Linux')
@mock.patch('os.path.isdir', return_value=False)
@mock.patch('os.path.exists', return_value=False)
@mock.patch('os.makedirs', side_effect=OSError)
@mock.patch('distutils.spawn.find_executable', return_value='/foo')
@mock.patch('shutil.copy', return_value=True)
@mock.patch('subprocess.check_call')
def test_install_yum(self,
mock_check_call,
mock_copy,
*_):
"""Test installation via 'yum'."""
self.enable_yum_install()
for name in ['make', 'clang', 'gcc', 'g++']:
helpers[name]._installed = False
self.helper.install()
self.assertSubprocessCalls(
mock_check_call,
[
['yum', '--quiet', 'install', 'make'],
['yum', '--quiet', 'install', 'gcc'],
['./RUNME'],
['sudo', 'mkdir', '-p', '-m', '755', '/usr/local/bin'],
])
self.assertTrue(re.search("/fatdisk$", mock_copy.call_args[0][0]))
self.assertEqual('/usr/local/bin', mock_copy.call_args[0][1])
@mock.patch('platform.system', return_value='Linux')
@mock.patch('distutils.spawn.find_executable', return_value=None)
def test_install_linux_need_make_no_package_manager(self, *_):
"""Linux installation requires yum or apt-get if 'make' missing."""
self.select_package_manager(None)
for name in ['make', 'clang', 'gcc', 'g++']:
helpers[name]._installed = False
with self.assertRaises(NotImplementedError):
self.helper.install()
@staticmethod
def _find_make_only(name):
"""Stub for distutils.spawn.find_executable - only finds 'make'."""
if name == 'make':
return "/bin/make"
else:
return None
@mock.patch('platform.system', return_value='Linux')
@mock.patch('COT.helpers.helper.Helper')
@mock.patch('distutils.spawn.find_executable')
def test_install_linux_need_compiler_no_package_manager(self,
mock_find_exec,
*_):
"""Linux installation requires yum or apt-get if 'gcc' missing."""
self.select_package_manager(None)
for name in ['clang', 'gcc', 'g++']:
helpers[name]._installed = False
mock_find_exec.side_effect = self._find_make_only
with self.assertRaises(NotImplementedError):
self.helper.install()
@mock.patch('platform.system', return_value='Darwin')
@mock.patch('COT.helpers.fatdisk.FatDisk.installable',
new_callable=mock.PropertyMock, return_value=True)
def test_install_helper_mac_no_package_manager(self, *_):
"""Mac installation requires port."""
self.select_package_manager(None)
self.assertRaises(RuntimeError, self.helper.install)
| 40.014778
| 79
| 0.596331
| 928
| 8,123
| 5.038793
| 0.227371
| 0.050043
| 0.023952
| 0.02438
| 0.55154
| 0.476048
| 0.442686
| 0.390077
| 0.334902
| 0.279299
| 0
| 0.006539
| 0.265789
| 8,123
| 202
| 80
| 40.212871
| 0.777498
| 0.172596
| 0
| 0.493243
| 0
| 0
| 0.169931
| 0.070804
| 0
| 0
| 0
| 0
| 0.121622
| 1
| 0.074324
| false
| 0
| 0.047297
| 0
| 0.141892
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd97704e53ec2a3a2b53ad0c5d0eef703c39868d
| 4,414
|
py
|
Python
|
python/SessionCallbackPLSQL.py
|
synetcom/oracle-db-examples
|
e995ca265b93c0d6b7da9ad617994288b3a19a2c
|
[
"Apache-2.0"
] | 4
|
2019-10-26T06:21:32.000Z
|
2021-02-15T15:28:02.000Z
|
python/SessionCallbackPLSQL.py
|
synetcom/oracle-db-examples
|
e995ca265b93c0d6b7da9ad617994288b3a19a2c
|
[
"Apache-2.0"
] | null | null | null |
python/SessionCallbackPLSQL.py
|
synetcom/oracle-db-examples
|
e995ca265b93c0d6b7da9ad617994288b3a19a2c
|
[
"Apache-2.0"
] | 5
|
2019-10-26T06:21:31.000Z
|
2022-03-10T12:47:13.000Z
|
#------------------------------------------------------------------------------
# Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# SessionCallbackPLSQL.py
#
# Demonstrate how to use a session callback written in PL/SQL. The callback is
# invoked whenever the tag requested by the application does not match the tag
# associated with the session in the pool. It should be used to set session
# state, so that the application can count on known session state, which allows
# the application to reduce the number of round trips to the database.
#
# The primary advantage to this approach over the equivalent approach shown in
# SessionCallback.py is when DRCP is used, as the callback is invoked on the
# server and no round trip is required to set state.
#
# This script requires cx_Oracle 7.1 and higher.
#------------------------------------------------------------------------------
from __future__ import print_function
import cx_Oracle
import SampleEnv
# create pool with session callback defined
pool = cx_Oracle.SessionPool(SampleEnv.GetMainUser(),
SampleEnv.GetMainPassword(), SampleEnv.GetConnectString(), min=2,
max=5, increment=1, threaded=True,
sessionCallback="pkg_SessionCallback.TheCallback")
# truncate table logging calls to PL/SQL session callback
conn = pool.acquire()
cursor = conn.cursor()
cursor.execute("truncate table PLSQLSessionCallbacks")
conn.close()
# acquire session without specifying a tag; the callback will not be invoked as
# a result and no session state will be changed
print("(1) acquire session without tag")
conn = pool.acquire()
cursor = conn.cursor()
cursor.execute("select to_char(current_date) from dual")
result, = cursor.fetchone()
print("main(): result is", repr(result))
conn.close()
# acquire session, specifying a tag; since the session returned has no tag,
# the callback will be invoked; session state will be changed and the tag will
# be saved when the connection is closed
print("(2) acquire session with tag")
conn = pool.acquire(tag="NLS_DATE_FORMAT=SIMPLE")
cursor = conn.cursor()
cursor.execute("select to_char(current_date) from dual")
result, = cursor.fetchone()
print("main(): result is", repr(result))
conn.close()
# acquire session, specifying the same tag; since a session exists in the pool
# with this tag, it will be returned and the callback will not be invoked but
# the connection will still have the session state defined previously
print("(3) acquire session with same tag")
conn = pool.acquire(tag="NLS_DATE_FORMAT=SIMPLE")
cursor = conn.cursor()
cursor.execute("select to_char(current_date) from dual")
result, = cursor.fetchone()
print("main(): result is", repr(result))
conn.close()
# acquire session, specifying a different tag; since no session exists in the
# pool with this tag, a new session will be returned and the callback will be
# invoked; session state will be changed and the tag will be saved when the
# connection is closed
print("(4) acquire session with different tag")
conn = pool.acquire(tag="NLS_DATE_FORMAT=FULL;TIME_ZONE=UTC")
cursor = conn.cursor()
cursor.execute("select to_char(current_date) from dual")
result, = cursor.fetchone()
print("main(): result is", repr(result))
conn.close()
# acquire session, specifying a different tag but also specifying that a
# session with any tag can be acquired from the pool; a session with one of the
# previously set tags will be returned and the callback will be invoked;
# session state will be changed and the tag will be saved when the connection
# is closed
print("(4) acquire session with different tag but match any also specified")
conn = pool.acquire(tag="NLS_DATE_FORMAT=FULL;TIME_ZONE=MST", matchanytag=True)
cursor = conn.cursor()
cursor.execute("select to_char(current_date) from dual")
result, = cursor.fetchone()
print("main(): result is", repr(result))
conn.close()
# acquire session and display results from PL/SQL session logs
conn = pool.acquire()
cursor = conn.cursor()
cursor.execute("""
select RequestedTag, ActualTag
from PLSQLSessionCallbacks
order by FixupTimestamp""")
print("(5) PL/SQL session callbacks")
for requestedTag, actualTag in cursor:
print("Requested:", requestedTag, "Actual:", actualTag)
| 41.641509
| 79
| 0.705709
| 614
| 4,414
| 5.026059
| 0.267101
| 0.025275
| 0.034025
| 0.049903
| 0.501296
| 0.493195
| 0.480557
| 0.470188
| 0.434543
| 0.41348
| 0
| 0.003948
| 0.139329
| 4,414
| 105
| 80
| 42.038095
| 0.808371
| 0.516312
| 0
| 0.6
| 0
| 0
| 0.382634
| 0.138359
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.018182
| 0.054545
| 0
| 0.054545
| 0.236364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd9b3eca720cfbb505d3feb4ca4a2f19b87529e1
| 536
|
py
|
Python
|
mbrl/env/wrappers/gym_jump_wrapper.py
|
MaxSobolMark/mbrl-lib
|
bc8ccfe8a56b58d3ce5bae2c4ccdadd82ecdb594
|
[
"MIT"
] | null | null | null |
mbrl/env/wrappers/gym_jump_wrapper.py
|
MaxSobolMark/mbrl-lib
|
bc8ccfe8a56b58d3ce5bae2c4ccdadd82ecdb594
|
[
"MIT"
] | null | null | null |
mbrl/env/wrappers/gym_jump_wrapper.py
|
MaxSobolMark/mbrl-lib
|
bc8ccfe8a56b58d3ce5bae2c4ccdadd82ecdb594
|
[
"MIT"
] | null | null | null |
"""Reward wrapper that gives rewards for positive change in z axis.
Based on MOPO: https://arxiv.org/abs/2005.13239"""
from gym import Wrapper
class JumpWrapper(Wrapper):
def __init__(self, env):
super(JumpWrapper, self).__init__(env)
self._z_init = self.env.sim.data.qpos[1]
def step(self, action):
observation, reward, done, info = self.env.step(action)
z = self.env.sim.data.qpos[1]
reward = reward + 15 * max(z - self._z_init, 0)
return observation, reward, done, info
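# --- Added usage sketch (not part of the original file) ---
# Minimal example assuming MuJoCo is available and the wrapped env's qpos[1]
# is the torso height (e.g. Hopper); the env id is illustrative.
if __name__ == "__main__":
    import gym
    env = JumpWrapper(gym.make("Hopper-v2"))
    obs = env.reset()
    obs, reward, done, info = env.step(env.action_space.sample())
    print(reward)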
| 31.529412
| 67
| 0.654851
| 78
| 536
| 4.346154
| 0.551282
| 0.082596
| 0.064897
| 0.082596
| 0.112094
| 0.112094
| 0
| 0
| 0
| 0
| 0
| 0.033573
| 0.222015
| 536
| 16
| 68
| 33.5
| 0.779377
| 0.208955
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.1
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd9b964182281e0d8725c664433f2162b4f057ea
| 2,102
|
py
|
Python
|
img.py
|
svh2811/Advanced-Lane-Finding
|
f451f26ef126efcbef711e8c4a14d28d24b08262
|
[
"MIT"
] | null | null | null |
img.py
|
svh2811/Advanced-Lane-Finding
|
f451f26ef126efcbef711e8c4a14d28d24b08262
|
[
"MIT"
] | null | null | null |
img.py
|
svh2811/Advanced-Lane-Finding
|
f451f26ef126efcbef711e8c4a14d28d24b08262
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
import cv2
from thresholding import *
# images: a list of rows, where each row is a list of (title, image) pairs
def plot_images_along_row(images):
fig = plt.figure()
rows = len(images)
cols = len(images[0])
i = 0
for row in range(rows):
for col in range(cols):
a = fig.add_subplot(rows, cols, i+1)
if (len(images[row][col][1].shape) == 2):
imgplot = plt.imshow(images[row][col][1], cmap='gray')
else:
imgplot = plt.imshow(images[row][col][1])
a.set_title(images[row][col][0])
i += 1
plt.show()
plt.close()
img = cv2.imread("challenge_video_frames/02.jpg")
#"""
colorspace1 = cv2.cvtColor(img, cv2.COLOR_BGR2Luv)
channels1 = [
("L", colorspace1[:, :, 0]),
("u", colorspace1[:, :, 1]),
("v", colorspace1[:, :, 2])
]
#"""
"""
colorspace2 = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)
channels2 = [
("L", colorspace2[:, :, 0]),
("a", colorspace2[:, :, 1]),
("b", colorspace2[:, :, 2])
]
colorspace3 = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
channels3 = [
("H", colorspace3[:, :, 0]),
("S", colorspace3[:, :, 1]),
("V", colorspace3[:, :, 2])
]
colorspace4 = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
channels4 = [
("H", colorspace4[:, :, 0]),
("L", colorspace4[:, :, 1]),
("S", colorspace4[:, :, 2])
]
"""
rgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
gradx = gradient_thresh(rgb_img, orient="x", sobel_kernel=7, thresh=(8, 16))
grady = gradient_thresh(rgb_img, orient="y", sobel_kernel=3, thresh=(20, 100))
sobel_grads = [
("gray", cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)),
("gX", gradx),
("gY", grady)
]
mag_thresh_img = mag_thresh(rgb_img, sobel_kernel=3, mag_thresh=(20, 200))
mean_gX = cv2.medianBlur(gradx, 5)
dir_thresh_img = dir_threshold(rgb_img, sobel_kernel=3, thresh=(np.pi/2, 2*np.pi/3))
others = [
("Og Img", rgb_img),
("mag", mag_thresh_img),
("mean_gx", mean_gX)
]
plot_images_along_row([others, channels1, sobel_grads])
#plot_images_along_row([channels1, channels2, channels3, channels4])
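# --- Added illustration (not part of the original file) ---
# The script imports gradient_thresh/mag_thresh/dir_threshold from a local
# thresholding module that is not shown here. A sketch of one of them, with
# only the signature inferred from the calls above (the body is an assumed,
# standard Sobel thresholding implementation, not the repo's actual code):
#
#   def gradient_thresh(img, orient="x", sobel_kernel=3, thresh=(0, 255)):
#       gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
#       dx, dy = (1, 0) if orient == "x" else (0, 1)
#       sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, dx, dy, ksize=sobel_kernel))
#       scaled = np.uint8(255 * sobel / np.max(sobel))
#       binary = np.zeros_like(scaled)
#       binary[(scaled >= thresh[0]) & (scaled <= thresh[1])] = 1
#       return binary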
| 22.602151
| 84
| 0.597526
| 281
| 2,102
| 4.309609
| 0.366548
| 0.039637
| 0.069364
| 0.084228
| 0.229562
| 0.047894
| 0.047894
| 0
| 0
| 0
| 0
| 0.054054
| 0.207897
| 2,102
| 92
| 85
| 22.847826
| 0.673273
| 0.040913
| 0
| 0
| 0
| 0
| 0.041278
| 0.019308
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022727
| false
| 0
| 0.090909
| 0
| 0.113636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd9c7ab39a034416b3b55dc333af7b177eebd1ee
| 5,602
|
py
|
Python
|
disease_predictor_backend/disease_prediction.py
|
waizshahid/Disease-Predictor
|
2bf2e69631ddbf7ffce0b6c39adcb6816d4208b2
|
[
"MIT"
] | null | null | null |
disease_predictor_backend/disease_prediction.py
|
waizshahid/Disease-Predictor
|
2bf2e69631ddbf7ffce0b6c39adcb6816d4208b2
|
[
"MIT"
] | 7
|
2020-09-07T21:31:50.000Z
|
2022-02-26T22:28:30.000Z
|
disease_predictor_backend/disease_prediction.py
|
waizshahid/Disease-Predictor
|
2bf2e69631ddbf7ffce0b6c39adcb6816d4208b2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import flask
from flask import request, jsonify
import time
import sqlite3
import random
# import the necessary packages
from keras.preprocessing.image import img_to_array
from keras.models import load_model
from keras import backend
from imutils import build_montages
import cv2
import numpy as np
from flask_cors import CORS
import io
app = flask.Flask(__name__)
CORS(app)
conn = sqlite3.connect('database.db')
print("Opened database successfully")
conn.execute('CREATE TABLE IF NOT EXISTS Patients (id INTEGER PRIMARY KEY,firstName TEXT, lastName TEXT, ins_ID TEXT, city TEXT, dob TEXT)')
conn.execute('CREATE TABLE IF NOT EXISTS Spiral (id INTEGER PRIMARY KEY,positive INTEGER, negative INTEGER)')
conn.execute('CREATE TABLE IF NOT EXISTS Wave (id INTEGER PRIMARY KEY,positive INTEGER, negative INTEGER)')
conn.execute('CREATE TABLE IF NOT EXISTS Malaria (id INTEGER PRIMARY KEY,positive INTEGER, negative INTEGER)')
@app.route('/prediction', methods=['POST'])
def api_image():
# Database
print('API CALL')
firstName = request.args['fname']
lastName = request.args['lname']
ins_ID = request.args['ins_ID']
city = request.args['city']
dob = request.args['dob']
model_name = request.args["model"]
photo = request.files['photo']
in_memory_file = io.BytesIO()
photo.save(in_memory_file)
data = np.frombuffer(in_memory_file.getvalue(), dtype=np.uint8)  # fromstring is deprecated for binary data
color_image_flag = 1
orig = cv2.imdecode(data, color_image_flag)
model_path = ""
# load the pre-trained network
print("[INFO] loading pre-trained network...")
if model_name in "malaria":
print("Maalaria model loaded")
model_path = "malaria_model.model" # Please enter the path for Malaria model
elif model_name in "spiral":
print("Spiral model loaded")
model_path = "spiral_model.model" # Please enter the path for Spiral model
elif model_name in "wave":
print("Wave model loaded")
model_path = r"wave_model.model" # Please enter the path for wave model
model = load_model(model_path)
# initialize our list of results
results = []
# pre-process our image by converting it from BGR to RGB channel
# ordering (since our Keras model was trained on RGB ordering),
# resize it to 64x64 pixels, and then scale the pixel intensities
# to the range [0, 1]
image = cv2.cvtColor(orig, cv2.COLOR_BGR2RGB)
image = cv2.resize(image, (48, 48))
image = image.astype("float") / 255.0
# order channel dimensions (channels-first or channels-last)
# depending on our Keras backend, then add a batch dimension to
# the image
image = img_to_array(image)
image = np.expand_dims(image, axis=0)
# make predictions on the input image
pred = model.predict(image)
print("pred: ", pred)
pred = pred.argmax(axis=1)[0]
# an index of zero maps to the 'UnInfected' label while an index of
# one maps to the 'Infected' label (see the assignment below)
label = "UnInfected" if pred == 0 else "Infected"
color = (0, 0, 255) if pred == 0 else (0, 255, 0)
# resize our original input (so we can better visualize it) and
# then draw the label on the image
orig = cv2.resize(orig, (128, 128))
cv2.putText(orig, label, (3, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
color, 2)
# add the output image to our list of results
results.append(orig)
# Use the jsonify function from Flask to convert our list of
# Python dictionaries to the JSON format.
res = {}
with sqlite3.connect("database.db") as con:
cur = con.cursor()
cur.execute('INSERT INTO Patients VALUES(?,?,?,?,?,?)',(None,firstName, lastName, ins_ID, city, dob))
res=cur.execute('SELECT * FROM Patients')
        if model_name == "malaria":
if pred == 1:
cur.execute('INSERT INTO Malaria VALUES(?,?,?)',(None,1,0))
else:
cur.execute('INSERT INTO Malaria VALUES(?,?,?)',(None,0,1))
con.commit()
positive = cur.execute('SELECT SUM(positive) FROM Malaria')
positive = positive.fetchall()
negative = cur.execute('SELECT SUM(negative) FROM Malaria')
negative = negative.fetchall()
        elif model_name == "spiral":
if pred == 1:
cur.execute('INSERT INTO Spiral VALUES(?,?,?)',(None,1,0))
else:
cur.execute('INSERT INTO Spiral VALUES(?,?,?)',(None,0,1))
con.commit()
positive = cur.execute('SELECT SUM(positive) FROM Spiral')
positive = positive.fetchall()
negative = cur.execute('SELECT SUM(negative) FROM Spiral')
negative = negative.fetchall()
        elif model_name == "wave":
if pred == 1:
cur.execute('INSERT INTO Wave VALUES(?,?,?)',(None,1,0))
else:
cur.execute('INSERT INTO Wave VALUES(?,?,?)',(None,0,1))
con.commit()
positive = cur.execute('SELECT SUM(positive) FROM Wave')
positive = positive.fetchall()
negative = cur.execute('SELECT SUM(negative) FROM Wave')
negative = negative.fetchall()
if pred == 1:
res = {"Prediction":"1", "positive":positive, "negative":negative}
print(res)
else:
res = {"Prediction":"0", "positive":positive, "negative":negative}
print(res)
backend.clear_session()
return jsonify(res)
if __name__ == "__main__":
    app.run()
# In[ ]:
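# --- Hedged usage sketch (added; not part of the original file) ---
# A minimal client-side example of exercising the /prediction endpoint
# defined above. The local URL, port and "cell.png" sample image are
# assumptions for illustration, not facts from the source.
import requests  # client-side dependency, assumed available

def call_prediction(image_path="cell.png", model="malaria"):
    # the route reads patient fields from the query string and the image
    # from a multipart file field named 'photo'
    params = {
        "fname": "Jane", "lname": "Doe", "ins_ID": "123",
        "city": "Lahore", "dob": "1990-01-01", "model": model,
    }
    with open(image_path, "rb") as fh:
        resp = requests.post(
            "http://127.0.0.1:5000/prediction",
            params=params, files={"photo": fh},
        )
    # expected shape: {"Prediction": "0" or "1", "positive": ..., "negative": ...}
    return resp.json()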
| 30.950276
| 141
| 0.632453
| 734
| 5,602
| 4.762943
| 0.290191
| 0.040046
| 0.032037
| 0.040046
| 0.3627
| 0.320938
| 0.298055
| 0.197368
| 0.183352
| 0.15246
| 0
| 0.017665
| 0.252231
| 5,602
| 180
| 142
| 31.122222
| 0.816901
| 0.172974
| 0
| 0.233645
| 0
| 0.009346
| 0.263741
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009346
| false
| 0
| 0.121495
| 0
| 0.140187
| 0.084112
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd9eda7f958b9bb9b2b7af7adc0477c17f9fb5fc
| 19,230
|
py
|
Python
|
web/app.py
|
Luzkan/MessengerNotifier
|
462c6b1a9aa0a29a0dd6b5fc77d0677c61962d5d
|
[
"Linux-OpenIB"
] | 4
|
2020-06-01T09:01:47.000Z
|
2021-04-16T20:07:29.000Z
|
web/app.py
|
Luzkan/NotifAyy
|
462c6b1a9aa0a29a0dd6b5fc77d0677c61962d5d
|
[
"Linux-OpenIB"
] | 20
|
2020-06-05T16:54:36.000Z
|
2020-06-09T13:25:59.000Z
|
web/app.py
|
Luzkan/MessengerNotifier
|
462c6b1a9aa0a29a0dd6b5fc77d0677c61962d5d
|
[
"Linux-OpenIB"
] | 2
|
2020-05-07T04:51:00.000Z
|
2020-05-08T17:52:55.000Z
|
import logging
from flask import Flask, render_template, request, redirect, flash, session, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_login import UserMixin, LoginManager, login_user, logout_user
from datetime import datetime
from passlib.hash import sha256_crypt
import msnotifier.bot.siteMonitor as siteMonitor
import threading
import msnotifier.messenger as messenger
app = Flask(__name__)
app.config['SECRET_KEY'] = 'xDDDDsupresikretKEy'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///notifayy.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
# Login Handling
login_manager = LoginManager()
login_manager.login_view = 'auth.login'
login_manager.init_app(app)
# User_ID = Primary Key
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
# -------------------------
# - Database Structure -
# ALERT TABLE
# +----+-------------+-----------------+-------------------+---------------+---------------+
# | ID | TITLE (str) | PAGE (url) | DATE_ADDED (date) | USER_ID (key) | APPS_ID (key) |
# +----+-------------+-----------------+-------------------+---------------+---------------+
# | 1 | My Site | http://site.com | 07.06.2020 | 2 | 4 |
# | 2 | (...) | (...) | (...) | (...) | (...) |
# +----+-------------+-----------------+-------------------+---------------+---------------+
# > APPS_ID -> Key, which is: Primary Key in APPS Table
# > USER_ID -> Key, which is: Primary Key in USER Table
# APPS TABLE
# +----+----------------+-----------------+------------------+--------------+
# | ID | Discord (bool) | Telegram (bool) | Messenger (bool) | Email (bool) |
# +----+----------------+-----------------+------------------+--------------+
# | 4 | true | false | true | true |
# | 5 | (...) | (...) | (...) | (...) |
# +----+----------------+-----------------+------------------+--------------+
# > ID -> Primary Key, which is: Referenced by ALERTS TABLE (APPS_ID)
# USER TABLE
# +----+----------------+-----------------------+------------------+--------------------+--------------+
# | ID | Email (str)    | Password (str hashed) | Discord_Id (int) | Messenger_Id (str) | Logged (int) |
# +----+----------------+-----------------------+------------------+--------------------+--------------+
# | 2 | cool@gmail.com | <hash> | 21842147 | ??? | 1 |
# | 3 | (...) | (...) | (...) | (...) | |
# +----+----------------+-----------------------+------------------+--------------------+--------------+
# > ID -> Primary Key, which is: Referenced by ALERTS TABLE (USER_ID)
# -------------------------------
# - Database Classes Tables -
class Alert(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(100), nullable=False)
page = db.Column(db.String(100), nullable=False)
date_added = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
user_id = db.Column(db.Integer, nullable=False)
apps_id = db.Column(db.Integer, nullable=False)
def __repr__(self):
return f'Alert # {str(self.id)}'
class ChangesForDiscord(db.Model):
id = db.Column(db.Integer, primary_key=True)
alert_id = db.Column(db.Integer, nullable=False)
content = db.Column(db.String(200), nullable=False)
def __repr__(self):
return f'ChangesForDiscord # {str(self.id)}'
class Apps(db.Model):
id = db.Column(db.Integer, primary_key=True)
discord = db.Column(db.Boolean, nullable=False, default=False)
telegram = db.Column(db.Boolean, nullable=False, default=False)
messenger = db.Column(db.Boolean, nullable=False, default=False)
email = db.Column(db.Boolean, nullable=False, default=False)
def __repr__(self):
return f'Apps # {str(self.id)}. Status (d/t/m): ({str(self.discord)}/{str(self.telegram)}/{str(self.messenger)}/{str(self.email)})'
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(100), unique=True)
password = db.Column(db.String(100))
discord_id = db.Column(db.Integer, nullable=True)
messenger_l = db.Column(db.String(100), nullable=True)
messenger_token = db.Column(db.String(100), nullable=True)
telegram_id = db.Column(db.String(100), nullable=True)
logged = db.Column(db.Integer, nullable=True)
def __repr__(self):
return f'User: {str(self.email)}'
def get_items_for_messaging(id):
    a = Alert.query.filter_by(id=id).first()
    u = User.query.filter_by(id=a.user_id).first()
    bools = Apps.query.filter_by(id=a.apps_id).first()
    return [a, u, bools]
def add_to_changes(item):
item=ChangesForDiscord(alert_id=item[0],content=item[1])
db.session.add(item)
db.session.commit()
# --------------------------------
# - Helping Functions for DB -
def get_everything(alert_id):
al=Alert.query.filter_by(id=alert_id).first()
user=User.query.filter_by(id=al.user_id).first()
apps=Apps.query.filter_by(id=al.apps_id).first()
return al, user, apps
def allAlerts():
return Alert.query.all()
class Sending(threading.Thread):
def __init__(self,changes):
threading.Thread.__init__(self)
self.changes =changes
def run(self):
for item in self.changes:
            # extract alert_id and content from the item
content=item[1]
alert_id=item[0]
al, user, apps = get_everything(alert_id)
alertwebpage=al.page
mail=apps.email
msng=apps.messenger
discord=apps.discord
            if mail:
                email = user.email
                notifier = messenger.mail_chat()
                notifier.log_into(email, "")
                notifier.message_myself(content, alertwebpage)
            if msng:
                # the User model stores the Messenger login and token;
                # the plain password is intentionally not persisted
                fblogin = user.messenger_l
                fbpass = user.messenger_token
                notifier = messenger.mail_chat()
                notifier.log_into(fblogin, fbpass)
                notifier.message_myself(content, alertwebpage)
if discord==True:
add_to_changes(item)
class Detecting(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.alerts=[]
def get_all_alerts(self):
return [(i.id, i.page) for i in allAlerts()]
def delete_alert(self,alert_id):
for alert in self.alerts:
if alert[0]==alert_id:
self.alerts.remove(alert)
return 1
return -1
def add_alert(self,alert_id,adr):
self.alerts.append((alert_id,adr))
def run(self):
self.alerts = self.get_all_alerts()
        while True:
tags = ["h1", "h2", "h3", "p"]
changes=siteMonitor.get_diffs_string_format(siteMonitor.get_diffs(tags,[alert[0] for alert in self.alerts],[alert[1] for alert in self.alerts],16),tags)
if len(changes)!=0:
Sending(changes).start()
o=Detecting()
o.start()
def get_alerts():
# Getting current User ID and retrieving his alerts
cur_user_id = session["_user_id"]
all_alerts = Alert.query.filter_by(user_id=cur_user_id).order_by(Alert.date_added).all()
all_apps = get_apps(all_alerts)
# Adding to Alert Object the booleans for apps through apps_id key
for alert in all_alerts:
alert.messenger = all_apps[alert.id].messenger
alert.discord = all_apps[alert.id].discord
alert.telegram = all_apps[alert.id].telegram
alert.email = all_apps[alert.id].email
return all_alerts
def get_alerts_by_id(discordId: str):
all_alerts = Alert.query.filter_by(user_id=discordId).order_by(Alert.date_added).all()
all_apps = get_apps(all_alerts)
for alert in all_alerts:
alert.discord = all_apps[alert.id].discord
return all_alerts
def get_apps(all_alerts):
all_apps = {}
for alert in all_alerts:
all_apps[alert.id] = Apps.query.get(alert.apps_id)
return all_apps
# ---------------------------------------
# - Helping Functions for Site Walk -
def remember_me_handle():
if "_user_id" in session:
if session["remember_me"]:
app.logger.info('User was logged in - printing his site.')
all_alerts = get_alerts()
return render_template('index.html', alerts=all_alerts, emailuser=session['email'])
else:
app.logger.info('User was not logged in - printing landing page.')
return redirect('/index.html')
else:
return render_template('index.html')
def get_bool(string):
if string == "True" or string == "true":
return True
return False
# -----------------------
# - Main HREF Routes -
@app.route('/register', methods=['GET', 'POST'])
def auth():
app.logger.info('Registration Button pressed.')
if request.method == 'POST':
app.logger.info('Method: POST')
user_email = request.form['email']
user_password = request.form['password']
        # If this query returns a user, the email is already registered
        user = User.query.filter_by(email=user_email).first()
        # If the user already exists, redirect back
        if user:
            flash('Email address already exists')
            app.logger.warning("Email address already exists in the database.")
            return redirect('/')
        app.logger.info("Successfully added new user to database.")
# Hashing the Password
password_hashed = sha256_crypt.hash(user_password)
new_user = User(email=user_email, password=password_hashed)
# Add new user to DB
db.session.add(new_user)
db.session.commit()
flash('Registration went all fine! :3 You can now log in!')
return redirect('/')
else:
app.logger.warning("User somehow didn't use Method: POST.")
        flash('Something went wrong with sending the registration information.')
return redirect('/')
@app.route('/login', methods=['POST'])
def login_post():
app.logger.info('Login Button Pressed.')
if request.method == 'POST':
# Get User Informations from Form
user_email = request.form.get('email')
user_password = request.form.get('password')
remember = request.form.get('remember')
user = User.query.filter_by(email=user_email).first()
        # Check whether this user exists (verifying the password for a user
        # that is not in the DB would throw an error, hence this early exit)
        if not user:
            flash("There's no registered account with the given email address.")
            app.logger.warning(" User doesn't exist: " + user_email)
return redirect('/')
# --- Password Check
# Info: I'm printing hashed version, but we actually compare the original string with hashed version in db
pass_check = (sha256_crypt.verify(user_password, user.password))
app.logger.info(f"Result of pass check: {pass_check} - (input: {sha256_crypt.hash(user_password)}, db: {user.password})")
# ---
# Verifying Password
if not user or not pass_check:
flash('Please check your login details and try again.')
app.logger.warning("Wrong Credentials" + user_email)
return redirect('/')
app.logger.info("Succesfully logged in user: " + user_email)
# Remember Me Handling (saving in session and in param)
login_user(user, remember=remember)
session["remember_me"] = True if remember else False
session["email"] = user_email
# Apps Quality of Life display if already defined by user
session["disc"] = user.discord_id
session["mess"] = user.messenger_l
session["tele"] = user.telegram_id
        if user.discord_id is None:
            session["disc"] = ""
        if user.messenger_l is None:
            session["mess"] = ""
        if user.telegram_id is None:
            session["tele"] = ""
# Getting Alerts and loading the page for this user
return redirect('/index.html')
else:
return remember_me_handle()
return redirect('/')
@app.route('/alerts', methods=['GET', 'POST'])
def alerts():
if request.method == 'POST':
app.logger.info('Adding New Alert.')
# Creating App Alert
messenger_bool = get_bool(request.form['messenger'])
telegram_bool = get_bool(request.form['telegram'])
discord_bool = get_bool(request.form['discord'])
email_bool = get_bool(request.form['email'])
new_apps_bool = Apps(discord=discord_bool, telegram=telegram_bool, messenger=messenger_bool, email=email_bool)
        # First we add the app alert, then flush to retrieve its unique ID
db.session.add(new_apps_bool)
db.session.flush()
# Creating new Alert
alert_title = request.form['title']
alert_page = request.form['page']
current_user_id = session["_user_id"]
apps_bools_id = new_apps_bool.id
new_alert = Alert(title=alert_title, page=alert_page, user_id=current_user_id, apps_id=apps_bools_id)
db.session.add(new_alert)
db.session.flush()
o.add_alert(new_alert.id,new_alert.page)
db.session.commit()
return redirect('/index.html')
else:
app.logger.info('Loading Landing Page or User Main Page.')
return remember_me_handle()
# --------------------------------
# - Editing / Deleting Alerts -
@app.route('/alerts/delete/<int:id>')
def delete(id):
app.logger.info(f'Deleting Alert with ID: {id}')
alert = Alert.query.get_or_404(id)
db.session.delete(alert)
o.delete_alert(alert.id)
db.session.commit()
return redirect('/index.html')
# Made the alert editing very smooth - everything is handled from the main page
@app.route('/alerts/edit/<int:id>', methods=['GET', 'POST'])
def edit(id):
app.logger.info(f'Trying to edit Alert with ID: {id}')
# Retrieving the edited Alert from DB
o.delete_alert(id)
alert = Alert.query.get_or_404(id)
apps = Apps.query.get_or_404(alert.apps_id)
if request.method == 'POST':
app.logger.info(f'Editing Alert with ID: {id}')
# Receiving new inputs for this alert
alert.title = request.form['title']
alert.page = request.form['page']
apps.messenger = get_bool(request.form['messenger'])
apps.telegram = get_bool(request.form['telegram'])
apps.discord = get_bool(request.form['discord'])
apps.email = get_bool(request.form['email'])
# Updating the alert in DB
o.add_alert(alert.id, alert.page)
db.session.commit()
app.logger.info(f'Edited Alert with ID: {id}')
return redirect('/index.html')
# -----------------------------------------
# - Linking Discord/Messenger/Telegram -
@app.route('/discord_link', methods=['POST'])
def discord_link():
app.logger.info(f'Trying to link discord id.')
# Retrieving the current User info from DB
user = User.query.get_or_404(session["_user_id"])
if request.method == 'POST':
# Receiving new inputs for this alert
user.discord_id = request.form['discord_id']
session["disc"] = user.discord_id
# Updating the alert in DB
db.session.commit()
app.logger.info(f"Linked Discord for user {session['_user_id']} - id: {user.discord_id}")
return redirect('/index.html')
@app.route('/messenger_link', methods=['POST'])
def messenger_link():
app.logger.info(f'Trying to link messenger credentials.')
user = User.query.get_or_404(session["_user_id"])
if request.method == 'POST':
# Deadline Request Feature
user.messenger_l = request.form['messenger_l']
        # It's a bad idea to store a plain-text password in the DB;
        # the messenger_p variable holds the FB password only transiently
messenger_p = request.form['messenger_p']
session["mess"] = user.messenger_l
db.session.commit()
app.logger.info(f"Linked Messenger for user {session['_user_id']} - login: {user.messenger_l}")
return redirect('/index.html')
@app.route('/telegram_link', methods=['POST'])
def telegram_link():
app.logger.info(f'Trying to link telegram id.')
user = User.query.get_or_404(session["_user_id"])
if request.method == 'POST':
user.telegram_id = request.form['telegram_id']
session["tele"] = user.telegram_id
db.session.commit()
app.logger.info(f"Linked Telegram for user {session['_user_id']} - id: {user.telegram_id}")
return redirect('/index.html')
# ------------------------------------
# - HREF for Mainpage and Logout -
@app.route('/')
def index():
app.logger.info('Landing Page Visited.')
return remember_me_handle()
@app.route('/index.html', methods=['GET', 'POST'])
def go_home():
all_alerts = get_alerts()
return render_template('index.html', alerts=all_alerts, emailuser=session['email'], discsaved=session["disc"], messsaved=session["mess"], telesaved=session["tele"])
@app.route('/logout', methods=['GET', 'POST'])
def logout():
app.logger.info(f"User is logging out: {session['email']}")
logout_user()
return redirect('/')
@app.route('/changes', methods=['GET'])
def changes():
change = ChangesForDiscord.query.first()
if change is None:
return jsonify({'change': '', 'title': '', 'page': '', 'discid': -1})
    # read everything needed from the change before deleting it; once the
    # delete is committed the ORM object's attributes expire
    change_content = change.content
    alrt = Alert.query.filter_by(id=change.alert_id).first()
    db.session.delete(change)
    db.session.commit()
    usr = User.query.filter_by(id=alrt.user_id).first()
    if usr is None:
        return jsonify({'change': '', 'title': '', 'page': '', 'discid': -1})
    result = jsonify({'change': change_content, 'title': alrt.title, 'page': alrt.page, 'discid': usr.discord_id})
    return result
if __name__ == "__main__":
app.run(debug=True)
# ===== Notice Info 04-06-2020
# First of all, password encryption was added, so:
# > pip install passlib (507kb, guys)
#
# Keep in mind that expanding on existing models in the DB
# will cause errors due to nonexistent columns, so:
# Navigate to ./web (here's the app.py)
# > python
# > from app import db
# > db.reflect()
# > db.drop_all()
# > db.create_all()
# ===== Notice Info 05-06-2020
# To suppress all these annoying false-positive warnings with
# db.* and logger.*, just do this:
# > pip install pylint-flask (10 KB)
# Then in .vscode/settings.json (if you are using vscode), add:
# > "python.linting.pylintArgs": ["--load-plugins", "pylint-flask"]
# ===== Notice Info 06-06-2020
# > app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# ^ This is for the FSADeprecationWarning (adds significant overhead)
# and will be disabled by default in the future anyway
#
# Cleaned up this code a bit, and made it more visual and easy to read
# Added linking functionality for all buttons, so you can do w/e you want
# with them right now. Also added email bool for alerts
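# --- Hedged usage sketch (added; not from the original source) ---
# The /changes route above behaves like a pull queue for a Discord bot:
# each GET pops one pending change. A minimal polling client could look
# like this; the base URL and interval are assumptions.
import time
import requests

def poll_changes(base_url="http://127.0.0.1:5000", interval=5.0):
    while True:
        payload = requests.get(base_url + "/changes").json()
        if payload["discid"] != -1:
            # a real bot would now DM the user with this Discord id
            print(payload["discid"], payload["title"], payload["change"])
        time.sleep(interval)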
| 36.768642
| 168
| 0.5974
| 2,402
| 19,230
| 4.643214
| 0.166528
| 0.014525
| 0.019726
| 0.015063
| 0.358379
| 0.24944
| 0.194298
| 0.127141
| 0.083834
| 0.061777
| 0
| 0.008439
| 0.223557
| 19,230
| 523
| 169
| 36.768642
| 0.738531
| 0.249246
| 0
| 0.257764
| 0
| 0.006211
| 0.156078
| 0.020782
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102484
| false
| 0.034161
| 0.02795
| 0.021739
| 0.341615
| 0.006211
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd9f2efde7a1d0ab6cab653c4c31ffe2c9cae398
| 5,741
|
py
|
Python
|
odmlui/helpers.py
|
mpsonntag/odml-ui
|
bd1ba1b5a04e4409d1f5b05fc491411963ded1fd
|
[
"BSD-3-Clause"
] | 3
|
2017-03-06T17:00:45.000Z
|
2020-05-05T20:59:28.000Z
|
odmlui/helpers.py
|
mpsonntag/odml-ui
|
bd1ba1b5a04e4409d1f5b05fc491411963ded1fd
|
[
"BSD-3-Clause"
] | 138
|
2017-02-27T17:08:32.000Z
|
2021-02-10T14:06:45.000Z
|
odmlui/helpers.py
|
mpsonntag/odml-ui
|
bd1ba1b5a04e4409d1f5b05fc491411963ded1fd
|
[
"BSD-3-Clause"
] | 7
|
2017-03-07T06:39:18.000Z
|
2020-04-19T12:54:51.000Z
|
"""
The 'helpers' module provides various helper functions.
"""
import getpass
import json
import os
import subprocess
import sys
from odml import fileio
from odml.dtypes import default_values
from odml.tools.parser_utils import SUPPORTED_PARSERS
from .treemodel import value_model
try: # Python 3
from urllib.parse import urlparse, unquote, urljoin
from urllib.request import pathname2url
except ImportError: # Python 2
from urlparse import urlparse, urljoin
from urllib import unquote, pathname2url
def uri_to_path(uri):
"""
    *uri_to_path* parses a URI into an OS-specific file path.
:param uri: string containing a uri.
:return: OS specific file path.
"""
net_locator = urlparse(uri).netloc
curr_path = unquote(urlparse(uri).path)
file_path = os.path.join(net_locator, curr_path)
# Windows specific file_path handling
if os.name == "nt" and file_path.startswith("/"):
file_path = file_path[1:]
return file_path
def path_to_uri(path):
"""
Converts a passed *path* to a URI GTK can handle and returns it.
"""
uri = pathname2url(path)
uri = urljoin('file:', uri)
return uri
def get_extension(path):
"""
Returns the upper case file extension of a file
referenced by a passed *path*.
"""
ext = os.path.splitext(path)[1][1:]
ext = ext.upper()
return ext
def get_parser_for_uri(uri):
"""
Sanitize the given path, and also return the
odML parser to be used for the given path.
"""
path = uri_to_path(uri)
parser = get_extension(path)
if parser not in SUPPORTED_PARSERS:
parser = 'XML'
return parser
def get_parser_for_file_type(file_type):
"""
Checks whether a provided file_type is supported by the currently
available odML parsers.
Returns either the identified parser or XML as the fallback parser.
"""
parser = file_type.upper()
if parser not in SUPPORTED_PARSERS:
parser = 'XML'
return parser
def handle_section_import(section):
"""
Augment all properties of an imported section according to odml-ui needs.
:param section: imported odml.BaseSection
"""
for prop in section.properties:
handle_property_import(prop)
# Make sure properties down the rabbit hole are also treated.
for sec in section.sections:
handle_section_import(sec)
def handle_property_import(prop):
"""
Every odml-ui property requires at least one default value according
to its dtype, otherwise the property is currently broken.
Further the properties are augmented with 'pseudo_values' which need to be
initialized and added to each property.
:param prop: imported odml.BaseProperty
"""
if len(prop.values) < 1:
if prop.dtype:
prop.values = [default_values(prop.dtype)]
else:
prop.values = [default_values('string')]
create_pseudo_values([prop])
def create_pseudo_values(odml_properties):
"""
Creates a treemodel.Value for each value in an
odML Property and appends the resulting list
as *pseudo_values* to the passed odML Property.
"""
for prop in odml_properties:
values = prop.values
new_values = []
for index in range(len(values)):
val = value_model.Value(prop, index)
new_values.append(val)
prop.pseudo_values = new_values
def get_conda_root():
"""
Checks for an active Anaconda environment.
:return: Either the root of an active Anaconda environment or an empty string.
"""
# Try identifying conda the easy way
if "CONDA_PREFIX" in os.environ:
return os.environ["CONDA_PREFIX"]
# Try identifying conda the hard way
try:
conda_json = subprocess.check_output("conda info --json",
shell=True, stderr=subprocess.PIPE)
except subprocess.CalledProcessError as exc:
print("[Info] Conda check: %s" % exc)
return ""
if sys.version_info.major > 2:
conda_json = conda_json.decode("utf-8")
dec = json.JSONDecoder()
try:
root_path = dec.decode(conda_json)['default_prefix']
except ValueError as exc:
print("[Info] Conda check: %s" % exc)
return ""
if sys.version_info.major < 3:
root_path = str(root_path)
return root_path
def run_odmltables(file_uri, save_dir, odml_doc, odmltables_wizard):
"""
Saves an odML document to a provided folder with the file
ending '.odml' in format 'XML' to ensure an odmltables
supported file. It then executes odmltables with the provided wizard
and the created file.
:param file_uri: File URI of the odML document that is handed over to
odmltables.
:param save_dir: Directory where the temporary file is saved to.
:param odml_doc: An odML document.
:param odmltables_wizard: supported values are 'compare', 'convert',
'filter' and 'merge'.
"""
tail = os.path.split(uri_to_path(file_uri))[1]
tmp_file = os.path.join(save_dir, ("%s.odml" % tail))
fileio.save(odml_doc, tmp_file)
try:
subprocess.Popen(['odmltables', '-w', odmltables_wizard, '-f', tmp_file])
except Exception as exc:
print("[Warning] Error running odml-tables: %s" % exc)
def get_username():
"""
:return: Full name or username of the current user
"""
username = getpass.getuser()
try:
        # the pwd module is only available on POSIX systems
import pwd
fullname = pwd.getpwnam(username).pw_gecos
if fullname:
username = fullname
except ImportError:
pass
return username.rstrip(",")
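# --- Hedged usage sketch (added; not from the original source) ---
# Round-tripping a local path through path_to_uri/uri_to_path, the
# pattern the GTK file dialogs in odml-ui rely on. The sample filename
# is an assumption.
if __name__ == "__main__":
    sample = os.path.join(os.getcwd(), "example.odml")
    uri = path_to_uri(sample)
    print(uri)                      # e.g. file:///.../example.odml
    print(uri_to_path(uri))         # back to an OS-specific path
    print(get_parser_for_uri(uri))  # 'ODML' is unsupported, so falls back to 'XML'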
| 27.338095
| 82
| 0.659815
| 764
| 5,741
| 4.83377
| 0.315445
| 0.01733
| 0.009748
| 0.006499
| 0.058489
| 0.058489
| 0.058489
| 0.058489
| 0.058489
| 0.058489
| 0
| 0.003042
| 0.25553
| 5,741
| 209
| 83
| 27.4689
| 0.86102
| 0.351507
| 0
| 0.171717
| 0
| 0
| 0.053592
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0.030303
| 0.20202
| 0
| 0.414141
| 0.030303
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fd9fb12d6a255983397f47fb91d956bce471d4bb
| 3,511
|
py
|
Python
|
mindspore/dataset/transforms/c_transforms.py
|
Xylonwang/mindspore
|
ea37dc76f0a8f0b10edd85c2ad545af44552af1e
|
[
"Apache-2.0"
] | 1
|
2020-06-17T07:05:45.000Z
|
2020-06-17T07:05:45.000Z
|
mindspore/dataset/transforms/c_transforms.py
|
Xylonwang/mindspore
|
ea37dc76f0a8f0b10edd85c2ad545af44552af1e
|
[
"Apache-2.0"
] | null | null | null |
mindspore/dataset/transforms/c_transforms.py
|
Xylonwang/mindspore
|
ea37dc76f0a8f0b10edd85c2ad545af44552af1e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
This module c_transforms provides common operations, including OneHotOp and TypeCast.
"""
import numpy as np
import mindspore._c_dataengine as cde
from .validators import check_num_classes, check_de_type, check_fill_value, check_slice_op
from ..core.datatypes import mstype_to_detype
class OneHot(cde.OneHotOp):
"""
Tensor operation to apply one hot encoding.
Args:
num_classes (int): Number of classes of the label.
"""
@check_num_classes
def __init__(self, num_classes):
self.num_classes = num_classes
super().__init__(num_classes)
class Fill(cde.FillOp):
"""
Tensor operation to create a tensor filled with passed scalar value.
The output tensor will have the same shape and type as the input tensor.
Args:
fill_value (python types (str, int, float, or bool)) : scalar value
to fill created tensor with.
"""
@check_fill_value
def __init__(self, fill_value):
super().__init__(cde.Tensor(np.array(fill_value)))
class TypeCast(cde.TypeCastOp):
"""
Tensor operation to cast to a given MindSpore data type.
Args:
        data_type (mindspore.dtype): mindspore.dtype to be cast to.
"""
@check_de_type
def __init__(self, data_type):
data_type = mstype_to_detype(data_type)
self.data_type = str(data_type)
super().__init__(data_type)
class Slice(cde.SliceOp):
"""
Slice operation to extract a tensor out using the given n slices.
    The functionality of Slice is similar to NumPy's indexing feature.
    (Currently only rank-1 tensors are supported.)
Args:
*slices: Maximum n number of objects to slice a tensor of rank n.
One object in slices can be one of:
1. int: slice this index only. Negative index is supported.
2. slice object: slice the generated indices from the slice object. Similar to `start:stop:step`.
3. None: slice the whole dimension. Similar to `:` in python indexing.
        4. Ellipsis ...: slice all dimensions between the two slices.
Examples:
>>> # Data before
>>> # | col |
>>> # +---------+
>>> # | [1,2,3] |
>>> # +---------|
>>> data = data.map(operations=Slice(slice(1,3))) # slice indices 1 and 2 only
>>> # Data after
>>> # | col |
>>> # +------------+
>>> # | [1,2] |
>>> # +------------|
"""
@check_slice_op
def __init__(self, *slices):
dim0 = slices[0]
if isinstance(dim0, int):
dim0 = [dim0]
elif dim0 is None:
dim0 = True
elif isinstance(dim0, slice):
dim0 = (dim0.start, dim0.stop, dim0.step)
elif dim0 is Ellipsis:
dim0 = True
super().__init__(dim0)
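# --- Hedged illustration (added; not part of the original module) ---
# A plain-NumPy sketch of the semantics OneHot implements, to make the
# transform concrete without requiring the MindSpore C++ data engine.
def _one_hot_reference(labels, num_classes):
    # one row per label, a single 1.0 in the label's column
    out = np.zeros((len(labels), num_classes), dtype=np.float32)
    out[np.arange(len(labels)), labels] = 1.0
    return out

# _one_hot_reference([1, 0, 2], 3) ->
# [[0, 1, 0],
#  [1, 0, 0],
#  [0, 0, 1]]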
| 31.630631
| 111
| 0.621191
| 457
| 3,511
| 4.617068
| 0.407002
| 0.030332
| 0.020853
| 0.015166
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014128
| 0.254059
| 3,511
| 110
| 112
| 31.918182
| 0.791523
| 0.608659
| 0
| 0.060606
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121212
| false
| 0
| 0.121212
| 0
| 0.363636
| 0.030303
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fda32d9ef88615859faf1c308e9468dde8a656a0
| 2,264
|
py
|
Python
|
cgatpipelines/tools/pipeline_docs/pipeline_rrbs/trackers/rrbsReport.py
|
kevinrue/cgat-flow
|
02b5a1867253c2f6fd6b4f3763e0299115378913
|
[
"MIT"
] | 49
|
2015-04-13T16:49:25.000Z
|
2022-03-29T10:29:14.000Z
|
cgatpipelines/tools/pipeline_docs/pipeline_rrbs/trackers/rrbsReport.py
|
kevinrue/cgat-flow
|
02b5a1867253c2f6fd6b4f3763e0299115378913
|
[
"MIT"
] | 252
|
2015-04-08T13:23:34.000Z
|
2019-03-18T21:51:29.000Z
|
cgatpipelines/tools/pipeline_docs/pipeline_rrbs/trackers/rrbsReport.py
|
kevinrue/cgat-flow
|
02b5a1867253c2f6fd6b4f3763e0299115378913
|
[
"MIT"
] | 22
|
2015-05-21T00:37:52.000Z
|
2019-09-25T05:04:27.000Z
|
import re
from CGATReport.Tracker import *
from CGATReport.Utils import PARAMS as P
# get from config file
UCSC_DATABASE = "hg19"
EXPORTDIR = "export"
###################################################################
###################################################################
###################################################################
###################################################################
# Run configuration script
EXPORTDIR = P.get('exome_exportdir', P.get('exportdir', 'export'))
DATADIR = P.get('exome_datadir', P.get('datadir', '.'))
DATABASE = P.get('exome_backend', P.get('sql_backend', 'sqlite:///./csvdb'))
TRACKS = ['WTCHG_10997_01', 'WTCHG_10997_02']
###########################################################################
def splitLocus(locus):
    if ".." in locus:
        contig, start, end = re.match(r"(\S+):(\d+)\.\.(\d+)", locus).groups()
    elif "-" in locus:
        contig, start, end = re.match(r"(\S+):(\d+)-(\d+)", locus).groups()
    else:
        raise ValueError("unparseable locus: %s" % locus)
    return contig, int(start), int(end)
def linkToUCSC(contig, start, end):
'''build URL for UCSC.'''
ucsc_database = UCSC_DATABASE
link = "`%(contig)s:%(start)i..%(end)i <http://genome.ucsc.edu/cgi-bin/hgTracks?db=%(ucsc_database)s&position=%(contig)s:%(start)i..%(end)i>`_" \
% locals()
return link
###########################################################################
class RrbsTracker(TrackerSQL):
'''Define convenience tracks for plots'''
def __init__(self, *args, **kwargs):
TrackerSQL.__init__(self, *args, backend=DATABASE, **kwargs)
class SingleTableHistogram(TrackerSQL):
columns = None
table = None
group_by = None
def __init__(self, *args, **kwargs):
TrackerSQL.__init__(self, *args, **kwargs)
def __call__(self, track, slice=None):
data = self.getAll("SELECT %(group_by)s, %(columns)s FROM %(table)s")
return data
class imagesTracker(TrackerImages):
    '''Convenience Tracker for globbing images for a gallery plot'''
def __init__(self, *args, **kwargs):
Tracker.__init__(self, *args, **kwargs)
if "glob" not in kwargs:
raise ValueError("TrackerImages requires a:glob: parameter")
self.glob = kwargs["glob"]
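# --- Hedged usage sketch (added; not from the original source) ---
# splitLocus/linkToUCSC accept loci in either "contig:start..end" or
# "contig:start-end" form; the sample locus below is an assumption.
if __name__ == "__main__":
    contig, start, end = splitLocus("chr1:1000..2000")
    print(contig, start, end)              # chr1 1000 2000
    print(linkToUCSC(contig, start, end))  # reST-style UCSC browser link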
| 31.444444
| 149
| 0.518993
| 233
| 2,264
| 4.858369
| 0.416309
| 0.024735
| 0.063604
| 0.079505
| 0.198763
| 0.180212
| 0.150177
| 0.150177
| 0.150177
| 0.074205
| 0
| 0.008368
| 0.155477
| 2,264
| 71
| 150
| 31.887324
| 0.583682
| 0.069346
| 0
| 0.078947
| 0
| 0.026316
| 0.239234
| 0.017943
| 0
| 0
| 0
| 0
| 0
| 1
| 0.157895
| false
| 0
| 0.078947
| 0
| 0.473684
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fda439b250b37d77743740f40f14e6a0ae152586
| 512
|
py
|
Python
|
leetcode/python/985_sum_of_even_number_after_queries.py
|
VVKot/leetcode-solutions
|
7d6e599b223d89a7861929190be715d3b3604fa4
|
[
"MIT"
] | 4
|
2019-04-22T11:57:36.000Z
|
2019-10-29T09:12:56.000Z
|
leetcode/python/985_sum_of_even_number_after_queries.py
|
VVKot/coding-competitions
|
7d6e599b223d89a7861929190be715d3b3604fa4
|
[
"MIT"
] | null | null | null |
leetcode/python/985_sum_of_even_number_after_queries.py
|
VVKot/coding-competitions
|
7d6e599b223d89a7861929190be715d3b3604fa4
|
[
"MIT"
] | null | null | null |
from typing import List
class Solution:
def sumEvenAfterQueries(self,
A: List[int],
queries: List[List[int]]) -> List[int]:
even_sum = sum(num for num in A if not num & 1)
result = []
for val, idx in queries:
if not A[idx] & 1:
even_sum -= A[idx]
A[idx] += val
if not A[idx] & 1:
even_sum += A[idx]
result.append(even_sum)
return result
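# --- Hedged usage check (added; not from the original file) ---
# Walking through LeetCode 985's sample input by hand: the even sums
# after each query are 8, 6, 2 and 4.
if __name__ == "__main__":
    A = [1, 2, 3, 4]
    queries = [[1, 0], [-3, 1], [-4, 0], [2, 3]]
    assert Solution().sumEvenAfterQueries(A, queries) == [8, 6, 2, 4]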
| 26.947368
| 67
| 0.451172
| 63
| 512
| 3.603175
| 0.396825
| 0.088106
| 0.052863
| 0.079295
| 0.185022
| 0.185022
| 0.185022
| 0.185022
| 0.185022
| 0
| 0
| 0.010676
| 0.451172
| 512
| 18
| 68
| 28.444444
| 0.797153
| 0
| 0
| 0.133333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.066667
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fda704c8c3598728280ac25d245978289f33459f
| 1,540
|
py
|
Python
|
camera/start.py
|
IbrahimAhmad65/pythonApp
|
76c6e2a6de48d34b034bfc0e045cc345b90bf45c
|
[
"MIT"
] | null | null | null |
camera/start.py
|
IbrahimAhmad65/pythonApp
|
76c6e2a6de48d34b034bfc0e045cc345b90bf45c
|
[
"MIT"
] | null | null | null |
camera/start.py
|
IbrahimAhmad65/pythonApp
|
76c6e2a6de48d34b034bfc0e045cc345b90bf45c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import numpy as np
from PIL import Image
def processbad(array):
    # NOTE: known-broken earlier attempt, kept for reference: the loop below
    # writes back into `array` instead of `arr`, so the returned image is
    # built from the all-zeros `arr` buffer.
    #arr = np.zeros([array.size(),array[0].size(),array[0][0].size])
arr = np.zeros([int(np.size(array)/8),
int(np.size(array[0])/8),3],
dtype=np.byte)
# print (arr)
counter = 0
count = 0
for i in array:
for b in i:
array[counter][count][0] = b[0]
array[counter][count][1] = b[1]
array[counter][count][2] = b[2]
count +=1
counter +=1
image = Image.fromarray(arr)
return image
def process(img, red, green, blue):
array = np.array(img)# [widthxheightxpixels]
r = array[:,:,0]
g = array[:,:,1]
b = array[:,:,2]
return np.logical_and(np.logical_not(np.ma.masked_equal(r, red).mask), np.logical_and(np.logical_not(np.ma.masked_equal(b, blue).mask), (np.logical_not(np.ma.masked_equal(g, green).mask))))
    #return np.ma.masked_equal(r,0)
    # NOTE: everything below this return is unreachable leftover code from
    # an earlier per-pixel implementation, kept for reference.
counter = 0
count = 0
for i in array:
for b in i:
if(b[0] < 1 and b[1] < 1 and b[2] < 1):
array[counter][count] = [255,255,255,255]
#print(b)
else:
array[counter][count] = [0,0,0,255]
count +=1
counter +=1
count = 0
image = Image.fromarray(array)
return image
img = Image.open('checker.png')
#array = 255 - array
#invimg = Image.fromarray(array)
#invimg.save('testgrey-inverted.png')
img = Image.fromarray(process(img,0,0,0))
img.save("newchecker.png")
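# --- Hedged illustration (added; not part of the original script) ---
# A tiny demonstration of the channel-mask logic in process() on a 1x2
# RGB array; the pixel values are assumptions for illustration only.
if __name__ == "__main__":
    tiny = np.array([[[0, 0, 0], [9, 9, 9]]], dtype=np.uint8)
    mask = process(Image.fromarray(tiny), 0, 0, 0)
    # True only where every channel differs from the target colour
    print(mask)  # [[False  True]]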
| 26.101695
| 193
| 0.555844
| 231
| 1,540
| 3.666667
| 0.251082
| 0.011806
| 0.100354
| 0.070838
| 0.219599
| 0.199528
| 0.199528
| 0.167651
| 0.167651
| 0.167651
| 0
| 0.051121
| 0.275974
| 1,540
| 58
| 194
| 26.551724
| 0.70852
| 0.153896
| 0
| 0.384615
| 0
| 0
| 0.01932
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051282
| false
| 0
| 0.051282
| 0
| 0.179487
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fda7b7ab5e740804c9088eb3b79d539461e5afae
| 1,290
|
py
|
Python
|
esque/cli/commands/edit/topic.py
|
real-digital/esque
|
0b779fc308ce8bce45c1903f36c33664b2e832e7
|
[
"MIT"
] | 29
|
2019-05-10T21:12:38.000Z
|
2021-08-24T08:09:49.000Z
|
esque/cli/commands/edit/topic.py
|
real-digital/esque
|
0b779fc308ce8bce45c1903f36c33664b2e832e7
|
[
"MIT"
] | 103
|
2019-05-17T07:21:41.000Z
|
2021-12-02T08:29:00.000Z
|
esque/cli/commands/edit/topic.py
|
real-digital/esque
|
0b779fc308ce8bce45c1903f36c33664b2e832e7
|
[
"MIT"
] | 2
|
2019-05-28T06:45:14.000Z
|
2019-11-21T00:33:15.000Z
|
import click
from esque import validation
from esque.cli.autocomplete import list_topics
from esque.cli.helpers import edit_yaml, ensure_approval
from esque.cli.options import State, default_options
from esque.cli.output import pretty_topic_diffs
from esque.resources.topic import copy_to_local
@click.command("topic")
@click.argument("topic-name", required=True, autocompletion=list_topics)
@default_options
def edit_topic(state: State, topic_name: str):
"""Edit a topic.
Open the topic's configuration in the default editor. If the user saves upon exiting the editor,
all the given changes will be applied to the topic.
"""
controller = state.cluster.topic_controller
topic = state.cluster.topic_controller.get_cluster_topic(topic_name)
_, new_conf = edit_yaml(topic.to_yaml(only_editable=True), validator=validation.validate_editable_topic_config)
local_topic = copy_to_local(topic)
local_topic.update_from_dict(new_conf)
diff = controller.diff_with_cluster(local_topic)
if not diff.has_changes:
click.echo("Nothing changed.")
return
click.echo(pretty_topic_diffs({topic_name: diff}))
if ensure_approval("Are you sure?"):
controller.alter_configs([local_topic])
else:
click.echo("Canceled!")
| 34.864865
| 115
| 0.762016
| 182
| 1,290
| 5.17033
| 0.428571
| 0.057386
| 0.05101
| 0.057386
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151938
| 1,290
| 36
| 116
| 35.833333
| 0.860146
| 0.126357
| 0
| 0
| 0
| 0
| 0.047834
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.28
| 0
| 0.36
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fda8d40081b9fb4ec44129fb7abfaa7410ce0508
| 9,535
|
py
|
Python
|
robocorp-python-ls-core/src/robocorp_ls_core/pluginmanager.py
|
anton264/robotframework-lsp
|
6f8f89b88ec56b767f6d5e9cf0d3fb58847e5844
|
[
"ECL-2.0",
"Apache-2.0"
] | 92
|
2020-01-22T22:15:29.000Z
|
2022-03-31T05:19:16.000Z
|
robocorp-python-ls-core/src/robocorp_ls_core/pluginmanager.py
|
anton264/robotframework-lsp
|
6f8f89b88ec56b767f6d5e9cf0d3fb58847e5844
|
[
"ECL-2.0",
"Apache-2.0"
] | 604
|
2020-01-25T17:13:27.000Z
|
2022-03-31T18:58:24.000Z
|
robocorp-python-ls-core/src/robocorp_ls_core/pluginmanager.py
|
anton264/robotframework-lsp
|
6f8f89b88ec56b767f6d5e9cf0d3fb58847e5844
|
[
"ECL-2.0",
"Apache-2.0"
] | 39
|
2020-02-06T00:38:06.000Z
|
2022-03-15T06:14:19.000Z
|
# Original work Copyright 2018 Brainwy Software Ltda (Dual Licensed: LGPL / Apache 2.0)
# From https://github.com/fabioz/pyvmmonitor-core
# See ThirdPartyNotices.txt in the project root for license information.
# All modifications Copyright (c) Robocorp Technologies Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defines a PluginManager (which doesn't really have plugins, only a registry of extension points
and implementations for such extension points).
To use, create the extension points you want (any class starting with 'EP') and register
implementations for those.
I.e.:
pm = PluginManager()
pm.register(EPFoo, FooImpl, keep_instance=True)
pm.register(EPBar, BarImpl, keep_instance=False)
Then, later, to use it, it's possible to ask for instances through the PluginManager API:
foo_instances = pm.get_implementations(EPFoo) # Each time this is called, new
# foo_instances will be created
bar_instance = pm.get_instance(EPBar) # Each time this is called, the same bar_instance is returned.
Alternatively, it's possible to use a decorator to use a dependency injection pattern -- i.e.:
don't call me, I'll call you ;)
@inject(foo_instance=EPFoo, bar_instances=[EPBar])
def m1(foo_instance, bar_instances, pm):
for bar in bar_instances:
...
foo_instance.foo
"""
import functools
from pathlib import Path
from typing import TypeVar, Any, Dict, Type, Tuple, Optional, Union
def execfile(file, glob=None, loc=None):
import tokenize
with tokenize.open(file) as stream:
contents = stream.read()
exec(compile(contents + "\n", file, "exec"), glob, loc)
class NotInstanceError(RuntimeError):
pass
class NotRegisteredError(RuntimeError):
pass
class InstanceAlreadyRegisteredError(RuntimeError):
pass
T = TypeVar("T")
class PluginManager(object):
"""
This is a manager of plugins (which we refer to extension points and implementations).
Mostly, we have a number of EPs (Extension Points) and implementations may be registered
for those extension points.
The PluginManager is able to provide implementations (through #get_implementations) which are
not kept on being tracked and a special concept which keeps an instance alive for an extension
(through #get_instance).
"""
def __init__(self) -> None:
self._ep_to_impls: Dict[Type, list] = {}
self._ep_to_instance_impls: Dict[Tuple[Type, Optional[str]], list] = {}
self._ep_to_context_to_instance: Dict[Type, dict] = {}
self._name_to_ep: Dict[str, Type] = {}
self.exited = False
def load_plugins_from(self, directory: Path) -> int:
found_files_with_plugins = 0
filepath: Path
for filepath in directory.iterdir():
if filepath.is_file() and filepath.name.endswith(".py"):
namespace: dict = {"__file__": str(filepath)}
execfile(str(filepath), glob=namespace, loc=namespace)
register_plugins = namespace.get("register_plugins")
if register_plugins is not None:
found_files_with_plugins += 1
register_plugins(self)
return found_files_with_plugins
# This should be:
# def get_implementations(self, ep: Type[T]) -> List[T]:
# But isn't due to: https://github.com/python/mypy/issues/5374
def get_implementations(self, ep: Union[Type, str]) -> list:
assert not self.exited
if isinstance(ep, str):
ep = self._name_to_ep[ep]
impls = self._ep_to_impls.get(ep, [])
ret = []
for class_, kwargs in impls:
instance = class_(**kwargs)
ret.append(instance)
return ret
def register(
self,
ep: Type,
impl,
kwargs: Optional[dict] = None,
context: Optional[str] = None,
keep_instance: bool = False,
):
"""
:param ep:
:param str impl:
This is the full path to the class implementation.
:param kwargs:
:param context:
If keep_instance is True, it's possible to register it for a given
context.
:param keep_instance:
If True, it'll be only available through pm.get_instance and the
instance will be kept for further calls.
If False, it'll only be available through get_implementations.
"""
if kwargs is None:
kwargs = {}
assert not self.exited
if isinstance(ep, str):
raise ValueError("Expected the actual EP class to be passed.")
self._name_to_ep[ep.__name__] = ep
if keep_instance:
ep_to_instance_impls = self._ep_to_instance_impls
impls = ep_to_instance_impls.get((ep, context))
if impls is None:
impls = ep_to_instance_impls[(ep, context)] = []
else:
raise InstanceAlreadyRegisteredError(
"Unable to override when instance is kept and an implementation "
"is already registered."
)
else:
ep_to_impl = self._ep_to_impls
impls = ep_to_impl.get(ep)
if impls is None:
impls = ep_to_impl[ep] = []
impls.append((impl, kwargs))
def set_instance(self, ep: Type, instance, context=None) -> None:
if isinstance(ep, str):
raise ValueError("Expected the actual EP class to be passed.")
self._name_to_ep[ep.__name__] = ep
instances = self._ep_to_context_to_instance.get(ep)
if instances is None:
instances = self._ep_to_context_to_instance[ep] = {}
instances[context] = instance
def iter_existing_instances(self, ep: Union[Type, str]):
if isinstance(ep, str):
ep = self._name_to_ep[ep]
return self._ep_to_context_to_instance[ep].values()
def has_instance(self, ep: Union[Type, str], context=None):
if isinstance(ep, str):
ep_cls = self._name_to_ep.get(ep)
if ep_cls is None:
return False
try:
self.get_instance(ep, context)
return True
except NotRegisteredError:
return False
# This should be:
# def get_instance(self, ep: Type[T], context=None) -> T:
# But isn't due to: https://github.com/python/mypy/issues/5374
def get_instance(self, ep: Union[Type, str], context: Optional[str] = None) -> Any:
"""
Creates an instance in this plugin manager: Meaning that whenever a new EP is asked in
the same context it'll receive the same instance created previously (and it'll be
kept alive in the plugin manager).
"""
if self.exited:
raise AssertionError("PluginManager already exited")
if isinstance(ep, str):
ep = self._name_to_ep[ep]
try:
return self._ep_to_context_to_instance[ep][context]
except KeyError:
try:
impls = self._ep_to_instance_impls[(ep, context)]
except KeyError:
found = False
if context is not None:
found = True
try:
impls = self._ep_to_instance_impls[(ep, None)]
except KeyError:
found = False
if not found:
if ep in self._ep_to_impls:
# Registered but not a kept instance.
raise NotInstanceError()
else:
# Not registered at all.
raise NotRegisteredError()
assert len(impls) == 1
class_, kwargs = impls[0]
instances = self._ep_to_context_to_instance.get(ep)
if instances is None:
instances = self._ep_to_context_to_instance[ep] = {}
ret = instances[context] = class_(**kwargs)
return ret
__getitem__ = get_instance
def exit(self):
self.exited = True
self._ep_to_context_to_instance.clear()
self._ep_to_impls.clear()
def inject(**inject_kwargs):
def decorator(func):
@functools.wraps(func)
def inject_dec(*args, **kwargs):
pm = kwargs.get("pm")
if pm is None:
raise AssertionError(
"pm argument with PluginManager not passed (required for @inject)."
)
for key, val in inject_kwargs.items():
if key not in kwargs:
if val.__class__ is list:
kwargs[key] = pm.get_implementations(val[0])
else:
kwargs[key] = pm.get_instance(val)
return func(*args, **kwargs)
return inject_dec
return decorator
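# --- Hedged usage sketch, following the module docstring above ---
# EPDemo/DemoImpl are illustrative names, not part of the library.
class EPDemo(object):
    """Extension point: anything with a greet() method."""

class DemoImpl(EPDemo):
    def __init__(self, name="world"):
        self.name = name

    def greet(self):
        return "hello %s" % self.name

if __name__ == "__main__":
    pm = PluginManager()
    pm.register(EPDemo, DemoImpl, kwargs={"name": "plugins"}, keep_instance=True)
    inst = pm.get_instance(EPDemo)
    assert inst is pm.get_instance(EPDemo)  # kept instances are cached
    print(inst.greet())
    pm.exit()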
| 35.446097
| 100
| 0.607971
| 1,184
| 9,535
| 4.734797
| 0.236486
| 0.026757
| 0.02426
| 0.021406
| 0.219765
| 0.169105
| 0.150375
| 0.130753
| 0.102212
| 0.102212
| 0
| 0.003644
| 0.309177
| 9,535
| 268
| 101
| 35.578358
| 0.847427
| 0.338332
| 0
| 0.288591
| 0
| 0
| 0.048852
| 0
| 0
| 0
| 0
| 0
| 0.033557
| 1
| 0.087248
| false
| 0.040268
| 0.026846
| 0
| 0.221477
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fdac411261e3837a075f2bf9d23c9a72e80c187a
| 459
|
py
|
Python
|
code/data_owner_1/get_connection.py
|
ClarkYan/msc-thesis
|
c4fbd901c2664aa7140e5e82fb322ed0f578761a
|
[
"Apache-2.0"
] | 7
|
2017-11-05T08:22:51.000Z
|
2021-09-14T19:34:30.000Z
|
code/data_owner_1/get_connection.py
|
ClarkYan/msc-thesis
|
c4fbd901c2664aa7140e5e82fb322ed0f578761a
|
[
"Apache-2.0"
] | 1
|
2021-02-27T07:24:50.000Z
|
2021-04-24T03:29:12.000Z
|
code/data_owner_1/get_connection.py
|
ClarkYan/msc-thesis
|
c4fbd901c2664aa7140e5e82fb322ed0f578761a
|
[
"Apache-2.0"
] | 3
|
2019-04-15T03:22:22.000Z
|
2022-03-12T11:27:39.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- Author: ClarkYAN -*-
import requests
def set_up_connection(url, sender):
# files = {'dataset': open(filename, 'rb')}
user_info = {'name': sender}
r = requests.post(url, data=user_info, headers={'Connection': 'close'})
if r.text == "success":
conn_result = sender, "connect to the cloud"
else:
conn_result = sender, "cannot connect to the cloud"
return conn_result
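# --- Hedged usage sketch (added; not from the original source) ---
# The cloud URL below is a placeholder assumption; a server must be
# listening there for the POST to succeed.
if __name__ == "__main__":
    status = set_up_connection("http://localhost:8000/connect", "data_owner_1")
    print(status)  # e.g. ('data_owner_1', 'connected to the cloud')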
| 27
| 75
| 0.625272
| 59
| 459
| 4.745763
| 0.711864
| 0.107143
| 0.114286
| 0.121429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00277
| 0.213508
| 459
| 16
| 76
| 28.6875
| 0.772853
| 0.237473
| 0
| 0
| 0
| 0
| 0.210983
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fdb047a38cb8eadeccbc08314dec72d4acb12c4f
| 1,820
|
py
|
Python
|
tools/malloc-exp/plot-histogram.py
|
scottviteri/verified-betrfs
|
7af56c8acd943880cb19ba16d146c6a206101d9b
|
[
"BSD-2-Clause"
] | 15
|
2021-05-11T09:19:12.000Z
|
2022-03-14T10:39:05.000Z
|
tools/malloc-exp/plot-histogram.py
|
scottviteri/verified-betrfs
|
7af56c8acd943880cb19ba16d146c6a206101d9b
|
[
"BSD-2-Clause"
] | 3
|
2021-06-07T21:45:13.000Z
|
2021-11-29T23:19:59.000Z
|
tools/malloc-exp/plot-histogram.py
|
scottviteri/verified-betrfs
|
7af56c8acd943880cb19ba16d146c6a206101d9b
|
[
"BSD-2-Clause"
] | 7
|
2021-05-11T17:08:04.000Z
|
2022-02-23T07:19:36.000Z
|
#!/usr/bin/env python3
# Copyright 2018-2021 VMware, Inc., Microsoft Inc., Carnegie Mellon University, ETH Zurich, and University of Washington
# SPDX-License-Identifier: BSD-2-Clause
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import re
#import json
def parse_one_histogram(line):
#return json.loads(line)
assert line[0] == "{"
assert line[-2:] == "}\n"
line = line[1:-2]
pairs = line.split(",")[:-1]
histo = {}
for pair in pairs:
size,count = map(int, pair.split(":"))
if count>0:
histo[size] = count
return histo
def cdf(histo, by_size):
sizes = list(histo.keys())
sizes.sort()
xs = []
ys = []
accum = 0
for size in sizes:
count = histo[size]
accum += count * size if by_size else count
xs.append(size)
ys.append(accum)
#print(xs)
# normalize ys to 0..1
ys = [y/float(accum) for y in ys]
#print(ys)
return xs, ys
def parse():
t = 0
proc_heap = {}
malloc_total = {}
histos = {}
for line in open("malloc-exp/histograms", "r").readlines():
if line.startswith("proc-heap"):
fields = line.split()
proc_heap[t] = int(fields[1])
malloc_total[t] = int(fields[3])
t += 1
if line.startswith("{"):
histos[t] = parse_one_histogram(line)
max_histo_t = max(histos.keys())
print(max_histo_t)
max_histo = histos[max_histo_t]
print(max_histo)
# accumulate the CDF
line, = plt.plot(*cdf(max_histo, True))
line.set_label("by size")
line, = plt.plot(*cdf(max_histo, False))
line.set_label("by allocation count")
plt.xscale("log")
plt.legend()
plt.savefig("malloc-exp/size-cdf.png")
#plt.show()
parse()
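# --- Hedged illustration (added; not in the original script) ---
# parse_one_histogram() expects a single-line "{size:count,...,}" record
# terminated by "}\n" and drops zero-count buckets:
def _parse_demo():
    assert parse_one_histogram("{16:3,32:0,64:2,}\n") == {16: 3, 64: 2}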
| 24.594595
| 120
| 0.581868
| 251
| 1,820
| 4.131474
| 0.398406
| 0.054002
| 0.026037
| 0.040501
| 0.04243
| 0.04243
| 0
| 0
| 0
| 0
| 0
| 0.017424
| 0.274725
| 1,820
| 73
| 121
| 24.931507
| 0.768182
| 0.153846
| 0
| 0
| 0
| 0
| 0.058824
| 0.028758
| 0
| 0
| 0
| 0
| 0.037736
| 1
| 0.056604
| false
| 0
| 0.075472
| 0
| 0.169811
| 0.037736
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fdb2d4c9f5001e0fbebe90b3cb11e75763d20dd3
| 1,735
|
py
|
Python
|
tests/extensions/aria_extension_tosca/conftest.py
|
tnadeau/incubator-ariatosca
|
de32028783969bc980144afa3c91061c7236459c
|
[
"Apache-2.0"
] | null | null | null |
tests/extensions/aria_extension_tosca/conftest.py
|
tnadeau/incubator-ariatosca
|
de32028783969bc980144afa3c91061c7236459c
|
[
"Apache-2.0"
] | null | null | null |
tests/extensions/aria_extension_tosca/conftest.py
|
tnadeau/incubator-ariatosca
|
de32028783969bc980144afa3c91061c7236459c
|
[
"Apache-2.0"
] | 1
|
2020-06-16T15:13:06.000Z
|
2020-06-16T15:13:06.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
PyTest configuration module.
Add support for a "--tosca-parser" CLI option.
For more information on PyTest hooks, see the `PyTest documentation
<https://docs.pytest.org/en/latest/writing_plugins.html#pytest-hook-reference>`__.
"""
import pytest
from ...mechanisms.parsing.aria import AriaParser
def pytest_addoption(parser):
parser.addoption('--tosca-parser', action='store', default='aria', help='TOSCA parser')
def pytest_report_header(config):
tosca_parser = config.getoption('--tosca-parser')
return 'tosca-parser: {0}'.format(tosca_parser)
@pytest.fixture(scope='session')
def parser(request):
tosca_parser = request.config.getoption('--tosca-parser')
verbose = request.config.getoption('verbose') > 0
if tosca_parser == 'aria':
with AriaParser() as p:
p.verbose = verbose
yield p
else:
pytest.fail('configured tosca-parser not supported: {0}'.format(tosca_parser))
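# --- Hedged usage note (added; not in the original file) ---
# Programmatic equivalent of running `pytest --tosca-parser=aria` from
# the shell; conftest.py is not normally executed as a script.
if __name__ == "__main__":
    import sys
    sys.exit(pytest.main(['--tosca-parser', 'aria']))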
| 35.408163
| 91
| 0.736023
| 239
| 1,735
| 5.297071
| 0.506276
| 0.104265
| 0.020537
| 0.025276
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004828
| 0.164265
| 1,735
| 48
| 92
| 36.145833
| 0.868276
| 0.565994
| 0
| 0
| 0
| 0
| 0.191257
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.176471
| false
| 0
| 0.117647
| 0
| 0.352941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fdb6180fad4a97a9bd7fd4c10c96bb8a853e03d5
| 5,487
|
py
|
Python
|
tests/test_project.py
|
eruber/py_project_template
|
f0b12ab603e1277943f0323cbd0d8fb86fd04861
|
[
"MIT"
] | null | null | null |
tests/test_project.py
|
eruber/py_project_template
|
f0b12ab603e1277943f0323cbd0d8fb86fd04861
|
[
"MIT"
] | null | null | null |
tests/test_project.py
|
eruber/py_project_template
|
f0b12ab603e1277943f0323cbd0d8fb86fd04861
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test Project Template
The code below is derived from several locations:
REFERENCES:
REF1: https://docs.pytest.org/en/latest/contents.html
REF2: https://github.com/hackebrot/pytest-cookies
LOCATIONS
LOC1: https://github.com/audreyr/cookiecutter-pypackage
LOC2: https://github.com/mdklatt/cookiecutter-python-app
LOC3: https://github.com/Springerle/py-generic-project
"""
# ----------------------------------------------------------------------------
# Python Standard Library Imports (one per line)
# ----------------------------------------------------------------------------
import sys
import shlex
import os
import subprocess
# import yaml
import datetime
from contextlib import contextmanager
if sys.version_info > (3, 2):
    import io
    import os
else:
    raise RuntimeError("Use Python 3.3 or higher")  # raising a bare string is a TypeError in Python 3
# ----------------------------------------------------------------------------
# External Third Party Python Module Imports (one per line)
# ----------------------------------------------------------------------------
from cookiecutter.utils import rmtree
# from click.testing import CliRunner
# ----------------------------------------------------------------------------
# Project Specific Module Imports (one per line)
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
__author__ = 'E.R. Uber (eruber@gmail.com)'
__license__ = 'MIT'
__copyright__ = "Copyright (C) 2017 by E.R. Uber"
# ----------------------------------------------------------------------------
# Module Global & Constant Definitions
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
# Test Support...
# ----------------------------------------------------------------------------
# [LOC1]
@contextmanager
def inside_dir(dirpath):
    """
    Execute code from inside the given directory.
    :param dirpath: String, path of the directory where the command is run.
    """
    old_path = os.getcwd()
    try:
        os.chdir(dirpath)
        yield
    finally:
        os.chdir(old_path)


# [LOC1]
@contextmanager
def bake_in_temp_dir(cookies, *args, **kwargs):
    """
    Bake a cookiecutter template and delete the temporary directory created
    while running the tests.
    :param cookies: pytest_cookies.Cookies, cookie to be baked; its temporary
                    files will be removed afterwards.
    """
    result = cookies.bake(*args, **kwargs)
    try:
        yield result
    finally:
        rmtree(str(result.project))


# [LOC1]
def run_inside_dir(command, dirpath):
    """
    Run a command from inside a given directory, returning the exit status.
    :param command: Command that will be executed.
    :param dirpath: String, path of the directory where the command is run.
    """
    with inside_dir(dirpath):
        return subprocess.check_call(shlex.split(command))


# [LOC1]
def check_output_inside_dir(command, dirpath):
    """Run a command from inside a given directory, returning the command output."""
    with inside_dir(dirpath):
        return subprocess.check_output(shlex.split(command))


# [LOC1]
def project_info(result):
    """Get the toplevel dir, project_slug, and project dir from the baked cookies."""
    project_path = str(result.project)
    project_slug = os.path.split(project_path)[-1]
    project_dir = os.path.join(project_path, project_slug)
    return project_path, project_slug, project_dir
# ----------------------------------------------------------------------------
# Tests...
# ----------------------------------------------------------------------------
# [LOC1]
def test_year_compute_in_license_file(cookies):
    with bake_in_temp_dir(cookies) as result:
        license_file_path = result.project.join('LICENSE')
        now = datetime.datetime.now()
        assert str(now.year) in license_file_path.read()


# [LOC1]
# ["MIT", "BSD3", "ISC", "Apache2", "GNU-GPL-v3", "Not open source"]
def test_bake_selecting_license(cookies):
    license_strings = {
        'MIT': 'MIT License',
        'BSD3': 'Redistributions of source code must retain the above copyright notice, this',
        'ISC': 'ISC License',
        'Apache2': 'Licensed under the Apache License, Version 2.0',
        'GNU-GPL-v3': 'GNU GENERAL PUBLIC LICENSE',
    }
    for license, target_string in license_strings.items():
        with bake_in_temp_dir(cookies, extra_context={'license': license}) as result:
            assert target_string in result.project.join('LICENSE').read()
            # NOTE: the generated project needs its own setup.py for this
            # assertion to pass; the template ships a setup.py, but this
            # check reads the baked project's copy.
            assert license in result.project.join('setup.py').read()


def test_bake_project(cookies):
    result = cookies.bake(extra_context={'project_name': 'TestProject'})
    # p, s, d = project_info(result)
    # print(f"Project Path: {p}")
    # print(f"Project Slug: {s}")
    # print(f" Project Dir: {d}")
    if result.trace_back:
        print(result.trace_back_stack)
    assert result.exit_code == 0
    assert result.exception is None
    assert result.project.basename == 'python-testproject'
    assert result.project.isdir()


# ----------------------------------------------------------------------------
if __name__ == "__main__":
    pass
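As a hedged sketch of how the helpers above compose (the extra_context key and the build command are assumptions, not taken from this template), a further test in the same style could bake the template and then exercise the generated project from inside its directory:

# Hypothetical test following the [LOC1] pattern; 'python setup.py test'
# assumes the baked project ships a setup.py exposing a test command.
def test_bake_and_run_tests(cookies):
    with bake_in_temp_dir(cookies, extra_context={'project_name': 'Demo'}) as result:
        assert result.exit_code == 0
        assert result.exception is None
        # run the generated project's own test suite from inside it
        assert run_inside_dir('python setup.py test', str(result.project)) == 0

This mirrors the cookiecutter-pypackage suite referenced as LOC1, where baking and then building inside the temporary directory is the standard round-trip check.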
| 32.467456 | 101 | 0.546382 | 587 | 5,487 | 4.971039 | 0.361158 | 0.031186 | 0.019191 | 0.017478 | 0.170665 | 0.131597 | 0.115147 | 0.087046 | 0.087046 | 0.087046 | 0 | 0.006821 | 0.171679 | 5,487 | 168 | 102 | 32.660714 | 0.635204 | 0.50483 | 0 | 0.171429 | 0 | 0 | 0.161688 | 0 | 0 | 0 | 0 | 0 | 0.1 | 1 | 0.114286 | false | 0.014286 | 0.142857 | 0 | 0.3 | 0.014286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fdbd3757fbcb05b2b219ad506437967a7305ef32 | 3,583 | py | Python | event_handlers/voyager_event_handler.py | bigpizza/VoyagerTelegramBot | 8b1e3cbebe9041b0ca341ce4d5d9835f5e12b4d9 | ["MIT"] | null | null | null | event_handlers/voyager_event_handler.py | bigpizza/VoyagerTelegramBot | 8b1e3cbebe9041b0ca341ce4d5d9835f5e12b4d9 | ["MIT"] | null | null | null | event_handlers/voyager_event_handler.py | bigpizza/VoyagerTelegramBot | 8b1e3cbebe9041b0ca341ce4d5d9835f5e12b4d9 | ["MIT"] | null | null | null |
from abc import abstractmethod
from typing import Dict, Optional, Tuple

from curse_manager import CursesManager
from telegram import TelegramBot


class VoyagerEventHandler:
    """
    A base class for all event handlers to inherit from.
    The most important method is 'handle_event', which handles an incoming
    event from the Voyager application server.
    """

    def __init__(self, config,
                 telegram_bot: TelegramBot,
                 handler_name: str = 'DefaultHandler',
                 curses_manager: CursesManager = None):
        self.name = handler_name
        self.config = config
        self.telegram_bot = telegram_bot
        self.curses_manager = curses_manager

    def interested_event_names(self):
        """
        :return: List of event names this event_handler wants to process.
        """
        return []

    def interested_event_name(self):
        """
        :return: An event name this event_handler wants to process.
        """
        return None

    def interested_in_all_events(self):
        """
        :return: A boolean indicating whether this event handler wants to
                 process all possible events.
        """
        return False

    def get_name(self):
        """
        :return: The name of this event_handler.
        """
        return self.name

    def send_text_message(self, message: str):
        """
        Send a plain text message to Telegram, printing an error message on failure.
        :param message: The text to send to Telegram.
        """
        if self.telegram_bot:
            status, info_dict = self.telegram_bot.send_text_message(message)
            if status == 'ERROR':
                print(
                    f'\n[ERROR - {self.get_name()} - Text Message]'
                    f'[{info_dict["error_code"]}]'
                    f'[{info_dict["description"]}]')
        else:
            print(f'\n[ERROR - {self.get_name()} - Telegram Bot]')

    def send_image_message(self, base64_img: Optional[bytes] = None, image_fn: str = '',
                           msg_text: str = '',
                           as_doc: bool = True) -> Tuple[Optional[str], Optional[str]]:
        """
        Send an image message to Telegram, printing an error message on failure.
        :param base64_img: Image data encoded as base64.
        :param image_fn: The file name of the image.
        :param msg_text: Image caption as a string.
        :param as_doc: Whether the image should be sent as a document
                       (for larger image files).
        :return: Tuple of chat_id and message_id for status checks.
        """
        if self.telegram_bot:
            status, info_dict = self.telegram_bot.send_image_message(base64_img, image_fn, msg_text, as_doc)
            if status == 'ERROR':
                print(
                    f'\n[ERROR - {self.get_name()} - Image Message]'
                    f'[{info_dict["error_code"]}]'
                    f'[{info_dict["description"]}]')
            elif status == 'OK':
                return str(info_dict['chat_id']), str(info_dict['message_id'])
        else:
            print(f'\n[ERROR - {self.get_name()} - Telegram Bot]')
        return None, None

    @abstractmethod
    def handle_event(self, event_name: str, message: Dict):
        """
        Process the incoming event + message. Note: a single message might be
        processed by multiple event handlers. Don't modify the message dict.
        :param event_name: The event name in string format.
        :param message: A dictionary containing all messages.
        :return: Nothing
        """
        print('handling event', event_name, message)
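To illustrate the intended extension point (a sketch only; the subclass, the 'LogMsg' event name, and the 'Text' key are hypothetical, not taken from the bot's actual handlers), a concrete handler overrides the interest declarations and handle_event:

# Hypothetical subclass showing the extension pattern.
class LogFileEventHandler(VoyagerEventHandler):
    def __init__(self, config, telegram_bot, curses_manager=None):
        super().__init__(config, telegram_bot,
                         handler_name='LogFileEventHandler',
                         curses_manager=curses_manager)

    def interested_event_names(self):
        return ['LogMsg']  # assumed event name, for illustration only

    def handle_event(self, event_name: str, message: Dict):
        # Forward the raw text to Telegram without mutating `message`.
        self.send_text_message(message.get('Text', ''))

A dispatcher would match events against interested_event_names() (or interested_in_all_events()) and call handle_event on each interested handler, which is why handlers must not modify the shared message dict.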
| 36.191919 | 116 | 0.597823 | 436 | 3,583 | 4.75 | 0.277523 | 0.047803 | 0.036214 | 0.023177 | 0.259778 | 0.259778 | 0.245292 | 0.210526 | 0.210526 | 0.167069 | 0 | 0.003248 | 0.312587 | 3,583 | 98 | 117 | 36.561224 | 0.837596 | 0.322076 | 0 | 0.333333 | 0 | 0 | 0.15821 | 0.050738 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.083333 | 0 | 0.395833 | 0.104167 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |