Column schema (field | dtype), rebuilt from the flattened header:

| field | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_{5,6,7,8,9,10}grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_* / qsc_codepython_* (unsuffixed raw counterparts of the signals above) | int64, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) |
| effective | string |
| hits | int64 |
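For orientation, a minimal sketch of filtering rows of this shape on their quality signals. Only the column names come from the schema above; the parquet filename is a hypothetical local export, not something named in this dump.

```python
import pandas as pd

# Hypothetical local export of this table (the filename is an assumption).
df = pd.read_parquet("code_records.parquet")

# Keep Python files that parse (cate_ast == 1), are not flagged as
# autogenerated, and have little long-range duplication.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_codepython_cate_ast_quality_signal"] == 1)
    & (df["qsc_code_cate_autogen_quality_signal"] == 0)
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.1)
)
print(df.loc[mask, ["max_stars_repo_name", "size", "max_stars_count"]])
```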
hexsha: e93d157cf7aab5c1bcb7bfeee8e1f4209c714ad6 | size: 2,862 | ext: py | lang: Python

| | repo_name | head_hexsha | repo_path | licenses | count | event datetimes (min / max) |
|---|---|---|---|---|---|---|
| max_stars | armendu/recommander-system | e2d13838237584cc5cc4de2f4ea2d63f9f3b8889 | recommander-lib/src/main.py | ["MIT"] | 1 | 2021-04-29T04:15:13.000Z / 2021-04-29T04:15:13.000Z |
| max_issues | armendu/recommander-system | e2d13838237584cc5cc4de2f4ea2d63f9f3b8889 | recommander-lib/src/main.py | ["MIT"] | null | null / null |
| max_forks | armendu/recommander-system | e2d13838237584cc5cc4de2f4ea2d63f9f3b8889 | recommander-lib/src/main.py | ["MIT"] | null | null / null |

content:
```python
#!/usr/bin/env python3
"""
Main application file
"""

__author__ = "Armend Ukehaxhaj"
__version__ = "1.0.0"
__license__ = "MIT"

from logzero import logger
import numpy as np
import pandas as pd
import csv
import pickle
from word2vec import word2vec
from preprocessor import preprocessor
import json

primary_data_filename = "input/GoTrainedData.txt"
sample_data_filename = "input/Sample.txt"
amazon_sample = "input/amazon_co-ecommerce_sample.csv"


def open_input_file(filename):
    canvas = []
    with open('./input/initial-data.json') as json_file:
        data = json_file.readlines()
        for line in data:
            obj = json.loads(line)
            value = obj.get("title", "")
            value_brand = obj.get("brand", "")
            temp_sentance = value + " " + value_brand
            # print(temp_sentance)
            canvas.append(temp_sentance)
            # if value is not None:
            #     print(value)
            #     temp_sentance += value
            # if value_brand is not None:
            #     temp_sentance += value_brand
    # canvas = []
    # saved = pd.read_csv(filename)
    # canvas = saved['product_name']
    return canvas


def main():
    logger.info("Starting app")
    settings = {}
    settings['n'] = 5                 # dimension of word embeddings
    settings['window_size'] = 3       # context window +/- center word
    settings['min_count'] = 0         # minimum word count
    settings['epochs'] = 3            # 5000  # number of training epochs
    # number of negative words to use during training
    settings['neg_samp'] = 10
    settings['learning_rate'] = 0.01  # learning rate
    np.random.seed(0)                 # set the seed for reproducibility

    # corpus = [['the', 'quick', 'brown', 'fox',
    #            'jumped', 'over', 'the', 'lazy', 'dog']]
    # logger.info("Retrieving corpus")
    corpus = open_input_file(amazon_sample)

    # Pre-process data
    logger.info("Preprocess the data")
    pp = preprocessor()
    corpus = pp.preprocess(corpus)
    # for row in new_corpus:
    #     for word in row:
    #         logger.info(word)
    # logger.info("Preprocessed data: ")
    # logger.info(corpus)

    # INITIALIZE W2V MODEL
    # w2v = word2vec(settings)
    # generate training data
    # logger.info("Training")
    # training_data = w2v.generate_training_data(settings, new_corpus)
    # train word2vec model
    # w2v.train(training_data)

    model_filename = 'models/finalized_model-refactored.sav'
    # save the model to disk
    # pickle.dump(w2v, open(model_filename, 'wb'))

    # Load the pickled model
    w2v_from_pickle = pickle.load(open(model_filename, 'rb'))

    # Use the loaded pickled model to make predictions
    w2v_from_pickle.word_sim("microphone", 6)


if __name__ == "__main__":
    main()
```
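The commented-out calls in `main()` sketch the original train-and-save path. A hedged reconstruction of that path, assuming the local `word2vec` and `preprocessor` modules behave the way those commented calls suggest:

```python
# Sketch only: reassembles the training path that main() has commented out.
# The word2vec/preprocessor APIs are assumed from the calls in the file.
import pickle

import numpy as np
from preprocessor import preprocessor
from word2vec import word2vec

settings = {'n': 5, 'window_size': 3, 'min_count': 0, 'epochs': 3,
            'neg_samp': 10, 'learning_rate': 0.01}
np.random.seed(0)

corpus = preprocessor().preprocess(["sample product title"])  # toy corpus
w2v = word2vec(settings)
training_data = w2v.generate_training_data(settings, corpus)
w2v.train(training_data)

# persist under the filename that main() later loads
with open('models/finalized_model-refactored.sav', 'wb') as fh:
    pickle.dump(w2v, fh)
```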
avg_line_length: 27.519231 | max_line_length: 74 | alphanum_fraction: 0.615653
Nonzero `_quality_signal` columns (suffix dropped): num_words 339, num_chars 2,862, mean_word_length 5.00885, frac_words_unique 0.421829, frac_chars_top_2grams 0.041225, frac_chars_top_3grams 0.030035, frac_chars_digital 0.014444, frac_chars_whitespace 0.274284, size_file_byte 2,862, num_lines 103, num_chars_line_max 75, num_chars_line_mean 27.786408, frac_chars_alphabet 0.803081, frac_chars_comments 0.3826, frac_chars_string_length 0.157375, frac_chars_long_word_length 0.070267, codepython_cate_ast 1, codepython_frac_lines_func_ratio 0.045455, codepython_cate_var_zero false, codepython_frac_lines_import 0.181818, codepython_score_lines_no_logic 0.25; all other signals are 0. Unsuffixed qsc_* mirror columns: all 0 (frac_words_unique and frac_lines_string_concat: null). effective: 1 | hits: 0
hexsha: e93dd26357433b7e319a7cf157df9046ce5be7e6 | size: 2,378 | ext: py | lang: Python

| | repo_name | head_hexsha | repo_path | licenses | count | event datetimes (min / max) |
|---|---|---|---|---|---|---|
| max_stars | gagan-chawla/SparkAutoMapper | 7b0aca2e4bece42b3229550f3f2fcc9607f79437 | spark_auto_mapper/data_types/datetime.py | ["Apache-2.0"] | null | null / null |
| max_issues | gagan-chawla/SparkAutoMapper | 7b0aca2e4bece42b3229550f3f2fcc9607f79437 | spark_auto_mapper/data_types/datetime.py | ["Apache-2.0"] | null | null / null |
| max_forks | gagan-chawla/SparkAutoMapper | 7b0aca2e4bece42b3229550f3f2fcc9607f79437 | spark_auto_mapper/data_types/datetime.py | ["Apache-2.0"] | null | null / null |

content:
```python
from typing import Optional, List

from pyspark.sql import Column, DataFrame
from pyspark.sql.functions import coalesce, to_timestamp

from spark_auto_mapper.data_types.column import AutoMapperDataTypeColumn
from spark_auto_mapper.data_types.data_type_base import AutoMapperDataTypeBase
from spark_auto_mapper.data_types.literal import AutoMapperDataTypeLiteral
from spark_auto_mapper.helpers.value_parser import AutoMapperValueParser
from spark_auto_mapper.type_definitions.defined_types import AutoMapperDateInputType


class AutoMapperDateTimeDataType(AutoMapperDataTypeBase):
    def __init__(
        self,
        value: AutoMapperDateInputType,
        formats: Optional[List[str]] = None
    ) -> None:
        """
        Converts the value to a timestamp type in Spark

        :param value: value
        :param formats: (Optional) formats to use for trying to parse the value otherwise uses Spark defaults
        """
        super().__init__()
        self.value: AutoMapperDataTypeBase = value \
            if isinstance(value, AutoMapperDataTypeBase) \
            else AutoMapperValueParser.parse_value(value)
        self.formats: Optional[List[str]] = formats

    def get_column_spec(
        self, source_df: Optional[DataFrame], current_column: Optional[Column]
    ) -> Column:
        # if column is not of type date then convert it to date
        formats_column_specs: List[Column] = [
            to_timestamp(
                self.value.get_column_spec(
                    source_df=source_df, current_column=current_column
                ),
                format=format_
            ) for format_ in self.formats
        ] if self.formats else [
            to_timestamp(
                self.value.get_column_spec(
                    source_df=source_df, current_column=current_column
                )
            )
        ]
        if source_df is not None and isinstance(self.value, AutoMapperDataTypeColumn) \
                and not dict(source_df.dtypes)[self.value.value] == "timestamp":
            return coalesce(*formats_column_specs)
        elif isinstance(self.value, AutoMapperDataTypeLiteral):
            return coalesce(*formats_column_specs)
        else:
            column_spec = self.value.get_column_spec(
                source_df=source_df, current_column=current_column
            )
            return column_spec
```
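A minimal usage sketch for the class above, based only on the signature shown here rather than the wider spark_auto_mapper API (so treat the literal-parsing behavior as an assumption); an active SparkSession is required to build Column expressions:

```python
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[1]").getOrCreate()

# Parse a literal date string with an explicit format; with a literal value
# this should take the coalesce(to_timestamp(...)) branch above.
dt = AutoMapperDateTimeDataType(value="2021-04-29", formats=["yyyy-MM-dd"])
col = dt.get_column_spec(source_df=None, current_column=None)

spark.createDataFrame([(1,)], ["dummy"]).select(col.alias("parsed")).show()
```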
avg_line_length: 38.983607 | max_line_length: 109 | alphanum_fraction: 0.670311
Nonzero `_quality_signal` columns (suffix dropped): num_words 257, num_chars 2,378, mean_word_length 5.953307, frac_words_unique 0.287938, frac_chars_top_2grams 0.047059, frac_chars_top_3grams 0.042484, frac_chars_top_4grams 0.062092, frac_chars_dupe_5grams 0.236601, dupe_6grams 0.194771, dupe_7grams 0.139869, dupe_8grams 0.139869, dupe_9grams 0.139869, dupe_10grams 0.139869, frac_chars_whitespace 0.267031, size_file_byte 2,378, num_lines 60, num_chars_line_max 110, num_chars_line_mean 39.633333, frac_chars_alphabet 0.877797, frac_chars_comments 0.095038, frac_lines_dupe_lines 0.195652, frac_chars_string_length 0.004261, codepython_cate_ast 1, codepython_frac_lines_func_ratio 0.043478, codepython_cate_var_zero false, codepython_frac_lines_import 0.173913, codepython_score_lines_no_logic 0.304348; all other signals are 0. Unsuffixed qsc_* mirror columns: all 0 (frac_words_unique and frac_lines_string_concat: null). effective: 1 | hits: 0
hexsha: e93e898e14d862c8186e0e63f6ce2ac5ff75423c | size: 15,524 | ext: py | lang: Python

| | repo_name | head_hexsha | repo_path | licenses | count | event datetimes (min / max) |
|---|---|---|---|---|---|---|
| max_stars | ttwj/ReLah | 8231636d4698001dc615848096a97ebd78ae2713 | relah.py | ["WTFPL"] | 3 | 2020-01-31T08:22:49.000Z / 2021-01-10T20:02:37.000Z |
| max_issues | ttwj/ReLah | 8231636d4698001dc615848096a97ebd78ae2713 | relah.py | ["WTFPL"] | null | null / null |
| max_forks | ttwj/ReLah | 8231636d4698001dc615848096a97ebd78ae2713 | relah.py | ["WTFPL"] | null | null / null |

content:
```python
# Python implementation of DBS PayLah!
# By ttwj - 2017

import base64
import random
import string
# remember to install pycryptodome!
import datetime
from Crypto.Cipher import AES, PKCS1_v1_5
from Crypto.PublicKey import RSA
import lxml.etree, json
from lxml import html
from pprint import pprint
from io import StringIO
import requests
import re
import time
import warnings
import contextlib

from api.models import PayLahAPISource

http_proxy = "http://localhost:8888"
https_proxy = "https://localhost:8888"

app_ver = '4.0.0'

proxyDict = {
    "http": http_proxy,
    'https': https_proxy
}

try:
    from functools import partialmethod
except ImportError:
    # Python 2 fallback: https://gist.github.com/carymrobbins/8940382
    from functools import partial

    class partialmethod(partial):
        def __get__(self, instance, owner):
            if instance is None:
                return self
            return partial(self.func, instance, *(self.args or ()), **(self.keywords or {}))


@contextlib.contextmanager
def no_ssl_verification():
    old_request = requests.Session.request
    requests.Session.request = partialmethod(old_request, verify=False)
    warnings.filterwarnings('ignore', 'Unverified HTTPS request')
    yield
    warnings.resetwarnings()
    requests.Session.request = old_request


from Crypto.Cipher import AES
from Crypto import Random


class AESCipher:
    BLOCK_SIZE = 16

    def __init__(self, key):
        """
        Requires hex encoded param as a key
        """
        self.key = key.encode()

    def pkcs5_pad(self, s):
        """
        padding to blocksize according to PKCS #5
        calculates the number of missing chars to BLOCK_SIZE and pads with
        ord(number of missing chars)
        @see: http://www.di-mgt.com.au/cryptopad.html
        @param s: string to pad
        @type s: string
        @rtype: string
        """
        return s + (self.BLOCK_SIZE - len(s) % self.BLOCK_SIZE) * chr(self.BLOCK_SIZE - len(s) % self.BLOCK_SIZE)

    def pkcs5_unpad(self, s):
        # inverse of pkcs5_pad(): the last byte encodes the padding length
        return s[:-s[-1]]

    def encrypt(self, raw):
        """
        Returns the encrypted value (raw AES-CBC bytes, fixed IV)
        """
        raw = self.pkcs5_pad(raw)
        iv = '1234567898765432'.encode()
        cipher = AES.new(self.key, AES.MODE_CBC, iv)
        return cipher.encrypt(raw.encode('utf-8'))

    def decrypt(self, enc):
        """
        Requires hex encoded param to decrypt
        """
        enc = bytes.fromhex(enc)  # was enc.decode("hex"), which is Python 2 only
        iv = enc[:16]
        enc = enc[16:]
        cipher = AES.new(self.key, AES.MODE_CBC, iv)
        return self.pkcs5_unpad(cipher.decrypt(enc))  # unpad was previously undefined


class DBSPayLahTransaction(object):
    rand = ''
    public_key_bin = ''
    cipher = None

    def updatePayLahAPISource(self):
        self.payLahAPISource.api_random = self.rand
        self.payLahAPISource.api_base64_public_key = self.base64_public_key

    def __init__(self, payLahAPISource):
        self.payLahAPISource = payLahAPISource
        """
        api_random = models.CharField(max_length=20)
        api_base64_public_key = models.TextField()
        api_deviceID = models.CharField(max_length=100)
        api_phoneID = models.CharField(max_length=100)
        api_encryptedPasscode = models.TextField()
        api_unencryptedPasscodeLength = models.IntegerField()
        api_cookiesJSON = JSONField()
        """
        self.ipAddress = payLahAPISource.api_ipAddress
        self.rand = payLahAPISource.api_random
        self.base64_public_key = payLahAPISource.api_base64_public_key
        self.deviceID = payLahAPISource.api_deviceID
        self.phoneID = payLahAPISource.api_phoneID
        self.encryptedPasscode = payLahAPISource.api_encryptedPasscode
        self.public_key_bin = base64.b64decode(payLahAPISource.api_base64_public_key.encode('utf-8'))
        self.unencryptedPasscodeLength = str(payLahAPISource.api_unencryptedPasscodeLength)
        self.cipher = AESCipher(self.rand)
        self.r = requests.session()
        self.r.cookies = requests.utils.cookiejar_from_dict(payLahAPISource.api_cookiesJSON)

    # def __init__(self):
    #     self.r = requests.Session()

    @staticmethod
    def ran_generator(size=16, chars=string.ascii_letters + string.digits):
        return ''.join(random.choice(chars) for _ in range(size))

    def get_server(self):
        payload = {
            'appID': 'DBSMobileWallet',
            'appver': app_ver,
            'channel': 'rc',
            'ipAddress': self.ipAddress,
            'platform': 'iPhone',
            'serviceID': 'getServer'
        }
        data = self.requestLah(payload)
        self.public_key_bin = base64.b64decode(data['base64EncodedString'].encode('utf-8'))
        print(data)

    def requestLah(self, payload):
        import requests
        import logging

        # These two lines enable debugging at httplib level (requests->urllib3->http.client)
        # You will see the REQUEST, including HEADERS and DATA, and RESPONSE with HEADERS but without DATA.
        # The only thing missing will be the response.body which is not logged.
        try:
            import http.client as http_client
        except ImportError:
            # Python 2
            import httplib as http_client
        # http_client.HTTPConnection.debuglevel = 1

        # You must initialize logging, otherwise you'll not see debug output.
        # logging.basicConfig()
        # logging.getLogger().setLevel(logging.DEBUG)
        # requests_log = logging.getLogger("requests.packages.urllib3")
        # requests_log.setLevel(logging.DEBUG)
        # requests_log.propagate = True

        with no_ssl_verification():
            r = self.r.post("https://p2pcweb.dbs.com/services/DBSMobileWalletService0/" + payload['serviceID'],
                            data=payload,
                            # proxies=proxyDict,
                            headers={
                                'user-agent': 'PayLah/7 CFNetwork/808.2.16 Darwin/16.3.0',
                            })
            data = json.loads(r.text)
            return data

    def encrypt(self, text):
        return base64.b64encode(self.cipher.encrypt(text))

    def prelogin(self):
        payload = {
            'appID': 'DBSMobileWallet',
            'appver': app_ver,
            'channel': 'rc',
            'ipAddress': self.ipAddress,
            'deviceId': self.encrypt(self.deviceID),
            'loginType': 'wallet',
            'platform': 'iPhone',
            'serviceID': 'prelogin',
        }
        print(payload)
        self.requestLah(payload)

    def generate_paylah_url(self, amount, reservation_id, retry=False):
        payload = {
            'appID': 'DBSMobileWallet',
            'appver': app_ver,
            'channel': 'rc',
            'channelIndicator': 'P2P',
            'count': self.encrypt('20'),
            'ipAddress': self.ipAddress,
            'deviceId': self.encrypt(self.deviceID),
            'isOneTimeOnly': self.encrypt('Y'),
            'payment_name': self.encrypt('BeepPay PayLah ' + reservation_id),
            'periodOfSale': self.encrypt('7'),
            'price': self.encrypt(amount),
            'phoneId': self.encrypt(self.phoneID),
            'phoneModel': 'iPhone 5s',
            'platform': 'iPhone',
            'serviceID': 'generatePaylahURL',
        }
        print(payload)
        data = self.requestLah(payload)
        if data['statusCode'] != '0000':
            if retry is False:
                print("PayLah expired, regenerating")
                # TODO: save this particulars somewhere in the model :-)
                self.retry_paylah_login()
                return self.generate_paylah_url(amount, reservation_id, retry=True)
            else:
                raise Exception('Exceeded login retries')
        print(data)
        return data

    def retry_paylah_login(self):
        '''
        self.rand = payLahAPISource.api_random
        self.base64_public_key = payLahAPISource.api_base64_public_key
        self.deviceID = payLahAPISource.api_deviceID
        self.phoneID = payLahAPISource.api_phoneID
        self.encryptedPasscode = payLahAPISource.api_encryptedPasscode
        self.public_key_bin = base64.b64decode(payLahAPISource.api_base64_public_key.encode('utf-8'))
        self.unencryptedPasscodeLength = str(payLahAPISource.api_unencryptedPasscodeLength)
        self.cipher = AESCipher(self.rand)
        self.r = requests.session()
        self.r.cookies = requests.utils.cookiejar_from_dict(payLahAPISource.api_cookiesJSON)
        '''
        self.get_server()
        # transaction.public_key_bin = base64.b64decode("MIICqDCCAZACCGNAYXyIwSRhMA0GCSqGSIb3DQEBBQUAMBUxEzARBgNVBAMMCkRCUyBQYXlsYWgwHhcNMTcxMDI3MTczMTEyWhcNMTkxMDI4MTczMTEyWjAYMRYwFAYDVQQDDA1EQlMgTWVyY2hhbnRzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAhQ2CljVoM6GrAWrxN0qh9dgVLwpTcFsC2C3uKecRFCDODZY3Qv/DL8ta8+ZN+UWmvHCt/tWjt7FCCIolfn1iXyPuldngsey/JKTSmhPL1imufPUJjbUZaTSwpP1y7DWWJGLqqZMdtyaq0KkpxDM8rBgmXm9eC+YQ+woDux2SQp4PlCpnjxXpYoXG55CWjLsQLx1AaVOFjH38do13OIvEMJWucfmDgY4k6l8TT9gxKoGXTN7p9rHK57dVDOLTScspjuOazU6nLM0U5obsQAvjEzMzKo4wDESremQYWlcaKT4gOliSwbOy4EF6XBrtU+JC7jGPWAOpx/evRUecfKgR9wIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQAyjzuSQB2LMLe24TsNHO7zqpdxl4TQ1fd6B7+E1klKPn/pcWgVQZkgsOyjH+LY7P+yk5LVUyGBukuH8qQUY2LWqo8Si1dJeSJzRSKfMElZj1idMx04X9YiJHvpq4eRaqjXtmsXRgc7bD3TlE6ZZa1GwVWux67IdfhCb/9pnfY37d9G6xM0Tk2UkxTc+WfXLG8k1RX6HhjQ8vTNJhkMTb/TwZfLQ89owPKzSahCpk9qKj9TU4uuDJXmAmiuf6IKCXL+mvGeltc/NDGetvsSwUCkBfkpuRoiS4mHkdGn+4w3izgobByjAgQMNpK4l7qLuonmHLDFkE92tX/yn4bJxqGy".encode('utf-8'))
        self.wallet_launch()
        self.prelogin()
        self.wallet_login_new()
        self.payLahAPISource.api_random = self.rand
        self.payLahAPISource.api_base64_public_key = self.base64_public_key
        self.payLahAPISource.api_deviceID = self.deviceID
        self.payLahAPISource.api_phoneID = self.phoneID
        self.payLahAPISource.api_encryptedPasscode = self.encryptedPasscode
        self.payLahAPISource.api_unencryptedPasscodeLength = self.unencryptedPasscodeLength
        self.payLahAPISource.api_cookiesJSON = requests.utils.dict_from_cookiejar(self.r.cookies)
        self.payLahAPISource.save()

    def get_paymentlink_expired_history(self):
        payload = {
            'appID': 'DBSMobileWallet',
            'appver': app_ver,
            'channel': 'rc',
            'channelIndicator': 'P2P',
            'count': self.encrypt('50'),
            'ipAddress': self.ipAddress,
            'deviceId': self.encrypt(self.deviceID),
            'index': self.encrypt('0'),
            'phoneId': self.encrypt(self.phoneID),
            'phoneModel': 'iPhone 5s',
            'platform': 'iPhone',
            'serviceID': 'getPaymentLinkHistoryExpired',
        }
        return self.requestLah(payload)

    def get_transaction_history(self, retry=False):
        payload = {
            'appID': 'DBSMobileWallet',
            'appver': app_ver,
            'channel': 'rc',
            'channelIndicator': 'P2P',
            'count': self.encrypt('80'),
            'ipAddress': self.ipAddress,
            'deviceId': self.encrypt(self.deviceID),
            'index': self.encrypt('1'),
            'loginType': '02',
            'phoneId': self.encrypt(self.phoneID),
            'phoneModel': 'iPhone 5s',
            'platform': 'iPhone',
            'serviceID': 'getTransactionHistory',
        }
        print(payload)
        data = self.requestLah(payload)
        if data['statusCode'] != '0000':
            if retry is False:
                print("PayLah expired, regenerating")
                # TODO: save this particulars somewhere in the model :-)
                self.retry_paylah_login()
                return self.get_transaction_history(retry=True)
            else:
                raise Exception('Exceeded login retries')
        print(json.dumps(data))
        return data

    def force_paylink_expire(self, transactionRef):
        payload = {
            'appID': 'DBSMobileWallet',
            'appver': app_ver,
            'channel': 'rc',
            'channelIndicator': 'P2P',
            'deviceId': self.encrypt(self.deviceID),
            'expiryDays': self.encrypt('EXPIRY'),
            'ipAddress': self.ipAddress,
            'isOneTime': self.encrypt('Y'),
            'status': self.encrypt('E'),
            'transactionRefNumber': self.encrypt(transactionRef),
            'platform': 'iPhone',
            'serviceID': 'updatePaymentLink',
            'isOnetime': self.encrypt('Y'),
        }
        print(payload)
        return self.requestLah(payload)

    def wallet_login_new(self):
        payload = {
            'appID': 'DBSMobileWallet',
            'appver': app_ver,
            'channel': 'rc',
            'channelIndicator': 'P2P',
            'count': self.encrypt('10'),
            'ipAddress': self.ipAddress,
            'deviceId': self.encrypt(self.deviceID),
            'encryptedPassCode': self.encryptedPasscode,
            'index': self.encrypt('1'),
            'loginType': '02',
            'phoneId': self.encrypt(self.phoneID),
            'phoneModel': 'iPhone 5s',
            'platform': 'iPhone',
            'serviceID': 'walletloginNew',
            'touchIDStatus': 'Active',
            'unencryptedPasscodelength': self.unencryptedPasscodeLength
        }
        print(payload)
        return self.requestLah(payload)

    def wallet_launch(self):
        self.rand = DBSPayLahTransaction.ran_generator()
        # self.rand = "QCos1rgim225kkrE"
        self.cipher = AESCipher(self.rand)
        public_key = RSA.import_key(self.public_key_bin)
        cipher_rsa = PKCS1_v1_5.new(public_key)
        cipher_text = cipher_rsa.encrypt(self.rand.encode())
        print(cipher_text)
        # print("random " + self.rand)
        # print(self.public_key_bin)
        encoded = base64.b64encode(cipher_text)
        # encoded = "RrdSu8k31vXLdCctxUrXK+YNdJVyy/x9fUC3Z322Ku4/2GsGWqJty4H/1Z6XTnkTkKjcuCmRYcBce5NBnroBcyCIrWrlfG3H+xTYU/vuRylQjvFopIHAhvp8KZ1myR2dhghUMCoKmzr2tZyT9Ay4GHEPfLYzIdtivpNnJNjpM8LTe+4n/cMLtBLuLdZiiDH/OLLuenKxieS4pl9YTMeG3pxAuGWZk5D2qccOy8SEH7H2D+JJzu7GX+WM0GPTMDoxvYwOifaLxvcM5qJoZ8AInso54dOdV+jytIDfnO2aHaksTqLMFLOeiYST8puKOAIfWpSuDl+Yr3knMiz5Dq3cXw=="
        print("encoded " + str(encoded))
        timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        payload = {
            'appID': 'DBSMobileWallet',
            'appver': app_ver,
            'channel': 'rc',
            'deviceId': self.encrypt(self.deviceID),
            'encryptedAES128Key': '',
            'encryptedDeviceModel': self.encrypt('iPhone 5s'),
            'encryptedOs': self.encrypt('iPhone'),
            'fromWalletType': self.encrypt('02'),
            'inputParam': encoded,
            'ipAddress': self.ipAddress,
            'phoneId': self.encrypt(self.phoneID),
            'platform': 'iPhone',
            'searchCriteria': 'deviceID',
            'searchParam': self.encrypt(self.deviceID),
            'serviceID': 'walletLaunch',
            'subscriptionId': '',
            'timeStamp': timestamp,
            'toWalletType': self.encrypt('00')
        }
        print(payload)
        self.requestLah(payload)


# paylah_api_source = PayLahAPISource.objects.get(pk=1)
# txn = DBSPayLahTransaction(paylah_api_source)
# txn.get_transaction_history()
```
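A round-trip sketch for `AESCipher` as patched above. Because `encrypt()` uses a fixed IV and returns raw bytes while `decrypt()` expects hex with a leading IV, the wire format below is assembled by hand to match the class's own conventions; it is not the upstream PayLah format.

```python
key = 'QCos1rgim225kkrE'  # a 16-char key, like ran_generator() output
c = AESCipher(key)

ct = c.encrypt('hello paylah')            # AES-CBC bytes, fixed IV
wire = (b'1234567898765432' + ct).hex()   # prepend the IV, hex-encode
print(c.decrypt(wire))                    # b'hello paylah'
```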
avg_line_length: 33.67462 | max_line_length: 987 | alphanum_fraction: 0.626578
Nonzero `_quality_signal` columns (suffix dropped): num_words 1,432, num_chars 15,524, mean_word_length 6.664804, frac_words_unique 0.26257, frac_chars_top_2grams 0.038034, frac_chars_top_3grams 0.020432, frac_chars_top_4grams 0.027661, frac_chars_dupe_5grams 0.365465, dupe_6grams 0.328479, dupe_7grams 0.318944, dupe_8grams 0.318944, dupe_9grams 0.283215, dupe_10grams 0.272318, frac_chars_digital 0.029111, frac_chars_whitespace 0.269776, size_file_byte 15,524, num_lines 460, num_chars_line_max 988, num_chars_line_mean 33.747826, frac_chars_alphabet 0.812809, frac_chars_comments 0.216761, frac_lines_dupe_lines 0.426523, frac_chars_string_length 0.154793, frac_chars_long_word_length 0.006508, frac_lines_prompt_comments 0.006522, codepython_cate_ast 1, codepython_frac_lines_func_ratio 0.071685, codepython_cate_var_zero false, codepython_frac_lines_pass 0.021505, codepython_frac_lines_import 0.100358, codepython_frac_lines_simplefunc 0.007168, codepython_score_lines_no_logic 0.250896, codepython_frac_lines_print 0.050179; all other signals are 0. Unsuffixed qsc_* mirror columns: all 0 (frac_words_unique and frac_lines_string_concat: null). effective: 1 | hits: 0
hexsha: e93fa44d8c8e89fa596f6f1e1b5862803b660a31 | size: 13,090 | ext: py | lang: Python

| | repo_name | head_hexsha | repo_path | licenses | count | event datetimes (min / max) |
|---|---|---|---|---|---|---|
| max_stars | amirtaghavy/TDI-capstone-RedditTalks-vs-MarketAction | 62d6b754348ed7ae5d5ef4bd31eb2553a76c8892 | st_dashboard.py | ["MIT"] | null | null / null |
| max_issues | amirtaghavy/TDI-capstone-RedditTalks-vs-MarketAction | 62d6b754348ed7ae5d5ef4bd31eb2553a76c8892 | st_dashboard.py | ["MIT"] | null | null / null |
| max_forks | amirtaghavy/TDI-capstone-RedditTalks-vs-MarketAction | 62d6b754348ed7ae5d5ef4bd31eb2553a76c8892 | st_dashboard.py | ["MIT"] | null | null / null |

content:
```python
import streamlit as st
import dill
import pandas as pd
import plotly.express as px
from datetime import date
import statsmodels

with open('compiled-sentiment-history.pkd', 'rb') as f:
    df_compiled = dill.load(f)
df_compiled.drop_duplicates(inplace=True)

dates = list({idx[1] for idx in df_compiled.index})
dates = sorted(dates, key=lambda dt: (str(dt).split('-')))
# date_ = '2021-06-01'

st.title('The Data Incubator Capstone Project')
st.subheader('*Title*: **Wallstreetbets Gossip vs. Market Price Action**')
st.subheader('*Created by*: Amir A. Taghavey - Summer, 2021')
st.markdown('*Email*: a [dot] taghavey @ gmail [dot] com')
''' '''
st.markdown(
    'This app was developed as the main deliverable for the capstone project requirement of [**the Data Incubator**](https://www.thedataincubator.com/) fellowship program.')

st.sidebar.title('Options Dashboard:')
page = st.sidebar.selectbox('Select field:',
                            ('Synopsis',
                             'App structure',
                             'VIZ: Reddit hot_10 vs. time',
                             'VIZ: Gossip vs. Action',
                             'ML analysis summary',
                             'Acknowledgments'),
                            0)

if page == 'Synopsis':
    st.markdown(
        '''
**Background**: The short squeeze of GameStop and AMC stocks in early 2021 was driven in great part by the massive-scale coordinated action of the subreddit ***wallstreetbets*** ant army of retail investors.
Many of the early ants realized remarkable gains on their investment, enabling them to pay off their student loans or home mortgages at the demise of a few hedge funds such as the London-based White Square Capital.
These events motivated new swarms of retail investors to join the movement with their hard-earned savings, and for many this game has shown its ugly face!

**Objective**: Motivated by the story above, this project aimed at finding an objective answer to one question: ***Is there safety in being part of the herd when it comes to navigating the US stock market?***

**Methods**: To achieve this, I (i) scanned popular social media platforms to identify and characterize how retail investors perceived the market performance of the most frequently talked-about stocks on the New York Stock Exchange before each trading session, and (ii) compiled the actual market action data at the end of each trading session, daily over the period 6/1/2021-9/1/2021, then performed an extensive amount of analysis to extract possible underlying correlations.

**Summary**: NO correlation (and hence NO basis for meaningful predictions) was found between the market price action and any of the prior (i) PRE-market gossip / sentiment, (ii) stock price action, or (iii) stock options activity from the previous trading session.

**Conclusion**: The moral of the story, objectively and in a nutshell, is that ***no evidence was found to support ANY consistent forward temporal correlation between market gossip and price action!***
'''
    )
elif page == 'App structure':
    st.markdown(
        '''
App structure:

A. from *reddit's* PRE-market chatter (9:00 AM ET), the 20 most talked-about NYSE stocks (hot_20) are identified

B. recent posts from the *stocktwits* and *twitter* APIs for the day's hot_20 list are compiled

C. the vader sentiment intensity analyzer is applied to extract investor sentiment from the compiled text

D. price action data are collected from the *yahoo_fin* API at the close of market (4:00 PM ET)

E. investor sentiment / market performance data are analyzed, modeled, and visualized
''')
    img = 'CodeStructure.png'
    st.image(img, clamp=True,
             caption='Schematic of the logical code structure and inter-connections between modules \
(i) compiling market talk data from social media platforms, \
(ii) performing sentiment intensity analysis, \
(iii) gathering financial data, and \
(iv) conducting data analytics on compiled market gossip - price action data.')
elif page == 'ML analysis summary':
    st.subheader('**Machine Learning Correlation Analysis**')
    st.markdown('''
***Summary:*** An extensive correlation analysis of the compiled data was conducted
with the *objective* of finding underlying forward temporal correlations (if any) between
(a) post-market price action and (b.1) pre-market sentiment analysis data, (b.2) pre-market
stock options activity data (e.g., contract volume, change in open interest, change in percent ITM / OTM, etc.),
and/or (b.3) previous trading session post-market price action data for reddit's hot stock list.

***Approach***: The target (i.e. label) to predict was the change in stock price, $$\Delta$$P.
Price change was defined as the price quote at market close less the price quote at market open, normalized to
the price quote at market open, for a given ticker on the reddit hot list. Two types of approaches were implemented
to model $$\Delta$$P: **A. Regressive Approach**, and **B. Binary Classification Approach**.
In the latter approach, the price action signal was reduced to upward / downward trends.

***Transformations***: All quantitative features were scaled using a standard scaler, and dimensionality
reduction was carried out using the TruncatedSVD method.

***Modeling***: The cross-validation score was used to compare the performance of the tested models.
Model comparisons among regressors and classifiers were done separately using $$r^{2}$$ and accuracy
metrics, respectively.

Models implemented include:

| Model | Regression | Classification |
| :--- | :--------: | :------------: |
| Linear Regression | ✔ | |
| Logistic Regression | | ✔ |
| Ridge with cross-validation | ✔ | ✔ |
| Decision Tree | ✔ | ✔ |
| Random Forest | ✔ | ✔ |
| K-Nearest-Neighbors | ✔ | ✔ |
| Support Vector Machine | ✔ | ✔ |
| Multi-layer Perceptron Network | ✔ | ✔ |

***Results***: All regressors returned an $$r^{2}$$-value equal to zero (0), consistent with no detectable correlation
between any of (i) sentiment, (ii) stock options, or (iii) previous-day stock data and the response
variable (i.e. $$\Delta$$P). This was further corroborated by the only slightly higher than null-model
classification accuracy yielded by the KNN classifier, 0.54 (versus 0.53 classification
accuracy corresponding to the null hypothesis).
The modeling could extract no correlation between the (signal) price action data for the
reddit hotlist and the sentiment extracted from market talk, option activity, or prior
trading-session data.
''')
elif page == 'Acknowledgments':
    st.markdown('''
- Reddit hotlist sentiment intensity analysis in this project was done by adapting the existing
[reddit-sentiment-analysis](https://github.com/asad70/reddit-sentiment-analysis) github repository
developed by [**asad70**](https://github.com/asad70). It was modified to expand the search scope
to additional financial sub-reddits, provide human-guided training to the Vader Sentiment Intensity
Analyzer, and fit the required i/o structure of this project.
- I would like to thank and acknowledge Dr. [Robert Schroll](robert@thedataincubator.com),
my instructor and TDI capstone project advisor, for the instrumental feedback I received from him
during the design, development and execution of this project.
''')
elif page == 'VIZ: Gossip vs. Action':
    trendline_on = st.sidebar.checkbox('add linear trendline:', False)
    date_idx = st.sidebar.slider('Select date index:',
                                 min_value=0,
                                 max_value=len(dates) - 1,
                                 value=0)
    date_ = dates[date_idx]
    df = df_compiled.loc[(slice(None), date_), :]
    df.sort_values('counts', ascending=False, inplace=True)
    df.reset_index(inplace=True)
    # plt = sentiment_visualizer_date(c_df, '2021-06-01')
    plt = px.scatter(df,
                     x='bull_bear_ratio',
                     y='change_sn',
                     color='neutral',
                     size='counts',  # text='ticker',
                     size_max=20,
                     color_continuous_scale=px.colors.sequential.BuPu_r,
                     hover_data=['ticker', 'volume'],
                     labels={'bull_bear_ratio': 'Investor Bullishness [-]',
                             'change_sn': 'Price Change [-]'},
                     trendline='ols' if trendline_on else None,
                     title=f"As of {date.strftime(date_, r'%B, %d %Y')}:")
    plt.update_layout(plot_bgcolor='white',  # #ceced0
                      title_font={'size': 16, 'family': 'Arial Black'},
                      yaxis={'showgrid': False, 'zeroline': False, 'linecolor': 'black',
                             'zerolinecolor': 'grey', 'tickfont': {'size': 12},
                             'titlefont': {'size': 14, 'family': 'Arial Black'},
                             'range': [-0.2, 0.2]},
                      xaxis={'showgrid': False, 'zeroline': False, 'linecolor': 'black',
                             'tickfont': {'size': 12}, 'titlefont': {'size': 14, 'family': 'Arial Black'},
                             'range': [.75, 1.75]},
                      height=600, width=700,  # 'ylorrd'
                      coloraxis_colorbar={'title': "Neutrality",
                                          'tickvals': [0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
                                          'tick0': 0.4,
                                          # 'cmin': 0.5,
                                          # 'cmax': 1.0,
                                          # 'tickvals': [5, 6, 7, 8, 9], 'ticktext': ['0.1M', '1M', '10M', '100M', '1B']
                                          },
                      hovermode="x unified")
    plt.update_traces(textposition='top center',
                      textfont={'size': 10, 'color': 'grey'},
                      marker={'line': {'color': '#ceced0'}},
                      # hovertemplate=None,
                      )
    st.plotly_chart(plt, use_container_width=True)
    st.subheader('Sentiment')
    st.dataframe(df[['ticker', 'bearish', 'bullish',
                     'neutral', 'bull_bear_ratio',
                     'change_sn', 'volume']])
elif page == 'VIZ: Reddit hot_10 vs. time':
    st.subheader('All-time (since the Memorial Day weekend!) HOT-10 stocks on Reddit:')
    hot_10_inds = df_compiled.reset_index().groupby(by='ticker') \
        .count()[['date']].sort_values('date', ascending=False)[:10].index
    df_ = df_compiled.reset_index()
    hot10_counts = df_[df_.ticker.isin(hot_10_inds)] \
        .groupby('ticker') \
        .sum()[['counts']] \
        .reindex(hot_10_inds) \
        .reset_index()
    fig = px.pie(hot10_counts, values='counts', names='ticker', hole=0.3,
                 color_discrete_sequence=px.colors.sequential.RdBu)
    fig.update_traces(textposition='inside', textinfo='percent+label')
    st.plotly_chart(fig)
    hot10 = [f'{i+1}. {ticker}' for i, ticker in enumerate(hot_10_inds)]
    picked_hot = st.sidebar.selectbox('choose ticker to plot:', options=hot10, index=0)
    picked_hot = picked_hot.split(' ')[1]
    st.markdown(f'Bar chart of daily intra-session change in stock price for **${picked_hot}**:')
    df = df_compiled.loc[picked_hot].drop(columns=['counts'])
    plt = px.bar(df, y='change_sn', text='volume', color='bull_bear_ratio',
                 color_continuous_scale=px.colors.sequential.RdBu_r)
    plt.update_traces(texttemplate='%{text:.2s}', textposition='outside')
    plt.update_layout(uniformtext_minsize=8)
    plt.update_layout(xaxis_tickangle=-45,
                      yaxis={'showgrid': False,
                             'title': 'session change [-]',
                             'range': [-0.1, 0.1]},
                      coloraxis_colorbar={'title': "Investor\nBullishness",
                                          'tickmode': 'array',
                                          'tickvals': [0.8, 0.9, 1, 1.1, 1.2],
                                          'tick0': 0.8})
    st.plotly_chart(plt, use_container_width=True)
    st.dataframe(df)
```
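The app is launched with `streamlit run st_dashboard.py`. The hot-10 selection above chains several pandas steps; here is the same chain on an invented toy frame (column names from the code, data made up) so it can be read in isolation:

```python
import pandas as pd

# Toy stand-in for df_compiled: a (ticker, date) MultiIndex plus counts.
df_compiled = pd.DataFrame(
    {"counts": [5, 3, 9, 2, 7, 1]},
    index=pd.MultiIndex.from_product(
        [["GME", "AMC", "TSLA"], ["2021-06-01", "2021-06-02"]],
        names=["ticker", "date"],
    ),
)

# Rank tickers by the number of dates they appear on...
hot_inds = (df_compiled.reset_index().groupby("ticker")
            .count()[["date"]].sort_values("date", ascending=False)[:10].index)

# ...then total their mention counts in that order.
df_ = df_compiled.reset_index()
hot_counts = (df_[df_.ticker.isin(hot_inds)].groupby("ticker")
              .sum(numeric_only=True)[["counts"]]  # numeric_only for modern pandas
              .reindex(hot_inds).reset_index())
print(hot_counts)
```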
avg_line_length: 55.940171 | max_line_length: 504 | alphanum_fraction: 0.58793
Nonzero `_quality_signal` columns (suffix dropped): num_words 1,563, num_chars 13,090, mean_word_length 4.87396, frac_words_unique 0.382598, frac_chars_top_2grams 0.014439, frac_chars_top_3grams 0.008926, frac_chars_top_4grams 0.007088, frac_chars_dupe_5grams 0.056183, dupe_6grams 0.049094, dupe_7grams 0.023366, dupe_8grams 0.023366, dupe_9grams 0.023366, dupe_10grams 0.013127, frac_chars_digital 0.02046, frac_chars_whitespace 0.3055, size_file_byte 13,090, num_lines 233, num_chars_line_max 505, num_chars_line_mean 56.180258, frac_chars_alphabet 0.815972, frac_chars_comments 0.016501, frac_lines_dupe_lines 0.094972, frac_lines_long_string 0.055866, frac_chars_string_length 0.505638, frac_chars_long_word_length 0.012942, codepython_cate_ast 1, codepython_cate_var_zero false, codepython_frac_lines_import 0.03352, codepython_score_lines_no_logic 0.03352; all other signals are 0. Unsuffixed qsc_* mirror columns: all 0 (frac_words_unique and frac_lines_string_concat: null). effective: 1 | hits: 0
hexsha: 3a61c5a4f7f2b0b08f169681bdd4f9538e9142c6 | size: 13,902 | ext: py | lang: Python

| | repo_name | head_hexsha | repo_path | licenses | count | event datetimes (min / max) |
|---|---|---|---|---|---|---|
| max_stars | meiyuanqing/MetaThreshold | fbccc7e5356606b929211eedaf5371506232c1b5 | RocMethod.py | ["MIT"] | null | null / null |
| max_issues | meiyuanqing/MetaThreshold | fbccc7e5356606b929211eedaf5371506232c1b5 | RocMethod.py | ["MIT"] | null | null / null |
| max_forks | meiyuanqing/MetaThreshold | fbccc7e5356606b929211eedaf5371506232c1b5 | RocMethod.py | ["MIT"] | null | null / null |

content:
```python
#!/usr/bin/env python
# encoding:utf-8
"""
Author : Yuanqing Mei
Date : 2021/4/8
Time: 15:42
File: RocMethod.py
HomePage : http://github.com/yuanqingmei
Email : dg1533019@smail.nju.edu.cn

This script finds the cutoff of a metric value by maximizing the AUC value, using the ROC, BPP, MFM, and GM methods.

References:
[1] Bender, R. Quantitative risk assessment in epidemiological studies investigating threshold effects.
    Biometrical Journal, 41 (1999), 305-319. (Reference for computing the SE (standard error) of VARL, p. 310.)
[2] Zhou, Y., et al. "An in-depth study of the potentially confounding effect of class size in fault prediction."
    ACM Trans. Softw. Eng. Methodol. (2014) 23(1): 1-51. (Computes the BPP and MFM (F1) values as thresholds.)
[3] Shatnawi, R. (2018). Identifying Threshold Values of Change-Prone Modules.
    (Computes sum(Sensitivity+Specificity) = sum(TPR+TNR) as the threshold.)
"""
import time


def roc_threshold(working_dir="F:\\NJU\\MTmeta\\experiments\\supervised\\trainingData\\",
                  result_dir="F:\\NJU\\MTmeta\\experiments\\supervised\\",
                  training_list="List.txt"):
    import os
    import csv
    import numpy as np
    import pandas as pd
    import statsmodels.api as sm
    # from sklearn import metrics
    from sklearn.metrics import recall_score, precision_score, f1_score, roc_curve, auc, roc_auc_score, \
        confusion_matrix

    pd.set_option('display.max_columns', None)  # show all columns
    pd.set_option('display.max_rows', None)     # show all rows
    # the width of a dataframe row
    pd.set_option('display.width', 5000)

    working_directory = working_dir
    result_directory = result_dir
    os.chdir(working_directory)

    with open(working_dir + training_list) as l:
        lines = l.readlines()

    for line in lines:
        file = line.replace("\n", "")
        print('the file is ', file)

        # Process each project separately: f1 reads the project to be processed;
        # f2 stores the threshold information of each project (csv.writer needs
        # newline='', otherwise it inserts an extra blank line);
        # deletedList stores, per project, the metrics whose logistic-regression
        # coefficient is insignificant or zero.
        with open(working_directory + file, 'r', encoding="ISO-8859-1") as f1, \
                open(result_directory + "RocThreshold\\ROC_Thresholds.csv", 'a+', encoding="utf-8", newline='') as f2, \
                open(result_directory + "RocThreshold\\deletedList.csv", 'a+', encoding="utf-8") as deletedList:
            reader = csv.reader(f1)
            writer = csv.writer(f2)
            writer_deletedList = csv.writer(deletedList)

            # receives the first line of a file and convert to dict generator
            fieldnames = next(reader)

            # exclude the non metric fields (12 items) and metric values including undef and undefined (17 items)
            non_metric = ["relName", "className", "bug"]

            # metric_data stores the metric fields (102 items)
            def fun_1(m):
                return m if m not in non_metric else None

            metric_data = filter(fun_1, fieldnames)

            df = pd.read_csv(file)
            # drop all rows that have any NaN values and reset the index
            df = df.dropna(axis=0, how='any', inplace=False).reset_index(drop=True)

            if os.path.getsize(result_directory + "RocThreshold\\ROC_Thresholds.csv") == 0:
                writer.writerow(["fileName", "metric", "Corr_metric_bug", "B_0", "B_0_pValue", "B_1", "B_1_pValue",
                                 "cov11", "cov12", "cov22", "BaseProbability_1",
                                 "auc_threshold", "auc_threshold_variance", "auc_max_value", "i_auc_max",
                                 "gm_threshold", "gm_threshold_variance", "gm_max_value", "i_gm_max",
                                 "bpp_threshold", "bpp_threshold_variance", "bpp_max_value", "i_bpp_max",
                                 "mfm_threshold", "mfm_threshold_variance", "f1_max_value", "i_f1_max",
                                 "roc_threshold", "roc_threshold_variance", "roc_max_value", "i_roc_max",
                                 "varl_threshold", "varl_threshold_variance"])

            if os.path.getsize(result_directory + "RocThreshold\\deletedList.csv") == 0:
                writer_deletedList.writerow(["fileName", "metric", "B_0_pValue", "B_0",
                                             "auc_max_value", "i_auc_max", "gm_max_value", "i_gm_max", "bpp_max_value",
                                             "i_bpp_max", "f1_max_value", "i_f1_max", "roc_max_value", "i_roc_max"])

            for metric in metric_data:
                print("the current file is ", file)
                print("the current metric is ", metric)

                # 'bug' stores defect counts; binarize it (with x > 2 one could instead
                # derive a threshold for predicting 3+ bugs, and so on)
                df['bugBinary'] = df.bug.apply(lambda x: 1 if x > 0 else 0)

                # Use each value of the metric in turn as a candidate threshold, compute AUC
                # and GM, and take the value maximizing AUC as the threshold, i.e. the
                # regression-discontinuity cutoff. BPP (Balanced-pf-pd), MFM (F1) and
                # ROC (Sensitivity+Specificity) = (TPR+TNR) are computed as well;
                # five value lists are kept, along with each maximum and its index.
                AUCs = []
                GMs = []
                BPPs = []
                MFMs = []
                ROCs = []
                auc_max_value = 0
                gm_max_value = 0
                bpp_max_value = 0
                f1_max_value = 0
                roc_max_value = 0
                i_auc_max = 0
                i_gm_max = 0
                i_bpp_max = 0
                i_f1_max = 0
                i_roc_max = 0

                # Determine the direction of the relationship between each metric and bug:
                # in the later regression discontinuity, a positive correlation implies a
                # positive LATE estimate, and a negative one a negative estimate.
                Corr_metric_bug = df.loc[:, [metric, 'bug']].corr('spearman')

                # the i value in this loop is the subscript value in the lists of AUCs, GMs etc.
                for i in range(len(df)):
                    t = df.loc[i, metric]
                    if Corr_metric_bug[metric][1] < 0:
                        df['predictBinary'] = df[metric].apply(lambda x: 1 if x <= t else 0)
                    else:
                        df['predictBinary'] = df[metric].apply(lambda x: 1 if x >= t else 0)

                    # confusion_matrix() needs explicit labels 0 and 1; otherwise it cannot
                    # compute TP, because it does not know which label is positive.
                    c_matrix = confusion_matrix(df["bugBinary"], df['predictBinary'], labels=[0, 1])
                    tn, fp, fn, tp = c_matrix.ravel()

                    if (tn + fp) == 0:
                        tnr_value = 0
                    else:
                        tnr_value = tn / (tn + fp)

                    if (fp + tn) == 0:
                        fpr = 0
                    else:
                        fpr = fp / (fp + tn)

                    # fpr, tpr, thresholds = roc_curve(df['bugBinary'], df['predictBinary'])
                    # AUC = auc(fpr, tpr)
                    auc_value = roc_auc_score(df['bugBinary'], df['predictBinary'])
                    recall_value = recall_score(df['bugBinary'], df['predictBinary'], labels=[0, 1])
                    precision_value = precision_score(df['bugBinary'], df['predictBinary'], labels=[0, 1])
                    f1_value = f1_score(df['bugBinary'], df['predictBinary'], labels=[0, 1])
                    gm_value = (recall_value * tnr_value) ** 0.5
                    pfr = recall_value
                    pdr = fpr  # fp / (fp + tn)
                    bpp_value = 1 - (((0 - pfr) ** 2 + (1 - pdr) ** 2) * 0.5) ** 0.5
                    roc_value = recall_value + tnr_value

                    AUCs.append(auc_value)
                    GMs.append(gm_value)
                    BPPs.append(bpp_value)
                    MFMs.append(f1_value)
                    ROCs.append(roc_value)

                    # Track the maximum of each of the five lists and the index i where it
                    # occurs; on ties, the first occurrence is kept as the threshold.
                    if auc_value > auc_max_value:
                        auc_max_value = auc_value
                        i_auc_max = i
                    if gm_value > gm_max_value:
                        gm_max_value = gm_value
                        i_gm_max = i
                    if bpp_value > bpp_max_value:
                        bpp_max_value = bpp_value
                        i_bpp_max = i
                    if f1_value > f1_max_value:
                        f1_max_value = f1_value
                        i_f1_max = i
                    if roc_value > roc_max_value:
                        roc_max_value = roc_value
                        i_roc_max = i

                print("auc_max_value is ", auc_max_value)
                print("gm_max_value is ", gm_max_value)
                print("bpp_max_value is ", bpp_max_value)
                print("f1_max_value is ", f1_max_value)
                print("roc_max_value is ", roc_max_value)
                print("i_auc_max is ", i_auc_max)
                print("i_gm_max is ", i_gm_max)
                print("i_bpp_max is ", i_bpp_max)
                print("i_f1_max is ", i_f1_max)
                print("i_roc_max is ", i_roc_max)

                df['intercept'] = 1.0
                # logistic regression via statsmodels.api; the training columns exclude the target column `bug`
                logit = sm.Logit(df['bugBinary'], df.loc[:, [metric, 'intercept']])
                # fit the model (disp=1 would print the fitting results)
                result = logit.fit(method='bfgs', disp=0)
                print(result.summary())

                pValueLogit = result.pvalues
                B = result.params  # logit regression coefficients; needed by both checks below

                if pValueLogit[0] > 0.05:  # p-value of the coefficient of the independent variable
                    writer_deletedList.writerow(
                        [file, metric, pValueLogit[0], B[0], auc_max_value, i_auc_max, gm_max_value,
                         i_gm_max, bpp_max_value, i_bpp_max, f1_max_value, i_f1_max, roc_max_value,
                         i_roc_max])
                    continue  # discard the metric if the logit coefficient's p-value exceeds 0.05

                if B[0] == 0:  # coefficient of the independent variable
                    writer_deletedList.writerow(
                        [file, metric, pValueLogit[0], B[0], auc_max_value, i_auc_max, gm_max_value,
                         i_gm_max, bpp_max_value, i_bpp_max, f1_max_value, i_f1_max, roc_max_value,
                         i_roc_max])
                    continue  # discard the metric if the logit coefficient is zero

                # compute the AUC threshold and its standard error, along with the
                # other four threshold types
                auc_threshold = df.loc[i_auc_max, metric]
                gm_threshold = df.loc[i_gm_max, metric]
                bpp_threshold = df.loc[i_bpp_max, metric]
                mfm_threshold = df.loc[i_f1_max, metric]
                roc_threshold = df.loc[i_roc_max, metric]

                # Covariance matrix of the logit coefficients, needed for the standard
                # errors of the thresholds; see reference [1]. This borrows the VARL
                # approach: VARL is essentially just another metric value.
                cov = result.cov_params()
                cov11 = cov.iloc[0, 0]
                cov12 = cov.iloc[0, 1]
                cov22 = cov.iloc[1, 1]

                auc_threshold_se = ((cov.iloc[0, 0] + 2 * auc_threshold * cov.iloc[0, 1]
                                     + auc_threshold * auc_threshold * cov.iloc[1, 1]) ** 0.5) / B[0]
                auc_threshold_variance = auc_threshold_se ** 2
                gm_threshold_se = ((cov.iloc[0, 0] + 2 * gm_threshold * cov.iloc[0, 1]
                                    + gm_threshold * gm_threshold * cov.iloc[1, 1]) ** 0.5) / B[0]
                gm_threshold_variance = gm_threshold_se ** 2
                bpp_threshold_se = ((cov.iloc[0, 0] + 2 * bpp_threshold * cov.iloc[0, 1]
                                     + bpp_threshold * bpp_threshold * cov.iloc[1, 1]) ** 0.5) / B[0]
                bpp_threshold_variance = bpp_threshold_se ** 2
                mfm_threshold_se = ((cov.iloc[0, 0] + 2 * mfm_threshold * cov.iloc[0, 1]
                                     + mfm_threshold * mfm_threshold * cov.iloc[1, 1]) ** 0.5) / B[0]
                mfm_threshold_variance = mfm_threshold_se ** 2
                roc_threshold_se = ((cov.iloc[0, 0] + 2 * roc_threshold * cov.iloc[0, 1]
                                     + roc_threshold * roc_threshold * cov.iloc[1, 1]) ** 0.5) / B[0]
                roc_threshold_variance = roc_threshold_se ** 2

                # Compute VARL as a threshold (no 10-fold cross-validation here):
                # VARL.threshold = (log(Probability[1]/Probability[2]) - B[1]) / B[2]
                valueOfbugBinary = df["bugBinary"].value_counts()  # counts of 0s and 1s
                print("the value of valueOfbugBinary[0] is ", valueOfbugBinary[0])
                print("the value of valueOfbugBinary[1] is ", valueOfbugBinary[1])

                # proportion of modules with more than zero defects among all modules
                BaseProbability_1 = valueOfbugBinary[1] / (valueOfbugBinary[0] + valueOfbugBinary[1])

                # compute the VARL threshold and its standard error
                varl_threshold = (np.log(BaseProbability_1 / (1 - BaseProbability_1)) - B[1]) / B[0]
                varl_threshold_se = ((cov.iloc[0, 0] + 2 * varl_threshold * cov.iloc[0, 1]
                                      + varl_threshold * varl_threshold * cov.iloc[1, 1]) ** 0.5) / B[0]
                varl_threshold_variance = varl_threshold_se ** 2

                # write out the results for this metric
                writer.writerow([file, metric, Corr_metric_bug[metric][1], B[0], pValueLogit[0], B[1], pValueLogit[1],
                                 cov11, cov12, cov22, BaseProbability_1,
                                 auc_threshold, auc_threshold_variance, auc_max_value, i_auc_max,
                                 gm_threshold, gm_threshold_variance, gm_max_value, i_gm_max,
                                 bpp_threshold, bpp_threshold_variance, bpp_max_value, i_bpp_max,
                                 mfm_threshold, mfm_threshold_variance, f1_max_value, i_f1_max,
                                 roc_threshold, roc_threshold_variance, roc_max_value, i_roc_max,
                                 varl_threshold, varl_threshold_variance])
                # break


if __name__ == '__main__':
    s_time = time.time()
    roc_threshold()
    e_time = time.time()
    execution_time = e_time - s_time
    print("The __name__ is ", __name__, ". This is the end of RocMethod.py!\n",
          "The execution time of the RocMethod.py script is ", execution_time)
```
avg_line_length: 47.447099 | max_line_length: 120 | alphanum_fraction: 0.535319
Nonzero `_quality_signal` columns (suffix dropped): num_words 1,634, num_chars 13,902, mean_word_length 4.29437, frac_words_unique 0.222154, frac_chars_top_2grams 0.057004, frac_chars_top_3grams 0.032065, frac_chars_top_4grams 0.008978, frac_chars_dupe_5grams 0.328203, dupe_6grams 0.246829, dupe_7grams 0.22645, dupe_8grams 0.191677, dupe_9grams 0.175004, dupe_10grams 0.156192, frac_chars_digital 0.033555, frac_chars_whitespace 0.361171, size_file_byte 13,902, num_lines 292, num_chars_line_max 121, num_chars_line_mean 47.609589, frac_chars_alphabet 0.756559, frac_chars_comments 0.165588, frac_lines_dupe_lines 0.069519, frac_chars_string_length 0.123518, frac_chars_long_word_length 0.030468, codepython_cate_ast 1, codepython_frac_lines_func_ratio 0.010695, codepython_cate_var_zero false, codepython_frac_lines_import 0.037433, codepython_frac_lines_simplefunc 0.005348, codepython_score_lines_no_logic 0.053476, codepython_frac_lines_print 0.090909; all other signals are 0. Unsuffixed qsc_* mirror columns: all 0 (frac_words_unique and frac_lines_string_concat: null). effective: 1 | hits: 0
hexsha: 3a63a86305fa3e3ced908249d69f673dd8d16d58 | size: 717 | ext: py | lang: Python

| | repo_name | head_hexsha | repo_path | licenses | count | event datetimes (min / max) |
|---|---|---|---|---|---|---|
| max_stars | gems-uff/sms | 01cfa84bd467617c58f58da04711c5097dd93fe6 | migrations/versions/2018-09-27_12:25:31__3cbc86a0a9d7.py | ["MIT"] | null | null / null |
| max_issues | gems-uff/sms | 01cfa84bd467617c58f58da04711c5097dd93fe6 | migrations/versions/2018-09-27_12:25:31__3cbc86a0a9d7.py | ["MIT"] | null | null / null |
| max_forks | gems-uff/sms | 01cfa84bd467617c58f58da04711c5097dd93fe6 | migrations/versions/2018-09-27_12:25:31__3cbc86a0a9d7.py | ["MIT"] | null | null / null |

content:
"""empty message
Revision ID: 3cbc86a0a9d7
Revises: 77894fcde804
Create Date: 2018-09-27 12:25:31.893545
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3cbc86a0a9d7'
down_revision = '77894fcde804'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('unique_order_item', 'order_items', type_='unique')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_unique_constraint('unique_order_item', 'order_items', ['item_id', 'order_id', 'lot_number'])
# ### end Alembic commands ###
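To exercise this revision outside the `alembic` CLI, the standard programmatic entry points can be used; the `alembic.ini` location is project-specific and assumed here:

```python
from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")             # assumed project config path
command.upgrade(cfg, "3cbc86a0a9d7")    # runs upgrade(): drops unique_order_item
command.downgrade(cfg, "77894fcde804")  # runs downgrade(): recreates it
```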
avg_line_length: 24.724138 | max_line_length: 106 | alphanum_fraction: 0.707113
Nonzero `_quality_signal` columns (suffix dropped): num_words 89, num_chars 717, mean_word_length 5.516854, frac_words_unique 0.561798, frac_chars_top_2grams 0.05499, frac_chars_top_3grams 0.08554, frac_chars_top_4grams 0.093686, frac_chars_dupe_5grams 0.321792, dupe_6grams 0.321792, dupe_7grams 0.179226, dupe_8grams 0.179226, frac_chars_digital 0.079867, frac_chars_whitespace 0.161785, size_file_byte 717, num_lines 28, num_chars_line_max 107, num_chars_line_mean 25.607143, frac_chars_alphabet 0.737105, frac_chars_comments 0.411437, frac_chars_string_length 0.288312, codepython_cate_ast 1, codepython_frac_lines_func_ratio 0.2, codepython_cate_var_zero false, codepython_frac_lines_import 0.2, codepython_score_lines_no_logic 0.4; all other signals are 0. Unsuffixed qsc_* mirror columns: all 0 (frac_words_unique and frac_lines_string_concat: null). effective: 1 | hits: 0
hexsha: 3a640b59523119016904d7053ed1bc557df19331 | size: 2,685 | ext: py | lang: Python

| | repo_name | head_hexsha | repo_path | licenses | count | event datetimes (min / max) |
|---|---|---|---|---|---|---|
| max_stars | nonk123/mp_roguelike | 48785b44dd3f2518a5a639a6609670408e7ea1f5 | mp_roguelike/ai.py | ["MIT"] | null | null / null |
| max_issues | nonk123/mp_roguelike | 48785b44dd3f2518a5a639a6609670408e7ea1f5 | mp_roguelike/ai.py | ["MIT"] | null | null / null |
| max_forks | nonk123/mp_roguelike | 48785b44dd3f2518a5a639a6609670408e7ea1f5 | mp_roguelike/ai.py | ["MIT"] | null | null / null |

content:
```python
import random

from .util import sign


class AI:
    def __init__(self, entity):
        self.entity = entity
        self.queued_path = []

    def think(self):
        if self.queued_path:
            x, y = self.queued_path.pop(0)
            self.move(x - self.entity.x, y - self.entity.y)

    def move(self, dx, dy):
        self.entity.queue_move(dx, dy)

    def move_to(self, x, y):
        self.queued_path = []
        at = [self.entity.x, self.entity.y]
        while at != [x, y]:
            at[0] += sign(x - at[0])
            at[1] += sign(y - at[1])
            self.queued_path.append((*at,))

    def is_enemy(self, entity):
        return isinstance(entity.ai, ControlledAI)

    def attack(self, entity):
        if entity is not None:
            self.move_to(entity.x, entity.y)


class ControlledAI(AI):
    def think(self):
        pass

    def is_enemy(self, entity):
        return not super().is_enemy(entity)


class AggressiveAI(AI):
    def think(self):
        self.attack(self.find_closest_enemy())
        super().think()
        self.entity.turn_done = True

    def is_enemy(self, entity):
        return super().is_enemy(entity) and isinstance(entity.ai, ControlledAI)

    def find_closest_enemy(self):
        closest = None
        for entity in self.entity.get_visible_entities():
            if closest is None:
                ddist = -10
            else:
                ddist = entity.dist(self.entity) - closest.dist(self.entity)
            if self.is_enemy(entity) and ddist < 0:
                closest = entity
        return closest


class SpawnerAI(AI):
    def __init__(self, entity, spawn_fun, max_spawn=5, spawn_cooldown=15):
        super().__init__(entity)
        self.spawn_fun = spawn_fun
        self.max_spawn = max_spawn
        self.spawn_cooldown = spawn_cooldown
        self.spawned = []
        self.turns_since_last_spawn = 0

    def position(self, entity):
        while True:
            entity.x = self.entity.x + random.randint(-1, 1)
            entity.y = self.entity.y + random.randint(-1, 1)
            if not self.entity.world.is_occupied(entity.x, entity.y):
                return

    def think(self):
        if len(self.spawned) < self.max_spawn \
                and self.turns_since_last_spawn >= self.spawn_cooldown:
            entity = self.spawn_fun()
            self.spawned.append(entity)
            entity.added += lambda: self.position(entity)
            entity.dead += lambda: self.spawned.remove(entity)
            self.entity.world.add_entity(entity)
            self.turns_since_last_spawn = 0
        self.turns_since_last_spawn += 1
        self.entity.turn_done = True
```
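To see what `AI.move_to()` plans, a throwaway entity stub (not part of the module; invented for this sketch) is enough, since the base class only touches `x`, `y`, and `queue_move()`:

```python
from mp_roguelike.ai import AI  # the module above

class StubEntity:
    """Hypothetical minimal entity: applies queued moves immediately."""
    def __init__(self, x, y):
        self.x, self.y = x, y
        self.ai = None

    def queue_move(self, dx, dy):
        self.x += dx
        self.y += dy

e = StubEntity(0, 0)
ai = AI(e)
ai.move_to(3, 1)
print(ai.queued_path)   # [(1, 1), (2, 1), (3, 1)]
while ai.queued_path:
    ai.think()          # pop one step per turn and queue the move
print((e.x, e.y))       # (3, 1)
```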
avg_line_length: 26.584158 | max_line_length: 79 | alphanum_fraction: 0.582495
Nonzero `_quality_signal` columns (suffix dropped): num_words 351, num_chars 2,685, mean_word_length 4.2849, frac_words_unique 0.213675, frac_chars_top_2grams 0.146277, frac_chars_top_3grams 0.046543, frac_chars_top_4grams 0.047872, frac_chars_dupe_5grams 0.234043, dupe_6grams 0.083777, frac_chars_digital 0.009651, frac_chars_whitespace 0.3054, size_file_byte 2,685, num_lines 100, num_chars_line_max 80, num_chars_line_mean 26.85, frac_chars_alphabet 0.796783, frac_lines_dupe_lines 0.183099, codepython_cate_ast 1, codepython_frac_lines_func_ratio 0.197183, codepython_cate_var_zero false, codepython_frac_lines_pass 0.014085, codepython_frac_lines_import 0.028169, codepython_frac_lines_simplefunc 0.042254, codepython_score_lines_no_logic 0.352113; all other signals are 0. Unsuffixed qsc_* mirror columns: all 0 (frac_words_unique and frac_lines_string_concat: null). effective: 1 | hits: 0
hexsha: 3a66f861ec173370f50a0b31924da0bccb5e1872 | size: 2,661 | ext: py | lang: Python

| | repo_name | head_hexsha | repo_path | licenses | count | event datetimes (min / max) |
|---|---|---|---|---|---|---|
| max_stars | napulen/romanyh | 34bc75d40bf532eb20607db763fcbc2693cac35f | romanyh/transposition.py | ["BSD-3-Clause"] | null | null / null |
| max_issues | napulen/romanyh | 34bc75d40bf532eb20607db763fcbc2693cac35f | romanyh/transposition.py | ["BSD-3-Clause"] | 5 | 2020-12-08T04:37:21.000Z / 2021-01-06T03:36:30.000Z |
| max_forks | napulen/romanyh | 34bc75d40bf532eb20607db763fcbc2693cac35f | romanyh/transposition.py | ["BSD-3-Clause"] | null | null / null |

content:
```python
import re
import sys

from music21.interval import Interval
from music21.key import Key


def findKeysInRomanTextString(rntxt):
    """Get all the keys in a RomanText string.

    Receive a string with valid RomanText content.
    Output a list of all the key changes that happen
    throughout the content.
    """
    return re.findall(r" ([a-gA-G][#b]?): ", rntxt)


def transposeKeys(keys, newTonic):
    """Transpose a list of keys relative to a new tonic."""
    referenceKey = Key(keys[0])
    newTonicKey = Key(newTonic, mode=referenceKey.mode)
    intervalDiff = Interval(referenceKey.tonic, newTonicKey.tonic)
    transposedKeys = [newTonicKey.tonicPitchNameWithCase]
    for k in keys[1:]:
        localKey = Key(k)
        newLocalTonic = localKey.tonic.transpose(intervalDiff)
        newLocalKey = Key(newLocalTonic, mode=localKey.mode)
        if abs(newLocalKey.sharps) >= 7:
            newLocalKey = Key(
                newLocalTonic.getEnharmonic(), mode=localKey.mode
            )
        transposedKeys.append(newLocalKey.tonicPitchNameWithCase)
    transposedKeys = [k.replace("-", "b") for k in transposedKeys]
    return transposedKeys


def transposeRomanText(f, newTonic="C"):
    """Transposes a RomanText file into a different key.

    The transposition is performed in the following way:
    - The first key in the file is taken as the reference key
    - An interval between the reference key and new tonic is computed
    - Every transposed key respects that interval, unless it reaches
      or exceeds a key signature with 7 sharps or 7 flats
    - In that case, the enharmonic spelling is preferred

    The mode of the original key is always respected. That is,
    attempting to transpose an annotation in the key of C Major
    with a newTonic of `a` will result in a transposition to
    A Major. Change of mode is not trivial and it is not addressed
    in this code.
    """
    with open(f) as fd:
        rntxt = fd.read()
    keys = findKeysInRomanTextString(rntxt)
    transposedKeys = transposeKeys(keys, newTonic)
    keysString = [f" {k}: " for k in keys]
    transposedKeysString = [f" {k}: " for k in transposedKeys]
    transposedRntxt = ""
    for original, transposed in zip(keysString, transposedKeysString):
        solved, replace, remainder = rntxt.partition(original)
        transposedRntxt += solved + transposed
        rntxt = remainder
    transposedRntxt += rntxt
    return transposedRntxt


if __name__ == "__main__":
    inputFile = sys.argv[1]
    newTonic = sys.argv[2] if len(sys.argv) == 3 else "C"
    transposedRntxt = transposeRomanText(inputFile, newTonic)
    print(transposedRntxt)
```
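The two pure helpers can be exercised without a file; the expected outputs follow from the regex and from music21's key handling (the module path is assumed from the repo layout):

```python
from romanyh.transposition import findKeysInRomanTextString, transposeKeys

rntxt = "m1 C: I | m2 V | m3 a: i | m4 G: I"
keys = findKeysInRomanTextString(rntxt)
print(keys)                      # ['C', 'a', 'G']
print(transposeKeys(keys, "D"))  # ['D', 'b', 'A']  (case encodes mode)
```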
avg_line_length: 36.452055 | max_line_length: 70 | alphanum_fraction: 0.693724
Nonzero `_quality_signal` columns (suffix dropped): num_words 329, num_chars 2,661, mean_word_length 5.586626, frac_words_unique 0.392097, frac_chars_top_2grams 0.008705, frac_chars_top_3grams 0.013058, frac_chars_top_4grams 0.010881, frac_chars_dupe_5grams 0.008705, frac_chars_digital 0.005822, frac_chars_whitespace 0.225479, size_file_byte 2,661, num_lines 72, num_chars_line_max 71, num_chars_line_mean 36.958333, frac_chars_alphabet 0.885978, frac_chars_comments 0.32469, frac_chars_string_length 0.02449, codepython_cate_ast 1, codepython_frac_lines_func_ratio 0.073171, codepython_cate_var_zero false, codepython_frac_lines_import 0.097561, codepython_score_lines_no_logic 0.243902, codepython_frac_lines_print 0.02439; all other signals are 0. Unsuffixed qsc_* mirror columns: all 0 (frac_words_unique and frac_lines_string_concat: null). effective: 1 | hits: 0
hexsha: 3a67795832eb29853a6ccb60a0d65c013b0a8f82 | size: 4,847 | ext: py | lang: Python

| | repo_name | head_hexsha | repo_path | licenses | count | event datetimes (min / max) |
|---|---|---|---|---|---|---|
| max_stars | LizaShak/AzureTRE | b845eb4b73439ef7819565aaadb36f43b6484ad9 | management_api_app/tests_ma/test_service_bus/test_deployment_status_update.py | ["MIT"] | 2 | 2021-11-14T16:57:16.000Z / 2022-03-13T15:14:26.000Z |
| max_issues | anatbal/AzureTRE | d1d4891657c737092e761c4aaf80b04ff0f03fc7 | management_api_app/tests_ma/test_service_bus/test_deployment_status_update.py | ["MIT"] | null | null / null |
| max_forks | anatbal/AzureTRE | d1d4891657c737092e761c4aaf80b04ff0f03fc7 | management_api_app/tests_ma/test_service_bus/test_deployment_status_update.py | ["MIT"] | null | null / null |

content:
```python
import json
import pytest
import uuid

from mock import AsyncMock, patch

from db.errors import EntityDoesNotExist
from models.domain.resource import Status
from models.domain.workspace import Workspace
from models.domain.resource import Deployment
from resources import strings
from service_bus.deployment_status_update import receive_message_and_update_deployment

pytestmark = pytest.mark.asyncio

test_data = [
    'bad',
    '{"good": "json", "bad": "message"}'
]

test_sb_message = {
    "id": "59b5c8e7-5c42-4fcb-a7fd-294cfc27aa76",
    "status": Status.Deployed,
    "message": "test message"
}


class ServiceBusReceivedMessageMock:
    def __init__(self, message: dict):
        self.message = json.dumps(message)
        self.correlation_id = "test_correlation_id"

    def __str__(self):
        return self.message


def create_sample_workspace_object(workspace_id):
    return Workspace(
        id=workspace_id,
        description="My workspace",
        resourceTemplateName="tre-workspace-vanilla",
        resourceTemplateVersion="0.1.0",
        resourceTemplateParameters={},
        deployment=Deployment(status=Status.NotDeployed, message="")
    )


@pytest.mark.parametrize("payload", test_data)
@patch('logging.error')
@patch('service_bus.deployment_status_update.ServiceBusClient')
@patch('fastapi.FastAPI')
async def test_receiving_bad_json_logs_error(app, sb_client, logging_mock, payload):
    service_bus_received_message_mock = ServiceBusReceivedMessageMock(payload)
    sb_client().get_queue_receiver().receive_messages = AsyncMock(return_value=[service_bus_received_message_mock])
    sb_client().get_queue_receiver().complete_message = AsyncMock()

    await receive_message_and_update_deployment(app)

    error_message = logging_mock.call_args.args[0]
    assert error_message.startswith(strings.DEPLOYMENT_STATUS_MESSAGE_FORMAT_INCORRECT)
    sb_client().get_queue_receiver().complete_message.assert_called_once_with(service_bus_received_message_mock)


@patch('service_bus.deployment_status_update.WorkspaceRepository')
@patch('logging.error')
@patch('service_bus.deployment_status_update.ServiceBusClient')
@patch('fastapi.FastAPI')
async def test_receiving_good_message(app, sb_client, logging_mock, repo):
    service_bus_received_message_mock = ServiceBusReceivedMessageMock(test_sb_message)
    sb_client().get_queue_receiver().receive_messages = AsyncMock(return_value=[service_bus_received_message_mock])
    sb_client().get_queue_receiver().complete_message = AsyncMock()
    expected_workspace = create_sample_workspace_object(test_sb_message["id"])
    repo().get_workspace_by_workspace_id.return_value = expected_workspace

    await receive_message_and_update_deployment(app)

    repo().get_workspace_by_workspace_id.assert_called_once_with(uuid.UUID(test_sb_message["id"]))
    repo().update_workspace.assert_called_once_with(expected_workspace)
    logging_mock.assert_not_called()
    sb_client().get_queue_receiver().complete_message.assert_called_once_with(service_bus_received_message_mock)


@patch('service_bus.deployment_status_update.WorkspaceRepository')
@patch('logging.error')
@patch('service_bus.deployment_status_update.ServiceBusClient')
@patch('fastapi.FastAPI')
async def test_when_updating_non_existent_workspace_error_is_logged(app, sb_client, logging_mock, repo):
    service_bus_received_message_mock = ServiceBusReceivedMessageMock(test_sb_message)
    sb_client().get_queue_receiver().receive_messages = AsyncMock(return_value=[service_bus_received_message_mock])
    sb_client().get_queue_receiver().complete_message = AsyncMock()
    repo().get_workspace_by_workspace_id.side_effect = EntityDoesNotExist

    await receive_message_and_update_deployment(app)

    expected_error_message = strings.DEPLOYMENT_STATUS_ID_NOT_FOUND.format(test_sb_message["id"])
    logging_mock.assert_called_once_with(expected_error_message)
    sb_client().get_queue_receiver().complete_message.assert_called_once_with(service_bus_received_message_mock)


@patch('service_bus.deployment_status_update.WorkspaceRepository')
@patch('logging.error')
@patch('service_bus.deployment_status_update.ServiceBusClient')
@patch('fastapi.FastAPI')
async def test_when_updating_and_state_store_exception(app, sb_client, logging_mock, repo):
    service_bus_received_message_mock = ServiceBusReceivedMessageMock(test_sb_message)
    sb_client().get_queue_receiver().receive_messages = AsyncMock(return_value=[service_bus_received_message_mock])
    sb_client().get_queue_receiver().complete_message = AsyncMock()
    repo().get_workspace_by_workspace_id.side_effect = Exception

    await receive_message_and_update_deployment(app)

    logging_mock.assert_called_once_with(strings.STATE_STORE_ENDPOINT_NOT_RESPONDING + " ")
    sb_client().get_queue_receiver().complete_message.assert_not_called()
```
| 41.784483
| 115
| 0.810192
| 600
| 4,847
| 6.085
| 0.183333
| 0.052041
| 0.036154
| 0.052588
| 0.657902
| 0.598466
| 0.547795
| 0.502876
| 0.490551
| 0.490551
| 0
| 0.004796
| 0.096555
| 4,847
| 115
| 116
| 42.147826
| 0.828956
| 0
| 0
| 0.37931
| 0
| 0
| 0.136786
| 0.090159
| 0
| 0
| 0
| 0
| 0.114943
| 1
| 0.034483
| false
| 0
| 0.114943
| 0.022989
| 0.183908
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a695ae89ca40a6004f7716018ec39b583cbbbfd
| 1,587
|
py
|
Python
|
tests/sms/models/test_reschedule_sms_messages.py
|
infobip-community/infobip-api-python-sdk
|
5ffc5ab877ee1748aa29391f991c8c5324387487
|
[
"MIT"
] | null | null | null |
tests/sms/models/test_reschedule_sms_messages.py
|
infobip-community/infobip-api-python-sdk
|
5ffc5ab877ee1748aa29391f991c8c5324387487
|
[
"MIT"
] | null | null | null |
tests/sms/models/test_reschedule_sms_messages.py
|
infobip-community/infobip-api-python-sdk
|
5ffc5ab877ee1748aa29391f991c8c5324387487
|
[
"MIT"
] | null | null | null |
from datetime import date, datetime, timedelta
import pytest
from pydantic.error_wrappers import ValidationError
from infobip_channels.sms.models.body.reschedule_sms_messages import (
RescheduleSMSMessagesMessageBody,
)
from infobip_channels.sms.models.query_parameters.reschedule_messages import (
RescheduleSMSMessagesQueryParameters,
)
@pytest.mark.parametrize("bulk_id", [{}, None])
def test_when_bulk_id_is_invalid__validation_error_is_raised(bulk_id):
with pytest.raises(ValidationError):
RescheduleSMSMessagesQueryParameters(
**{
"bulk_id": bulk_id,
}
)
def test_when_input_data_is_valid_query__validation_error_is_not_raised():
try:
RescheduleSMSMessagesQueryParameters(
**{
"bulk_id": "BulkId-xyz-123",
}
)
except ValidationError:
pytest.fail("Unexpected ValidationError raised")
@pytest.mark.parametrize(
"send_at",
[{}, "Test", "22-03-2022", date.today(), datetime.now() + timedelta(days=181)],
)
def test_when_send_at_is_invalid__validation_error_is_raised(send_at):
with pytest.raises(ValidationError):
RescheduleSMSMessagesMessageBody(
**{
"sendAt": send_at,
}
)
def test_when_input_data_is_valid_body__validation_error_is_not_raised():
try:
RescheduleSMSMessagesMessageBody(
**{
"sendAt": datetime.now(),
}
)
except ValidationError:
pytest.fail("Unexpected ValidationError raised")
| 27.842105
| 83
| 0.672968
| 153
| 1,587
| 6.594771
| 0.366013
| 0.035679
| 0.043608
| 0.043608
| 0.352825
| 0.297324
| 0.176412
| 0
| 0
| 0
| 0
| 0.011551
| 0.236295
| 1,587
| 56
| 84
| 28.339286
| 0.820957
| 0
| 0
| 0.347826
| 0
| 0
| 0.084436
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.108696
| 0
| 0.195652
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a6a4945d24f523a66e8dd1cc3a18e4d3749558b
| 5,578
|
py
|
Python
|
_pkg_KuFunc/mod_SetLabel.py
|
tianlunjiang/_NukeStudio_v2
|
5ed9b9217aff16d903bdcda5c2f1e1cd3bebe367
|
[
"CNRI-Python"
] | 6
|
2019-08-27T01:30:15.000Z
|
2020-11-17T00:40:01.000Z
|
_pkg_KuFunc/mod_SetLabel.py
|
tianlunjiang/_NukeMods
|
47861bfc273262abba55b9f9a61782a5d89479b1
|
[
"CNRI-Python"
] | 2
|
2019-01-22T04:09:28.000Z
|
2019-01-23T15:11:39.000Z
|
_pkg_KuFunc/mod_SetLabel.py
|
tianlunjiang/_NukeMods
|
47861bfc273262abba55b9f9a61782a5d89479b1
|
[
"CNRI-Python"
] | 1
|
2020-08-03T22:43:23.000Z
|
2020-08-03T22:43:23.000Z
|
# ------------------------------------------------------------------------------
# Module Import
# ------------------------------------------------------------------------------
import nuke, nukescripts
import platform
from Qt import QtWidgets, QtGui, QtCore
#------------------------------------------------------------------------------
#-Header
#------------------------------------------------------------------------------
__VERSION__ = '2.0'
__OS__ = platform.system()
__AUTHOR__ = "Tianlun Jiang"
__WEBSITE__ = "jiangovfx.com"
__COPYRIGHT__ = "copyright (c) %s - %s" % (__AUTHOR__, __WEBSITE__)
__TITLE__ = "SetLabel v%s" % __VERSION__
def _version_():
ver="""
version 2.0
- Add preset buttons for frames and knob values
- Add Node Context support
version 1.0
- Basically working: run() prompts a frameless popup with a line-edit field
- replace with Qt
"""
# ------------------------------------------------------------------------------
# Global Variables
# ------------------------------------------------------------------------------
KNOB_IGNORE = set(['layer', 'invert_mask', 'help',
'dope_sheet', 'hide_input', 'xpos',
'crop', 'channels', 'note_font_color',
'onCreate', 'quality', 'updateUI',
'knobChanged', 'note_font', 'tile_color',
'bookmark', 'selected', 'autolabel',
'process_mask', 'label', 'onDestroy',
'inject', 'indicators', 'icon',
'channel', 'maskFrom', 'maskChannelMask',
'enable', 'maskChannelInput', 'Mask',
'ypos', 'postage_stamp_frame', 'postage_stamp',
'lifetimeStart', 'maskChannel', 'panel',
'lifetimeEnd', 'maskFromFlag',
'name', 'cached', 'fringe',
'mask', 'note_font_size', 'filter',
'useLifetime', 'gl_color'])
KNOB_IGNORE_KEYWORDS = ['_panelDropped', 'enable', 'unpremult', 'clamp']
# ------------------------------------------------------------------------------
# Core Class
# ------------------------------------------------------------------------------
class Core_SetLabel(QtWidgets.QDialog):
def __init__(self):
super(Core_SetLabel,self).__init__()
self.lineInput = QtWidgets.QLineEdit()
self.lineInput.setAlignment(QtCore.Qt.AlignCenter)
self.lineInput.returnPressed.connect(self.onPressed)
self.title = QtWidgets.QLabel("<b>Set Label</b>")
self.title.setAlignment(QtCore.Qt.AlignHCenter)
self.btn_frame = QtWidgets.QPushButton("Current Frame")
self.btn_frame.clicked.connect(self.onPreset)
self.knoblist = QtWidgets.QComboBox()
self.knoblist.setEditable(True)
self.btn_knob = QtWidgets.QPushButton("Knob Value")
self.btn_knob.clicked.connect(self.onPreset)
self.layout = QtWidgets.QVBoxLayout()
self.layout_knobs = QtWidgets.QHBoxLayout()
self.layout.addWidget(self.title)
self.layout.addWidget(self.lineInput)
self.layout.addWidget(self.btn_frame)
self.layout_knobs.addWidget(self.knoblist)
self.layout_knobs.addWidget(self.btn_knob)
self.layout.addLayout(self.layout_knobs)
self.setLayout(self.layout)
self.resize(200,50)
self.setWindowTitle("Set Label")
self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.WindowStaysOnTopHint | QtCore.Qt.Popup)
# self.setDefault()
def onPressed(self):
"""change label with enter-key is pressed"""
newLabel = self.lineInput.text()
for n in self.sel_nodes:
n['label'].setValue(newLabel)
self.close()
def onPreset(self):
"""When preset button is pressed"""
_sender = self.sender()
if _sender is self.btn_frame:
for n in self.sel_nodes:
n['label'].setValue('x%s' % nuke.frame())
elif _sender is self.btn_knob:
sel_knob = self.knoblist.currentText()
n = self.sel_nodes[0]
n['label'].setValue('[value %s]' % sel_knob)
self.close()
def setDefault(self):
"""get the existing label of selected nodes"""
context = get_dag()
with context:
self.sel_nodes = nuke.selectedNodes()
if self.sel_nodes != []:
self.lineInput.show()
self.title.setText("<b>Set Label</b>")
self.lineInput.setText(self.sel_nodes[0]['label'].value())
n = self.sel_nodes[0]
knobs = filterKnobs(n.knobs())
self.knoblist.clear()
self.knoblist.addItems(knobs)
else:
self.lineInput.hide()
self.title.setText("<b>Error:<br>No Node Selected</b>")
def run(self):
"""rerun instance"""
self.setDefault()
self.move(QtGui.QCursor.pos()+QtCore.QPoint(-100,-12))
self.raise_()
self.lineInput.setFocus()
self.lineInput.selectAll()
self.show()
# ------------------------------------------------------------------------------
# Supporting Functions
# ------------------------------------------------------------------------------
def filterKnobs(knobs):
"""filter knobs for labels
@knobs: (list) list of knobs
return: (list) filtered list of knobs
"""
ls_ignored = list( set(knobs)-KNOB_IGNORE )
ls_filtered = []
for k in ls_ignored:
count = 0
for f in KNOB_IGNORE_KEYWORDS:
if f not in k: count += 1
if count == len(KNOB_IGNORE_KEYWORDS): ls_filtered.append(k)
return sorted(ls_filtered)
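# A small illustration of the filtering above (hypothetical knob names):
# knobs in KNOB_IGNORE or containing a KNOB_IGNORE_KEYWORDS substring are
# dropped, everything else survives.
#
#     filterKnobs(['size', 'label', 'disable_panelDropped'])  # -> ['size']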
def get_dag():
"""For DAG context when selecting nodes"""
app = QtWidgets.QApplication
pos = QtGui.QCursor.pos()
widget = app.widgetAt(pos)
# print(dir(widget))
context = widget.parent().windowTitle().split('Node Graph')[0].strip()
print(context)
return nuke.root() if context == '' else nuke.toNode(context)
# ------------------------------------------------------------------------------
# Instancing
# ------------------------------------------------------------------------------
SetLabel = Core_SetLabel()
| 25.126126
| 103
| 0.573682
| 586
| 5,578
| 5.283276
| 0.389079
| 0.04199
| 0.027132
| 0.022287
| 0.076227
| 0.020672
| 0.020672
| 0.020672
| 0.020672
| 0
| 0
| 0.004557
| 0.134457
| 5,578
| 221
| 104
| 25.239819
| 0.636703
| 0.235748
| 0
| 0.052174
| 0
| 0
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069565
| false
| 0
| 0.026087
| 0
| 0.121739
| 0.008696
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a6c6afbecc178b754f00e36139090ce170c777c
| 780
|
py
|
Python
|
imgur_stuff.py
|
djs2022/DataEntrySite
|
aac8e71fe0a8b159113b1488cbe7a8a7e641bf1d
|
[
"MIT"
] | null | null | null |
imgur_stuff.py
|
djs2022/DataEntrySite
|
aac8e71fe0a8b159113b1488cbe7a8a7e641bf1d
|
[
"MIT"
] | null | null | null |
imgur_stuff.py
|
djs2022/DataEntrySite
|
aac8e71fe0a8b159113b1488cbe7a8a7e641bf1d
|
[
"MIT"
] | null | null | null |
import requests
import os
class Imgur():
client_id = None
remCredits = None
def __init__(self, clientID):
self.client_id = clientID
def uploadImage(self, file, title, description):
file.save(file.filename)
with open(file.filename, 'rb') as f:
data = f.read()
url = "https://api.imgur.com/3/image"
payload = {'image': data, 'title': title, 'description': description}
headers = {
"authorization": f"Client-ID {self.client_id}"
}
res = requests.request("POST", url, headers=headers, data=payload)
os.remove(file.filename)
response = res.json()
if response['success']:
return response['data']['link']
else:
return None
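# A minimal usage sketch (hypothetical client id; `file` is expected to be
# a Flask/werkzeug-style upload object with .filename and .save()):
#
#     imgur = Imgur("my-client-id")
#     link = imgur.uploadImage(uploaded_file, "title", "description")
#     # -> direct image link on success, None otherwise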
| 27.857143
| 77
| 0.574359
| 87
| 780
| 5.068966
| 0.528736
| 0.072562
| 0.054422
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001821
| 0.296154
| 780
| 27
| 78
| 28.888889
| 0.801457
| 0
| 0
| 0
| 0
| 0
| 0.141026
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.086957
| 0
| 0.391304
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a6d77c44f6c1309b10cae742c418b58169828c7
| 4,489
|
py
|
Python
|
roles/tox/library/tox_parse_output.py
|
g-chauvel/zuul-jobs
|
7ea241a626f2f2e05d4aeb8cf0328d22736b1f0f
|
[
"Apache-2.0"
] | null | null | null |
roles/tox/library/tox_parse_output.py
|
g-chauvel/zuul-jobs
|
7ea241a626f2f2e05d4aeb8cf0328d22736b1f0f
|
[
"Apache-2.0"
] | null | null | null |
roles/tox/library/tox_parse_output.py
|
g-chauvel/zuul-jobs
|
7ea241a626f2f2e05d4aeb8cf0328d22736b1f0f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2018 Red Hat
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: tox_parse_output
short_description: Parses the output of tox looking for per-line comments
author: Monty Taylor (@mordred)
description:
- Looks for output from the tox command to find content that could be
returned as inline comments.
requirements:
- "python >= 3.5"
options:
tox_output:
description:
- Output from the tox command run
required: true
type: str
'''
import os
import re
from ansible.module_utils.basic import AnsibleModule
ANSI_RE = re.compile(r'(?:\x1B[@-_]|[\x80-\x9F])[0-?]*[ -/]*[@-~]')
PEP8_RE = re.compile(r"^(.*):(\d+):(\d+): (.*)$")
SPHINX_RE = re.compile(r"^([^:]*):([\d]+):(\w.+)$")
def simple_matcher(line, regex, file_path_group, start_line_group,
message_group):
m = regex.match(line)
file_path = None
start_line = None
message = None
if m:
file_path = m.group(file_path_group)
start_line = m.group(start_line_group)
message = m.group(message_group)
return file_path, start_line, message
def pep8_matcher(line):
return simple_matcher(line, PEP8_RE, 1, 2, 4)
def sphinx_matcher(line):
return simple_matcher(line, SPHINX_RE, 1, 2, 3)
matchers = [
pep8_matcher,
sphinx_matcher,
]
def extract_line_comment(line):
"""
Extracts line comment data from a line using multiple matchers.
"""
file_path = None
start_line = None
message = None
for matcher in matchers:
file_path, start_line, message = matcher(line)
if file_path:
message = ANSI_RE.sub('', message)
break
return file_path, start_line, message
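# Example of the matchers above on a typical flake8-style line:
#
#     extract_line_comment("foo/bar.py:12:1: E302 expected 2 blank lines")
#     # -> ("foo/bar.py", "12", "E302 expected 2 blank lines")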
def extract_file_comments(tox_output, workdir, tox_envlist=None):
os.chdir(workdir)
ret = {}
for line in tox_output.split('\n'):
if not line:
continue
if line[0].isspace():
continue
file_path, start_line, message = extract_line_comment(line)
if not file_path:
continue
# Clean up the file path if it has a leading ./
if file_path.startswith('./'):
file_path = file_path[2:]
# Don't report if the file path isn't valid
if not os.path.isfile(file_path):
continue
# Strip current working dir to make absolute paths relative
cwd = os.getcwd() + '/'
if file_path.startswith(cwd):
file_path = file_path[len(cwd):]
# After stripping we don't allow absolute paths anymore since they
# cannot be linked to a file in the repo in zuul.
if file_path.startswith('/'):
continue
# We should only handle files that are in under version control.
# For now, skip .tox directory, we can enhance later.
if file_path.startswith('.tox'):
continue
ret.setdefault(file_path, [])
if tox_envlist:
message = "{envlist}: {message}".format(
envlist=tox_envlist,
message=message,
)
ret[file_path].append(dict(
line=int(start_line),
message=message,
))
return ret
def main():
module = AnsibleModule(
argument_spec=dict(
tox_output=dict(required=True, type='str', no_log=True),
tox_envlist=dict(required=True, type='str'),
workdir=dict(required=True, type='str'),
)
)
tox_output = module.params['tox_output']
tox_envlist = module.params['tox_envlist']
file_comments = extract_file_comments(
tox_output, module.params['workdir'], tox_envlist)
module.exit_json(changed=False, file_comments=file_comments)
if __name__ == '__main__':
main()
| 28.775641
| 74
| 0.642237
| 598
| 4,489
| 4.650502
| 0.366221
| 0.06904
| 0.028767
| 0.027328
| 0.221143
| 0.094211
| 0.049622
| 0.02589
| 0
| 0
| 0
| 0.007177
| 0.255068
| 4,489
| 155
| 75
| 28.96129
| 0.824462
| 0.243261
| 0
| 0.176471
| 0
| 0
| 0.168452
| 0.016667
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.039216
| 0.019608
| 0.147059
| 0.009804
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a737b4d0699668e68dfd11d0393dc995f8e0e88
| 574
|
py
|
Python
|
python-code/transformer-sample/basic/sentiment_analysis.py
|
87-midnight/NewbieInJava
|
ba84153c6b3a382e620c4df7892d653be2e1a607
|
[
"MIT"
] | null | null | null |
python-code/transformer-sample/basic/sentiment_analysis.py
|
87-midnight/NewbieInJava
|
ba84153c6b3a382e620c4df7892d653be2e1a607
|
[
"MIT"
] | 2
|
2019-10-22T08:21:09.000Z
|
2019-10-22T08:21:09.000Z
|
python-code/transformer-sample/basic/sentiment_analysis.py
|
87-midnight/NewbieInJava
|
ba84153c6b3a382e620c4df7892d653be2e1a607
|
[
"MIT"
] | null | null | null |
# Use a sentiment-analysis pipeline
import torch
from transformers import BertTokenizer, BertForSequenceClassification
torch.manual_seed(0)
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertForSequenceClassification.from_pretrained("bert-base-uncased", problem_type="multi_label_classification", num_labels=2)
inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss
outputs = model(**inputs, labels=labels)
loss = outputs.loss
logits = outputs.logits
list(logits.shape)
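# Hedged follow-up sketch: for multi-label classification the logits are
# usually turned into independent per-label probabilities with a sigmoid
# (names below are the variables defined above):
#
#     probs = torch.sigmoid(logits)  # shape [1, 2]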
| 41
| 131
| 0.801394
| 71
| 574
| 6.366197
| 0.633803
| 0.061947
| 0.079646
| 0.097345
| 0.128319
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007619
| 0.085366
| 574
| 14
| 132
| 41
| 0.853333
| 0.083624
| 0
| 0
| 0
| 0
| 0.158397
| 0.049618
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.181818
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a750f402f6cc67161071bf3b54785b45c55a45d
| 1,293
|
py
|
Python
|
examples/tutorial/parallel_amuse_script.py
|
rknop/amuse
|
85d5bdcc29cfc87dc69d91c264101fafd6658aec
|
[
"Apache-2.0"
] | 131
|
2015-06-04T09:06:57.000Z
|
2022-02-01T12:11:29.000Z
|
examples/tutorial/parallel_amuse_script.py
|
rknop/amuse
|
85d5bdcc29cfc87dc69d91c264101fafd6658aec
|
[
"Apache-2.0"
] | 690
|
2015-10-17T12:18:08.000Z
|
2022-03-31T16:15:58.000Z
|
examples/tutorial/parallel_amuse_script.py
|
rieder/amuse
|
3ac3b6b8f922643657279ddee5c8ab3fc0440d5e
|
[
"Apache-2.0"
] | 102
|
2015-01-22T10:00:29.000Z
|
2022-02-09T13:29:43.000Z
|
import time
import numpy
from amuse.lab import Huayno
from amuse.lab import Hermite
from amuse.lab import nbody_system
from amuse.lab import new_king_model
from matplotlib import pyplot
def gravity_minimal(bodies, t_end, nproc):
gravity = Hermite(number_of_workers=nproc)
gravity.particles.add_particles(bodies)
Etot_init = gravity.kinetic_energy + gravity.potential_energy
start_time = time.time()
gravity.evolve_model(t_end)
dtime = time.time() - start_time
Ekin = gravity.kinetic_energy
Epot = gravity.potential_energy
Etot = Ekin + Epot
dE = (Etot_init-Etot)/Etot
print()
print("T =", gravity.get_time(), " CPU time:", dtime, "[s]")
print("M =", bodies.mass.sum(), " E = ", Etot, " Q = ", -Ekin/Epot)
print("dE =", dE)
gravity.stop()
return dtime
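# A single-call sketch (assumes `bodies` and `t_end` are set up as in the
# main block below): run once with 4 workers and report the wall time.
#
#     dt = gravity_minimal(bodies, t_end, 4)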
if __name__ == '__main__':
N = 1024
W0 = 7.0
t_end = 0.1 | nbody_system.time
bodies = new_king_model(N, W0)
bodies.scale_to_standard()
nproc = 6
proc = numpy.arange(1, nproc+1, 1)
tcpu = []
for npi in proc:
tcpu.append(gravity_minimal(bodies, t_end, npi))
pyplot.scatter(proc, tcpu)
pyplot.xlabel("n proc")
pyplot.ylabel("CPU time [s]")
pyplot.savefig("fig_parallel_performance_N1k_Hermite.pdf")
| 25.352941
| 71
| 0.664346
| 183
| 1,293
| 4.480874
| 0.42623
| 0.043902
| 0.058537
| 0.087805
| 0.058537
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014778
| 0.215004
| 1,293
| 50
| 72
| 25.86
| 0.793103
| 0
| 0
| 0
| 0
| 0
| 0.076625
| 0.03096
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025641
| false
| 0
| 0.179487
| 0
| 0.230769
| 0.102564
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a75e62e27fdd3a634c7ec673852b4fb62407232
| 311
|
py
|
Python
|
modules/random_cat.py
|
ChaseBosman/chatbot
|
a39e655e6d586fa596471cd20617dff5f9795a96
|
[
"Unlicense"
] | 3
|
2019-10-19T12:07:06.000Z
|
2020-10-05T17:24:56.000Z
|
modules/random_cat.py
|
ChaseBosman/chatbot
|
a39e655e6d586fa596471cd20617dff5f9795a96
|
[
"Unlicense"
] | 17
|
2019-10-05T12:30:17.000Z
|
2021-07-25T20:06:33.000Z
|
modules/random_cat.py
|
ChaseBosman/chatbot
|
a39e655e6d586fa596471cd20617dff5f9795a96
|
[
"Unlicense"
] | 26
|
2018-10-19T05:43:12.000Z
|
2020-10-02T05:27:48.000Z
|
import requests
import json
def random_cat_pic():
try:
url = 'http://aws.random.cat/meow'
response = requests.get(url)
response_json = json.loads(response.text)
return "Here's a super cute cat pic: " + response_json.get('file')
except Exception:  # network errors, bad JSON, or a missing 'file' field
return "Error meow"
| 25.916667
| 74
| 0.614148
| 41
| 311
| 4.560976
| 0.609756
| 0.096257
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.273312
| 311
| 12
| 75
| 25.916667
| 0.827434
| 0
| 0
| 0
| 0
| 0
| 0.221154
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.2
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a79851a367aea689a1293265d02727ae30bb330
| 7,877
|
py
|
Python
|
cvstudio/view/widgets/common/treeview_model.py
|
haruiz/PytorchCvStudio
|
ccf79dd0cc0d61f3fd01b1b5d96f7cda7b681eef
|
[
"MIT"
] | 32
|
2019-10-31T03:10:52.000Z
|
2020-12-23T11:50:53.000Z
|
cvstudio/view/widgets/common/treeview_model.py
|
haruiz/CvStudio
|
ccf79dd0cc0d61f3fd01b1b5d96f7cda7b681eef
|
[
"MIT"
] | 19
|
2019-10-31T15:06:05.000Z
|
2020-06-15T02:21:55.000Z
|
cvstudio/view/widgets/common/treeview_model.py
|
haruiz/PytorchCvStudio
|
ccf79dd0cc0d61f3fd01b1b5d96f7cda7b681eef
|
[
"MIT"
] | 8
|
2019-10-31T03:32:50.000Z
|
2020-07-17T20:47:37.000Z
|
import itertools
import typing
from typing import Any
from PyQt5 import QtCore
from PyQt5.QtCore import QModelIndex, pyqtSignal, QObject
from PyQt5.QtGui import QColor
from PyQt5.QtWidgets import QAbstractItemDelegate, QWidget, QStyleOptionViewItem, QSpinBox
class CustomNode(object):
def __init__(self, data=None, success_icon=None, hover_icon=None, error_icon=None, level=-1, tag=None, status=1,
tooltip=None):
self._data = data
if isinstance(data, tuple):
self._data = list(data)
if isinstance(data, str):
self._data = [data]
self._tag = tag
self._enable = False
self._success_icon = success_icon
self._error_icon = error_icon if error_icon else success_icon
self._hover_icon = hover_icon if hover_icon else success_icon
self._children = []
self._parent = None
self._level = level
self._row = 0
self._status = status
self._tooltip_content = tooltip
def get_data(self, column):
if 0 <= column < len(self._data):
return self._data[column]
def set_data(self, column, value):
self._data[column] = value
def columnCount(self):
return len(self._data) if self._data else 0
@property
def tooltip_content(self):
return self._tooltip_content
@tooltip_content.setter
def tooltip_content(self, value):
self._tooltip_content = value
@property
def tag(self):
return self._tag
@tag.setter
def tag(self, val):
self._tag = val
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
@property
def success_icon(self):
return self._success_icon
@success_icon.setter
def success_icon(self, value):
self._success_icon = value
@property
def error_icon(self):
return self._error_icon
@error_icon.setter
def error_icon(self, value):
self._error_icon = value
@property
def children(self):
return self._children
@children.setter
def children(self, value):
self._children = value
@property
def parent(self):
return self._parent
@parent.setter
def parent(self, value):
self._parent = value
@property
def level(self):
return self._level
@level.setter
def level(self, value):
self._level = value
@property
def row(self):
return self._row
@row.setter
def row(self, value):
self._row = value
def child(self, index):
if 0 <= index < len(self.children):
return self.children[index]
def addChild(self, child):
child._parent = self
child._row = len(self.children) # get the last index
self.children.append(child)
def removeChild(self, position):
if position < 0 or position > len(self._children):
return False
child = self._children.pop(position)
child._parent = None
return True
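# A small sketch of building a two-level tree with the node class above
# (hypothetical row data):
#
#     root = CustomNode(["root"])
#     child = CustomNode(["child"], level=1)
#     root.addChild(child)
#     root.child(0).get_data(0)  # -> "child"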
class CustomModelSignals(QObject):
data_changed = pyqtSignal(CustomNode, int, str, str)
class WidgetDelegate(QAbstractItemDelegate):
def __init__(self):
super(WidgetDelegate, self).__init__()
def createEditor(self, parent: QWidget, option: QStyleOptionViewItem, index: QtCore.QModelIndex) -> QWidget:
editor = QSpinBox(parent)
editor.setFrame(False)
editor.setMinimum(0)
editor.setMaximum(100)
return editor
class CustomModel(QtCore.QAbstractItemModel):
def __init__(self, columns):
QtCore.QAbstractItemModel.__init__(self)
self._root = CustomNode(list(itertools.repeat("", len(columns))))
self.signals = CustomModelSignals()
self._columns = columns
@property
def root(self):
return self._root
def headerData(self, section: int, orientation: QtCore.Qt.Orientation, role: int = ...) -> typing.Any:
if role == QtCore.Qt.DisplayRole:
return self._columns[section]
return super(CustomModel, self).headerData(section, orientation, role)
def addChild(self, node, parent=None):
if not parent or not parent.isValid():
parent = self._root
else:
parent = parent.internalPointer()
parent.addChild(node)
def setData(self, index: QModelIndex, value: Any, role=None):
if index.isValid():
if role == QtCore.Qt.EditRole:
node: CustomNode = index.internalPointer()
if value:
old_val = node.get_data(0)
node.set_data(0, value)
self.signals.data_changed.emit(node, role, old_val, value)
return True
else:
return False
return False
def removeChild(self, index: QModelIndex):
self.beginRemoveRows(index.parent(), index.row(), index.row())
success = self.removeRow(index.row(), parent=index.parent())
self.endRemoveRows()
return success
def removeRow(self, row, parent):
# an invalid parent index means the row belongs to the root node
if not parent.isValid():
parentNode = self._root
else:
parentNode = parent.internalPointer() # the node
parentNode.removeChild(row)
return True
def data(self, index: QModelIndex, role=None):
if not index.isValid():
return None
node: CustomNode = index.internalPointer()
if role == QtCore.Qt.DisplayRole:
val = node.get_data(index.column())
return val
elif role == QtCore.Qt.DecorationRole and index.column() == 0:
if node.status == 1:
return node.success_icon
else:
return node.error_icon
elif role == QtCore.Qt.TextColorRole:
if node.level == 2 and node.status == -1:
return QColor(255, 0, 0)
elif role == QtCore.Qt.ToolTipRole:
return node.tooltip_content
return None
def flags(self, index: QModelIndex):
if not index.isValid():
return QtCore.Qt.NoItemFlags
flags = super(CustomModel, self).flags(index)
node: CustomNode = index.internalPointer()
# if node.level == 1:
# return (flags | QtCore.Qt.ItemIsEditable )
# else:
# return (flags | QtCore.Qt.ItemIsSelectable)
return (flags | QtCore.Qt.ItemIsEditable)
def rowCount(self, parent: QModelIndex = None, *args, **kwargs):
if parent.isValid(): # internal nodes
child: CustomNode = parent.internalPointer()
return len(child.children)
return len(self._root.children) # first level nodes
def columnCount(self, parent: QModelIndex = None, *args, **kwargs):
if parent.isValid():
return parent.internalPointer().columnCount()
return self._root.columnCount()
def parent(self, in_index: QModelIndex = None):
if in_index.isValid():
parent = in_index.internalPointer().parent
if parent:
return QtCore.QAbstractItemModel.createIndex(self, parent.row, 0, parent)
return QtCore.QModelIndex()
def index(self, row: int, column: int, parent=None, *args, **kwargs):
if not parent or not parent.isValid():
parent_node = self._root
else:
parent_node = parent.internalPointer()
if not QtCore.QAbstractItemModel.hasIndex(self, row, column, parent):
return QtCore.QModelIndex()
child = parent_node.child(row)
if child:
return QtCore.QAbstractItemModel.createIndex(self, row, column, child)
else:
return QtCore.QModelIndex()
| 30.296154
| 116
| 0.612416
| 873
| 7,877
| 5.390607
| 0.142039
| 0.029749
| 0.029749
| 0.008925
| 0.142371
| 0.036124
| 0.036124
| 0.036124
| 0.021249
| 0
| 0
| 0.005026
| 0.292751
| 7,877
| 259
| 117
| 30.413127
| 0.839706
| 0.02501
| 0
| 0.185366
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.195122
| false
| 0
| 0.034146
| 0.053659
| 0.453659
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a7bafa3c7ab3354d60a1fcd0376c7ade47cb21d
| 707
|
py
|
Python
|
evtx_to_dataframe.py
|
esua/evtx_to_dataframe
|
390bf470e92092e66827373ed7e8b012a4fe94f6
|
[
"Apache-2.0"
] | null | null | null |
evtx_to_dataframe.py
|
esua/evtx_to_dataframe
|
390bf470e92092e66827373ed7e8b012a4fe94f6
|
[
"Apache-2.0"
] | null | null | null |
evtx_to_dataframe.py
|
esua/evtx_to_dataframe
|
390bf470e92092e66827373ed7e8b012a4fe94f6
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import Evtx.Evtx as evtx
import pandas as pd
import xmltodict
import re
parser = argparse.ArgumentParser(description="Convert Windows EVTX event log file to DataFrame.")
parser.add_argument("evtx", type=str, help="Path to the Windows EVTX event log file")
args = parser.parse_args()
with evtx.Evtx(args.evtx) as log:
data_dicts = []
for record in log.records():
elem = record.xml()
elem = re.sub(r'<Data Name="(.+)">(.+)</Data>', r'<\1>\2</\1>', elem) # Replace contents of EventData
data_dict = xmltodict.parse(elem) # convert xml to dict
data_dicts.append(data_dict)
df = pd.json_normalize(data_dicts) # convert dict to pd.DataFrame
print(df)
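# Hedged follow-up sketch: the flattened column names depend on the log's
# XML structure, but the frame can be persisted with standard pandas calls:
#
#     df.to_csv("events.csv", index=False)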
| 33.666667
| 110
| 0.693069
| 106
| 707
| 4.54717
| 0.481132
| 0.056017
| 0.06639
| 0.078838
| 0.095436
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005164
| 0.178218
| 707
| 20
| 111
| 35.35
| 0.824441
| 0.110325
| 0
| 0
| 0
| 0
| 0.2112
| 0.0368
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.294118
| 0
| 0.294118
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a7c85d6a1879df3d91cd853104103d5c1ce8afa
| 1,553
|
py
|
Python
|
paprotka/feature/cepstral.py
|
michalsosn/paprotka
|
d6079eefbade2cb8be5896777a7d50ac968d42ec
|
[
"MIT"
] | 1
|
2019-10-29T04:14:40.000Z
|
2019-10-29T04:14:40.000Z
|
paprotka/feature/cepstral.py
|
michalsosn/paprotka
|
d6079eefbade2cb8be5896777a7d50ac968d42ec
|
[
"MIT"
] | null | null | null |
paprotka/feature/cepstral.py
|
michalsosn/paprotka
|
d6079eefbade2cb8be5896777a7d50ac968d42ec
|
[
"MIT"
] | null | null | null |
import math
import numpy as np
from scipy import signal, fftpack
def pre_emphasize(data, pre_emphasis=0.97):
return np.append(data[0], data[1:] - pre_emphasis * data[:-1])
def hz_to_mel(hz):
return 2595 * math.log10(1 + hz / 700)
def mel_to_hz(mel):
return 700 * (10 ** (mel / 2595) - 1)
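# Sanity-check sketch: the two conversions above are inverses, so a round
# trip should return (approximately) the original frequency:
#
#     mel_to_hz(hz_to_mel(440.0))  # -> ~440.0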
def make_mel_filters(half, rate, filter_num):
min_mel = 0
max_mel = hz_to_mel(rate / 2)
mel_points = np.linspace(min_mel, max_mel, filter_num + 2)
hz_points = mel_to_hz(mel_points)
bin_points = np.floor((2 * half + 1) * hz_points / rate).astype(np.int32)
filters = np.zeros((filter_num, half))
for i in range(filter_num):
start, mid, end = bin_points[i], bin_points[i + 1], bin_points[i + 2]
filters[i, start:mid] = np.linspace(0, 1, mid - start, endpoint=False)
filters[i, mid:end] = np.linspace(1, 0, end - mid, endpoint=True)
return filters
def calculate_filter_bank(sound, filter_num=30, result_scaling=np.log1p, *args, **kwargs):
frequencies, times, transform = signal.stft(sound.data, sound.rate, *args, **kwargs)
power_spectrum = np.abs(transform) ** 2
filters = make_mel_filters(frequencies.size, sound.rate, filter_num)
coefficients = (filters @ power_spectrum).T
return result_scaling(coefficients)
def calculate_mfcc(sound, num_ceps=12, *args, **kwargs):
filter_banks = calculate_filter_bank(sound, *args, **kwargs)
mfcc = fftpack.dct(filter_banks, norm='ortho')
if num_ceps is None:
return mfcc
return mfcc[:, 1:(num_ceps + 1)]
| 33.042553
| 90
| 0.675467
| 241
| 1,553
| 4.157676
| 0.3361
| 0.053892
| 0.02994
| 0.01996
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037391
| 0.190599
| 1,553
| 46
| 91
| 33.76087
| 0.759745
| 0
| 0
| 0
| 0
| 0
| 0.00322
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.090909
| 0.090909
| 0.484848
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a7d8a539d82fbecac85da845cd748fe400b1a12
| 2,688
|
py
|
Python
|
arelle/plugin/unpackSecEisFile.py
|
DataFinnovation/Arelle
|
d4bf45f56fc9249f75ab22e6217dbe55f0510841
|
[
"Apache-2.0"
] | 292
|
2015-01-27T03:31:51.000Z
|
2022-03-26T07:00:05.000Z
|
arelle/plugin/unpackSecEisFile.py
|
DataFinnovation/Arelle
|
d4bf45f56fc9249f75ab22e6217dbe55f0510841
|
[
"Apache-2.0"
] | 94
|
2015-04-18T23:03:00.000Z
|
2022-03-28T17:24:55.000Z
|
arelle/plugin/unpackSecEisFile.py
|
DataFinnovation/Arelle
|
d4bf45f56fc9249f75ab22e6217dbe55f0510841
|
[
"Apache-2.0"
] | 200
|
2015-01-13T03:55:47.000Z
|
2022-03-29T12:38:56.000Z
|
'''
Unpack SEC EIS File is an example of a plug-in to the GUI menu
that will save the unpacked contents of an SEC EIS File in a directory.
(c) Copyright 2012 Mark V Systems Limited, All rights reserved.
'''
def unpackEIS(cntlr, eisFile, unpackToDir):
from arelle.FileSource import openFileSource
filesource = openFileSource(eisFile, cntlr, checkIfXmlIsEis=True)
if not filesource.isArchive:
cntlr.addToLog("[info:unpackEIS] Not recognized as an EIS file: " + eisFile)
return
import os, io
unpackedFiles = []
for file in filesource.dir:
fIn, encoding = filesource.file(os.path.join(eisFile,file))
with open(os.path.join(unpackToDir, file), "w", encoding=encoding) as fOut:
fOut.write(fIn.read())
unpackedFiles.append(file)
fIn.close()
cntlr.addToLog("[info:unpackEIS] Unpacked files " + ', '.join(unpackedFiles))
def unpackSecEisMenuEntender(cntlr, menu, *args, **kwargs):
def askUnpackDirectory():
eisFile = cntlr.uiFileDialog("open",
title=_("arelle - Open SEC EIS file"),
initialdir=cntlr.config.setdefault("openSecEisFileDir","."),
filetypes=[(_("Compressed EIS file .eis"), "*.eis"), (_("Uncompressed EIS file .xml"), "*.xml")],
defaultextension=".eis")
if not eisFile:
return
from tkinter.filedialog import askdirectory
unpackToDir = askdirectory(parent=cntlr.parent,
initialdir=cntlr.config.setdefault("unpackSecEisFileDir","."),
title='Please select a directory for unpacked EIS Contents')
import os
cntlr.config["openSecEisFileDir"] = os.path.dirname(eisFile)
cntlr.config["unpackSecEisFileDir"] = unpackToDir
cntlr.saveConfig()
try:
unpackEIS(cntlr, eisFile, unpackToDir)
except Exception as ex:
cntlr.addToLog("[arelle:exception] Unpack EIS exception: " + str(ex));
menu.add_command(label="Unpack SEC EIS File",
underline=0,
command=lambda: askUnpackDirectory() )
__pluginInfo__ = {
'name': 'Unpack SEC EIS File',
'version': '0.9',
'description': "This plug-in unpacks the contents of an SEC EIS file.",
'license': 'Apache-2',
'author': 'Mark V Systems Limited',
'copyright': '(c) Copyright 2012 Mark V Systems Limited, All rights reserved.',
# classes of mount points (required)
'CntlrWinMain.Menu.Tools': unpackSecEisMenuEntender,
}
| 43.354839
| 134
| 0.604911
| 277
| 2,688
| 5.841155
| 0.436823
| 0.038937
| 0.037083
| 0.029666
| 0.088999
| 0.088999
| 0.061805
| 0.061805
| 0.061805
| 0.061805
| 0
| 0.006253
| 0.286086
| 2,688
| 61
| 135
| 44.065574
| 0.836894
| 0.087798
| 0
| 0.041667
| 0
| 0
| 0.244272
| 0.009411
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.083333
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a7e02c43c6ebf2859a5eb96f826707b1b0a7b33
| 2,251
|
py
|
Python
|
fpcalc.py
|
johnlawsharrison/pyacoustid
|
55321b316f09e782a1c0914826419be799908e01
|
[
"MIT"
] | 203
|
2016-01-18T14:05:49.000Z
|
2022-03-25T04:04:42.000Z
|
fpcalc.py
|
johnlawsharrison/pyacoustid
|
55321b316f09e782a1c0914826419be799908e01
|
[
"MIT"
] | 41
|
2016-03-08T10:28:14.000Z
|
2021-11-26T20:53:15.000Z
|
fpcalc.py
|
johnlawsharrison/pyacoustid
|
55321b316f09e782a1c0914826419be799908e01
|
[
"MIT"
] | 56
|
2016-01-09T04:22:40.000Z
|
2022-01-29T16:01:39.000Z
|
#!/usr/bin/env python
# This file is part of pyacoustid.
# Copyright 2012, Lukas Lalinsky.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Simple script for calculating audio fingerprints, using the same
arguments/output as the fpcalc utility from Chromaprint."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import argparse
import sys
import acoustid
import chromaprint
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-length', metavar='SECS', type=int, default=120,
help='length of the audio data used for fingerprint '
'calculation (default 120)')
parser.add_argument('-raw', action='store_true',
help='output the raw uncompressed fingerprint')
parser.add_argument('paths', metavar='FILE', nargs='+',
help='audio file to be fingerprinted')
args = parser.parse_args()
# make gst not try to parse the args
del sys.argv[1:]
first = True
for i, path in enumerate(args.paths):
try:
duration, fp = acoustid.fingerprint_file(path, args.length)
except Exception:
print("ERROR: unable to calculate fingerprint "
"for file %s, skipping" % path, file=sys.stderr)
continue
if args.raw:
raw_fp = chromaprint.decode_fingerprint(fp)[0]
fp = ','.join(map(str, raw_fp))
if not first:
print()
first = False
print('FILE=%s' % path)
print('DURATION=%d' % duration)
print('FINGERPRINT=%s' % fp.decode('utf8'))
if __name__ == '__main__':
main()
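# Expected output sketch for one input file (values are illustrative,
# matching the print statements in main() above):
#
#     FILE=/music/track.mp3
#     DURATION=215
#     FINGERPRINT=AQABz0qUkZK4oOfhL...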
| 34.106061
| 77
| 0.660595
| 284
| 2,251
| 5.126761
| 0.521127
| 0.037775
| 0.032967
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007701
| 0.250111
| 2,251
| 65
| 78
| 34.630769
| 0.854858
| 0.356286
| 0
| 0
| 0
| 0
| 0.195941
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0
| 0.189189
| 0
| 0.216216
| 0.324324
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a7ec9858eb7869bba6e4129ded3a123b302b0e2
| 3,071
|
py
|
Python
|
Learning/button groups.py
|
atharva0300/PyQt5-Practice
|
0feacca6518190646a345ce2ea75e071e7861ac5
|
[
"MIT"
] | null | null | null |
Learning/button groups.py
|
atharva0300/PyQt5-Practice
|
0feacca6518190646a345ce2ea75e071e7861ac5
|
[
"MIT"
] | null | null | null |
Learning/button groups.py
|
atharva0300/PyQt5-Practice
|
0feacca6518190646a345ce2ea75e071e7861ac5
|
[
"MIT"
] | 1
|
2021-11-16T10:18:07.000Z
|
2021-11-16T10:18:07.000Z
|
# Button Groups in Python
import PyQt5
from PyQt5.QtWidgets import QApplication, QHBoxLayout, QLabel, QButtonGroup, QMainWindow, QDialog, QPushButton, QVBoxLayout
import sys
from PyQt5 import QtGui
from PyQt5.QtGui import QFont, QPixmap
from PyQt5.QtCore import QSize
class window(QDialog):
def __init__(self):
super().__init__()
self.title = 'PyQt5 Window'
self.left = 500
self.top = 200
self.width = 300
self.height = 250
self.iconName = './icons/file.png'
# calling the initwindow function
self.initwindow()
# create a label
self.label = QLabel('Hello')
self.label.setFont(QtGui.QFont('Sanserif' , 13))
self.hbox.addWidget(self.label)
# set the self.layout to hbox
self.setLayout(self.hbox)
# calling the on_Pressed function
self.on_Pressed()
# show the window
self.show()
def initwindow(self):
self.setWindowIcon(QtGui.QIcon('./icons/file.png'))
self.setWindowTitle(self.title)
self.setGeometry(self.left , self.top , self.width , self.height)
# create a Hbox layout
self.hbox = QHBoxLayout()
# create a button group
self.buttongroup = QButtonGroup()
# connecting the button group with signal
self.buttongroup.buttonClicked[int].connect(self.on_Pressed)
# create 3 buttons
self.button1 = QPushButton('Python')
# add button1 to the Button Group
self.buttongroup.addButton(self.button1 , 1)
self.button1.setIcon(QtGui.QIcon('./icons/python.png'))
self.button1.setIconSize(QSize(40,40))
# ---------- #
# add the button group to hbox layout
self.hbox.addWidget(self.button1)
# Button 2 ----
self.button2 = QPushButton('C++')
# add button1 to the Button Group
self.buttongroup.addButton(self.button2 , 2)
self.button2.setIcon(QtGui.QIcon('./icons/cpp.png'))
self.button2.setIconSize(QSize(40,40))
# ---------- #
# add the button group to hbox layout
self.hbox.addWidget(self.button2)
# Button 3 ---
self.button3 = QPushButton('Java')
# add button1 to the Button Group
self.buttongroup.addButton(self.button3 , 3)
self.button3.setIcon(QtGui.QIcon('./icons/java.png'))
self.button3.setIconSize(QSize(40,40))
# ---------- #
# add the button group to hbox layout
self.hbox.addWidget(self.button3)
def on_Pressed(self, button_id=None):
# the buttonClicked[int] signal passes the clicked button's id;
# look it up in the group and show its text in the label
if button_id is None:
return
button = self.buttongroup.button(button_id)
if button is not None:
self.label.setText(button.text() + ' Was clicked')
if __name__ == "__main__":
App = QApplication(sys.argv)
win = window()  # avoid rebinding the class name
sys.exit(App.exec())
| 28.700935
| 124
| 0.581895
| 342
| 3,071
| 5.169591
| 0.318713
| 0.049774
| 0.05543
| 0.047511
| 0.210407
| 0.210407
| 0.210407
| 0.210407
| 0.210407
| 0.210407
| 0
| 0.026353
| 0.308043
| 3,071
| 106
| 125
| 28.971698
| 0.805647
| 0.182677
| 0
| 0
| 0
| 0
| 0.058452
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057692
| false
| 0
| 0.115385
| 0
| 0.192308
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a7f65074a8ce42ce2f4be7f8b8b5034567b834f
| 20,126
|
py
|
Python
|
ct-tests/lib/crus_integration_test.py
|
Cray-HPE/cray-crus
|
6643aa3eb3debe5cbe0088f6a30b7e56ca1b1f17
|
[
"MIT"
] | null | null | null |
ct-tests/lib/crus_integration_test.py
|
Cray-HPE/cray-crus
|
6643aa3eb3debe5cbe0088f6a30b7e56ca1b1f17
|
[
"MIT"
] | 1
|
2022-03-02T21:06:21.000Z
|
2022-03-04T17:32:14.000Z
|
ct-tests/lib/crus_integration_test.py
|
Cray-HPE/cray-crus
|
6643aa3eb3debe5cbe0088f6a30b7e56ca1b1f17
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
# MIT License
#
# (C) Copyright 2020-2022 Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
"""
CRUS integration test
See crus_integration_test/argparse.py for command line usage.
### SETUP ###
1 Generate map of xnames, nids, and hostnames for target nodes (by default,
all computes)
2 Validate they work with the specified min/max node and step values.
3 Lookup BOS session template
4 Create empty starting, upgrading, and failed HSM groups
5 Create new session template for all target nodes
6 Create new session templates for the upgrading group
7 Use BOS to reboot all target nodes to new BOS session template
### TEST 1 ###
8 Put 1 node into starting group
9 Create CRUS session
10 Verify all goes well & delete CRUS session
### TEST 2 ###
11 Move all nodes into starting group.
Repeat steps 9-10, with step size that results in at least 2 steps
### TEST 3 ###
12 Select 2 nodes
13 Start slurm workload on 1 of them
14 Create CRUS session
15 Verify that CRUS waits while the slurm workloads run
16 Stop the slurm workloads
17 Verify that all goes well & delete CRUS session
### RESTORE NODES ###
18 Create CRUS session to reboot all nodes to base slurm template
19 Verify that all goes well & delete CRUS session
### CLEANUP ###
20 Delete new templates
21 Delete custom vcs branches
22 Delete new hsm groups
"""
from crus_integration_test.argparse import parse_args
from crus_integration_test.crus import verify_crus_waiting_for_quiesce
from crus_integration_test.hsm import create_hsm_groups
from crus_integration_test.slurm import complete_slurm_job, start_slurm_job, \
verify_initial_slurm_state
from crus_integration_test.utils import bos_reboot_nodes, create_bos_session_templates, \
monitor_crus_session, \
verify_results_of_crus_session
from common.bos import bos_session_template_validate_cfs, \
list_bos_session_templates, list_bos_sessions
from common.bosutils import delete_bos_session_templates, \
delete_cfs_configs, \
delete_hsm_groups, \
delete_vcs_repo_and_org
from common.cfs import describe_cfs_config
from common.crus import create_crus_session, delete_crus_session
from common.helpers import CMSTestError, create_tmpdir, debug, error_exit, exit_test, \
init_logger, info, log_exception_error, raise_test_exception_error, \
remove_tmpdir, section, subtest, warn
from common.hsm import set_hsm_group_members
from common.k8s import get_csm_private_key
from common.utils import get_compute_nids_xnames, validate_node_hostnames
from common.vcs import create_and_clone_vcs_repo
import random
import sys
TEST_NAME = "crus_integration_test"
def do_subtest(subtest_name, subtest_func, **subtest_kwargs):
"""
Log that we are about to run a subtest with the specified name, then call the specified function
with the specified arguments. Raise exception in case of an error.
"""
subtest(subtest_name)
try:
return subtest_func(**subtest_kwargs)
except CMSTestError:
raise
except Exception as e:
raise_test_exception_error(e, "%s subtest" % subtest_name)
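# A minimal usage sketch of the wrapper above (the same call appears in
# the test body below): run a named subtest and capture its return value.
#
#     tmpdir = do_subtest("Create temporary directory", create_tmpdir)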
def do_test(test_variables):
"""
Main test body. Execute each subtest in turn.
"""
# =============================
# =============================
# SETUP
# =============================
# =============================
use_api = test_variables["use_api"]
if use_api:
info("Using API")
else:
info("Using CLI")
# We don't need the CSM private key until it comes time to ssh into the compute nodes, but we'd
# rather know up front if this fails, to save time
do_subtest("Get CSM private key (for later use to ssh to computes)", get_csm_private_key)
nid_to_xname, xname_to_nid = do_subtest("Find compute nids & xnames",
get_compute_nids_xnames, use_api=use_api,
nids=test_variables["nids"],
groups=test_variables["groups"],
xnames=test_variables["xnames"],
min_required=3)
test_variables["nids"] = sorted(list(nid_to_xname.keys()))
test_variables["xnames"] = sorted(list(nid_to_xname.values()))
nids = test_variables["nids"]
xnames = test_variables["xnames"]
info("nids: %s" % str(nids))
slurm_nid = random.choice(nids)
slurm_xname = nid_to_xname[slurm_nid]
test_nids = [ n for n in nids if n != slurm_nid ]
test_xnames = [ x for x in xnames if x != slurm_xname ]
debug("Slurm controller: nid %d (xname %s)" % (slurm_nid, slurm_xname))
debug("Worker nodes:")
for test_nid in sorted(test_nids):
debug(" nid %d (xname %s)" % (test_nid, nid_to_xname[test_nid]))
max_step_size = len(nids)
if test_variables["max_step_size"]:
max_step_size = min(max_step_size, test_variables["max_step_size"])
do_subtest("Validate node hostnames", validate_node_hostnames, nid_to_xname=nid_to_xname)
template_objects = do_subtest("List all BOS session templates", list_bos_session_templates,
use_api=use_api)
info("BOS session template: %s" % test_variables["template"])
if test_variables["template"] not in template_objects:
error_exit("No BOS session template found with name %s" % test_variables["template"])
else:
slurm_template_name = test_variables["template"]
cfs_config_name = do_subtest("Get CFS configuration name from %s BOS session template" % slurm_template_name,
bos_session_template_validate_cfs, bst=template_objects[slurm_template_name])
info("CFS configuration name in %s is %s" % (slurm_template_name, cfs_config_name))
test_variables["base_cfs_config_name"] = cfs_config_name
do_subtest("Validate CFS configuration %s" % cfs_config_name,
describe_cfs_config, use_api=use_api, name=cfs_config_name)
test_hsm_groups = test_variables["test_hsm_groups"]
do_subtest("Create hsm groups", create_hsm_groups, use_api=use_api, test_hsm_groups=test_hsm_groups)
tmpdir = do_subtest("Create temporary directory", create_tmpdir)
test_variables["tmpdir"] = tmpdir
# Always want to make sure that we have a template which does not match any of the others
# for both cfs branch and kernel parameters.
num_test_templates = 3
test_vcs_org = "crus-integration-test-org-%d" % random.randint(0,9999999)
test_vcs_repo = "crus-integration-test-repo-%d" % random.randint(0,9999999)
test_variables["test_vcs_org"] = test_vcs_org
test_variables["test_vcs_repo"] = test_vcs_repo
vcs_repo_dir = do_subtest("Create and clone VCS repo %s in org %s" % (test_vcs_repo, test_vcs_org),
create_and_clone_vcs_repo, orgname=test_vcs_org, reponame=test_vcs_repo,
testname=TEST_NAME, tmpdir=tmpdir)
test_variables["vcs_repo_dir"] = vcs_repo_dir
do_subtest("Create modified BOS session templates",
create_bos_session_templates,
num_to_create=num_test_templates,
use_api=use_api,
template_objects=template_objects,
test_variables=test_variables,
xname_to_nid=xname_to_nid)
test_template_names = test_variables["test_template_names"]
base_test_template, test_template1, test_template2 = test_template_names
debug("Base test template: %s" % base_test_template)
debug("Test template 1: %s" % test_template1)
debug("Test template 2: %s" % test_template2)
# Use BOS to reboot all target nodes to new BOS session template
xname_template_map = dict()
do_subtest("Reboot all target nodes to %s template" % base_test_template, bos_reboot_nodes,
template_name=base_test_template, use_api=use_api, template_objects=template_objects,
xname_to_nid=xname_to_nid, xname_template_map=xname_template_map)
# Verify slurm reports all test nodes as ready
do_subtest("Verify slurm reports test nodes as ready", verify_initial_slurm_state,
use_api=use_api, slurm_control_xname=slurm_xname, worker_xnames=test_xnames,
xname_to_nid=xname_to_nid)
crus_session_hsm_groups = {
"failed_label": test_hsm_groups["failed"],
"starting_label": test_hsm_groups["starting"],
"upgrading_label": test_hsm_groups["upgrading"] }
def _set_starting_group(target_xnames):
"""
Wrapper to call common.hsm.set_hsm_group_members to set our starting
group's member list to equal the specified xnames
"""
group_name = crus_session_hsm_groups["starting_label"]
node_text = ", ".join(sorted(target_xnames))
if len(target_xnames) > 5:
info("Setting HSM group %s member list to: %s" % (group_name, node_text))
subtest_text = "Setting HSM group %s member list to %d test nodes" % (group_name, len(target_xnames))
else:
subtest_text = "Setting HSM group %s member list to: %s" % (group_name, node_text)
do_subtest(subtest_text, set_hsm_group_members, use_api=use_api, group_name=group_name, xname_list=target_xnames)
def _create_crus_session(target_xnames, step_size, template_name):
"""
First, makes a list of all current BOS sessions.
Then creates a CRUS session with the specified values.
The target_xnames list is just used for test logging purposes, to
describe the CRUS session.
Returns the session_id of the CRUS session, a
dictionary of the CRUS session values, and the collected
BOS session list.
"""
bos_sessions = do_subtest("Getting list of BOS sessions before CRUS session is running",
list_bos_sessions, use_api=use_api)
info("BOS session list: %s" % ", ".join(bos_sessions))
node_text = ", ".join(sorted(target_xnames))
if len(target_xnames) > 5:
info("Creating CRUS session with target nodes: %s" % node_text)
node_text = "%d test nodes" % len(target_xnames)
subtest_text = "Create CRUS session (template: %s, step size: %d, nodes: %s)" % (template_name, step_size, node_text)
crus_session_values = {
"use_api": use_api,
"upgrade_step_size": step_size,
"upgrade_template_id": template_name }
crus_session_values.update(crus_session_hsm_groups)
response_object = do_subtest(subtest_text, create_crus_session, **crus_session_values)
crus_session_id = response_object["upgrade_id"]
return crus_session_id, crus_session_values, bos_sessions
def _wait_verify_delete_crus_session(crus_session_id, crus_session_values, target_xnames, bos_sessions):
"""
Wait for CRUS session to be complete.
Update the xname_template_map to reflect the new expected template for the nodes in the session.
Verify that the CRUS session results look okay.
Delete the CRUS session.
"""
do_subtest("Wait for CRUS session %s to complete" % crus_session_id, monitor_crus_session,
use_api=use_api, upgrade_id=crus_session_id, expected_values=crus_session_values,
bos_sessions=bos_sessions)
# Set new expected template for target xnames
for xn in target_xnames:
xname_template_map[xn] = crus_session_values["upgrade_template_id"]
do_subtest("Verify results of CRUS session %s" % crus_session_id, verify_results_of_crus_session,
use_api=use_api, xname_template_map=xname_template_map, template_objects=template_objects,
xname_to_nid=xname_to_nid, target_xnames=list(target_xnames), **crus_session_hsm_groups)
do_subtest("Delete CRUS session %s" % crus_session_id, delete_crus_session,
use_api=use_api, upgrade_id=crus_session_id, max_wait_for_completion_seconds=300)
# =============================
# =============================
# TEST 1
# =============================
# =============================
# Randomly pick 1 xname
xn = random.choice(test_xnames)
target_xnames = [xn]
# Put it into starting HSM group
_set_starting_group(target_xnames)
# Pick random step size (since we're only dealing with 1 node, it doesn't matter)
ssize = random.randint(1, 10000)
# Create CRUS session
crus_session_id, crus_session_values, bos_sessions = _create_crus_session(target_xnames, ssize, test_template1)
# Wait for it to finish, make sure everything looks good, and delete it
_wait_verify_delete_crus_session(crus_session_id, crus_session_values, target_xnames, bos_sessions)
# =============================
# =============================
# TEST 2
# =============================
# =============================
# Set starting group to all test nodes
target_xnames = test_xnames
_set_starting_group(target_xnames)
# Set step size such that we get at least 2 steps
ssize = len(target_xnames) // 2
if (len(target_xnames) % 2) != 0:
ssize += 1
ssize = min(ssize, max_step_size)
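    # e.g. 5 nodes -> ssize 5//2 + 1 = 3 (steps of 3 and 2 nodes); 4 nodes ->
    # ssize 2 (two steps of 2), before the max_step_size cap is applied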
# Create CRUS session
crus_session_id, crus_session_values, bos_sessions = _create_crus_session(target_xnames, ssize, test_template2)
# Wait for it to finish, make sure everything looks good, and delete it
_wait_verify_delete_crus_session(crus_session_id, crus_session_values, target_xnames, bos_sessions)
# =============================
# =============================
# TEST 3
# =============================
# =============================
# Randomly select a node for the starting group
xn = random.choice(test_xnames)
target_xnames = [xn]
_set_starting_group(target_xnames)
# Pick random step size (since we're only dealing with 1 node, it doesn't matter)
ssize = random.randint(1, 10000)
# Start slurm workload on node
slurm_job_id, slurm_job_stopfile = do_subtest("Start slurm workload on %s" % xn, start_slurm_job,
slurm_control_xname=slurm_xname, worker_xname=xn, xname_to_nid=xname_to_nid, tmpdir=tmpdir)
# Create CRUS session
crus_session_id, crus_session_values, bos_sessions = _create_crus_session([xn], ssize, test_template1)
# Verify that CRUS session is waiting for nodes to quiesce
do_subtest("Verify CRUS session %s is waiting for nodes to quiesce" % crus_session_id,
verify_crus_waiting_for_quiesce, use_api=use_api, crus_session_id=crus_session_id,
expected_values=crus_session_values)
# Stop slurm workload on node
do_subtest("Stop slurm workload on %s" % xn, complete_slurm_job,
slurm_control_xname=slurm_xname, worker_xname=xn,
stopfile_name=slurm_job_stopfile, slurm_job_id=slurm_job_id)
# Wait for CRUS session to finish, make sure everything looks good, and delete it
_wait_verify_delete_crus_session(crus_session_id, crus_session_values, target_xnames, bos_sessions)
# =============================
# =============================
# RESTORE NODES
# =============================
# =============================
# Set starting group to all test nodes plus the node we've been using for slurm
target_xnames = xnames
_set_starting_group(target_xnames)
    # Create CRUS session, capping the reused step size at max_step_size since we
    # are now targeting the full node list (as in TEST 2 above)
    ssize = min(ssize, max_step_size)
    crus_session_id, crus_session_values, bos_sessions = _create_crus_session(target_xnames, ssize, base_test_template)
# Wait for it to finish, make sure everything looks good, and delete it
_wait_verify_delete_crus_session(crus_session_id, crus_session_values, target_xnames, bos_sessions)
# =============================
# =============================
# CLEANUP
# =============================
# =============================
section("Cleaning up")
do_subtest("Delete modified BOS session templates", delete_bos_session_templates, use_api=use_api,
template_names=test_template_names)
do_subtest("Delete VCS repo and org", delete_vcs_repo_and_org, test_variables=test_variables)
do_subtest("Delete CFS configurations", delete_cfs_configs, use_api=use_api, cfs_config_names=test_variables["test_cfs_config_names"])
do_subtest("Delete hsm groups", delete_hsm_groups, use_api=use_api, group_map=test_hsm_groups)
do_subtest("Remove temporary directory", remove_tmpdir, tmpdir=tmpdir)
test_variables["tmpdir"] = None
section("Test passed")
def test_wrapper():
test_variables = {
"test_template_names": list(),
"test_cfs_config_names": list(),
"test_hsm_groups": dict(),
"tmpdir": None,
"test_vcs_org": None,
"test_vcs_repo": None,
"vcs_repo_dir": None }
parse_args(test_variables)
init_logger(test_name=TEST_NAME, verbose=test_variables["verbose"])
info("Starting test")
debug("Arguments: %s" % sys.argv[1:])
debug("test_variables: %s" % str(test_variables))
use_api = test_variables["use_api"]
try:
do_test(test_variables=test_variables)
except Exception as e:
# Adding this here to do cleanup when unexpected errors are hit (and to log those errors)
msg = log_exception_error(e)
section("Attempting cleanup before exiting in failure")
        # dict.get returns None when a key is missing, matching the previous
        # try/except KeyError fallbacks
        test_template_names = test_variables.get("test_template_names")
        test_cfs_config_names = test_variables.get("test_cfs_config_names")
        test_hsm_groups = test_variables.get("test_hsm_groups")
        tmpdir = test_variables.get("tmpdir")
if test_template_names:
info("Attempting to clean up test BOS session templates before exiting")
delete_bos_session_templates(use_api=use_api, template_names=test_template_names, error_cleanup=True)
if test_cfs_config_names:
delete_cfs_configs(use_api=use_api, cfs_config_names=test_cfs_config_names, error_cleanup=True)
delete_vcs_repo_and_org(test_variables=test_variables, error_cleanup=True)
if test_hsm_groups:
info("Attempting to clean up test HSM groups before exiting")
delete_hsm_groups(use_api=use_api, group_map=test_hsm_groups, error_cleanup=True)
        if tmpdir is not None:
remove_tmpdir(tmpdir)
section("Cleanup complete")
error_exit(msg)
if __name__ == '__main__':
test_wrapper()
exit_test()
| avg_line_length: 45.226966 | max_line_length: 138 | alphanum_fraction: 0.673507 | [remaining per-file quality-signal columns omitted]
| hexsha: 3a80351f1ae9d22c12f2dfa0609670916e8b44d0 | size: 3,071 | ext: py | lang: Python
| path: backend/transaction/models.py | repo: elielagmay/react-budgeteer @ 49a25dbd6dd6ea5d8bc93421eefbc12808f585af | licenses: ["Unlicense"]
| stars: 2 (2018-10-23T00:40:53.000Z to 2021-05-31T08:19:40.000Z) | issues: null | forks: null
from decimal import Decimal
from django.core.exceptions import ValidationError
from django.db import models
from app.utils import get_balances
class Transaction(models.Model):
ledger = models.ForeignKey(
'ledger.Ledger',
on_delete=models.PROTECT,
related_name='transactions'
)
date = models.DateTimeField()
payee = models.CharField(max_length=255)
description = models.TextField(blank=True)
class Meta:
get_latest_by = 'date'
ordering = ('-date', )
def __str__(self):
return u'{} - {}{}'.format(
self.date.strftime('%d %b %Y'),
self.payee,
' - {}'.format(self.description) if self.description else ''
)
def is_balanced(self):
entries = self.entries.all()
balance = get_balances(entries, convert=True)
unbalanced = [v for v in balance if v['amount'] != 0]
return len(unbalanced) == 0
is_balanced.boolean = True
def is_cleared(self):
return not self.entries.filter(is_cleared=False).exists()
is_cleared.boolean = True
class Entry(models.Model):
transaction = models.ForeignKey(
'transaction.Transaction',
on_delete=models.CASCADE,
related_name='entries'
)
account = models.ForeignKey(
'account.Account',
on_delete=models.PROTECT,
related_name='entries'
)
commodity = models.ForeignKey(
'commodity.Commodity',
on_delete=models.PROTECT,
related_name='entries'
)
price = models.ForeignKey(
'commodity.Price',
on_delete=models.PROTECT,
related_name='entries',
null=True,
blank=True
)
amount = models.DecimalField(max_digits=32, decimal_places=8)
description = models.TextField(blank=True)
is_cleared = models.BooleanField(default=True)
class Meta:
verbose_name_plural = 'entries'
def __str__(self):
return u'Entry ID:{}'.format(self.id)
def clean(self):
errors = {}
ledger = self.transaction.ledger
if ledger != self.account.category.ledger:
errors['account'] = 'Selected account is invalid'
if ledger != self.commodity.ledger:
errors['commodity'] = 'Selected commodity is invalid'
if self.price is not None and ledger != self.price.primary.ledger:
errors['price'] = 'Selected price is invalid'
if self.price is not None and self.price.primary != self.commodity:
errors['price'] = 'Selected price must match commodity'
if errors:
raise ValidationError(errors)
def get_amount_tuple(self, convert=False):
amount = Decimal(str(self.amount))
commodity = self.commodity
if not amount.is_finite():
raise ValueError('amount is not a finite number')
if convert and self.price is not None:
commodity = self.price.secondary
amount *= self.price.amount
return (commodity.get_quantized_amount(amount), commodity)
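# Illustrative usage sketch (hypothetical fixture objects cash/food/usd; assumes
# app.utils.get_balances sums entry amounts per commodity):
#   txn = Transaction.objects.create(ledger=ledger, date=timezone.now(), payee="Grocer")
#   Entry.objects.create(transaction=txn, account=cash, commodity=usd, amount=Decimal("-10"))
#   Entry.objects.create(transaction=txn, account=food, commodity=usd, amount=Decimal("10"))
#   txn.is_balanced()  # True once the entries sum to zero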
| avg_line_length: 29.528846 | max_line_length: 75 | alphanum_fraction: 0.626506 | [remaining per-file quality-signal columns omitted]
| hexsha: 3a804a776b085e92ef90bbf2391ea52e871ea437 | size: 2,335 | ext: py | lang: Python
| path: src/games/textquiz.py | repo: aleksandrgordienko/melissa-quiz @ 49b165acc9aae0ad84cf751cbeb4f6a27dd5ab0f | licenses: ["MIT"]
| stars: null | issues: null | forks: null
# python-telegram-quiz
# @author: Aleksandr Gordienko
# @site: https://github.com/aleksandrgordienko/melissa-quiz
from random import randint
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Question(Base):
__tablename__ = 'questions'
id = Column(Integer, primary_key=True)
question = Column(String(1000))
answer = Column(String(100))
ask_count = Column(Integer)
class TextQuiz:
"""TextQuiz class"""
def __init__(self, session):
self.questions = {}
self.name = 'textquiz'
self.session = session
for question in self.session.query(Question).all():
self.questions[question.id] = {'question': question.question,
'answer': question.answer,
'ask_count': question.ask_count}
def nextq(self):
"""Generates next question_id, question_type and initial_hint"""
question_id = randint(0, len(self.questions))
question = self.questions[question_id]
question['ask_count'] += 1
self.session.merge(Question(id=question_id,
ask_count=question['ask_count']))
self.session.commit()
return question_id, self.get_initial_hint_mask(question_id)
def get_question(self, question_id):
return self.questions[question_id]['question']
def get_answer(self, question_id):
return self.questions[question_id]['answer']
def answer_is_correct(self, question_id, answer):
return answer.lower() in self.get_answer(question_id).lower()
def get_hint_text(self, question_id, hint_symbol, hint_separator, hint_mask):
out_text = ''
answer = self.get_answer(question_id)
if hint_mask:
for i, c in enumerate(answer):
if hint_mask[i]:
out_text += c
else:
out_text += hint_symbol
out_text += hint_separator
else:
out_text = (hint_symbol + hint_separator) * len(answer)
return out_text
def get_initial_hint_mask(self, question_id):
"""Returns initial hint mask"""
return [False] * len(self.get_answer(question_id))
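# Illustrative: for the answer "cat" and hint_mask [True, False, False],
# get_hint_text(qid, "*", " ", hint_mask) returns "c * * " -- revealed letters
# are kept, hidden ones become the hint symbol.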
| avg_line_length: 33.357143 | max_line_length: 81 | alphanum_fraction: 0.621842 | [remaining per-file quality-signal columns omitted]
| hexsha: 3a818c77d8d52a71bd103be2681594c2e4e919a8 | size: 1,246 | ext: py | lang: Python
| path: Automate the Boring Stuff with Python/readDocx.py | repo: m-barnes/Python @ 0940d5f9b832c28703a32691db287b1361ce6ecc | licenses: ["MIT"]
| stars: null | issues: null | forks: null
import docx
import time
import os
import sys
from os import system
from pprint import pprint
finished = False
def getText(filename):
print(filename)
doc = docx.Document(filename)
fullText = []
for para in doc.paragraphs:
fullText.append(para.text)
pprint(fullText)
def clear():
    # os.system returns a nonzero exit status instead of raising, so check the
    # return code and fall back to 'clear' on non-Windows systems
    if system('cls') != 0:
        system('clear')
while not finished:
def parseFile():
print('The current working directory is ', os.getcwd())
path = input('\nPlease provide the full path to the Word document you wish to parse or press \'enter\' to keep the current directory.\n')
if len(path)==0:
path = os.getcwd()
try:
os.path.abspath(path)
os.chdir(path)
        except OSError:
print('Cannot find that directory. Please wait...')
time.sleep(2)
clear()
parseFile()
try:
filename = input('\nPlease provide the name of the Word document. ')
getText(filename + '.docx')
continueParse = input('\n\n\nWould you like to parse another file? (y)es or (n)o? ').lower()
if continueParse == 'y':
parseFile()
else:
print('Goodbye!')
time.sleep(2)
sys.exit()
    except Exception:  # don't swallow the SystemExit raised by sys.exit() above
print('Cannot find that file. Please try again. Please wait...')
time.sleep(2)
clear()
parseFile()
parseFile()
| avg_line_length: 20.766667 | max_line_length: 139 | alphanum_fraction: 0.652488 | [remaining per-file quality-signal columns omitted]
| hexsha: 3a862072dc82d94cea5c675c09cf65fbf2cd377c | size: 4,510 | ext: py | lang: Python
| path: concord/ext/audio/middleware.py | repo: nariman/concord-ext-audio @ c7662507f641bfdba277509838433dbb24fe11a3 (forks repo: narimanized/concord-ext-audio) | licenses: ["MIT"]
| stars: null | issues: 14 (2019-02-19T03:14:07.000Z to 2021-06-25T15:15:55.000Z) | forks: null
"""
The MIT License (MIT)
Copyright (c) 2017-2018 Nariman Safiulin
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import asyncio
from typing import Callable, Optional
import discord
from concord.context import Context
from concord.middleware import Middleware, MiddlewareState
from concord.ext.audio.state import State
class Join(Middleware):
"""Middleware for joining to the user's voice channel."""
async def run(self, *_, ctx: Context, next: Callable, **kw): # noqa: D102
state = MiddlewareState.get_state(ctx, State)
if state is None:
return
message = ctx.kwargs["message"]
author = message.author
channel = message.channel
if not isinstance(author, discord.Member):
await channel.send("You're not a member of this guild.")
return
if not author.voice:
await channel.send("You're not in a voice channel.")
return
#
# Only guilds are allowed.
voice_client = channel.guild.voice_client
audio_state = state.get_audio_state(channel.guild)
voice_channel = author.voice.channel
if voice_client is None:
try:
voice_client = await voice_channel.connect()
except asyncio.TimeoutError:
await channel.send(
"Unfortunately, something wrong happened and I hasn't "
"joined your channel in a time."
)
return
await channel.send("Connected.")
elif voice_client.channel != voice_channel:
await voice_client.move_to(voice_channel)
await channel.send("Moved.")
else:
await channel.send("I'm already in your voice channel.")
#
audio_state.set_voice_client(voice_client)
class Leave(Middleware):
"""Middleware for leaving currently connected voice channel."""
async def run(self, *_, ctx: Context, next: Callable, **kw): # noqa: D102
message = ctx.kwargs["message"]
author = message.author
channel = message.channel
if not isinstance(author, discord.Member):
await channel.send("You're not a member of this guild.")
return
#
# Only guilds are allowed.
voice_client = channel.guild.voice_client
if voice_client is None:
await message.channel.send("I'm not connected to voice channel.")
return
#
# Voice client will be removed from audio state as well.
await voice_client.disconnect(force=True)
await message.channel.send("Disconnected.")
class Volume(Middleware):
"""Middleware for changing the master volume."""
async def run(
self,
*_,
ctx: Context,
next: Callable,
volume: Optional[str] = None,
**kw,
): # noqa: D102
state = MiddlewareState.get_state(ctx, State)
if state is None:
return
message = ctx.kwargs["message"]
channel = message.channel
# Only guilds are allowed.
audio_state = state.get_audio_state(channel.guild)
if volume is not None:
try:
audio_state.master_volume = float(volume)
except ValueError:
await channel.send("Only float values are possible.")
return
#
await channel.send(
f"Master volume is set to {audio_state.master_volume}"
)
| avg_line_length: 34.166667 | max_line_length: 80 | alphanum_fraction: 0.645455 | [remaining per-file quality-signal columns omitted]
| hexsha: 3a866ce737d90dd7710156bcd56f1d122772201c | size: 28,704 | ext: py | lang: Python
| path: tf_rl/common/utils.py | repo: Rowing0914/TF_RL @ 68e5e9a23e38ed2d8ac5f97d380567b919a3d2e7 | licenses: ["MIT"]
| stars: 23 (2019-04-04T17:34:56.000Z to 2021-12-14T19:34:10.000Z) | issues: null | forks: 3 (2019-07-17T23:56:36.000Z to 2022-03-13T03:55:21.000Z)
import tensorflow as tf
import numpy as np
import os, datetime, itertools, shutil, gym, sys
from tf_rl.common.visualise import plot_Q_values
from tf_rl.common.wrappers import MyWrapper, CartPole_Pixel, wrap_deepmind, make_atari
"""
TF basic Utility functions
"""
def eager_setup():
"""
    it enables eager execution in tensorflow with a config that allows flexible
    access to a GPU from multiple python scripts
:return:
"""
config = tf.compat.v1.ConfigProto(allow_soft_placement=True,
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
config.gpu_options.allow_growth = True
tf.compat.v1.enable_eager_execution(config=config)
tf.compat.v1.enable_resource_variables()
"""
Common Utility functions
"""
def get_alg_name():
"""Returns the name of the algorithm.
    We assume that the directory architecture for that algo looks like below
- Atari: `examples/algo_name/algo_name_eager.py`
- Cartpole: `examples/algo_name/algo_name_eager_cartpole.py`
* where algo_name must be uppercase/capital letters!!
"""
alg_name = sys.argv[0].rsplit("/")[-1].rsplit(".")[0].replace("_eager", "")
return alg_name
def invoke_agent_env(params, alg):
"""Returns the wrapped env and string name of agent, then Use `eval(agent)` to activate it from main script
"""
if params.mode == "Atari":
        env = wrap_deepmind(make_atari("{}NoFrameskip-v4".format(params.env_name)),
                            skip_frame_k=params.skip_frame_k)
if params.debug_flg:
agent = "{}_debug".format(alg)
else:
agent = "{}".format(alg)
else:
agent = "{}".format(alg)
if params.mode == "CartPole":
env = MyWrapper(gym.make("CartPole-v0"))
elif params.mode == "CartPole-p":
env = CartPole_Pixel(gym.make("CartPole-v0"))
return agent, env
def create_log_model_directory(params, alg):
"""
Create a directory for log/model
this is compatible with Google colab and can connect to MyDrive through the authorisation step
:param params:
:return:
"""
if params.mode in ["Atari", "atari", "MuJoCo", "mujoco"]:
second_name = params.env_name
else:
second_name = params.mode
now = datetime.datetime.now()
if params.google_colab:
# mount the MyDrive on google drive and create the log directory for saving model and logging using tensorboard
params.log_dir, params.model_dir, params.log_dir_colab, params.model_dir_colab = _setup_on_colab(alg,
params.mode)
else:
if params.debug_flg:
params.log_dir = "../../logs/logs/" + now.strftime("%Y%m%d-%H%M%S") + "-{}_{}_debug/".format(alg,
second_name)
params.model_dir = "../../logs/models/" + now.strftime("%Y%m%d-%H%M%S") + "-{}_{}_debug/".format(alg,
second_name)
else:
params.log_dir = "../../logs/logs/" + now.strftime("%Y%m%d-%H%M%S") + "-{}_{}/".format(alg, second_name)
params.model_dir = "../../logs/models/" + now.strftime("%Y%m%d-%H%M%S") + "-{}_{}/".format(alg, second_name)
return params
def create_loss_func(loss_name="mse"):
if loss_name == "huber":
loss_fn = tf.compat.v1.losses.huber_loss
elif loss_name == "mse":
loss_fn = tf.compat.v1.losses.mean_squared_error
else:
assert False, "Choose the loss_fn from either huber or mse"
return loss_fn
def get_ready(params):
"""
Print out the content of params
:param params:
:return:
"""
for key, item in vars(params).items():
print(key, " : ", item)
def create_checkpoint(model, optimizer, model_dir):
"""
Create a checkpoint for managing a model
:param model:
:param optimizer:
:param model_dir:
:return:
"""
checkpoint_dir = model_dir
check_point = tf.train.Checkpoint(optimizer=optimizer,
model=model,
optimizer_step=tf.compat.v1.train.get_or_create_global_step())
manager = tf.train.CheckpointManager(check_point, checkpoint_dir, max_to_keep=3)
# try re-loading the previous training progress!
try:
print("Try loading the previous training progress")
check_point.restore(manager.latest_checkpoint)
assert tf.compat.v1.train.get_global_step().numpy() != 0
print("===================================================\n")
print("Restored the model from {}".format(checkpoint_dir))
print("Currently we are on time-step: {}".format(tf.compat.v1.train.get_global_step().numpy()))
print("\n===================================================")
    except Exception:
print("===================================================\n")
print("Previous Training files are not found in Directory: {}".format(checkpoint_dir))
print("\n===================================================")
return manager
def _setup_on_colab(alg_name, env_name):
"""
Mount MyDrive to current instance through authentication of Google account
Then use it as a backup of training related files
:param env_name:
:return:
"""
# mount your drive on google colab
from google.colab import drive
drive.mount("/content/gdrive")
log_dir = "/content/TF_RL/logs/logs/{}/{}".format(alg_name, env_name)
model_dir = "/content/TF_RL/logs/models/{}/{}".format(alg_name, env_name)
log_dir_colab = "/content/gdrive/My Drive/logs/logs/{}/{}".format(alg_name, env_name)
model_dir_colab = "/content/gdrive/My Drive/logs/models/{}/{}".format(alg_name, env_name)
# create the logs directory under the root dir
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
if not os.path.isdir(model_dir):
os.makedirs(model_dir)
# if the previous directory existed in My Drive, then we would continue training on top of the previous training
if os.path.isdir(log_dir_colab):
print("=== {} IS FOUND ===".format(log_dir_colab))
copy_dir(log_dir_colab, log_dir, verbose=True)
else:
print("=== {} IS NOT FOUND ===".format(log_dir_colab))
os.makedirs(log_dir_colab)
print("=== FINISHED CREATING THE DIRECTORY ===")
if os.path.isdir(model_dir_colab):
print("=== {} IS FOUND ===".format(model_dir_colab))
copy_dir(model_dir_colab, model_dir, verbose=True)
else:
print("=== {} IS NOT FOUND ===".format(model_dir_colab))
os.makedirs(model_dir_colab)
print("=== FINISHED CREATING THE DIRECTORY ===")
return log_dir, model_dir, log_dir_colab, model_dir_colab
class AnnealingSchedule:
"""
Scheduling the gradually decreasing value, e.g., epsilon or beta params
"""
def __init__(self, start=1.0, end=0.1, decay_steps=500, decay_type="linear"):
self.start = start
self.end = end
self.decay_steps = decay_steps
self.annealed_value = np.linspace(start, end, decay_steps)
self.decay_type = decay_type
def old_get_value(self, timestep):
"""
Deprecated
:param timestep:
:return:
"""
if self.decay_type == "linear":
return self.annealed_value[min(timestep, self.decay_steps) - 1]
# don't use this!!
elif self.decay_type == "curved":
if timestep < self.decay_steps:
return self.start * 0.9 ** (timestep / self.decay_steps)
else:
return self.end
def get_value(self):
timestep = tf.train.get_or_create_global_step() # we are maintaining the global-step in train.py so it is accessible
if self.decay_type == "linear":
return self.annealed_value[min(timestep.numpy(), self.decay_steps) - 1]
# don't use this!!
elif self.decay_type == "curved":
if timestep.numpy() < self.decay_steps:
return self.start * 0.9 ** (timestep.numpy() / self.decay_steps)
else:
return self.end
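# Illustrative: AnnealingSchedule(start=1.0, end=0.1, decay_steps=4) pre-computes
# annealed_value = [1.0, 0.7, 0.4, 0.1], i.e. a linear decay from start to end
# indexed by the (clipped) global step.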
def copy_dir(src, dst, symlinks=False, ignore=None, verbose=False):
"""
    copy all contents of the `src` directory to the `dst` directory
    Usage:
    ```python
    copy_dir("./aa/", "./bb/")
    ```
"""
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if verbose:
print("From:{}, To: {}".format(s, d))
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def delete_files(folder, verbose=False):
"""
    delete all contents of the `folder` directory
    Usage:
    ```python
    delete_files("./bb/")
```
"""
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
if verbose:
print("{} has been deleted".format(file_path))
except Exception as e:
print(e)
class RunningMeanStd:
"""
Running Mean and Standard Deviation for normalising the observation!
This is mainly used in MuJoCo experiments, e.g. DDPG!
Formula:
- Normalisation: y = (x-mean)/std
"""
def __init__(self, shape, clip_range=5, epsilon=1e-2):
self.size = shape
self.epsilon = epsilon
self.clip_range = clip_range
self._sum = 0.0
self._sumsq = np.ones(self.size, np.float32) * epsilon
self._count = np.ones(self.size, np.float32) * epsilon
self.mean = self._sum / self._count
self.std = np.sqrt(np.maximum(self._sumsq / self._count - np.square(self.mean), np.square(self.epsilon)))
def update(self, x):
"""
        recompute the mean and std from the given batch (the statistics are
        replaced by this batch's values, not accumulated across calls)
:param x: can be observation, reward, or action!!
:return:
"""
x = x.reshape(-1, self.size)
self._sum = x.sum(axis=0)
self._sumsq = np.square(x).sum(axis=0)
self._count = np.array([len(x)], dtype='float64')
self.mean = self._sum / self._count
self.std = np.sqrt(np.maximum(self._sumsq / self._count - np.square(self.mean), np.square(self.epsilon)))
def normalise(self, x):
"""
        Normalise the input with the maintained mean and std, clipping to
        [-clip_range, clip_range]; note this does not update the statistics.
:param x:
:return:
"""
result = np.clip((x - self.mean) / self.std, -self.clip_range, self.clip_range)
return result
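# Usage sketch (illustrative values):
#   rms = RunningMeanStd(shape=3)
#   rms.update(np.random.randn(100, 3))           # stats now reflect this batch
#   normed = rms.normalise(np.random.randn(3))    # result clipped to [-5, 5]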
def test(sess, agent, env, params):
xmax = agent.num_action
ymax = 3
print("\n ===== TEST STARTS: {0} Episodes ===== \n".format(params.test_episodes))
for i in range(params.test_episodes):
state = env.reset()
for t in itertools.count():
env.render()
q_values = sess.run(agent.pred, feed_dict={agent.state: state.reshape(params.state_reshape)})[0]
action = np.argmax(q_values)
plot_Q_values(q_values, xmax=xmax, ymax=ymax)
obs, reward, done, _ = env.step(action)
state = obs
if done:
print("Episode finished after {} timesteps".format(t + 1))
break
return
class logger:
def __init__(self, params):
self.params = params
self.prev_update_step = 0
def logging(self, time_step, current_episode, exec_time, reward_buffer, loss, epsilon, cnt_action):
"""
Logging function
:param time_step:
        :param current_episode:
        :param exec_time:
        :param reward_buffer:
        :param loss:
        :param epsilon:
        :param cnt_action:
:return:
"""
cnt_actions = dict((x, cnt_action.count(x)) for x in set(cnt_action))
episode_steps = time_step - self.prev_update_step
        # remaining seconds ~= remaining_steps * (exec_time / steps_this_episode)
remaining_time = str(datetime.timedelta(
seconds=(self.params.num_frames - time_step) * exec_time / (episode_steps)))
print(
"{0}/{1}: Ep: {2}({3:.1f} fps), Remaining: {4}, (R) {5} Ep => [MEAN: {6:.3f}, MAX: {7:.3f}], (last ep) Loss: {8:.3f}, Eps: {9:.3f}, Act: {10}".format(
time_step, self.params.num_frames, current_episode, episode_steps / exec_time, remaining_time,
self.params.reward_buffer_ep, np.mean(reward_buffer), np.max(reward_buffer), loss,
epsilon, cnt_actions
))
self.prev_update_step = time_step
"""
Algorithm Specific Utility functions
"""
class her_sampler:
# borrow from: https://github.com/TianhongDai/hindsight-experience-replay/blob/master/her.py
def __init__(self, replay_strategy, replay_k, reward_func=None):
self.replay_strategy = replay_strategy
self.replay_k = replay_k
if self.replay_strategy == 'future':
self.future_p = 1 - (1. / (1 + replay_k))
else:
self.future_p = 0
self.reward_func = reward_func
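        # With the 'future' strategy, replay_k controls relabelling frequency:
        # e.g. replay_k = 4 gives future_p = 1 - 1/5 = 0.8, so ~80% of sampled
        # transitions get their goal replaced by a goal actually achieved later
        # in the same episode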
def sample_her_transitions(self, episode_batch, batch_size_in_transitions):
T = episode_batch['actions'].shape[1]
rollout_batch_size = episode_batch['actions'].shape[0]
batch_size = batch_size_in_transitions
# select which rollouts and which timesteps to be used
episode_idxs = np.random.randint(0, rollout_batch_size, batch_size)
t_samples = np.random.randint(T, size=batch_size)
transitions = {key: episode_batch[key][episode_idxs, t_samples].copy() for key in episode_batch.keys()}
# her idx
her_indexes = np.where(np.random.uniform(size=batch_size) < self.future_p)
future_offset = np.random.uniform(size=batch_size) * (T - t_samples)
future_offset = future_offset.astype(int)
future_t = (t_samples + 1 + future_offset)[her_indexes]
# replace go with achieved goal
future_ag = episode_batch['ag'][episode_idxs[her_indexes], future_t]
transitions['g'][her_indexes] = future_ag
# to get the params to re-compute reward
transitions['r'] = np.expand_dims(self.reward_func(transitions['ag_next'], transitions['g'], None), 1)
transitions = {k: transitions[k].reshape(batch_size, *transitions[k].shape[1:]) for k in transitions.keys()}
return transitions
def action_postprocessing(action, params):
action += params.noise_eps * params.max_action * np.random.randn(*action.shape)
action = np.clip(action, -params.max_action, params.max_action)
# random actions...
random_actions = np.random.uniform(low=-params.max_action,
high=params.max_action,
size=params.num_action)
# choose if use the random actions
action += np.random.binomial(1, params.random_eps, 1)[0] * (random_actions - action)
return action
def state_unpacker(state):
"""
Given the dictionary of state, it unpacks and returns processed items as numpy.ndarray
Sample input:
{'observation': array([ 1.34193265e+00, 7.49100375e-01, 5.34722720e-01, 1.30179339e+00, 8.86399624e-01,
4.24702091e-01, -4.01392554e-02, 1.37299250e-01, -1.10020629e-01, 2.91834773e-06,
-4.72661656e-08, -3.85214084e-07, 5.92637053e-07, 1.12208536e-13, -7.74656889e-06,
-7.65027248e-08, 4.92570535e-05, 1.88857148e-07, -2.90549459e-07, -1.18156686e-18,
7.73934983e-06, 7.18103404e-08, -2.42928780e-06, 4.93607091e-07, 1.70999820e-07]),
'achieved_goal': array([1.30179339, 0.88639962, 0.42470209]),
'desired_goal': array([1.4018907 , 0.62021174, 0.4429846 ])}
:param state:
:return:
"""
obs = np.array(state["observation"])
achieved_goal = np.array(state["achieved_goal"])
desired_goal = np.array(state["desired_goal"])
remaining_goal = simple_goal_subtract(desired_goal, achieved_goal)
return obs, achieved_goal, desired_goal, remaining_goal
def simple_goal_subtract(goal, achieved_goal):
"""
We subtract the achieved goal from the desired one to see how much we are still far from the desired position
"""
assert goal.shape == achieved_goal.shape
return goal - achieved_goal
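# e.g. with the sample in state_unpacker above: desired [1.4019, 0.6202, 0.4430]
# minus achieved [1.3018, 0.8864, 0.4247] leaves roughly [0.10, -0.27, 0.02]
# still to cover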
ALIVE_BONUS = 1.0
def get_distance(env_name):
"""
This returns the distance according to the implementation of env
For instance, halfcheetah and humanoid have the different way to return the distance
so that we need to deal with them accordingly.
:return: func to calculate the distance(float)
"""
obj_name = env_name.split("-")[0]
    if "Ant" in obj_name:
        def func(action, reward, info):
            # https://github.com/openai/gym/blob/master/gym/envs/mujoco/ant.py#L14
            distance = info["reward_forward"]
            return distance
    elif "HalfCheetah" in obj_name:
        def func(action, reward, info):
            # https://github.com/openai/gym/blob/master/gym/envs/mujoco/half_cheetah.py
            distance = info["reward_run"]
            return distance
    elif "Hopper" in obj_name:
        def func(action, reward, info):
            # https://github.com/openai/gym/blob/master/gym/envs/mujoco/hopper.py#L15
            distance = (reward - ALIVE_BONUS) + 1e-3 * np.square(action).sum()
            return distance
    elif "Humanoid" in obj_name:
        def func(action, reward, info):
            # https://github.com/openai/gym/blob/master/gym/envs/mujoco/humanoid.py#L30
            distance = info["reward_linvel"] / 1.25
            return distance
    elif "Swimmer" in obj_name:
        def func(action, reward, info):
            # https://github.com/openai/gym/blob/master/gym/envs/mujoco/swimmer.py#L15
            distance = info["reward_fwd"]
            return distance
    elif "Walker2d" in obj_name:
        def func(action, reward, info):
            # https://github.com/openai/gym/blob/master/gym/envs/mujoco/walker2d.py#L16 -> original version
            distance = (reward - ALIVE_BONUS) + 1e-3 * np.square(action).sum()
            # https://github.com/openai/gym/blob/master/gym/envs/mujoco/walker2d_v3.py#L90 -> version 3.0
            # distance = info["x_velocity"]
            return distance
    elif "Centipede" in obj_name:
        def func(action, reward, info):
            distance = info["reward_forward"]
            return distance
    else:
        assert False, "This env: {} is not supported yet.".format(env_name)
return func
"""
TODO: I think I will remove this.
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
===== Tracker is A class for storing iteration-specific metrics. ====
"""
class Tracker(object):
"""A class for storing iteration-specific metrics.
The internal format is as follows: we maintain a mapping from keys to lists.
Each list contains all the values corresponding to the given key.
For example, self.data_lists['train_episode_returns'] might contain the
per-episode returns achieved during this iteration.
Attributes:
data_lists: dict mapping each metric_name (str) to a list of said metric
across episodes.
"""
def __init__(self):
self.data_lists = {}
def append(self, data_pairs):
"""Add the given values to their corresponding key-indexed lists.
Args:
data_pairs: A dictionary of key-value pairs to be recorded.
"""
for key, value in data_pairs.items():
if key not in self.data_lists:
self.data_lists[key] = []
self.data_lists[key].append(value)
"""
Update methods
"""
def sync_main_target(sess, target, source):
"""
Synchronise the models
from Denny Britz's excellent RL repo
https://github.com/dennybritz/reinforcement-learning/blob/master/DQN/Double%20DQN%20Solution.ipynb
:param main:
:param target:
:return:
"""
source_params = [t for t in tf.trainable_variables() if t.name.startswith(source.scope)]
source_params = sorted(source_params, key=lambda v: v.name)
target_params = [t for t in tf.trainable_variables() if t.name.startswith(target.scope)]
target_params = sorted(target_params, key=lambda v: v.name)
update_ops = []
for target_w, source_w in zip(target_params, source_params):
op = target_w.assign(source_w)
update_ops.append(op)
sess.run(update_ops)
def soft_target_model_update(sess, target, source, tau=1e-2):
"""
Soft update model parameters.
target = tau * source + (1 - tau) * target
:param main:
:param target:
:param tau:
:return:
"""
source_params = [t for t in tf.trainable_variables() if t.name.startswith(source.scope)]
source_params = sorted(source_params, key=lambda v: v.name)
target_params = [t for t in tf.trainable_variables() if t.name.startswith(target.scope)]
target_params = sorted(target_params, key=lambda v: v.name)
update_ops = []
for target_w, source_w in zip(target_params, source_params):
# target = tau * source + (1 - tau) * target
op = target_w.assign(tau * source_w + (1 - tau) * target_w)
update_ops.append(op)
sess.run(update_ops)
@tf.contrib.eager.defun(autograph=False)
def soft_target_model_update_eager(target, source, tau=1e-2):
"""
Soft update model parameters.
target = tau * source + (1 - tau) * target
:param main:
:param target:
:param tau:
:return:
"""
for param, target_param in zip(source.weights, target.weights):
target_param.assign(tau * param + (1 - tau) * target_param)
"""
Gradient Clipping
"""
def gradient_clip_fn(flag=None):
"""
given a flag, create the clipping function and returns it as a function
currently it supports:
- by_value
- norm
- None
:param flag:
:return:
"""
if flag == "":
def _func(grads):
return grads
elif flag == "by_value":
def _func(grads):
grads = [ClipIfNotNone(grad, -1., 1.) for grad in grads]
return grads
elif flag == "norm":
def _func(grads):
grads, _ = tf.clip_by_global_norm(grads, 10.0)
return grads
else:
assert False, "Choose the gradient clipping function from by_value, norm, or nothing!"
return _func
def ClipIfNotNone(grad, _min, _max):
"""
Reference: https://stackoverflow.com/a/39295309
:param grad:
:return:
"""
if grad is None:
return grad
return tf.clip_by_value(grad, _min, _max)
"""
Test Methods
"""
def eval_Agent(agent, env, n_trial=1):
"""
Evaluate the trained agent!
:return:
"""
all_rewards = list()
print("=== Evaluation Mode ===")
for ep in range(n_trial):
state = env.reset()
done = False
episode_reward = 0
while not done:
# epsilon-greedy for evaluation using a fixed epsilon of 0.05(Nature does this!)
if np.random.uniform() < 0.05:
action = np.random.randint(agent.num_action)
else:
action = np.argmax(agent.predict(state))
next_state, reward, done, _ = env.step(action)
state = next_state
episode_reward += reward
all_rewards.append(episode_reward)
tf.contrib.summary.scalar("Evaluation Score", episode_reward, step=agent.index_timestep)
print("| Ep: {}/{} | Score: {} |".format(ep + 1, n_trial, episode_reward))
# if this is running on Google Colab, we would store the log/models to mounted MyDrive
if agent.params.google_colab:
delete_files(agent.params.model_dir_colab)
delete_files(agent.params.log_dir_colab)
copy_dir(agent.params.log_dir, agent.params.log_dir_colab)
copy_dir(agent.params.model_dir, agent.params.model_dir_colab)
if n_trial > 2:
print("=== Evaluation Result ===")
all_rewards = np.array([all_rewards])
print("| Max: {} | Min: {} | STD: {} | MEAN: {} |".format(np.max(all_rewards), np.min(all_rewards),
np.std(all_rewards), np.mean(all_rewards)))
def eval_Agent_DDPG(env, agent, n_trial=1):
"""
Evaluate the trained agent with the recording of its behaviour
:return:
"""
all_distances, all_rewards, all_actions = list(), list(), list()
distance_func = get_distance(agent.params.env_name) # create the distance measure func
print("=== Evaluation Mode ===")
for ep in range(n_trial):
env.record_start()
state = env.reset()
done = False
episode_reward = 0
while not done:
action = agent.eval_predict(state)
# scale for execution in env (in DDPG, every action is clipped between [-1, 1] in agent.predict)
next_state, reward, done, info = env.step(action * env.action_space.high)
distance = distance_func(action, reward, info)
all_actions.append(action.mean() ** 2) # Mean Squared of action values
all_distances.append(distance)
state = next_state
episode_reward += reward
all_rewards.append(episode_reward)
tf.contrib.summary.scalar("Evaluation Score", episode_reward, step=agent.index_timestep)
print("| Ep: {}/{} | Score: {} |".format(ep + 1, n_trial, episode_reward))
env.record_end()
return all_rewards, all_distances, all_actions
def eval_Agent_TRPO(agent, env, n_trial=1):
"""
Evaluate the trained agent!
:return:
"""
all_rewards = list()
print("=== Evaluation Mode ===")
for ep in range(n_trial):
state = env.reset()
done = False
episode_reward = 0
while not done:
action = agent.predict(state)
# scale for execution in env (in DDPG, every action is clipped between [-1, 1] in agent.predict)
next_state, reward, done, _ = env.step(action)
state = next_state
episode_reward += reward
all_rewards.append(episode_reward)
tf.contrib.summary.scalar("Evaluation Score", episode_reward, step=agent.index_timestep)
print("| Ep: {}/{} | Score: {} |".format(ep + 1, n_trial, episode_reward))
if n_trial > 2:
print("=== Evaluation Result ===")
all_rewards = np.array([all_rewards])
print("| Max: {} | Min: {} | STD: {} | MEAN: {} |".format(np.max(all_rewards), np.min(all_rewards),
np.std(all_rewards), np.mean(all_rewards)))
def eval_Agent_HER(agent, env, n_trial=1):
"""
Evaluate the trained agent!
:return:
"""
successes = list()
for ep in range(n_trial):
state = env.reset()
# obs, achieved_goal, desired_goal in `numpy.ndarray`
obs, ag, dg, rg = state_unpacker(state)
success = list()
for ts in range(agent.params.num_steps):
# env.render()
action = agent.predict(obs, dg)
# action = action_postprocessing(action, agent.params)
next_state, reward, done, info = env.step(action)
success.append(info.get('is_success'))
# obs, achieved_goal, desired_goal in `numpy.ndarray`
next_obs, next_ag, next_dg, next_rg = state_unpacker(next_state)
obs = next_obs
dg = next_dg
successes.append(success)
return np.mean(np.array(successes))
| avg_line_length: 35.745953 | max_line_length: 162 | alphanum_fraction: 0.610089 | [remaining per-file quality-signal columns omitted]
| hexsha: 3a8780a44ac5da348e337c07269fb06faa67e8cd | size: 2,284 | ext: py | lang: Python
| path: common/serializers.py | repo: kollad/turbo-ninja @ 9c3f66b2af64aec01f522d19b309cfdd723e67cf | licenses: ["MIT"]
| stars: null | issues: 1 (2017-12-14T05:35:38.000Z to 2017-12-14T05:35:38.000Z) | forks: null
from collections import namedtuple, OrderedDict
import json
import numpy as np
__author__ = 'kollad'
def isnamedtuple(obj):
"""Heuristic check if an object is a namedtuple."""
return isinstance(obj, tuple) \
and hasattr(obj, "_fields") \
and hasattr(obj, "_asdict") \
and callable(obj._asdict)
def serialize(data):
if data is None or isinstance(data, (bool, int, float, str)):
return data
if isinstance(data, list):
return [serialize(val) for val in data]
if isinstance(data, OrderedDict):
return {"py/collections.OrderedDict":
[[serialize(k), serialize(v)] for k, v in data.items()]}
if isnamedtuple(data):
return {"py/collections.namedtuple": {
"type": type(data).__name__,
"fields": list(data._fields),
"values": [serialize(getattr(data, f)) for f in data._fields]}}
if isinstance(data, dict):
if all(isinstance(k, str) for k in data):
return {k: serialize(v) for k, v in data.items()}
return {"py/dict": [[serialize(k), serialize(v)] for k, v in data.items()]}
if isinstance(data, tuple):
return {"py/tuple": [serialize(val) for val in data]}
if isinstance(data, set):
return {"py/set": [serialize(val) for val in data]}
if isinstance(data, np.ndarray):
return {"py/numpy.ndarray": {
"values": data.tolist(),
"dtype": str(data.dtype)}}
raise TypeError("Type %s not data-serializable" % type(data))
def restore(dct):
if "py/dict" in dct:
return dict(dct["py/dict"])
if "py/tuple" in dct:
return tuple(dct["py/tuple"])
if "py/set" in dct:
return set(dct["py/set"])
if "py/collections.namedtuple" in dct:
data = dct["py/collections.namedtuple"]
return namedtuple(data["type"], data["fields"])(*data["values"])
if "py/numpy.ndarray" in dct:
data = dct["py/numpy.ndarray"]
return np.array(data["values"], dtype=data["dtype"])
if "py/collections.OrderedDict" in dct:
return OrderedDict(dct["py/collections.OrderedDict"])
return dct
def data_to_json(data):
return json.dumps(serialize(data))
def json_to_data(s):
return json.loads(s, object_hook=restore)
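# Round-trip sketch (illustrative):
#   blob = data_to_json({"point": (1, 2), "tags": {"a", "b"}})
#   json_to_data(blob)  # -> {'point': (1, 2), 'tags': {'a', 'b'}}
# Tuples and sets survive the round trip via the "py/..." type markers.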
| avg_line_length: 34.606061 | max_line_length: 83 | alphanum_fraction: 0.609019 | [remaining per-file quality-signal columns omitted]
| hexsha: 3a8812b8a7ce8889a96abd8e38c4d8b8f1956ab6 | size: 1,079 | ext: py | lang: Python
| path: setup.py | repo: mjw99/Musketeer @ 0299a7974ad90c09d8d9206fcf862e45f9fddf30 | licenses: ["MIT"]
| stars: null | issues: null | forks: null
import setuptools
with open("README.md", encoding="utf-8") as readmeFile:
long_description = readmeFile.read()
setuptools.setup(
name="musketeer",
version="0.0.1",
author="Daniil Soloviev",
author_email="dos23@cam.ac.uk",
description="A tool for fitting data from titration experiments.",
long_description=long_description,
long_description_content_type='text/markdown',
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Chemistry"
],
url="https://github.com/daniilS/Musketeer",
packages=["musketeer"],
package_data={"": ["*.png"]},
include_package_data=True,
install_requires=[
"numpy",
"scipy",
"matplotlib",
"ttkbootstrap",
"tkscrolledframe",
"ttkwidgets"
],
python_requires=">=3"
)
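# Build/install sketch (illustrative): run `pip install .` from the project
# root, or `python -m build` (requires the 'build' package) for sdist + wheel.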
| avg_line_length: 29.162162 | max_line_length: 70 | alphanum_fraction: 0.624652 | [remaining per-file quality-signal columns omitted]
| hexsha: 3a89f586494444a77daa3b34a1bc45b72a73f85e | size: 16,338 | ext: py | lang: Python
| path: EvolutiveStrategies.py | repo: ignacioct/GeneticAlgorithms @ 6a92c3d5ec6f2796333576d93c3b6b421055b7a4 | licenses: ["MIT"]
| stars: 4 (2020-11-26T16:18:23.000Z to 2021-06-28T08:43:35.000Z) | issues: null | forks: null
import copy
import math
import operator
import random
import sys
from concurrent import futures
import numpy as np
import requests
class FitnessFunctionCaller:
"""Class for returning the fitness function of an individual."""
def __init__(self, *args):
        # store the functional parts (motor angles) on the instance so call()
        # can use them; covers the full case with 10 motors
        self.functional_parts = list(args)
def call(self) -> float:
"""Returns the fitness function"""
        return 1  # Fitness function (placeholder value)
class Individual:
"""Candidate solution to the problem. Made by a functional value and a variance."""
def __init__(self, is10, **kwargs):
functional = kwargs.get("functional", None)
variance = kwargs.get("variance", None)
self.is10 = is10
if is10 is False:
self.motorNumber = 4
else:
self.motorNumber = 10
if len(kwargs) == 0:
self.functional = [
np.random.uniform(-180, 181) for _ in range(self.motorNumber)
]
self.variance = [
np.random.uniform(100, 360) for _ in range(self.motorNumber)
]
else:
self.functional = functional
self.variance = variance
        self.fitness = sys.float_info.max  # arbitrarily high initial value
def update_fitness(self, incoming):
"""Update fitness function"""
self.fitness = incoming
def update_variance(self, incoming):
"""Update variance function"""
for i in range(self.motorNumber):
self.variance[i] = incoming[i]
class EvolutiveStrategyOneIndividual:
"""Evolution strategy made only one solution with mutation."""
def __init__(self, c, is10):
self.population = 1
self.pool = []
for _ in range(self.population): # reusable for bigger populations
indv = Individual(is10)
self.pool.append(indv)
self.successes = [] # 1 if improves, otherwise 0
self.psi = (
self.successes.count(1) / 10
) # ratio of improvement in the last 10 generations
self.c = c # coefficient for 1/5 rule
self.evaluations = 0
        self.lastFitness = sys.float_info.max  # arbitrarily high initial value
def mutation(self):
"""A temporal solution is produced, being the second individual the result of the mutation"""
# Creating temporal dictionaries
self.temporalPool = []
temporal_functional = []
temporal_variance = []
for i in range(self.pool[0].motorNumber):
# Functional mutation
temporal_functional.append(
self.pool[0].functional[i]
+ np.random.normal(scale=self.pool[0].variance[i])
)
temp_indv = Individual(
is10=self.pool[0].is10,
functional=temporal_functional,
variance=self.pool[0].variance,
)
self.temporalPool.append(temp_indv)
def evaluation(self):
"""Selecting the best of the two individual and evaluating them"""
# Getting the fitness evaluations of the former individual and the mutated one
formerIndividualCaller = FitnessFunctionCaller(*(i for i in self.pool[0].functional))
temporalIndividualCaller = FitnessFunctionCaller(
*(i for i in self.temporalPool[0].functional)
)
formerIndividualFitness = formerIndividualCaller.call()
temporalIndividualFitness = temporalIndividualCaller.call()
self.evaluations += 2
# formerBetter is True if the mutation did not improve the fitness over the father
if formerIndividualFitness <= temporalIndividualFitness:
formerBetter = True
else:
formerBetter = False
# bestFitness in between former and temporal
bestFitness = min(formerIndividualFitness, temporalIndividualFitness)
# If the child did improved, we change the pool to the temporal pool
if formerBetter is False:
self.pool = copy.deepcopy(self.temporalPool)
# In any case, we delete the temporal pool at this point
del self.temporalPool
# Variance mutation
for i in range(self.pool[0].motorNumber):
self.pool[0].variance[i] = self.ruleOneFifth(self.pool[0].variance[i])
# Update fitness function
self.pool[0].update_fitness(bestFitness)
# Adding 1 to the success matrix if the best individual is the child
if formerBetter is True:
if len(self.successes) < 10:
self.successes.append(0)
else:
self.successes.pop(0)
self.successes.append(0)
else:
if len(self.successes) < 10:
self.successes.append(1)
else:
self.successes.pop(0)
self.successes.append(1)
# Updating last fitness
self.lastFitness = bestFitness
# Update psi
self.psi = (
self.successes.count(1) / 10
) # ratio of improvement in the last 10 generations
def trainingLoop(self, maxCycles):
"""Training loop, controlled at maximum by the last cicle"""
for cycle in range(maxCycles):
self.mutation()
self.evaluation()
formerResults = []
if len(formerResults) > 10:
formerResults.pop(0)
formerResults.append(
"Generation: "
+ str(cycle)
+ "\tBest fitness: "
+ str(self.pool[0].fitness)
+ "\nBest chromosome: "
+ str(self.pool[0].functional)
)
print(
"Generation: "
+ str(cycle)
+ "\tBest fitness: "
+ str(self.pool[0].fitness)
+ "\nBest chromosome: "
+ str(self.pool[0].functional)
)
stopping = False
for i in range(len(self.pool[0].functional)):
if self.pool[0].variance[i] < 0.0001:
stopping = True
            if stopping:
print("Early stopping applied")
print(formerResults[0])
break
def ruleOneFifth(self, formerVariance) -> float:
"""Applies the one fifth rule given the former variance"""
# Update psi
self.psi = (
self.successes.count(1) / 10
) # ratio of improvement in the last 10 generations
if self.psi < 0.2:
return self.c * formerVariance
        elif self.psi > 0.2:
            # success rate above 1/5: widen the search by dividing the variance
            # by c (assumes 0 < c < 1); c / variance would invert the scale
            return formerVariance / self.c
else:
return formerVariance
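# Minimal (hypothetical) driver for the (1+1)-ES above; the parameter values
# are illustrative only:
#   es = EvolutiveStrategyOneIndividual(c=0.817, is10=False)
#   es.trainingLoop(maxCycles=1000)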
class EvolutiveStrategyMultiple:
"""Evolution strategy made with a population of individuals."""
def __init__(self, population, family_number, tournament_factor, is10):
self.population = population
self.pool = []
for _ in range(self.population):
indv = Individual(is10)
self.pool.append(indv)
self.family_number = family_number
self.tau = 1 / math.sqrt(2 * math.sqrt(self.pool[0].motorNumber))
self.zero_tau = 1 / math.sqrt(2 * self.pool[0].motorNumber)
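        # Standard log-normal self-adaptation constants for ES:
        # tau = 1 / sqrt(2 * sqrt(n)) and tau0 = 1 / sqrt(2 * n), with n the
        # number of mutated parameters (motorNumber)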
self.tournament_factor = tournament_factor
self.evaluations = 0
def element_per_list(self, lista):
"""Auxiliar function; given a list of lists, picks a random for each position searching in all lists"""
temporal_list = []
for position in range(len(lista[0])):
rnd = random.randint(0, (self.family_number - 1))
temporal_list.append(lista[rnd][position])
return temporal_list
def tournament(self):
"""
Selects the best individuals by facing them to each other and keeping the best.
        Returns a population of the best individuals
"""
len_population = self.family_number * self.population
temp_population = [] # Temporal place for the newly-created population
for _ in range(len_population):
# Get tournament size as the floored integer of the Population Size * Tournament Percentage (aka factor)
tournament_size = math.floor(self.tournament_factor * self.population)
# Selects a random fraction of the total population to participate in the tournament
tournament_selected = random.sample(range(self.population), tournament_size)
# Choose the fittest
fitnesses = []
indexes = []
for index in tournament_selected:
fitnesses.append(self.pool[index].fitness)
indexes.append(index)
fittest_index = indexes[fitnesses.index(min(fitnesses))]
fittest = self.pool[fittest_index]
temp_population.append(fittest)
return temp_population # Returning the new population
def crossover(self, pool):
"""Returns a pool of children, given a the pool of individuals of the last generation and a family number."""
temporal_pool = []
random.shuffle(pool) # randomize the pool of individuals, to randomize crossover
counter = 0 # controls the loops logic
        avg_functionals = [0] * pool[0].motorNumber  # functional sums for the newborn (must be reset with zeros)
        avg_variances = []  # parents' variance lists for the newborn (must be reset by reassigning)
for indv in pool:
if counter != (self.family_number - 1): # not the last member of the family
for position in range(indv.motorNumber):
avg_functionals[position] += indv.functional[position] # adds each functional of the current ind to corresponding positions
avg_variances.append(indv.variance) # adds the variance to the list of parent variances
counter += 1
else: # last member of the family -> extra functions
for position in range(indv.motorNumber):
avg_functionals[position] += indv.functional[position]
                    avg_functionals[position] /= self.family_number  # all sums done; divide by family size to average
avg_variances.append(indv.variance)
# Transforming the list of lists to a list of variances, with a random variance of the parents for each position
avg_variances = self.element_per_list(avg_variances)
# Adding the individual to the temporal pool
temp_indv = Individual(
is10=pool[0].is10,
functional=avg_functionals,
variance=avg_variances,
)
temporal_pool.append(temp_indv)
# Restarting variables, as this family has finished
counter = 0
avg_functionals = [0] * pool[0].motorNumber
avg_variances = []
"""
With this implementation, if population mod family number is not zero, those parents at the end wont create any child.
To cope with that, the parents pool is shuffled. This should not be a problem, just 1 or 2 will be excluded.
At the end, we get the same number of children, so the rest of the operators remain unchanged, and convergence will work just fine.
"""
return temporal_pool
def mutation(self, pool, scaling):
"""
Given a pool of individuals, mutates all individuals
functionals get mutated by a Gaussian distribution
variances get decreased by a Gaussian scheme
"""
for individual in pool:
for i in range(individual.motorNumber):
# Functional mutation
individual.functional[i] += np.random.normal(
loc=0, scale=individual.variance[i]
)
# Variance mutation
if scaling is True:
individual.variance[i] = (
individual.variance[i]
* np.exp(np.random.normal(loc=0, scale=self.tau))
* np.exp(np.random.normal(loc=0, scale=self.zero_tau))
)
else:
individual.variance[i] = individual.variance[i] * np.exp(
np.random.normal(loc=0, scale=self.tau)
)
return pool
def concurrent_evaluation(self, pool):
"""Given a pool of individuals, return a list with its fitness functions"""
callers = [] # list of caller objects of individuals
for individual in pool:
            individual_caller = FitnessFunctionCaller(*individual.functional)
callers.append(individual_caller)
with futures.ThreadPoolExecutor(max_workers=50) as execute:
future = [execute.submit(callers[i].call) for i in range(len(pool))]
self.evaluations += len(future)
fitnesses = [f.result() for f in future] # list of fitness of the pool
return fitnesses
def selection(self, children_pool):
"""Given a pool of mutated children, and using self.pool (parent's pool), selects the best individuals"""
fitnesses = []
        combined_pool = copy.deepcopy(self.pool)  # parents enter the combined pool
        combined_pool.extend(children_pool)  # children enter the combined pool
for i in range(len(self.pool)):
fitnesses.append(self.pool[i].fitness)
fitnesses.extend(
self.concurrent_evaluation(children_pool)
) # list of fitnesses of the combined pool
for i in range(len(combined_pool)):
combined_pool[i].fitness = fitnesses[i]
combined_pool.sort(key=operator.attrgetter("fitness"))
# ordered_combined_pool = [x for _,x in sorted(zip(fitnesses, combined_pool))] # Population ordered by fitness
self.pool = copy.deepcopy(combined_pool[: self.population]) # The pool will now be the best individuals of both parents and children
fitnesses.sort()
for i in range(len(self.pool)):
self.pool[i].fitness = fitnesses[i]
return
def training_cycle(self, max_cycles, scaling):
"""Training loop, controlled at maximum by the max cycle"""
fitnesses = self.concurrent_evaluation(self.pool)
for i in range(len(self.pool)):
self.pool[i].fitness = fitnesses[i]
for cycle in range(max_cycles):
temp_pool = self.tournament()
temp_pool = self.crossover(temp_pool)
temp_pool = self.mutation(temp_pool, scaling)
self.selection(temp_pool)
print(
"Generation: "
+ str(cycle)
+ "\t Evaluation: "
+ str(self.evaluations)
+ "\tBest fitness: "
+ str(self.pool[0].fitness)
+ "\nBest chromosome: "
+ str(self.pool[0].functional)
+ "\n"
+ str(self.pool[0].variance)
)
if self.pool[0].fitness == 0.0:
print("Early stopping applied")
print(
"Generation: "
+ str(cycle)
+ "\t Evaluation: "
+ str(self.evaluations)
+ "\tBest fitness: "
+ str(self.pool[0].fitness)
+ "\nBest chromosome: "
+ str(self.pool[0].functional)
+ "\n"
+ str(self.pool[0].variance)
)
break
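# A minimal sketch of the tournament step above on plain numbers (all values
# here are illustrative assumptions; lower fitness is better, as in selection()):
def _demo_tournament_pick(fitnesses, tournament_size, rng=random):
    """Return the index of the fittest among `tournament_size` random candidates."""
    candidates = rng.sample(range(len(fitnesses)), tournament_size)
    return min(candidates, key=lambda i: fitnesses[i])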
def main():
# Code for strategy of 1 individual
# ee = EvolutiveStrategyOneIndividual(c=ce, is10=True)
# ee.trainingLoop(10000)
# Code for strategy with the best results
ee = EvolutiveStrategyMultiple(
population=300, family_number=2, tournament_factor=0.05, is10=True
)
ee.training_cycle(1000, scaling=True)
if __name__ == "__main__":
main()
| 35.135484
| 144
| 0.580487
| 1,786
| 16,338
| 5.241881
| 0.18813
| 0.038453
| 0.024033
| 0.01175
| 0.280282
| 0.219077
| 0.195898
| 0.16834
| 0.117924
| 0.114505
| 0
| 0.015206
| 0.335843
| 16,338
| 464
| 145
| 35.211207
| 0.847572
| 0.222487
| 0
| 0.316667
| 0
| 0
| 0.024711
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063333
| false
| 0
| 0.026667
| 0
| 0.136667
| 0.02
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a8ac6ed77639549d9368218a7f979d0a6bcc7b7
| 1,638
|
py
|
Python
|
src/arago/hiro/client/exception.py
|
166MMX/hiro-python-library
|
fb29e3247a8fe1b0f7dc4e68141cf7340a8dd0a5
|
[
"MIT"
] | null | null | null |
src/arago/hiro/client/exception.py
|
166MMX/hiro-python-library
|
fb29e3247a8fe1b0f7dc4e68141cf7340a8dd0a5
|
[
"MIT"
] | null | null | null |
src/arago/hiro/client/exception.py
|
166MMX/hiro-python-library
|
fb29e3247a8fe1b0f7dc4e68141cf7340a8dd0a5
|
[
"MIT"
] | null | null | null |
from typing import Mapping, Any, List
class HiroClientError(Exception):
def __init__(self, *args: object) -> None:
super().__init__(*args)
class OntologyValidatorError(HiroClientError):
message: str
warnings: List[str]
errors: List[str]
def __init__(self, data: Mapping[str, Any]) -> None:
super().__init__()
error = data['error']
self.message = error['message']
result = error['result']
self.warnings = result['warnings']
self.errors = result['errors']
@staticmethod
def is_validator_error(data: Mapping[str, Any]) -> bool:
# {
# 'error': {
# 'message': 'validation failed',
# 'result': {
# 'errors': [
# 'attribute ogit/description is invalid'
# ],
# 'warnings': [
# ]
# }
# }
# }
if 'error' not in data:
return False
error = data['error']
if 'message' not in error or 'result' not in error:
return False
message = error['message']
result = error['result']
if message != 'validation failed' or 'errors' not in result or 'warnings' not in result:
return False
warnings = result['warnings']
errors = result['errors']
if not isinstance(warnings, list) or not isinstance(errors, list):
return False
return True
class HiroServerError(Exception):
def __init__(self, *args: object) -> None:
super().__init__(*args)
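# A minimal usage sketch; the payload shape mirrors the commented example in
# is_validator_error above and is a hypothetical response, not a live HIRO reply.
def _demo_validator_error():
    data = {
        'error': {
            'message': 'validation failed',
            'result': {
                'errors': ['attribute ogit/description is invalid'],
                'warnings': [],
            },
        },
    }
    if OntologyValidatorError.is_validator_error(data):
        return OntologyValidatorError(data)
    return None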
| 27.762712
| 96
| 0.525031
| 155
| 1,638
| 5.380645
| 0.277419
| 0.029976
| 0.039568
| 0.047962
| 0.199041
| 0.199041
| 0.11271
| 0.11271
| 0.11271
| 0.11271
| 0
| 0
| 0.357753
| 1,638
| 58
| 97
| 28.241379
| 0.792776
| 0.139805
| 0
| 0.352941
| 0
| 0
| 0.080946
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.029412
| 0
| 0.470588
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a8cac712e69f85d4085b70791e0d285fbcb5630
| 2,507
|
py
|
Python
|
BabysFirstNeuralNetwork/ToyNN.py
|
dwpicott/BasicNeuralNetwork
|
ad4f5878098e5ad167ee2280f5b9b03af02dfa27
|
[
"MIT"
] | null | null | null |
BabysFirstNeuralNetwork/ToyNN.py
|
dwpicott/BasicNeuralNetwork
|
ad4f5878098e5ad167ee2280f5b9b03af02dfa27
|
[
"MIT"
] | null | null | null |
BabysFirstNeuralNetwork/ToyNN.py
|
dwpicott/BasicNeuralNetwork
|
ad4f5878098e5ad167ee2280f5b9b03af02dfa27
|
[
"MIT"
] | null | null | null |
'''
Basic Python tutorial neural network.
Based on "A Neural Network in 11 Lines of Python" by i am trask
https://iamtrask.github.io/2015/07/12/basic-python-network/
'''
import numpy as np
class ToyNN(object):
'''
Simple two-layer toy neural network
'''
def __init__(self, inputs=3, outputs=1):
#Number of input and output neurons
self.inputs = inputs
self.outputs = outputs
        #Initialize synapse weights randomly with a mean of 0
self.synapseWeights = 2 * np.random.random((inputs, outputs)) - 1
# Sigmoid activation function
def Activation(self, x):
return 1 / (1 + np.exp(-x))
    # Derivative of the sigmoid, expressed in terms of the sigmoid's output
    # (x here is assumed to already be an activation value)
    def ActivationPrime(self, x):
        return x * (1 - x)
    # Forward propagation of inputs to outputs
    def FeedForward(self, input):
        return self.Activation(np.dot(input, self.synapseWeights))
# Training function
def TrainNN(self, features, targets, iterations=10000):
l0 = features #Input layer
for iter in range(iterations):
            #Forward propagation
l1 = self.FeedForward(l0) #output layer
#Error calculation
error = targets - l1
            #Back propagation
# multiply slope by the error at each predicted value
delta = error * self.ActivationPrime(l1)
#update weights
self.synapseWeights += np.dot(l0.T, delta)
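# A small sanity sketch (not from the original tutorial): ActivationPrime
# expects an *activation value* a = sigmoid(x), so a * (1 - a) should match a
# finite-difference slope of the sigmoid at x.
def check_sigmoid_gradient(nn, x=0.5, eps=1e-6):
    """Return the gap between the analytic and numeric sigmoid derivatives at x."""
    a = nn.Activation(x)
    analytic = nn.ActivationPrime(a)  # note: takes the activation, not x
    numeric = (nn.Activation(x + eps) - nn.Activation(x - eps)) / (2 * eps)
    return abs(analytic - numeric)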
# Training data: a 1 in the first column directly correlates with a 1 in the output
# training features
features = np.array([ [0, 0, 1],
[0, 1, 1],
[1, 0, 1],
[1, 1, 1] ])
# training targets
targets = np.array([ [0, 0, 1, 1] ]).T # 4x1 matrix
# Seed random number generator
np.random.seed(1)
nn = ToyNN()
print("Training neural network...")
nn.TrainNN(features, targets)
print("Training complete.\n")
print("Input training set:")
print(targets)
print("Expected output:")
print(targets)
print("\nOutput from training set after 10000 iterations:")
print(nn.FeedForward(features))
print("\n==============================\n")
newData = np.array([ [0, 0, 0],
[0, 1, 0],
[1, 0, 0] ])
print("New input data:")
print(newData)
print("Expected output:")
print(np.array([ [0, 0, 1] ]).T)
print("\nOutput for new data not in the training set:")
print(nn.FeedForward(newData))
| 27.25
| 83
| 0.59274
| 315
| 2,507
| 4.704762
| 0.365079
| 0.009447
| 0.021592
| 0.024292
| 0.029015
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037264
| 0.282808
| 2,507
| 92
| 84
| 27.25
| 0.786986
| 0.285201
| 0
| 0.088889
| 0
| 0
| 0.138049
| 0.019395
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.022222
| 0.066667
| 0.222222
| 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a8dcfa7190ecc79bdaa94535eba0d246aff05b9
| 1,122
|
py
|
Python
|
gaphor/UML/deployments/tests/test_connector.py
|
MartinIIOT/gaphor
|
b08bf6ddb8c92ec87fccabc2ddee697609f73e67
|
[
"Apache-2.0"
] | null | null | null |
gaphor/UML/deployments/tests/test_connector.py
|
MartinIIOT/gaphor
|
b08bf6ddb8c92ec87fccabc2ddee697609f73e67
|
[
"Apache-2.0"
] | null | null | null |
gaphor/UML/deployments/tests/test_connector.py
|
MartinIIOT/gaphor
|
b08bf6ddb8c92ec87fccabc2ddee697609f73e67
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from gaphor import UML
from gaphor.core.modeling import Diagram
from gaphor.core.modeling.modelinglanguage import (
CoreModelingLanguage,
MockModelingLanguage,
)
from gaphor.SysML.modelinglanguage import SysMLModelingLanguage
from gaphor.UML.deployments.connector import ConnectorItem
from gaphor.UML.modelinglanguage import UMLModelingLanguage
@pytest.fixture
def modeling_language():
return MockModelingLanguage(
CoreModelingLanguage(), UMLModelingLanguage(), SysMLModelingLanguage()
)
def test_create(create):
"""Test creation of connector item."""
conn = create(ConnectorItem, UML.Connector)
assert conn.subject is not None
def test_persistence(create, element_factory, saver, loader):
"""Test connector item saving/loading."""
conn = create(ConnectorItem, UML.Connector)
end = element_factory.create(UML.ConnectorEnd)
conn.end = end
data = saver()
assert end.id in data
loader(data)
diagram = next(element_factory.select(Diagram))
assert diagram.select(ConnectorItem)
assert element_factory.lselect(UML.ConnectorEnd)
| 27.365854
| 78
| 0.762923
| 121
| 1,122
| 7.016529
| 0.38843
| 0.070671
| 0.03298
| 0.051826
| 0.08245
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15508
| 1,122
| 40
| 79
| 28.05
| 0.89557
| 0.060606
| 0
| 0.071429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 1
| 0.107143
| false
| 0
| 0.25
| 0.035714
| 0.392857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a8f0982e03b38e05aa03eb45840308eeb8e3dc5
| 3,730
|
py
|
Python
|
py_ti/helper_loops.py
|
tlpcap/tlp_ti
|
8d72b316b332fd5e20785dbf19401883958c0666
|
[
"MIT"
] | 7
|
2021-01-31T19:23:07.000Z
|
2022-03-10T21:22:41.000Z
|
py_ti/helper_loops.py
|
tlpcap/tlp_ti
|
8d72b316b332fd5e20785dbf19401883958c0666
|
[
"MIT"
] | null | null | null |
py_ti/helper_loops.py
|
tlpcap/tlp_ti
|
8d72b316b332fd5e20785dbf19401883958c0666
|
[
"MIT"
] | null | null | null |
import numpy as np
from numba import jit
@jit
def wilders_loop(data, n):
"""
Wilder's Moving Average Helper Loop
Jit used to improve performance
"""
for i in range(n, len(data)):
data[i] = (data[i-1] * (n-1) + data[i]) / n
return data
@jit
def kama_loop(data, sc, n_er, length):
"""
Kaufman's Adaptive Moving Average Helper Loop
Jit used to improve performance
"""
kama = np.full(length, np.nan)
kama[n_er-1] = data[n_er-1]
for i in range(n_er, length):
kama[i] = kama[i-1] + sc[i] * (data[i] - kama[i-1])
return kama
@jit
def psar_loop(psar, high, low, af_step, max_af):
"""
Wilder's Parabolic Stop and Reversal Helper Loop
Jit used to improve performance
"""
length = len(psar)
uptrend = True
af = af_step
high_point = high[0]
low_point = low[0]
psar_up = np.empty(length)
psar_up.fill(np.nan)
psar_down = np.empty(length)
psar_down.fill(np.nan)
for i in range(2, length):
reversal = False
if uptrend:
psar[i] = psar[i-1] + af * (high_point - psar[i-1])
if low[i] < psar[i]:
reversal = True
psar[i] = high_point
low_point = low[i]
af = af_step
else:
if high[i] > high_point:
high_point = high[i]
af = min(af + af_step, max_af)
if low[i-2] < psar[i]:
psar[i] = low[i-2]
elif low[i-1] < psar[i]:
psar[i] = low[i-1]
else:
psar[i] = psar[i-1] - af * (psar[i-1] - low_point)
if high[i] > psar[i]:
reversal = True
psar[i] = low_point
high_point = high[i]
af = af_step
else:
if low[i] < low_point:
low_point = low[i]
af = min(af + af_step, max_af)
if high[i-2] > psar[i]:
psar[i] = high[i-2]
elif high[i-1] > psar[i]:
psar[i] = high[i-1]
uptrend = uptrend ^ reversal
if uptrend:
psar_up[i] = psar[i]
else:
psar_down[i] = psar[i]
return psar
@jit
def supertrend_loop(close, basic_ub, basic_lb, n):
"""
Supertrend Helper Loop
Jit used to improve performance
"""
length = len(close)
final_ub = np.zeros(length)
final_lb = np.zeros(length)
supertrend = np.zeros(length)
for i in range(n, length):
if basic_ub[i] < final_ub[i-1] or close[i-1] > final_ub[i-1]:
final_ub[i] = basic_ub[i]
else:
final_ub[i] = final_ub[i-1]
if basic_lb[i] > final_lb[i-1] or close[i-1] < final_lb[i-1]:
final_lb[i] = basic_lb[i]
else:
final_lb[i] = final_lb[i-1]
if supertrend[i-1] == final_ub[i-1] and close[i] <= final_ub[i]:
supertrend[i] = final_ub[i]
elif supertrend[i-1] == final_ub[i-1] and close[i] > final_ub[i]:
supertrend[i] = final_lb[i]
elif supertrend[i-1] == final_lb[i-1] and close[i] >= final_lb[i]:
supertrend[i] = final_lb[i]
elif supertrend[i-1] == final_lb[i-1] and close[i] < final_lb[i]:
supertrend[i] = final_ub[i]
else:
supertrend[i] = 0.00
return supertrend
@jit
def fib_loop(n):
"""
Fibonacci loop
Returns the fibonacci sequence as a list from the 3rd to the n-1th number
Jit used to improve performance
"""
    fib = [0, 1]
    for _ in range(n - 1):  # build up to the (n+1)th Fibonacci number
        fib.append(fib[-2] + fib[-1])
    return fib[3:]
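# A hedged usage sketch with synthetic prices (not part of py_ti's public API;
# note that wilders_loop mutates its input array, so pass a copy):
def _demo_wilders():
    prices = np.abs(np.random.randn(100)).cumsum()
    return wilders_loop(prices.copy(), 14)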
| 24.866667
| 77
| 0.507507
| 554
| 3,730
| 3.297834
| 0.146209
| 0.029557
| 0.048166
| 0.032841
| 0.485495
| 0.448823
| 0.296661
| 0.253968
| 0.253968
| 0.125889
| 0
| 0.020228
| 0.363807
| 3,730
| 149
| 78
| 25.033557
| 0.749684
| 0.107775
| 0
| 0.308511
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053191
| false
| 0
| 0.021277
| 0
| 0.12766
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a9084ba87c0f5c49b0d1b1f5827e460b297b88e
| 3,991
|
py
|
Python
|
src/app.py
|
eug/cron-rest
|
2d0a2e0d0cf0cb464b71293802b85ac7076f9944
|
[
"MIT"
] | 3
|
2021-05-10T13:42:59.000Z
|
2022-03-28T02:07:23.000Z
|
src/app.py
|
eug/cron-rest
|
2d0a2e0d0cf0cb464b71293802b85ac7076f9944
|
[
"MIT"
] | null | null | null |
src/app.py
|
eug/cron-rest
|
2d0a2e0d0cf0cb464b71293802b85ac7076f9944
|
[
"MIT"
] | 4
|
2018-05-12T13:43:00.000Z
|
2021-10-30T01:23:00.000Z
|
# -*- coding: utf-8 -*-
import json
import os
from crontab import CronTab
from flask import Flask, request
from pathlib import Path
from pretty_cron import prettify_cron
app = Flask(__name__)
@app.route('/', methods=['GET'])
def home():
return Path(app.root_path + '/index.html').read_text()
@app.route('/create', methods=['POST'])
def create():
pattern = request.form['pattern']
command = request.form['command']
    if not command or prettify_cron(pattern) == pattern:  # heuristic: pretty_cron echoes back patterns it cannot parse
return json.dumps({
'status': 'fail',
'message': 'Some arguments are invalid.'
})
cron = CronTab(user=os.getenv('USER'))
job_id = len(cron)
job = cron.new(command=command)
job.setall(pattern)
cron.write()
return json.dumps({
'status': 'ok',
'message': 'Job successfully created.',
'job': {
'id': job_id,
'pattern': pattern,
'command': command,
'description': prettify_cron(pattern)
}
})
@app.route('/retrieve', methods=['GET'], defaults={'job_id': -1})
@app.route('/retrieve/id/<int:job_id>', methods=['GET'])
def retrieve(job_id):
jobs = []
cron = CronTab(user=os.getenv('USER'))
if job_id < 0:
for i, job in enumerate(cron):
pattern = job.slices.render()
command = job.command
description = prettify_cron(pattern)
jobs.append({
'id': i,
'pattern': pattern,
'command': command,
'description': description
})
return json.dumps({
'status': 'ok',
'message': 'Jobs retrieved successfully',
'jobs' : jobs
})
elif job_id < len(cron):
job = cron[job_id]
pattern = job.slices.render()
command = job.command
description = prettify_cron(pattern)
return json.dumps({
'status': 'ok',
'message': 'Job retrieved successfully',
'jobs' : [{
'id': job_id,
'pattern': pattern,
'command': command,
'description': description
}]
})
return json.dumps({
'status': 'fail',
'message': 'Job ID is invalid.'
})
@app.route('/update/id/<int:job_id>', methods=['POST'])
def update(job_id):
    pattern = request.form.get('pattern')
    command = request.form.get('command')
description = ''
    if not command and (not pattern or prettify_cron(pattern) == pattern):
return json.dumps({
'status': 'fail',
'message': 'Some argument must be provided.'
})
cron = CronTab(user=os.getenv('USER'))
if job_id >= len(cron) or job_id < 0:
return json.dumps({
'status': 'fail',
'message': 'Job ID is invalid.'
})
if not command:
command = cron[job_id].command
cron[job_id].set_command(command)
if pattern and prettify_cron(pattern) != pattern:
cron[job_id].setall(pattern)
description = prettify_cron(pattern)
else:
pattern = cron[job_id].slices.render()
cron.write()
return json.dumps({
'status': 'ok',
'message': 'Job updated successfully.',
'job': {
'id': job_id,
'pattern': pattern,
'command': command,
'description': description
}
})
@app.route('/delete/id/<int:job_id>', methods=['DELETE'])
def delete(job_id):
cron = CronTab(user=os.getenv('USER'))
if job_id >= len(cron) or job_id < 0:
return json.dumps({
'status': 'fail',
'message': 'Job ID is invalid.'
})
cron.remove(cron[job_id])
cron.write()
return json.dumps({
'status': 'ok',
'message': 'Job deleted successfully.'
})
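# A hedged client sketch against the routes above (assumes the server is
# running locally on Flask's default port; `requests` is an extra dependency
# that this app does not otherwise import):
def _demo_client(base='http://127.0.0.1:5000'):
    import requests
    created = requests.post(base + '/create',
                            data={'pattern': '0 * * * *', 'command': 'echo hi'})
    print(created.json())
    print(requests.get(base + '/retrieve').json())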
if __name__ == '__main__':
app.run()
| 26.256579
| 76
| 0.540466
| 435
| 3,991
| 4.850575
| 0.2
| 0.066351
| 0.07109
| 0.099526
| 0.56872
| 0.474882
| 0.43981
| 0.424171
| 0.424171
| 0.293365
| 0
| 0.00182
| 0.311701
| 3,991
| 151
| 77
| 26.430464
| 0.766291
| 0.005262
| 0
| 0.528
| 0
| 0
| 0.180444
| 0.017893
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.048
| 0.008
| 0.176
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a90d1f158c36003df58478dbdda2afff682b6b2
| 1,196
|
py
|
Python
|
2017/examples/05_randomization.py
|
limunan/stanford-tensorflow-tutorials
|
51e53daaa2a32cfe7a1966f060b28dbbd081791c
|
[
"MIT"
] | 9,180
|
2017-07-27T23:43:41.000Z
|
2022-03-29T17:10:14.000Z
|
2017/examples/05_randomization.py
|
Nianze/stanford-tensorflow-tutorials
|
51e53daaa2a32cfe7a1966f060b28dbbd081791c
|
[
"MIT"
] | 86
|
2017-08-04T12:38:38.000Z
|
2020-12-09T03:34:02.000Z
|
2017/examples/05_randomization.py
|
joshosu/stanford-tensorflow-tutorials
|
b16899102bf07964a15494452a2e91c1b9f88e46
|
[
"MIT"
] | 4,115
|
2017-07-28T06:53:12.000Z
|
2022-03-23T12:36:55.000Z
|
""" Examples to demonstrate ops level randomization
Author: Chip Huyen
Prepared for the class CS 20SI: "TensorFlow for Deep Learning Research"
cs20si.stanford.edu
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
# Example 1: session is the thing that keeps track of random state
c = tf.random_uniform([], -10, 10, seed=2)
with tf.Session() as sess:
print(sess.run(c)) # >> 3.57493
print(sess.run(c)) # >> -5.97319
# Example 2: each new session will start the random state all over again.
c = tf.random_uniform([], -10, 10, seed=2)
with tf.Session() as sess:
print(sess.run(c)) # >> 3.57493
with tf.Session() as sess:
print(sess.run(c)) # >> 3.57493
# Example 3: with operation level random seed, each op keeps its own seed.
c = tf.random_uniform([], -10, 10, seed=2)
d = tf.random_uniform([], -10, 10, seed=2)
with tf.Session() as sess:
print(sess.run(c)) # >> 3.57493
print(sess.run(d)) # >> 3.57493
# Example 4: graph level random seed
tf.set_random_seed(2)
c = tf.random_uniform([], -10, 10)
d = tf.random_uniform([], -10, 10)
with tf.Session() as sess:
print(sess.run(c)) # >> 9.12393
print(sess.run(d)) # >> -4.53404
| 27.813953
| 74
| 0.664716
| 203
| 1,196
| 3.857143
| 0.35468
| 0.091954
| 0.122605
| 0.130268
| 0.444444
| 0.444444
| 0.392082
| 0.392082
| 0.360153
| 0.319285
| 0
| 0.08731
| 0.176421
| 1,196
| 43
| 75
| 27.813953
| 0.707614
| 0.413043
| 0
| 0.695652
| 0
| 0
| 0.030702
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.086957
| 0
| 0.086957
| 0.347826
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a91c8f71ed1bbfb503d86a5235097fd88dfae4a
| 5,651
|
py
|
Python
|
python-CSDN博客爬虫/CSDN_article/utils/myutils.py
|
wangchuanli001/Project-experience
|
b563c5c3afc07c913c2e1fd25dff41c70533f8de
|
[
"Apache-2.0"
] | 12
|
2019-12-07T01:44:55.000Z
|
2022-01-27T14:13:30.000Z
|
python-CSDN博客爬虫/CSDN_article/utils/myutils.py
|
hujiese/Project-experience
|
b563c5c3afc07c913c2e1fd25dff41c70533f8de
|
[
"Apache-2.0"
] | 23
|
2020-05-23T03:56:33.000Z
|
2022-02-28T07:54:45.000Z
|
python-CSDN博客爬虫/CSDN_article/utils/myutils.py
|
hujiese/Project-experience
|
b563c5c3afc07c913c2e1fd25dff41c70533f8de
|
[
"Apache-2.0"
] | 7
|
2019-12-20T04:48:56.000Z
|
2021-11-19T02:23:45.000Z
|
# -*- coding: utf-8 -*-
'''
General-purpose utilities
'''
import time
import MySQLdb
import jieba
import ast
import random, sys
# Logger class
import requests
sys.setrecursionlimit(1000000)
class Logger(object):
def __init__(self, filename='default.log', stream=sys.stdout):
self.terminal = stream
self.log = open(filename, 'a', encoding='utf-8')
# def print(self, message):
# self.terminal.write(message + "\n")
# self.log.write(message.encode('utf-8') + b"\n")
# def flush(self):
# self.terminal.flush()
# self.log.flush()
# def close(self):
# self.log.close()
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
pass
# Get a proxy from a local file
def getproxyip(ip_file):
fo = open(ip_file, 'r', encoding='utf-8')
proxys = fo.read().split('\n')
proxy = ast.literal_eval(random.choice(proxys))
# print(proxy)
fo.close()
return proxy
# Random request headers
def randomheader():
user_agents = [
"Mozilla/5.0 (Windows NT 10.0; WOW64)", 'Mozilla/5.0 (Windows NT 6.3; WOW64)',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.95 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729;\
.NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; rv:11.0) like Gecko)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2) Gecko/2008070208 Firefox/3.0.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070309 Firefox/2.0.0.3',
'Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070803 Firefox/1.5.0.12',
'Opera/9.27 (Windows NT 5.2; U; zh-cn)',
'Mozilla/5.0 (Macintosh; PPC Mac OS X; U; en) Opera 8.0',
'Opera/8.0 (Macintosh; PPC Mac OS X; U; en)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.12) Gecko/20080219 Firefox/2.0.0.12 Navigator/9.0.0.6',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)',
'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727;\
.NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Maxthon/4.0.6.2000\
Chrome/26.0.1410.43 Safari/537.1 ',
'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2;\
.NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729;\
Media Center PC 6.0; InfoPath.2; .NET4.0C; .NET4.0E; QQBrowser/7.3.9825.400)',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20100101 Firefox/21.0 ',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) \
Chrome/21.0.1180.92 Safari/537.1 LBBROWSER',
'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; BIDUBrowser 2.x)',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) \
Chrome/20.0.1132.11 TaoBrowser/3.0 Safari/536.11'
]
user_agent = random.choice(user_agents)
headers = {
'User-Agent': user_agent,
'Connection': 'close',
}
return headers
'''
58.218.205.40:7754
221.229.196.234:6987
58.218.205.51:7038
58.218.205.57:2513
58.218.205.55:7817
58.218.205.52:5109
'''
# Proxy IP list
ip_port = ["180.97.250.157:5147", "58.218.205.39:7893", "180.97.250.158:4107", "221.229.196.212:9311",
"221.229.196.212:6066", "221.229.196.192:6545",
"221.229.196.231:9975", "221.229.196.212:4953", "221.229.196.192:2133"]
# Proxy server (Abuyun)
proxyHost = "http-dyn.abuyun.com"
proxyPort = "9020"
# Proxy tunnel credentials
proxyUser = "HP48W550C1X873PD"
proxyPass = "FED1B0BB31CE94A3"
proxyMeta = "http://%(user)s:%(pass)s@%(host)s:%(port)s" % {
"host": proxyHost,
"port": proxyPort,
"user": proxyUser,
"pass": proxyPass,
}
# Crawler
def spider(url, times=0):
try:
proxies = {
"http": proxyMeta,
"https": proxyMeta,
}
# proxies = {
# "https": random.choices(port_list)[0]
# }
requests.packages.urllib3.disable_warnings()
        # response = requests.get(url, headers=randomheader(), proxies=proxies, timeout=20, verify=False)  # with proxy IP
        response = requests.get(url, headers=randomheader(), timeout=20, verify=False)  # without proxy IP
requests.adapters.DEFAULT_RETRIES = 5
s = requests.session()
s.keep_alive = False
return response
    except Exception as e:
        times += 1
        print("Crawler error: " + url + " reason: " + str(e))
        if times > 6:
            return ""
        time.sleep(random.randint(0, 9))
        print("Retrying (" + str(times) + "): " + url)
        return spider(url, times)  # return the retry's response instead of dropping it
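# Hedged usage sketch (the URL is an illustrative assumption):
def _demo_spider():
    resp = spider("https://blog.csdn.net/")
    if resp:
        print(resp.status_code)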
# Execute a database update statement
def sql_opt(databse, sql):
db = MySQLdb.connect("localhost", "root", "123456789", databse, charset='utf8')
cursor = db.cursor()
try:
cursor.execute(sql)
db.commit()
except Exception as e:
print("sql_opt语句执行异常" + str(e) + "\n" + sql)
db.rollback()
db.close()
if __name__ == '__main__':
print("test")
fo = open("proxy_ip.txt", 'r', encoding='utf-8')
port_list = fo.read().split("\n")
fo.close()
proxies = {
"https": random.choices(port_list)[0],
}
print(proxies)
| 33.838323
| 121
| 0.599186
| 865
| 5,651
| 3.880925
| 0.294798
| 0.053619
| 0.048257
| 0.066726
| 0.361632
| 0.342568
| 0.297289
| 0.267501
| 0.221329
| 0.184391
| 0
| 0.15956
| 0.228101
| 5,651
| 166
| 122
| 34.042169
| 0.610041
| 0.087064
| 0
| 0.072072
| 0
| 0.198198
| 0.307815
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063063
| false
| 0.036036
| 0.054054
| 0
| 0.162162
| 0.045045
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a96f177bdadd6a1d79e415e623de1950e19535a
| 17,315
|
py
|
Python
|
build/fbcode_builder/getdeps/cargo.py
|
dmitryvinn/watchman
|
668d3536031acd9b65950c29d6e956bb42b972bb
|
[
"MIT"
] | null | null | null |
build/fbcode_builder/getdeps/cargo.py
|
dmitryvinn/watchman
|
668d3536031acd9b65950c29d6e956bb42b972bb
|
[
"MIT"
] | null | null | null |
build/fbcode_builder/getdeps/cargo.py
|
dmitryvinn/watchman
|
668d3536031acd9b65950c29d6e956bb42b972bb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import re
import shutil
from .builder import BuilderBase
class CargoBuilder(BuilderBase):
def __init__(
self,
build_opts,
ctx,
manifest,
src_dir,
build_dir,
inst_dir,
build_doc,
workspace_dir,
manifests_to_build,
loader,
cargo_config_file,
) -> None:
super(CargoBuilder, self).__init__(
build_opts, ctx, manifest, src_dir, build_dir, inst_dir
)
self.build_doc = build_doc
self.ws_dir = workspace_dir
self.manifests_to_build = manifests_to_build and manifests_to_build.split(",")
self.loader = loader
self.cargo_config_file_subdir = cargo_config_file
def run_cargo(self, install_dirs, operation, args=None) -> None:
args = args or []
env = self._compute_env(install_dirs)
# Enable using nightly features with stable compiler
env["RUSTC_BOOTSTRAP"] = "1"
env["LIBZ_SYS_STATIC"] = "1"
cmd = [
"cargo",
operation,
"--workspace",
"-j%s" % self.num_jobs,
] + args
self._run_cmd(cmd, cwd=self.workspace_dir(), env=env)
def build_source_dir(self):
return os.path.join(self.build_dir, "source")
def workspace_dir(self):
return os.path.join(self.build_source_dir(), self.ws_dir or "")
def manifest_dir(self, manifest):
return os.path.join(self.build_source_dir(), manifest)
def recreate_dir(self, src, dst) -> None:
if os.path.isdir(dst):
shutil.rmtree(dst)
shutil.copytree(src, dst)
def cargo_config_file(self):
build_source_dir = self.build_dir
if self.cargo_config_file_subdir:
return os.path.join(build_source_dir, self.cargo_config_file_subdir)
else:
return os.path.join(build_source_dir, ".cargo", "config")
def _create_cargo_config(self):
cargo_config_file = self.cargo_config_file()
cargo_config_dir = os.path.dirname(cargo_config_file)
if not os.path.isdir(cargo_config_dir):
os.mkdir(cargo_config_dir)
print(f"Writing cargo config for {self.manifest.name} to {cargo_config_file}")
with open(cargo_config_file, "w+") as f:
f.write(
"""\
# Generated by getdeps.py
[build]
target-dir = '''{}'''
[net]
git-fetch-with-cli = true
[profile.dev]
debug = false
incremental = false
""".format(
self.build_dir.replace("\\", "\\\\")
)
)
# Point to vendored sources from getdeps manifests
dep_to_git = self._resolve_dep_to_git()
for _dep, git_conf in dep_to_git.items():
if "cargo_vendored_sources" in git_conf:
with open(cargo_config_file, "a") as f:
vendored_dir = git_conf["cargo_vendored_sources"].replace(
"\\", "\\\\"
)
f.write(
f"""
[source."{git_conf["repo_url"]}"]
directory = "{vendored_dir}"
"""
)
# Point to vendored crates.io if possible
try:
from .facebook.rust import vendored_crates
vendored_crates(self.build_opts, cargo_config_file)
except ImportError:
            # This FB internal module isn't shipped to github,
            # so just rely on cargo downloading crates on its own
pass
return dep_to_git
def _prepare(self, install_dirs, reconfigure):
build_source_dir = self.build_source_dir()
self.recreate_dir(self.src_dir, build_source_dir)
dep_to_git = self._create_cargo_config()
if self.ws_dir is not None:
self._patchup_workspace(dep_to_git)
def _build(self, install_dirs, reconfigure) -> None:
# _prepare has been run already. Actually do the build
build_source_dir = self.build_source_dir()
if self.manifests_to_build is None:
self.run_cargo(
install_dirs,
"build",
["--out-dir", os.path.join(self.inst_dir, "bin"), "-Zunstable-options"],
)
else:
for manifest in self.manifests_to_build:
self.run_cargo(
install_dirs,
"build",
[
"--out-dir",
os.path.join(self.inst_dir, "bin"),
"-Zunstable-options",
"--manifest-path",
self.manifest_dir(manifest),
],
)
self.recreate_dir(build_source_dir, os.path.join(self.inst_dir, "source"))
def run_tests(
self, install_dirs, schedule_type, owner, test_filter, retry, no_testpilot
) -> None:
if test_filter:
args = ["--", test_filter]
else:
args = []
if self.manifests_to_build is None:
self.run_cargo(install_dirs, "test", args)
if self.build_doc:
self.run_cargo(install_dirs, "doc", ["--no-deps"])
else:
for manifest in self.manifests_to_build:
margs = ["--manifest-path", self.manifest_dir(manifest)]
self.run_cargo(install_dirs, "test", args + margs)
if self.build_doc:
self.run_cargo(install_dirs, "doc", ["--no-deps"] + margs)
def _patchup_workspace(self, dep_to_git) -> None:
"""
This method makes some assumptions about the state of the project and
        its cargo dependencies:
1. Crates from cargo dependencies can be extracted from Cargo.toml files
using _extract_crates function. It is using a heuristic so check its
code to understand how it is done.
2. The extracted cargo dependencies crates can be found in the
dependency's install dir using _resolve_crate_to_path function
which again is using a heuristic.
Notice that many things might go wrong here. E.g. if someone depends
on another getdeps crate by writing in their Cargo.toml file:
my-rename-of-crate = { package = "crate", git = "..." }
they can count themselves lucky because the code will raise an
        Exception. There might be more cases where the code will silently pass
producing bad results.
"""
workspace_dir = self.workspace_dir()
config = self._resolve_config(dep_to_git)
if config:
patch_cargo = os.path.join(workspace_dir, "Cargo.toml")
print(f"writing patch to {patch_cargo}")
with open(patch_cargo, "r+") as f:
manifest_content = f.read()
if "[package]" not in manifest_content:
                # A fake manifest has to be created to change the virtual
# manifest into a non-virtual. The virtual manifests are limited
# in many ways and the inability to define patches on them is
# one. Check https://github.com/rust-lang/cargo/issues/4934 to
# see if it is resolved.
null_file = "/dev/null"
if self.build_opts.is_windows():
null_file = "nul"
f.write(
f"""
[package]
name = "fake_manifest_of_{self.manifest.name}"
version = "0.0.0"
[lib]
path = "{null_file}"
"""
)
else:
f.write("\n")
f.write(config)
def _resolve_config(self, dep_to_git) -> str:
"""
Returns a configuration to be put inside root Cargo.toml file which
patches the dependencies git code with local getdeps versions.
See https://doc.rust-lang.org/cargo/reference/manifest.html#the-patch-section
"""
dep_to_crates = self._resolve_dep_to_crates(self.build_source_dir(), dep_to_git)
config = []
git_url_to_crates_and_paths = {}
for dep_name in sorted(dep_to_git.keys()):
git_conf = dep_to_git[dep_name]
req_crates = sorted(dep_to_crates.get(dep_name, []))
if not req_crates:
continue # nothing to patch, move along
git_url = git_conf.get("repo_url", None)
crate_source_map = git_conf["crate_source_map"]
if git_url and crate_source_map:
crates_to_patch_path = git_url_to_crates_and_paths.get(git_url, {})
for c in req_crates:
if c in crate_source_map and c not in crates_to_patch_path:
crates_to_patch_path[c] = crate_source_map[c]
print(
f"{self.manifest.name}: Patching crate {c} via virtual manifest in {self.workspace_dir()}"
)
if crates_to_patch_path:
git_url_to_crates_and_paths[git_url] = crates_to_patch_path
for git_url, crates_to_patch_path in git_url_to_crates_and_paths.items():
crates_patches = [
'{} = {{ path = "{}" }}'.format(
crate,
crates_to_patch_path[crate].replace("\\", "\\\\"),
)
for crate in sorted(crates_to_patch_path.keys())
]
config.append(f'\n[patch."{git_url}"]\n' + "\n".join(crates_patches))
return "\n".join(config)
def _resolve_dep_to_git(self):
"""
For each direct dependency of the currently build manifest check if it
is also cargo-builded and if yes then extract it's git configs and
install dir
"""
dependencies = self.manifest.get_dependencies(self.ctx)
if not dependencies:
return []
dep_to_git = {}
for dep in dependencies:
dep_manifest = self.loader.load_manifest(dep)
dep_builder = dep_manifest.get("build", "builder", ctx=self.ctx)
dep_cargo_conf = dep_manifest.get_section_as_dict("cargo", self.ctx)
dep_crate_map = dep_manifest.get_section_as_dict("crate.pathmap", self.ctx)
if (
not (dep_crate_map or dep_cargo_conf)
and dep_builder not in ["cargo"]
or dep == "rust"
):
# This dependency has no cargo rust content so ignore it.
# The "rust" dependency is an exception since it contains the
# toolchain.
continue
git_conf = dep_manifest.get_section_as_dict("git", self.ctx)
if dep != "rust" and "repo_url" not in git_conf:
raise Exception(
f"{dep}: A cargo dependency requires git.repo_url to be defined."
)
if dep_builder == "cargo":
dep_source_dir = self.loader.get_project_install_dir(dep_manifest)
dep_source_dir = os.path.join(dep_source_dir, "source")
else:
fetcher = self.loader.create_fetcher(dep_manifest)
dep_source_dir = fetcher.get_src_dir()
crate_source_map = {}
if dep_crate_map:
for (crate, subpath) in dep_crate_map.items():
if crate not in crate_source_map:
if self.build_opts.is_windows():
subpath = subpath.replace("/", "\\")
crate_path = os.path.join(dep_source_dir, subpath)
print(
f"{self.manifest.name}: Mapped crate {crate} to dep {dep} dir {crate_path}"
)
crate_source_map[crate] = crate_path
elif dep_cargo_conf:
            # We don't know what crates are defined by the dep, look for them
search_pattern = re.compile('\\[package\\]\nname = "(.*)"')
for crate_root, _, files in os.walk(dep_source_dir):
if "Cargo.toml" in files:
with open(os.path.join(crate_root, "Cargo.toml"), "r") as f:
content = f.read()
match = search_pattern.search(content)
if match:
crate = match.group(1)
if crate:
print(
f"{self.manifest.name}: Discovered crate {crate} in dep {dep} dir {crate_root}"
)
crate_source_map[crate] = crate_root
git_conf["crate_source_map"] = crate_source_map
if not dep_crate_map and dep_cargo_conf:
dep_cargo_dir = self.loader.get_project_build_dir(dep_manifest)
dep_cargo_dir = os.path.join(dep_cargo_dir, "source")
dep_ws_dir = dep_cargo_conf.get("workspace_dir", None)
if dep_ws_dir:
dep_cargo_dir = os.path.join(dep_cargo_dir, dep_ws_dir)
git_conf["cargo_vendored_sources"] = dep_cargo_dir
dep_to_git[dep] = git_conf
return dep_to_git
def _resolve_dep_to_crates(self, build_source_dir, dep_to_git):
"""
This function traverse the build_source_dir in search of Cargo.toml
files, extracts the crate names from them using _extract_crates
function and returns a merged result containing crate names per
dependency name from all Cargo.toml files in the project.
"""
if not dep_to_git:
return {} # no deps, so don't waste time traversing files
dep_to_crates = {}
# First populate explicit crate paths from depedencies
for name, git_conf in dep_to_git.items():
crates = git_conf["crate_source_map"].keys()
if crates:
dep_to_crates.setdefault(name, set()).update(crates)
# Now find from Cargo.tomls
for root, _, files in os.walk(build_source_dir):
for f in files:
if f == "Cargo.toml":
more_dep_to_crates = CargoBuilder._extract_crates_used(
os.path.join(root, f), dep_to_git
)
for dep_name, crates in more_dep_to_crates.items():
existing_crates = dep_to_crates.get(dep_name, set())
for c in crates:
if c not in existing_crates:
print(
f"Patch {self.manifest.name} uses {dep_name} crate {crates}"
)
                                existing_crates.add(c)  # sets have add(), not insert()
                        dep_to_crates.setdefault(dep_name, set()).update(existing_crates)
return dep_to_crates
@staticmethod
def _extract_crates_used(cargo_toml_file, dep_to_git):
"""
This functions reads content of provided cargo toml file and extracts
crate names per each dependency. The extraction is done by a heuristic
so it might be incorrect.
"""
deps_to_crates = {}
with open(cargo_toml_file, "r") as f:
for line in f.readlines():
if line.startswith("#") or "git = " not in line:
continue # filter out commented lines and ones without git deps
for dep_name, conf in dep_to_git.items():
# Only redirect deps that point to git URLS
if 'git = "{}"'.format(conf["repo_url"]) in line:
pkg_template = ' package = "'
if pkg_template in line:
crate_name, _, _ = line.partition(pkg_template)[
2
].partition('"')
else:
crate_name, _, _ = line.partition("=")
deps_to_crates.setdefault(dep_name, set()).add(
crate_name.strip()
)
return deps_to_crates
def _resolve_crate_to_path(self, crate, crate_source_map):
"""
Tries to find <crate> in source_dir by searching a [package]
keyword followed by name = "<crate>".
"""
search_pattern = '[package]\nname = "{}"'.format(crate)
for (_crate, crate_source_dir) in crate_source_map.items():
for crate_root, _, files in os.walk(crate_source_dir):
if "Cargo.toml" in files:
with open(os.path.join(crate_root, "Cargo.toml"), "r") as f:
content = f.read()
if search_pattern in content:
return crate_root
raise Exception(
f"{self.manifest.name}: Failed to find dep crate {crate} in paths {crate_source_map}"
)
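# A hedged, standalone sketch of the line heuristic used by
# _extract_crates_used above, applied to one synthetic Cargo.toml line (the
# crate name and URL below are illustrative assumptions, not real manifests):
def _demo_extract_crate(line, repo_url):
    if line.startswith("#") or 'git = "{}"'.format(repo_url) not in line:
        return None
    pkg_template = ' package = "'
    if pkg_template in line:
        return line.partition(pkg_template)[2].partition('"')[0].strip()
    return line.partition("=")[0].strip()
# e.g. _demo_extract_crate('tokio = { git = "https://example.com/t.git" }',
#                          'https://example.com/t.git') yields 'tokio'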
| 40.081019
| 119
| 0.551372
| 2,068
| 17,315
| 4.355899
| 0.185203
| 0.018317
| 0.019538
| 0.015098
| 0.264987
| 0.200599
| 0.161523
| 0.107682
| 0.087589
| 0.080484
| 0
| 0.001265
| 0.360785
| 17,315
| 431
| 120
| 40.174014
| 0.812613
| 0.165348
| 0
| 0.161184
| 0
| 0.009868
| 0.09808
| 0.019129
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055921
| false
| 0.003289
| 0.019737
| 0.009868
| 0.121711
| 0.019737
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a98425fabf2f4efae0310710f9d76f3fbba768a
| 3,995
|
py
|
Python
|
donn/layers.py
|
sharan-amutharasu/DONN
|
c14557e8ef57f3e1c1b73c1fa98cb6ba19a82904
|
[
"MIT"
] | 3
|
2018-08-17T05:31:25.000Z
|
2020-02-13T19:43:02.000Z
|
tests/donn/layers.py
|
sharan-amutharasu/DONN
|
c14557e8ef57f3e1c1b73c1fa98cb6ba19a82904
|
[
"MIT"
] | 1
|
2018-11-19T06:16:50.000Z
|
2018-11-19T06:17:53.000Z
|
tests/donn/layers.py
|
sharan-amutharasu/DONN
|
c14557e8ef57f3e1c1b73c1fa98cb6ba19a82904
|
[
"MIT"
] | 2
|
2018-12-06T05:01:07.000Z
|
2018-12-06T11:59:47.000Z
|
# coding: utf-8
# In[4]:
from keras.layers import Activation, Dense, Dropout
from keras.layers.advanced_activations import LeakyReLU, PReLU, ThresholdedReLU, ELU
from keras import regularizers
# In[5]:
def get_activation_layer(activation):
"""
Returns the activation layer given its name
"""
if activation == 'ELU':
return ELU()
if activation == 'LeakyReLU':
return LeakyReLU()
if activation == 'ThresholdedReLU':
return ThresholdedReLU()
if activation == 'PReLU':
return PReLU()
return Activation(activation)
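# A quick, hedged sanity sketch (not part of the original notebook): named
# advanced activations come back as layer objects, and anything else falls
# through to a plain Activation layer.
def _demo_activation_lookup():
    assert isinstance(get_activation_layer('LeakyReLU'), LeakyReLU)
    assert isinstance(get_activation_layer('relu'), Activation)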
# In[4]:
class Layer(object):
"""
Layer object for adding different types of layers to the model
"""
def __init__(self, layer_type):
self.layer_type = layer_type
if self.layer_type in ["hidden", "input", "output"]:
            self.kernel_initializer = 'normal'
            self.kernel_regularizer = regularizers.l2(0.01)
def add_to_model(self, model, params, count, input_dim=None, output_layer_units=None, mode=None, layers=None):
"""
Add layer to model
"""
## Input Layer
if self.layer_type == "input":
units = params[str(self.layer_type + "_layer_" + str(count) + "_units")]
if input_dim is not None:
model.add(Dense(units, input_dim=input_dim, kernel_initializer=self.kernel_initializer, kernel_regularizer=self.kernel_regularizer))
else:
model.add(Dense(units, kernel_initializer=self.kernel_initializer, kernel_regularizer=self.kernel_regularizer))
return model
## Hidden Layer
if self.layer_type == "hidden":
units = params[str(self.layer_type + "_layer_" + str(count) + "_units")]
if input_dim is not None:
model.add(Dense(units, input_dim=input_dim, kernel_initializer=self.kernel_initializer, kernel_regularizer=self.kernel_regularizer))
else:
model.add(Dense(units, kernel_initializer=self.kernel_initializer, kernel_regularizer=self.kernel_regularizer))
return model
## Activation Layer
if self.layer_type == "activation":
model.add(get_activation_layer(params["activation_function"]))
return model
## Dropout Layer
if self.layer_type == "dropout":
dropout_rate = params["dropout_rate"]
if dropout_rate > 0:
model.add(Dropout(dropout_rate))
return model
## Output Layer
if self.layer_type == "output":
if mode == "classifier":
model.add(Dense(output_layer_units, kernel_initializer=self.kernel_initializer))
try:
if params["output_activation_function"] != None:
model.add(get_activation_layer(params["output_activation_function"]))
except KeyError:
pass
elif mode == "regressor":
model.add(Dense(output_layer_units, kernel_initializer=self.kernel_initializer))
else:
raise ValueError("mode has to be 'regressor' or 'classifier'")
return model
## LSTM Layer
# if self.layer_type == "LSTM":
# units = params[str(self.layer_type + "_layer_" + str(count) + "_units")]
# count_LSTM = layers.count("LSTM")
# if count < count_LSTM:
# return_sequences = True
# else:
# return_sequences = False
# if input_dim is not None:
# model.add(LSTM(units, input_dim=input_dim, recurrent_activation=params["LSTM_recurrent_activation_function"], return_sequences=return_sequences))
# else:
# model.add(LSTM(units, recurrent_activation=params["LSTM_recurrent_activation_function"], return_sequences=return_sequences))
# return model
| 37.336449
| 163
| 0.606758
| 429
| 3,995
| 5.421911
| 0.191142
| 0.050301
| 0.067068
| 0.045142
| 0.493981
| 0.433362
| 0.405847
| 0.405847
| 0.394239
| 0.394239
| 0
| 0.003197
| 0.295369
| 3,995
| 106
| 164
| 37.688679
| 0.823091
| 0.23229
| 0
| 0.327273
| 0
| 0
| 0.086796
| 0.017426
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054545
| false
| 0.018182
| 0.054545
| 0
| 0.309091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a9b8204ffb1f187d8be96695d1cf97c47ce3c0a
| 3,618
|
py
|
Python
|
tournament.py
|
karol-prz/predictor
|
2774fe2a88a9bf5f7aa58f884cdcf879182c64c7
|
[
"MIT"
] | null | null | null |
tournament.py
|
karol-prz/predictor
|
2774fe2a88a9bf5f7aa58f884cdcf879182c64c7
|
[
"MIT"
] | null | null | null |
tournament.py
|
karol-prz/predictor
|
2774fe2a88a9bf5f7aa58f884cdcf879182c64c7
|
[
"MIT"
] | null | null | null |
class Tournament:
def __init__(self):
# Dictionary of games played, scored, conceded, gd, points
self.tables = {'A': {}, 'B': {}, 'C': {}, 'D': {}, 'E': {}, 'F': {}, 'G': {}, 'H':{}}
self.groups_finished = False
self.records = {}
self.references = {}
from parsers.utils import read_json
self.r = read_json('/home/karol/python/predictor/data/matches')
def update_match(self, group, home_team, away_team, home_score, away_score, game, result, date):
self.r.append({
"away_score": away_score,
"away_team": away_team,
"date": date,
"home_score": home_score,
"home_team": home_team
})
if not self.groups_finished:
self.update_group_match(group, home_team, away_team, home_score, away_score)
else:
if result == 'W':
self.records['W'+game] = home_team
self.records['L'+game] = away_team
elif result == 'L':
self.records['L'+game] = home_team
self.records['W'+game] = away_team
def get_reference(self, key):
if key[:1] not in ['W', 'L']:
return self.references[key]
else:
return self.records[key]
def update_group_match(self, group, home_team, away_team, home_score, away_score):
group = group.split(' ')[1]
table = self.tables[group]
home_score = int(home_score)
away_score = int(away_score)
home_points = 0
away_points = 0
if home_score > away_score:
home_points = 3
away_points = 0
elif away_score > home_score:
home_points = 0
away_points = 3
else:
home_points = 1
away_points = 1
d = {
home_team: [home_score, away_score, home_points],
away_team: [away_score, home_score, away_points]
}
# Check if teams are present
for i in [home_team, away_team]:
if i not in table:
table[i] = [0, 0, 0, 0, 0, 0]
table[i][0] += 1
table[i][1] += d[i][0]
table[i][2] += d[i][1]
table[i][3] += d[i][0] - d[i][1]
table[i][4] += d[i][2]
self.tables[group] = table
self.check_finished()
def check_finished(self):
for i in self.tables:
table = self.tables[i]
for j in table:
team = table[j]
if team[0] != 3:
return
self.groups_finished = True
for i in self.tables:
table = self.tables[i]
print(table)
table = self.sort_group(table)
keys = list(table)
one = ''
two = ''
for item in table:
team = table[item]
if team[5] == 1:
one = item
elif team[5] == 2:
two = item
self.references['1'+ i] = one
self.references['2'+ i] = two
from pprint import pprint
pprint(self.tables)
pprint(self.references)
    def sort_group(self, table):
        """Ranks the teams in a group table (1 = best) by points, then goal difference, then goals scored."""
        rank = 1
        keys = list(table)
        for _ in range(len(table)):
            highest = None
            highest_index = None
            for j in range(len(table)):
                current = table[keys[j]]
                if current[5] != 0:
                    continue  # already ranked
                if highest is None:
                    highest_index = j
                    highest = current
                elif current[4] > highest[4]:  # more points
                    highest = current
                    highest_index = j
                elif current[4] == highest[4]:
                    if current[3] > highest[3]:  # better goal difference
                        highest = current
                        highest_index = j
                    elif current[3] == highest[3] and current[1] > highest[1]:  # more goals scored
                        highest = current
                        highest_index = j
            table[keys[highest_index]][5] = rank
            rank += 1
        return table
def get_form(self, country, date):
from parsers.match_parser import get_form
return get_form(country, date, self.r)
def get_h2h(self, country1, country2, date):
from parsers.match_parser import get_h2h
return get_h2h(country1, country2, date, self.r)
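# Hedged usage sketch (team names and scores are illustrative assumptions;
# note that Tournament.__init__ reads a local matches JSON file, so
# constructing an instance requires that file to exist):
def _demo_record_group_game():
    t = Tournament()
    t.update_match('Group A', 'Russia', 'Saudi Arabia', 5, 0, '1', None, '2018-06-14')
    return t.tables['A']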
| 22.060976
| 97
| 0.622443
| 538
| 3,618
| 4.027881
| 0.187732
| 0.045685
| 0.041994
| 0.049838
| 0.255653
| 0.209045
| 0.159206
| 0.091832
| 0.091832
| 0.044301
| 0
| 0.020645
| 0.236871
| 3,618
| 163
| 98
| 22.196319
| 0.764216
| 0.022941
| 0
| 0.180328
| 0
| 0
| 0.028961
| 0.011641
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065574
| false
| 0
| 0.032787
| 0
| 0.155738
| 0.057377
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a9f119bf4f058c5f85a03cbf6f4da2b349b8dd5
| 1,604
|
py
|
Python
|
data/ABC/filter_out_tiny_models.py
|
YoungXIAO13/6DPoseEstimationDatasets
|
b9cb1d9842870860a15bf3cf600cdfb68d1e195e
|
[
"MIT"
] | 383
|
2019-09-03T15:29:22.000Z
|
2022-03-28T02:01:15.000Z
|
data/ABC/filter_out_tiny_models.py
|
Fang-Haoshu/ObjectPoseEstimationSummary
|
2a11797e6b01e1820105740fcaeb7c049094c57f
|
[
"MIT"
] | 5
|
2019-10-18T13:04:07.000Z
|
2021-09-29T05:26:52.000Z
|
data/ABC/filter_out_tiny_models.py
|
Fang-Haoshu/ObjectPoseEstimationSummary
|
2a11797e6b01e1820105740fcaeb7c049094c57f
|
[
"MIT"
] | 63
|
2019-09-17T12:13:51.000Z
|
2022-03-28T03:06:05.000Z
|
import os
from os.path import join, getsize
from PIL import Image
from tqdm import tqdm
import numpy as np
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_dir', type=str, help='dataset directory')
parser.add_argument('--model', type=str, default='abc_0000', help='subdirectory containing obj files')
parser.add_argument('--views', type=str, default='multiviews', help='subdirectory containing multiviews')
args = parser.parse_args()
obj_dir = join(args.dataset_dir, args.model)
view_dir = join(args.dataset_dir, args.views)
model_names = sorted(os.listdir(view_dir))
csv_file = join(args.dataset_dir, '{}.txt'.format(args.model))
with open(csv_file, 'w') as f:
f.write('model_name,size,ratio_min,ratio_max,occupy_min,occupy_max\n')
for model_name in tqdm(model_names):
size = int(getsize(join(obj_dir, '{}.obj'.format(model_name))) / (2 ** 20))
img_dir = join(view_dir, model_name, 'nocs')
images = os.listdir(img_dir)
ratio = []
occupy = []
for img in images:
try:
rgb = Image.open(join(img_dir, img))
w, h = rgb.size
left, upper, right, lower = rgb.getbbox()
ratio.append((lower - upper) / (right - left))
occupy.append(np.sum(np.array(rgb.convert('L')) != 0) / (w * h))
except TypeError:
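                # Image.getbbox() returns None for an all-black render, so the tuple unpack above raises TypeError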
ratio.append(0)
occupy.append(0)
with open(csv_file, 'a') as f:
f.write(model_name + ',' + str(size) + ',' + str(np.min(ratio)) + ',' + str(np.max(ratio)) + ',' +
str(np.min(occupy)) + ',' + str(np.max(occupy)) + '\n')
| 39.121951
| 106
| 0.639027
| 231
| 1,604
| 4.30303
| 0.34632
| 0.045272
| 0.051308
| 0.054326
| 0.086519
| 0.086519
| 0
| 0
| 0
| 0
| 0
| 0.007764
| 0.197007
| 1,604
| 40
| 107
| 40.1
| 0.763975
| 0
| 0
| 0
| 0
| 0
| 0.133416
| 0.036783
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3aa10622900b7fd3873b3fb7ab47170cdb7c2440
| 2,959
|
py
|
Python
|
assignments/06-python-first-lines/first_lines.py
|
patarajarina/biosys-analytics
|
a5e8845211797364ec6f7f8679911ed3b5312887
|
[
"MIT"
] | null | null | null |
assignments/06-python-first-lines/first_lines.py
|
patarajarina/biosys-analytics
|
a5e8845211797364ec6f7f8679911ed3b5312887
|
[
"MIT"
] | null | null | null |
assignments/06-python-first-lines/first_lines.py
|
patarajarina/biosys-analytics
|
a5e8845211797364ec6f7f8679911ed3b5312887
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
Author : patarajarina
Date : 2019-02-25
Purpose: Rock the Casbah
"""
import argparse
import sys
import os
# --------------------------------------------------
def get_args():
"""get command-line arguments"""
parser = argparse.ArgumentParser(
description='Argparse Python script',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'positional', metavar='DIR', help='A positional argument', nargs='+')
# parser.add_argument(
# 'DIR',
# '--',
# help='A named string argument',
# metavar='DIR',
# type=dir,
# default=None,
# nargs='+',
# default='')
parser.add_argument(
'-w',
'--width',
help='A named integer argument',
metavar='int',
type=int,
default=50)
# parser.add_argument(
# '-f', '--flag', help='A boolean flag', action='store_true')
return parser.parse_args()
# --------------------------------------------------
def warn(msg):
"""Print a message to STDERR"""
print(msg, file=sys.stderr)
# --------------------------------------------------
def die(msg='Something bad happened'):
"""warn() and exit with error"""
warn(msg)
sys.exit(1)
# --------------------------------------------------
def main():
"""Make a jazz noise here"""
args = get_args()
# args = sys.argv[1:]
# str_arg = args.arg
DIRS = args.positional
# flag_arg = args.flag
width = args.width
# if not os.path.isdir(DIRS):
# print('"{}" is not a directory'.format(dirname), file=sys.stderr)
# print(DIRS)
# dirname = args[0] #check
for dirname in DIRS:
if not dirname[-1:] == '/':
dirname = dirname + '/'
if not os.path.isdir(dirname):
if dirname[-1:] == '/':
dirname = dirname[:-1]
print('"{}" is not a directory'.format(dirname), file=sys.stderr)
else:
#if len(DIRS)>1:
print(dirname[:-1])
# for tup in dirname.items():
# print(tup)
out = {}
            for eachfile in os.listdir(dirname):
                with open(dirname + eachfile, "r") as f:  # close each handle promptly
                    firstline = f.readline().strip()
                out[firstline] = eachfile
#print(out)
            for keyline, valfile in sorted(out.items()):
                leftlen = width - len(keyline) - len(valfile)
                dots = '.' * max(leftlen, 1)  # pad so the columns line up at `width`
                print('{} {} {}'.format(keyline, dots, valfile))
# --------------------------------------------------
if __name__ == '__main__':
main()
| 26.9
| 82
| 0.460967
| 285
| 2,959
| 4.719298
| 0.403509
| 0.026766
| 0.050558
| 0.016357
| 0.092193
| 0.068401
| 0.068401
| 0.068401
| 0.068401
| 0.068401
| 0
| 0.009965
| 0.32173
| 2,959
| 109
| 83
| 27.146789
| 0.660189
| 0.370057
| 0
| 0.040816
| 0
| 0
| 0.088594
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081633
| false
| 0
| 0.061224
| 0
| 0.163265
| 0.081633
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 3aa3c3abf98c6d1ad3b59e984112889aa463ffaf
| 4,251
| py
| Python
| inocybe_dhcp/rfc2131.py
| kot-begemot-uk/opx-dhcp
| 683c7c52f19eedc57196403213c9695ac3439526
| ["Apache-2.0"] | null | null | null
| inocybe_dhcp/rfc2131.py
| kot-begemot-uk/opx-dhcp
| 683c7c52f19eedc57196403213c9695ac3439526
| ["Apache-2.0"] | null | null | null
| inocybe_dhcp/rfc2131.py
| kot-begemot-uk/opx-dhcp
| 683c7c52f19eedc57196403213c9695ac3439526
| ["Apache-2.0"] | 2
| 2018-09-05T07:59:21.000Z
| 2018-09-14T07:15:17.000Z
|
#!/usr/bin/env python3
'''RFC 2131 DHCP message structures.'''
# Copyright (c) 2018 Inocybe Technologies.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT
# LIMITATION ANY IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS
# FOR A PARTICULAR PURPOSE, MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
from six import add_metaclass
from .types import (
StructuredValue,
UInt8, UInt16, UInt32, IPv4,
HexString, NulTerminatedString,
)
from .rfc2132 import Cookie, Options
from .options import Supported
@add_metaclass(StructuredValue)
class Message(object):
    '''A class representing an RFC 2131 DHCP message.
Each instance is a :class:`dict` instance restricted to the pairs specified in :attr:`spec`:
attempting to set a pair at a key not in :attr:`spec` is rejected with :class:`KeyError`;
attempting to set a pair with a value which is not supported by that pair's value type is
rejected with :class:`ValueError` or :class:`TypeError`.
An instance of this class may be created as per :class:`dict`, or by calling classmethod
:meth:`unpack` with a binary string, encoded as per RFC 2131. To serialise an instance to a
binary string, call :meth:`pack`.
If a new value is set at 'hlen' or 'chaddr' then call :meth:`truncate_chaddr` to ensure that
the encoded value of 'chaddr' does not exceed 'hlen' octets.
'''
name = 'RFC 2131 DHCP message'
### :attr:`spec` is a sequence of (key, value type) pairs
spec = (
('op', UInt8(1, 2)),
('htype', UInt8()),
('hlen', UInt8(1, 16)),
('hops', UInt8()),
('xid', UInt32()),
('secs', UInt16()),
('flags', UInt16()),
('ciaddr', IPv4()),
('yiaddr', IPv4()),
('siaddr', IPv4()),
('giaddr', IPv4()),
('chaddr', HexString(16)),
('sname', NulTerminatedString(64)),
('file', NulTerminatedString(128)),
('cookie', Cookie()),
('options', Options()),
)
def __init__(self):
self.truncate_chaddr()
def truncate_chaddr(self):
'''If this instance's 'chaddr' is too long to be encoded in 'hlen' octets then truncate the
value of 'chaddr' so that it can be encoded in 'hlen' octets. If this instance does not
have a value for 'chaddr' or 'hlen' then do nothing.
'''
### pylint: disable=unsubscriptable-object
try:
self['chaddr'] = self.fields['chaddr'].truncate(self['chaddr'], self['hlen']) ### pylint: disable=no-member
except KeyError:
pass
def decode_options(self, supported=None):
'''Return a plain :class:`dict` copy of `self`, with 'options' decoded using `supported`. If
`supported` is None, then decode options as TLV.
'''
if supported is None:
### use an empty set of supported options to decode as TLV
supported = Supported()
copy = dict(self)
copy['options'] = supported.decode(self['options']) ### pylint: disable=unsubscriptable-object
return copy
def encode_options(self, options, supported=None, append=False):
'''Set this instance's 'options' from `options` encoded using `supported`. If `supported` is
None, then encode options from TLV. If `append` is True, then append encoded `options` to
the existing 'options' rather than replacing them.
'''
if supported is None:
### use an empty set of supported options to encode from TLV
supported = Supported()
encoded = tuple(supported.encode(options))
if append:
self['options'] += encoded ### pylint: disable=unsubscriptable-object
else:
self['options'] = encoded ### pylint: disable=unsubscriptable-object
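A minimal usage sketch of the dict-like API described in the Message docstring; the field values are illustrative and the pack()/unpack() round trip is assumed from the docstring, not verified against the package:

# hypothetical round trip; values are illustrative, not a complete DHCP message
msg = Message()
msg['op'] = 1                        # BOOTREQUEST
msg['hlen'] = 6
msg['chaddr'] = '00112233445566778899aabbccddeeff'
msg.truncate_chaddr()                # trim 'chaddr' to 'hlen' octets
wire = msg.pack()                    # serialise per RFC 2131
same = Message.unpack(wire)          # classmethod: parse the binary string back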
| 42.51
| 119
| 0.639379
| 545
| 4,251
| 4.966972
| 0.357798
| 0.024012
| 0.041374
| 0.05024
| 0.131511
| 0.101219
| 0.101219
| 0.036941
| 0.036941
| 0.036941
| 0
| 0.019706
| 0.247942
| 4,251
| 99
| 120
| 42.939394
| 0.827025
| 0.559868
| 0
| 0.08
| 0
| 0
| 0.09047
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0.02
| 0.08
| 0
| 0.24
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3aa5d9d21b6bad4cb5b8740e530181d78e841342
| 1,883
|
py
|
Python
|
src/data/get_raw_data.py
|
vivek1739/titanic
|
39058f7ecef3ae0e1962fc1dfc550b654e97e1f0
|
[
"MIT"
] | null | null | null |
src/data/get_raw_data.py
|
vivek1739/titanic
|
39058f7ecef3ae0e1962fc1dfc550b654e97e1f0
|
[
"MIT"
] | null | null | null |
src/data/get_raw_data.py
|
vivek1739/titanic
|
39058f7ecef3ae0e1962fc1dfc550b654e97e1f0
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
import os
from dotenv import find_dotenv,load_dotenv
from requests import session
import logging
#payload for login to kaggle
payload = {
'action':'login',
'username': os.environ.get("KAGGLE_USERNAME"),
'password': os.environ.get("KAGGLE_PASSWORD")
}
def extract_data(url, file_path):
'''method to extract data'''
with session() as c:
c.post('https://www.kaggle.com/account/login',data=payload)
with open(file_path,'wb') as handle:
response = c.get(url, stream=True)
for block in response.iter_content(1024):
handle.write(block)
def main(project_dir):
''' main method '''
#get logger
logger = logging.getLogger(__name__)
logger.info('getting raw data')
logger.info(project_dir)
# urls
train_url = 'https://www.kaggle.com/c/3136/download/train.csv'
test_url = 'https://www.kaggle.com/c/3136/download/test.csv'
# raw sub folder inside data folder
raw_data_path = os.path.join(os.path.curdir,'data','raw')
train_data_path = os.path.join(raw_data_path,'train.csv')
test_data_path = os.path.join(raw_data_path,'test.csv')
# extract data
extract_data(train_url,train_data_path)
extract_data(test_url,test_data_path)
logger.info('downloaded raw training and test data')
if __name__ =='__main__':
#getting the root directory
project_dir = os.path.join(os.path.dirname(__file__),os.pardir,os.pardir)
print('project dir : '+project_dir)
# setup logger
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO,format=log_fmt)
# find .env automatically by walking up the directories until it's found
dotenv_path = find_dotenv()
load_dotenv(dotenv_path)
# call the main
main(project_dir)
| 30.868852
| 77
| 0.668083
| 263
| 1,883
| 4.574144
| 0.376426
| 0.04655
| 0.03325
| 0.042394
| 0.136326
| 0.103076
| 0.103076
| 0.103076
| 0
| 0
| 0
| 0.008719
| 0.208178
| 1,883
| 61
| 78
| 30.868852
| 0.798122
| 0.141795
| 0
| 0
| 0
| 0
| 0.213793
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0.027778
| 0.111111
| 0
| 0.166667
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 3aa8e8a10f90ca6b21d728f7a1f51b3d5e590506
| 770
| py
| Python
| apps/splash/migrations/0006_auto_20151213_0309.py
| Kpaubert/onlineweb4
| 9ac79f163bc3a816db57ffa8477ea88770d97807
| ["MIT"] | 32
| 2017-02-22T13:38:38.000Z
| 2022-03-31T23:29:54.000Z
| apps/splash/migrations/0006_auto_20151213_0309.py
| Kpaubert/onlineweb4
| 9ac79f163bc3a816db57ffa8477ea88770d97807
| ["MIT"] | 694
| 2017-02-15T23:09:52.000Z
| 2022-03-31T23:16:07.000Z
| apps/splash/migrations/0006_auto_20151213_0309.py
| Kpaubert/onlineweb4
| 9ac79f163bc3a816db57ffa8477ea88770d97807
| ["MIT"] | 35
| 2017-09-02T21:13:09.000Z
| 2022-02-21T11:30:30.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django_extensions.db.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("splash", "0005_auto_20150422_2236")]
operations = [
migrations.AlterField(
model_name="splashevent",
name="created",
field=django_extensions.db.fields.CreationDateTimeField(
auto_now_add=True, verbose_name="created"
),
),
migrations.AlterField(
model_name="splashevent",
name="modified",
field=django_extensions.db.fields.ModificationDateTimeField(
auto_now=True, verbose_name="modified"
),
),
]
| 27.5
| 72
| 0.614286
| 69
| 770
| 6.594203
| 0.521739
| 0.105495
| 0.118681
| 0.158242
| 0.320879
| 0.193407
| 0
| 0
| 0
| 0
| 0
| 0.030853
| 0.284416
| 770
| 27
| 73
| 28.518519
| 0.794918
| 0.027273
| 0
| 0.380952
| 0
| 0
| 0.108434
| 0.03079
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 3aaa3f49b8735100881fb406c235065fe7efe4e9
| 314
| py
| Python
| Ekeopara_Praise/Phase 1/Python Basic 1/Day 3 Tasks/Task 10.py
| nkem1010/python-challenge-solutions
| 203cedc691094a83b110fc75764aac51dbbc1a03
| ["MIT"] | null | null | null
| Ekeopara_Praise/Phase 1/Python Basic 1/Day 3 Tasks/Task 10.py
| nkem1010/python-challenge-solutions
| 203cedc691094a83b110fc75764aac51dbbc1a03
| ["MIT"] | null | null | null
| Ekeopara_Praise/Phase 1/Python Basic 1/Day 3 Tasks/Task 10.py
| nkem1010/python-challenge-solutions
| 203cedc691094a83b110fc75764aac51dbbc1a03
| ["MIT"] | null | null | null |
'''10. Write a Python program to get a string which is n (non-negative integer) copies of a given string.
Tools: input function, slicing'''
word = str(input("Type in any string or word: "))
n = int(input("Enter the number of repititions: "))
ans = ""
for i in range(n):
ans = ans + word
print(ans)
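The accumulation loop above can also be written with Python's string repetition operator; a one-line equivalent sketch:

# equivalent to the loop: n copies of word
print(word * n)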
| 24.153846
| 105
| 0.656051
| 52
| 314
| 3.961538
| 0.711538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008163
| 0.219745
| 314
| 12
| 106
| 26.166667
| 0.832653
| 0.423567
| 0
| 0
| 0
| 0
| 0.358824
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 3aac4e1d77f4bf335aa448746527f97c1db73e42
| 2,085
| py
| Python
| tests/test_api.py
| sebaacuna/bigcommerce-api-python
| 59ef206d7296c196a0ae0400b6bf9bdb5c2f72af
| ["MIT"] | null | null | null
| tests/test_api.py
| sebaacuna/bigcommerce-api-python
| 59ef206d7296c196a0ae0400b6bf9bdb5c2f72af
| ["MIT"] | null | null | null
| tests/test_api.py
| sebaacuna/bigcommerce-api-python
| 59ef206d7296c196a0ae0400b6bf9bdb5c2f72af
| ["MIT"] | null | null | null |
import unittest
import bigcommerce.api
from bigcommerce.connection import Connection, OAuthConnection
from bigcommerce.resources import ApiResource
from mock import MagicMock, patch, Mock
class TestBigcommerceApi(unittest.TestCase):
""" Test API client creation and helpers"""
def test_create_basic(self):
api = bigcommerce.api.BigcommerceApi(host='store.mybigcommerce.com', basic_auth=('admin', 'abcdef'))
self.assertIsInstance(api.connection, Connection)
self.assertNotIsInstance(api.connection, OAuthConnection)
def test_create_oauth(self):
api = bigcommerce.api.BigcommerceApi(client_id='123456', store_hash='abcdef', access_token='123abc')
self.assertIsInstance(api.connection, OAuthConnection)
def test_create_incorrect_args(self):
self.assertRaises(Exception, lambda: bigcommerce.api.BigcommerceApi(client_id='123', basic_auth=('admin', 'token')))
class TestApiResourceWrapper(unittest.TestCase):
def test_create(self):
api = MagicMock()
api.connection = MagicMock()
wrapper = bigcommerce.api.ApiResourceWrapper('ApiResource', api)
self.assertEqual(api.connection, wrapper.connection)
self.assertEqual(wrapper.resource_class, ApiResource)
wrapper = bigcommerce.api.ApiResourceWrapper(ApiResource, api)
self.assertEqual(wrapper.resource_class, ApiResource)
def test_str_to_class(self):
cls = bigcommerce.api.ApiResourceWrapper.str_to_class('ApiResource')
self.assertEqual(cls, ApiResource)
self.assertRaises(AttributeError, lambda: bigcommerce.api.ApiResourceWrapper.str_to_class('ApiResourceWhichDoesNotExist'))
@patch.object(ApiResource, 'get')
def test_get_attr(self, patcher):
api = MagicMock()
api.connection = MagicMock()
result = {'id': 1}
patcher.return_value = result
wrapper = bigcommerce.api.ApiResourceWrapper('ApiResource', api)
self.assertEqual(wrapper.get(1), result)
patcher.assert_called_once_with(1, connection=api.connection)
| 36.578947
| 130
| 0.729976
| 217
| 2,085
| 6.880184
| 0.317972
| 0.084394
| 0.107167
| 0.078366
| 0.425988
| 0.304086
| 0.146015
| 0.146015
| 0.100469
| 0
| 0
| 0.008641
| 0.167386
| 2,085
| 56
| 131
| 37.232143
| 0.851382
| 0.017266
| 0
| 0.216216
| 0
| 0
| 0.064247
| 0.025012
| 0
| 0
| 0
| 0
| 0.297297
| 1
| 0.162162
| false
| 0
| 0.135135
| 0
| 0.351351
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 3aad54a74724c543c7739f87f3d7419f9de3dd0e
| 638
| py
| Python
| media.py
| anuraglahon16/Make_a_movie_website
| 4d5371b7cc1286f2444376a221595d8c6bb0d492
| ["MIT"] | null | null | null
| media.py
| anuraglahon16/Make_a_movie_website
| 4d5371b7cc1286f2444376a221595d8c6bb0d492
| ["MIT"] | null | null | null
| media.py
| anuraglahon16/Make_a_movie_website
| 4d5371b7cc1286f2444376a221595d8c6bb0d492
| ["MIT"] | null | null | null |
"""Defines the Movie class"""
import webbrowser
class Movie(object):
"""This class provides a way to store movie related information."""
def __init__(self, movie_title, movie_storyline, poster_image,
trailer_youtube, movie_release_date):
self.title = movie_title
self.storyline = movie_storyline
self.poster_image_url = poster_image
self.trailer_youtube_url = trailer_youtube
self.release_date = movie_release_date
def show_trailer(self):
"""Plays the movie trailer in the web browser."""
webbrowser.open(self.trailer_youtube_url)
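A minimal usage sketch of the class above (all values are illustrative placeholders, not taken from the project):

# hypothetical instance of the Movie class
example = Movie("Example Movie",
                "A short storyline",
                "https://example.com/poster.jpg",
                "https://example.com/trailer",
                "1995-11-22")
example.show_trailer()  # opens the trailer URL in the default web browser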
| 33.578947
| 72
| 0.677116
| 77
| 638
| 5.298701
| 0.428571
| 0.137255
| 0.078431
| 0.102941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.247649
| 638
| 18
| 73
| 35.444444
| 0.85
| 0.202194
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.090909
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 3ab1c994ef22b2ed6be0bfd91c5d34915c683650
| 629
| py
| Python
| sync_binlog/output_log.py
| liusl104/py_sync_binlog
| 33a67f545159767d38a522d28d2f79b3ac3802ca
| ["Apache-2.0"] | 3
| 2018-09-18T03:29:33.000Z
| 2020-01-13T03:34:39.000Z
| sync_binlog/output_log.py
| liusl104/py_sync_binlog
| 33a67f545159767d38a522d28d2f79b3ac3802ca
| ["Apache-2.0"] | null | null | null
| sync_binlog/output_log.py
| liusl104/py_sync_binlog
| 33a67f545159767d38a522d28d2f79b3ac3802ca
| ["Apache-2.0"] | 1
| 2022-01-25T09:39:17.000Z
| 2022-01-25T09:39:17.000Z
|
# encoding=utf8
import logging  # import the logging module
from logging.handlers import TimedRotatingFileHandler
from sync_conf import log_bese_path, log_backup_count, log_msg_level
# log file
logfile = log_bese_path + '/logs/' + 'binlog_sync.log'
logger = logging.getLogger()
logger.setLevel(log_msg_level)
# rotate the log daily; by default seven backups are kept
fh = TimedRotatingFileHandler(logfile, when='D', interval=1, backupCount=log_backup_count)
# datefmt = '%Y-%m-%d %H:%M:%S'
format_str = "%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s"
formatter = logging.Formatter(format_str, datefmt=None)
fh.setFormatter(formatter)
logger.addHandler(fh)
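A hedged usage sketch: importing this module configures the root logger as a side effect, so consumers only need standard logging calls (assumes sync_conf is importable and the logs directory exists):

# hypothetical consumer module
from sync_binlog import output_log  # noqa: F401  (import configures the root logger)
import logging
logging.getLogger(__name__).info('replication started')  # written to binlog_sync.log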
| 28.590909
| 90
| 0.761526
| 86
| 629
| 5.383721
| 0.569767
| 0.030238
| 0.047516
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0053
| 0.100159
| 629
| 21
| 91
| 29.952381
| 0.812721
| 0.117647
| 0
| 0
| 0
| 0.090909
| 0.171533
| 0.05292
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.272727
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 3ac6b31791c0cfaed3de6874a765f1f48fec4c3e
| 741
| py
| Python
| tunobase/social_media/facebook/utils.py
| unomena/tunobase
| 9219e6c5a49eecd1c66dd1b518640c5d678acab6
| ["BSD-3-Clause"] | null | null | null
| tunobase/social_media/facebook/utils.py
| unomena/tunobase
| 9219e6c5a49eecd1c66dd1b518640c5d678acab6
| ["BSD-3-Clause"] | null | null | null
| tunobase/social_media/facebook/utils.py
| unomena/tunobase
| 9219e6c5a49eecd1c66dd1b518640c5d678acab6
| ["BSD-3-Clause"] | null | null | null |
'''
Created on 09 Nov 2013
@author: michael
'''
import json
import urllib2
import urllib
from django.conf import settings
import facebook
def validate_access_token(access_token):
'''
Validate a Facebook access token
'''
# Get an app access token
app_token = facebook.get_app_access_token(
settings.FACEBOOK_APP_ID,
settings.FACEBOOK_APP_SECRET
)
args = {
'input_token': access_token,
'access_token': app_token
}
file = urllib2.urlopen(
"https://graph.facebook.com/debug_token?" + urllib.urlencode(args)
)
try:
result = json.loads(file.read())
finally:
file.close()
return result['data']['is_valid'], result['data']['user_id']
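A usage sketch (Python 2, matching the urllib2 imports above); the token is a placeholder, and FACEBOOK_APP_ID/FACEBOOK_APP_SECRET must exist in Django settings:

# hypothetical caller
is_valid, user_id = validate_access_token('USER_ACCESS_TOKEN_PLACEHOLDER')
if is_valid:
    print('token belongs to Facebook user %s' % user_id)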
| 19.5
| 74
| 0.645074
| 90
| 741
| 5.111111
| 0.511111
| 0.167391
| 0.104348
| 0.095652
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014286
| 0.244265
| 741
| 37
| 75
| 20.027027
| 0.807143
| 0.132254
| 0
| 0
| 0
| 0
| 0.137097
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.227273
| 0
| 0.318182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 3acea141522edbabbe9dcfc8fdb02306077a23f4
| 6,988
| py
| Python
| pysc2/agents/myAgent/myAgent_6/decisionMaker/hierarchical_learning_structure.py
| Hotpotfish/pysc2
| 3d7f7ffc01a50ab69d435b65c892cd0bc11265a8
| ["Apache-2.0"] | null | null | null
| pysc2/agents/myAgent/myAgent_6/decisionMaker/hierarchical_learning_structure.py
| Hotpotfish/pysc2
| 3d7f7ffc01a50ab69d435b65c892cd0bc11265a8
| ["Apache-2.0"] | null | null | null
| pysc2/agents/myAgent/myAgent_6/decisionMaker/hierarchical_learning_structure.py
| Hotpotfish/pysc2
| 3d7f7ffc01a50ab69d435b65c892cd0bc11265a8
| ["Apache-2.0"] | null | null | null |
import datetime
import pysc2.agents.myAgent.myAgent_6.config.config as config
from pysc2.agents.myAgent.myAgent_6.decisionMaker.DQN import DQN
import pysc2.agents.myAgent.myAgent_6.smart_actions as sa
import pysc2.agents.myAgent.myAgent_6.tools.handcraft_function as handcraft_function
from pysc2.env.environment import StepType
from pysc2.lib import actions
class decision_maker():
def __init__(self, network):
self.network = network
self.previous_state = None
self.previous_action = None
self.previous_reward = None
self.current_state = None
self.load_and_train = True
class hierarchical_learning_structure():
def __init__(self):
self.episode = -1
self.begin_time = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
self.DataShape = (None, config.MAP_SIZE, config.MAP_SIZE, 39)
self.top_decision_maker = decision_maker(
DQN(config.MU, config.SIGMA, config.LEARING_RATE, len(sa.controllers), 0, self.DataShape, 'top_decision_maker'))
self.controllers = []
for i in range(len(sa.controllers)):
            # 5 is the number of extra parameter slots; the 6 slots hold the action id, RAW_TYPES.queued, RAW_TYPES.unit_tags, RAW_TYPES.target_unit_tag and RAW_TYPES.world (which occupies two slots)
self.controllers.append(
decision_maker(DQN(config.MU, config.SIGMA, config.LEARING_RATE, len(sa.controllers[i]), 5, self.DataShape, 'controller' + str(i))))
def top_decision_maker_train_model(self, obs, modelLoadPath):
        # whether to record the transition data
if self.top_decision_maker.previous_action is not None:
self.top_decision_maker.network.perceive(self.top_decision_maker.previous_state,
self.top_decision_maker.previous_action,
self.top_decision_maker.previous_reward,
self.top_decision_maker.current_state,
obs.last())
        # whether we are in resume-training mode
if modelLoadPath is not None and self.top_decision_maker.load_and_train is True:
self.top_decision_maker.load_and_train = False
self.top_decision_maker.network.restoreModel(modelLoadPath)
print('top')
controller_number = self.top_decision_maker.network.egreedy_action(self.top_decision_maker.current_state)
self.top_decision_maker.previous_reward = obs.reward
self.top_decision_maker.previous_state = self.top_decision_maker.current_state
self.top_decision_maker.previous_action = controller_number
return controller_number
def top_decision_maker_test_model(self, modelLoadPath):
return self.top_decision_maker.network.action(self.top_decision_maker.current_state, modelLoadPath)
def choose_controller(self, obs, mark, modelLoadPath):
self.top_decision_maker.current_state = handcraft_function.get_all_observation(obs)
if mark == 'TRAIN':
controller_number = self.top_decision_maker_train_model(obs, modelLoadPath)
return controller_number
elif mark == 'TEST':
controller_number = self.top_decision_maker_test_model(modelLoadPath)
return controller_number
def controller_train_model(self, obs, controller_number, modelLoadPath):
if self.controllers[controller_number].previous_action is not None:
self.controllers[controller_number].network.perceive(self.controllers[controller_number].previous_state,
self.controllers[controller_number].previous_action,
self.controllers[controller_number].previous_reward,
self.controllers[controller_number].current_state,
obs.last())
if modelLoadPath is not None and self.controllers[controller_number].load_and_train is True:
self.controllers[controller_number].load_and_train = False
self.top_decision_maker.network.restoreModel(modelLoadPath)
print('con' + str(controller_number))
action_and_parameter = self.controllers[controller_number].network.egreedy_action(self.controllers[controller_number].current_state)
self.controllers[controller_number].previous_reward = obs.reward
self.controllers[controller_number].previous_state = self.controllers[controller_number].current_state
self.controllers[controller_number].previous_action = action_and_parameter
action_and_parameter = handcraft_function.reflect(obs, action_and_parameter)
action = handcraft_function.assembly_action(obs, controller_number, action_and_parameter)
return action
def controller_test_model(self, obs, controller_number, modelLoadPath):
state = self.controllers[controller_number].current_state
action_and_parameter = self.controllers[controller_number].network.action(state, modelLoadPath)
macro_and_parameter = handcraft_function.reflect(obs, action_and_parameter)
action = handcraft_function.assembly_action(obs, controller_number, macro_and_parameter)
return action
def choose_macro(self, obs, controller_number, mark, modelLoadPath):
self.controllers[controller_number].current_state = handcraft_function.get_all_observation(obs)
if mark == 'TRAIN':
action = self.controller_train_model(obs, controller_number, modelLoadPath)
return action
elif mark == 'TEST':
action = self.controller_test_model(obs, controller_number, modelLoadPath)
return action
def get_save_and_loadPath(self, mark, modelSavePath, modelLoadPath):
self.episode += 1
time = str(self.begin_time)
if mark == 'TRAIN':
self.modelSavePath = modelSavePath + '/' + time + '/'
self.modelLoadPath = modelLoadPath
def train_all_neural_network(self):
self.top_decision_maker.network.train_Q_network(self.modelSavePath, self.episode)
for i in range(len(sa.controllers)):
self.controllers[i].network.train_Q_network(self.modelSavePath, self.episode)
def make_choice(self, obs, mark, modelSavePath, modelLoadPath):
if obs[0] == StepType.FIRST:
            # update the load and save paths
self.get_save_and_loadPath(mark, modelSavePath, modelLoadPath)
return actions.RAW_FUNCTIONS.raw_move_camera((config.MAP_SIZE / 2, config.MAP_SIZE / 2))
elif obs[0] == StepType.LAST and mark == 'TRAIN':
self.train_all_neural_network()
else:
controller_number = int(self.choose_controller(obs, mark, self.modelLoadPath)[0])
action = self.choose_macro(obs, controller_number, mark, self.modelLoadPath)
print(action)
return action
| 50.637681
| 148
| 0.680309
| 787
| 6,988
| 5.747141
| 0.161372
| 0.120274
| 0.091974
| 0.101702
| 0.600929
| 0.533053
| 0.395976
| 0.293389
| 0.247402
| 0.247402
| 0
| 0.004336
| 0.240985
| 6,988
| 137
| 149
| 51.007299
| 0.848416
| 0.019319
| 0
| 0.180952
| 0
| 0
| 0.011098
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.104762
| false
| 0
| 0.066667
| 0.009524
| 0.285714
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 3acef1e57e1e1cdd81c2829c115eefd77da35670
| 8,696
| py
| Python
| tfx/components/transform/executor_utils_test.py
| avelez93/tfx
| 75fbb6a7d50e99138609be3ca4c3a204a13a2195
| ["Apache-2.0"] | 1
| 2021-08-22T21:10:48.000Z
| 2021-08-22T21:10:48.000Z
| tfx/components/transform/executor_utils_test.py
| avelez93/tfx
| 75fbb6a7d50e99138609be3ca4c3a204a13a2195
| ["Apache-2.0"] | null | null | null
| tfx/components/transform/executor_utils_test.py
| avelez93/tfx
| 75fbb6a7d50e99138609be3ca4c3a204a13a2195
| ["Apache-2.0"] | 1
| 2020-12-13T22:07:53.000Z
| 2020-12-13T22:07:53.000Z
|
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.transform.executor_utils."""
import tensorflow as tf
from tfx.components.transform import executor_utils
from tfx.components.transform import labels
from tfx.proto import transform_pb2
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
from tfx.types import standard_component_specs
from tfx.utils import proto_utils
class ExecutorUtilsTest(tf.test.TestCase):
def testMaybeBindCustomConfig(self):
def dummy(custom_config):
return custom_config
patched = executor_utils.MaybeBindCustomConfig(
{labels.CUSTOM_CONFIG: '{"value":42}'}, dummy)
self.assertEqual({'value': 42}, patched())
def testValidateOnlyOneSpecified(self):
executor_utils.ValidateOnlyOneSpecified({'a': 1}, ('a', 'b', 'c'))
with self.assertRaisesRegex(ValueError, 'One of'):
executor_utils.ValidateOnlyOneSpecified({'z': 1}, ('a', 'b', 'c'))
with self.assertRaisesRegex(ValueError, 'At most one of'):
executor_utils.ValidateOnlyOneSpecified({
'a': [1],
'b': '1'
}, ('a', 'b', 'c'))
def testValidateOnlyOneSpecifiedAllowMissing(self):
executor_utils.ValidateOnlyOneSpecified({'z': 1}, ('a', 'b', 'c'), True)
with self.assertRaisesRegex(ValueError, 'At most one of'):
executor_utils.ValidateOnlyOneSpecified({
'a': [1],
'b': '1'
}, ('a', 'b', 'c'), True)
def testMatchNumberOfTransformedExamplesArtifacts(self):
input_dict = {
standard_component_specs.EXAMPLES_KEY: [
standard_artifacts.Examples(),
standard_artifacts.Examples()
]
}
original_output_artifact = standard_artifacts.Examples()
original_output_artifact.uri = '/dummy/path'
output_dict = {
standard_component_specs.TRANSFORMED_EXAMPLES_KEY: [
original_output_artifact
]
}
executor_utils.MatchNumberOfTransformedExamplesArtifacts(
input_dict, output_dict)
self.assertLen(
output_dict[standard_component_specs.TRANSFORMED_EXAMPLES_KEY], 2)
# Uris of the new artifacts should be located under the original artifact.
self.assertTrue(output_dict[
standard_component_specs.TRANSFORMED_EXAMPLES_KEY][0].uri.startswith(
original_output_artifact.uri))
def testResolveSplitsConfigEmptyAnalyze(self):
wrong_config = transform_pb2.SplitsConfig(transform=['train'])
with self.assertRaisesRegex(ValueError, 'analyze cannot be empty'):
config_str = proto_utils.proto_to_json(wrong_config)
executor_utils.ResolveSplitsConfig(config_str, [])
def testResolveSplitsConfigOk(self):
config = transform_pb2.SplitsConfig(
analyze=['train'], transform=['train', 'eval'])
config_str = proto_utils.proto_to_json(config)
resolved = executor_utils.ResolveSplitsConfig(config_str, [])
self.assertProtoEquals(config, resolved)
def testResolveSplitsConfigInconsistentSplits(self):
examples1 = standard_artifacts.Examples()
examples1.split_names = artifact_utils.encode_split_names(['train'])
examples2 = standard_artifacts.Examples()
examples2.split_names = artifact_utils.encode_split_names(['train', 'test'])
with self.assertRaisesRegex(ValueError, 'same split names'):
executor_utils.ResolveSplitsConfig(None, [examples1, examples2])
def testResolveSplitsConfigDefault(self):
examples1 = standard_artifacts.Examples()
examples1.split_names = artifact_utils.encode_split_names(['train', 'test'])
examples2 = standard_artifacts.Examples()
examples2.split_names = artifact_utils.encode_split_names(['train', 'test'])
resolved = executor_utils.ResolveSplitsConfig(None, [examples1, examples2])
self.assertEqual(set(resolved.analyze), {'train'})
self.assertEqual(set(resolved.transform), {'train', 'test'})
def testSetSplitNames(self):
# Should work with None.
executor_utils.SetSplitNames(['train'], None)
examples1 = standard_artifacts.Examples()
examples2 = standard_artifacts.Examples()
executor_utils.SetSplitNames(['train'], [examples1, examples2])
self.assertEqual(examples1.split_names, '["train"]')
self.assertEqual(examples2.split_names, examples1.split_names)
def testGetSplitPaths(self):
# Should work with None.
self.assertEmpty(executor_utils.GetSplitPaths(None))
examples1 = standard_artifacts.Examples()
examples1.uri = '/uri1'
examples2 = standard_artifacts.Examples()
examples2.uri = '/uri2'
executor_utils.SetSplitNames(['train', 'test'], [examples1, examples2])
paths = executor_utils.GetSplitPaths([examples1, examples2])
self.assertCountEqual([
'/uri1/Split-train/transformed_examples',
'/uri2/Split-train/transformed_examples',
'/uri1/Split-test/transformed_examples',
'/uri2/Split-test/transformed_examples'
], paths)
def testGetCachePathEntry(self):
# Empty case.
self.assertEmpty(
executor_utils.GetCachePathEntry(
standard_component_specs.ANALYZER_CACHE_KEY, {}))
cache_artifact = standard_artifacts.TransformCache()
cache_artifact.uri = '/dummy'
# input
result = executor_utils.GetCachePathEntry(
standard_component_specs.ANALYZER_CACHE_KEY,
{standard_component_specs.ANALYZER_CACHE_KEY: [cache_artifact]})
self.assertEqual({labels.CACHE_INPUT_PATH_LABEL: '/dummy'}, result)
# output
result = executor_utils.GetCachePathEntry(
standard_component_specs.UPDATED_ANALYZER_CACHE_KEY,
{standard_component_specs.UPDATED_ANALYZER_CACHE_KEY: [cache_artifact]})
self.assertEqual({labels.CACHE_OUTPUT_PATH_LABEL: '/dummy'}, result)
def testGetStatusOutputPathsEntries(self):
# disabled.
self.assertEmpty(executor_utils.GetStatsOutputPathEntries(True, {}))
# enabled.
pre_transform_stats = standard_artifacts.ExampleStatistics()
pre_transform_stats.uri = '/pre_transform_stats'
pre_transform_schema = standard_artifacts.Schema()
pre_transform_schema.uri = '/pre_transform_schema'
post_transform_anomalies = standard_artifacts.ExampleAnomalies()
post_transform_anomalies.uri = '/post_transform_anomalies'
post_transform_stats = standard_artifacts.ExampleStatistics()
post_transform_stats.uri = '/post_transform_stats'
post_transform_schema = standard_artifacts.Schema()
post_transform_schema.uri = '/post_transform_schema'
result = executor_utils.GetStatsOutputPathEntries(
False, {
standard_component_specs.PRE_TRANSFORM_STATS_KEY:
[pre_transform_stats],
standard_component_specs.PRE_TRANSFORM_SCHEMA_KEY:
[pre_transform_schema],
standard_component_specs.POST_TRANSFORM_ANOMALIES_KEY:
[post_transform_anomalies],
standard_component_specs.POST_TRANSFORM_STATS_KEY:
[post_transform_stats],
standard_component_specs.POST_TRANSFORM_SCHEMA_KEY:
[post_transform_schema],
})
self.assertEqual(
{
labels.PRE_TRANSFORM_OUTPUT_STATS_PATH_LABEL:
'/pre_transform_stats',
labels.PRE_TRANSFORM_OUTPUT_SCHEMA_PATH_LABEL:
'/pre_transform_schema',
labels.POST_TRANSFORM_OUTPUT_ANOMALIES_PATH_LABEL:
'/post_transform_anomalies',
labels.POST_TRANSFORM_OUTPUT_STATS_PATH_LABEL:
'/post_transform_stats',
labels.POST_TRANSFORM_OUTPUT_SCHEMA_PATH_LABEL:
'/post_transform_schema',
}, result)
def testGetStatusOutputPathsEntriesMissingArtifact(self):
pre_transform_stats = standard_artifacts.ExampleStatistics()
pre_transform_stats.uri = '/pre_transform_stats'
with self.assertRaisesRegex(
ValueError, 'all stats_output_paths should be specified or none'):
executor_utils.GetStatsOutputPathEntries(False, {
standard_component_specs.PRE_TRANSFORM_STATS_KEY:
[pre_transform_stats]
})
if __name__ == '__main__':
tf.test.main()
| 39.889908
| 80
| 0.722516
| 913
| 8,696
| 6.58379
| 0.210296
| 0.051905
| 0.058559
| 0.034936
| 0.44718
| 0.311429
| 0.277824
| 0.248544
| 0.195309
| 0.153718
| 0
| 0.007996
| 0.180198
| 8,696
| 217
| 81
| 40.073733
| 0.83518
| 0.090156
| 0
| 0.17284
| 0
| 0
| 0.08775
| 0.041593
| 0
| 0
| 0
| 0
| 0.12963
| 1
| 0.08642
| false
| 0
| 0.049383
| 0.006173
| 0.148148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 3ad6ff269fcd4396a9cf1a6ea13465342af4b41c
| 2,017
| py
| Python
| kingfisher_scrapy/spiders/uruguay_historical.py
| open-contracting/kingfisher-collect
| 2fbbd6361a0ec959e0603343a4b363f97fae3815
| ["BSD-3-Clause"] | 7
| 2020-07-24T13:15:37.000Z
| 2021-12-11T22:40:07.000Z
| kingfisher_scrapy/spiders/uruguay_historical.py
| open-contracting/kingfisher-collect
| 2fbbd6361a0ec959e0603343a4b363f97fae3815
| ["BSD-3-Clause"] | 418
| 2020-04-27T22:15:27.000Z
| 2022-03-31T23:49:34.000Z
| kingfisher_scrapy/spiders/uruguay_historical.py
| open-contracting/kingfisher-collect
| 2fbbd6361a0ec959e0603343a4b363f97fae3815
| ["BSD-3-Clause"] | 6
| 2020-05-28T16:06:53.000Z
| 2021-03-16T02:54:15.000Z
|
import datetime
import scrapy
from kingfisher_scrapy.base_spider import CompressedFileSpider
from kingfisher_scrapy.util import components, handle_http_error
class UruguayHistorical(CompressedFileSpider):
"""
Domain
Agencia Reguladora de Compras Estatales (ARCE)
Spider arguments
from_date
Download only data from this year onward (YYYY format).
If ``until_date`` is provided, defaults to '2002'.
until_date
Download only data until this year (YYYY format).
If ``from_date`` is provided, defaults to the current year.
Bulk download documentation
https://www.gub.uy/agencia-compras-contrataciones-estado/datos-y-estadisticas/datos/open-contracting
"""
name = 'uruguay_historical'
download_timeout = 1000
# BaseSpider
date_format = 'year'
default_from_date = '2002'
skip_pluck = 'Already covered (see code for details)' # uruguay_releases
# SimpleSpider
data_type = 'release_package'
def start_requests(self):
# A CKAN API JSON response.
url = 'https://catalogodatos.gub.uy/api/3/action/package_show?id=arce-datos-historicos-de-compras'
yield scrapy.Request(url, meta={'file_name': 'list.json'}, callback=self.parse_list)
@handle_http_error
def parse_list(self, response):
data = response.json()
for resource in data['result']['resources']:
if resource['format'].upper() == 'JSON':
url = resource['url']
if self.from_date and self.until_date:
# URL looks like
# https://catalogodatos.gub.uy/dataset/44d3-b09c/resource/1e39-453d/download/ocds-2002.zip
url_year = int(url.split('-')[-1].split('.')[0])
url_date = datetime.datetime(url_year, 1, 1)
if not (self.from_date <= url_date <= self.until_date):
continue
yield self.build_request(url, formatter=components(-1))
| 38.056604
| 110
| 0.644522
| 240
| 2,017
| 5.275
| 0.5
| 0.031596
| 0.031596
| 0.031596
| 0.037915
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021898
| 0.252851
| 2,017
| 52
| 111
| 38.788462
| 0.818182
| 0.313832
| 0
| 0
| 0
| 0.038462
| 0.164021
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.153846
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 3ad6ff65fc6e55b853f7c971880fa3dab4b97d0c
| 1,136
| py
| Python
| toSpcy/toSpacy.py
| patrick013/toSpcy
| 8c10bc01e4a549dc177e1efb18c9c87a4dbd6f4c
| ["Apache-2.0"] | 3
| 2019-11-07T17:29:57.000Z
| 2022-03-21T01:45:04.000Z
| toSpcy/toSpacy.py
| patrick013/toSpcy
| 8c10bc01e4a549dc177e1efb18c9c87a4dbd6f4c
| ["Apache-2.0"] | null | null | null
| toSpcy/toSpacy.py
| patrick013/toSpcy
| 8c10bc01e4a549dc177e1efb18c9c87a4dbd6f4c
| ["Apache-2.0"] | null | null | null |
import re
class Convertor():
def __init__(self, tagslabels={}):
self._tagslabels = tagslabels
def _handleLabel(self, tag):
if tag in self._tagslabels.keys():
return self._tagslabels[tag]
return tag
def _handleSingle(self, t):
entities = []
index = 0
t = re.sub(r'\s+', ' ', t)
tList = re.split('(<[a-zA-Z]+>[^<]+</[a-zA-Z]+>)', t)
if len(tList) % 2 == 0:
print("Error! Some labels might be missed! ")
return
pattern = re.compile("<[a-zA-Z]+>[^<]+</[a-zA-Z]+>")
for ele in tList:
if pattern.match(ele):
len_notag = len(''.join(re.split('</?[a-zA-Z]+>', ele)))
entities.append((index, index + len_notag,
self._handleLabel(re.split('.+</|>', ele)[1])))
index += len_notag
else:
index += len(ele)
return (''.join(re.split('</?[a-zA-Z]+>', t)), {'entities': entities})
def toSpacyFormat(self, tagged_data):
return [self._handleSingle(data) for data in tagged_data]
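A worked sketch of the conversion (the tagged sentence and label mapping are illustrative):

# hypothetical input: inline XML-style tags around entities
conv = Convertor(tagslabels={'per': 'PERSON', 'loc': 'LOC'})
data = conv.toSpacyFormat(['<per>John</per> lives in <loc>Paris</loc>'])
# data == [('John lives in Paris',
#           {'entities': [(0, 4, 'PERSON'), (14, 19, 'LOC')]})]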
| 31.555556
| 80
| 0.491197
| 133
| 1,136
| 4.075188
| 0.368421
| 0.03321
| 0.04428
| 0.055351
| 0.097786
| 0.084871
| 0
| 0
| 0
| 0
| 0
| 0.005222
| 0.325704
| 1,136
| 35
| 81
| 32.457143
| 0.70235
| 0
| 0
| 0
| 0
| 0
| 0.121479
| 0.051056
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.035714
| 0.035714
| 0.392857
| 0.035714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 3ad8fecb1b29ee0733883fd90b75697788d9e406
| 2,109
| py
| Python
| backend_final/apartacho/users/views.py
| cenavia/skylynx
| 6286294a8cd57279e3c176d8fcae656cef4b40a8
| ["MIT"] | 3
| 2020-04-29T18:07:40.000Z
| 2020-05-20T20:52:52.000Z
| backend_final/apartacho/users/views.py
| cenavia/Apartacho
| 6286294a8cd57279e3c176d8fcae656cef4b40a8
| ["MIT"] | 53
| 2020-05-13T03:27:41.000Z
| 2022-03-12T00:32:46.000Z
| backend_final/apartacho/users/views.py
| cenavia/Apartacho
| 6286294a8cd57279e3c176d8fcae656cef4b40a8
| ["MIT"] | 2
| 2020-05-16T05:34:45.000Z
| 2020-06-11T14:47:50.000Z
|
"""Users views."""
# Django REST Framework
from rest_framework.viewsets import GenericViewSet
from rest_framework.mixins import (ListModelMixin,
RetrieveModelMixin,
UpdateModelMixin)
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.response import Response
# Serializers
from apartacho.users.serializers import (
AccountVerificationSerializer,
UserLoginSerializer,
UserModelSerializer,
UserSignUpSerializer
)
# Models
from apartacho.users.models import User
class UserViewSet(ListModelMixin,
RetrieveModelMixin,
UpdateModelMixin,
GenericViewSet):
"""User view set.
Handle sign up, login and account verification.
"""
queryset = User.objects.filter(is_active=True)
serializer_class = UserModelSerializer
lookup_field = 'email'
@action(detail=False, methods=['post'])
def login(self, request):
"""User sign in."""
serializer = UserLoginSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
user, token = serializer.save()
data = {
'user': UserModelSerializer(user).data,
'access_token': token
}
return Response(data, status=status.HTTP_200_OK)
@action(detail=False, methods=['post'])
def signup(self, request):
"""User sign up."""
serializer = UserSignUpSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.save()
data = UserModelSerializer(user).data
return Response(data, status=status.HTTP_201_CREATED)
@action(detail=False, methods=['post'])
def verify(self, request):
"""Account verification."""
serializer = AccountVerificationSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
        data = {'message': 'Congratulations, now go find your dream!'}
return Response(data, status=status.HTTP_200_OK)
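A sketch of how such a viewset is typically exposed through a DRF router (the 'users' prefix and module layout are assumptions, not taken from the project):

# hypothetical urls.py
from rest_framework.routers import DefaultRouter
from apartacho.users.views import UserViewSet
router = DefaultRouter()
router.register(r'users', UserViewSet, basename='users')
urlpatterns = router.urls  # yields /users/login/, /users/signup/, /users/verify/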
| 31.954545
| 69
| 0.662399
| 203
| 2,109
| 6.778325
| 0.374384
| 0.056686
| 0.061773
| 0.052326
| 0.263808
| 0.263808
| 0.171512
| 0.171512
| 0.114826
| 0.078488
| 0
| 0.00565
| 0.244666
| 2,109
| 65
| 70
| 32.446154
| 0.858129
| 0.079659
| 0
| 0.222222
| 0
| 0
| 0.040441
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.155556
| 0
| 0.377778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 3ad9ef77b1856ec357338eec91418078b9e1ee31
| 1,069
| py
| Python
| examples/cpp/simplicial_complex.py
| TripleEss/TDALayer
| 25a2da5eab50fad2d006167c2d1c97ec5efb53e0
| ["MIT"] | null | null | null
| examples/cpp/simplicial_complex.py
| TripleEss/TDALayer
| 25a2da5eab50fad2d006167c2d1c97ec5efb53e0
| ["MIT"] | null | null | null
| examples/cpp/simplicial_complex.py
| TripleEss/TDALayer
| 25a2da5eab50fad2d006167c2d1c97ec5efb53e0
| ["MIT"] | null | null | null |
from __future__ import print_function
from topologylayer.functional.persistence import SimplicialComplex, persistenceForwardCohom
from topologylayer.util.process import remove_zero_bars
import torch
# first, we build our complex
s = SimplicialComplex()
# a cycle graph on vertices 1,2,3,4
# cone with vertex 0
s.append([0])
s.append([1])
s.append([2])
s.append([3])
s.append([4])
s.append([0,1])
s.append([0,2])
s.append([0,3])
s.append([0,4])
s.append([1,2])
s.append([1,3])
s.append([4,2])
s.append([4,3])
s.append([0,1,2])
s.append([0,1,3])
s.append([0,2,4])
s.append([0,3,4])
# initialize internal data structures
s.initialize()
# function on vertices
# we are doing sub-level set persistence
# expect single H0 [0,inf]
# expect single H1 [0,2]
f = torch.Tensor([2., 0., 0., 0., 0.])
# extend filtration to simplicial complex
s.extendFloat(f)
# compute persistence with MAXDIM=1
ret = persistenceForwardCohom(s, 1)
for k in range(2):
print("dimension %d bars" % k)
print(remove_zero_bars(ret[k]))
| 20.960784
| 92
| 0.669785
| 176
| 1,069
| 4.017045
| 0.375
| 0.168317
| 0.101839
| 0.03819
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057497
| 0.170253
| 1,069
| 50
| 93
| 21.38
| 0.739572
| 0.27783
| 0
| 0
| 0
| 0
| 0.02391
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.137931
| 0
| 0.137931
| 0.103448
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 3ada4a4b168c9f32d42b44792e037eaf4b8ba00b
| 4,020
| py
| Python
| python/featureextraction/subscriber.py
| JonathanCamargo/Eris
| 34c389f0808c8b47933605ed19d98e62280e56dd
| ["MIT"] | null | null | null
| python/featureextraction/subscriber.py
| JonathanCamargo/Eris
| 34c389f0808c8b47933605ed19d98e62280e56dd
| ["MIT"] | null | null | null
| python/featureextraction/subscriber.py
| JonathanCamargo/Eris
| 34c389f0808c8b47933605ed19d98e62280e56dd
| ["MIT"] | null | null | null |
import rospy
import threading
import importlib
from collections import deque
from custom_msgs.msg import *
def Subscriber(topic_name,type_str, window):
#creates a subscriber for topic topic_name
#using the class given as a string: type_str
# in the form package_name/message_type
# or in the form package_name.msg.message_type
    # alternatively type_str can be passed not as a str, but as the actual msg class
# returns the subscriber instance
try:
if not (type(type_str)==str):
type_str=type_str.__module__
if type(type_str)==str:
if '/' in type_str:
split_type=type_str.split('/')
package_name=split_type[0]
class_name=split_type[1]
if '.' in type_str:
split_type=type_str.split('.')
package_name=split_type[0]
class_name=split_type[2]
class_name=class_name[1:]
module_=importlib.import_module(package_name+'.msg')
data_class=getattr(module_,class_name)
subscriber=GenericSubscriber(topic_name,data_class, window)
except ImportError as e:
print('ERROR in '+package_name+'.msg')
raise ImportError("package %s not found %s"%(package_name,e))
return subscriber
# A generic subscriber class for interfacing any type of message into the GUI
class GenericSubscriber(object):
def __init__(self,topic,data_class,QUEUE_SIZE=1000):
#Properties
self.topic="" # topic name (e.g. /myrobot/someNamespace/somemessage)
self.data_class="" # type of message in the form 'package_name/message_type' e.g. 'custom_msgs/JointState
self.registered = False #indicates if subscriber is registered (i.e. listening to data)
self.paused = False #indicates if subscriber pauses appending data to the queue
self.channels = None
self.queue = deque(maxlen=QUEUE_SIZE) #Queue for saving data
self.subs = None # subscriber object
if topic!="":
self.topic=topic
if data_class!="":
self.topic=topic
self.data_class=data_class
self.channels=self.data_class.__slots__
self.channel_types=self.data_class._slot_types
def callback(self,msg):
if __debug__:
pass
#rospy.loginfo(rospy.get_caller_id()+" %s",msg)
if self.paused==False:
#Get each field in the message
data=[]
for channel in self.channels:
if channel == 'header':
#If header just take the timestamp
time=msg.header.stamp.secs+msg.header.stamp.nsecs/1.0E9
data.append(time)
else:
data.append(getattr(msg,channel))
self.append(data)
def listener(self):
try:
self.subs=rospy.Subscriber(self.topic, self.data_class, self.callback)
except:
print("Could not subscribe")
else:
self.registered=True
def append(self, newElement):
if self.paused == False:
self.queue.append(newElement)
def getQueue(self):
return list(self.queue)
def getChannels(self):
return self.channels
def unsubscribe(self):
if self.subs is not None:
self.subs.unregister()
self.registered=False
    def subscribe(self):
        if self.registered is False:
            # pass the method itself (no call) so the listener
            # registration runs on the new thread
            self.t = threading.Thread(target=self.listener)
            self.t.start()
            self.registered = True
def __str__(self):
''' Overload str to use print for the subcriber'''
string_1="Topic: {0}\nChannels:{1}\nChannel types:{2}\n".format(self.topic,self.channels,self.channel_types)
if self.registered is True:
string_2="This subscriber is registered"
else:
string_2="This subscriber is NOT registered"
return string_1+string_2
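A hedged usage sketch (topic name and message type are placeholders; a running ROS master and an initialized rospy node are required):

# hypothetical node
import rospy
rospy.init_node('subscriber_example')
sub = Subscriber('/myrobot/joint_state', 'custom_msgs/JointState', 1000)
sub.subscribe()             # registers the listener on a background thread
rospy.sleep(1.0)
print(sub.getChannels())    # slot names of the message type
print(len(sub.getQueue()))  # number of samples buffered so far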
| 34.358974
| 116
| 0.615672
| 503
| 4,020
| 4.753479
| 0.276342
| 0.032204
| 0.027185
| 0.020075
| 0.112923
| 0.08532
| 0.08532
| 0.059389
| 0.059389
| 0.059389
| 0
| 0.00705
| 0.294279
| 4,020
| 116
| 117
| 34.655172
| 0.835742
| 0.202488
| 0
| 0.178571
| 0
| 0
| 0.055294
| 0.008168
| 0
| 0
| 0
| 0
| 0
| 1
| 0.119048
| false
| 0.011905
| 0.095238
| 0.02381
| 0.27381
| 0.02381
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 3adcb0e64f1098771d6fe0dddfcf3ecb6a0a3c9a
| 19,324
| py
| Python
| scripts/condinst.py
| Spritaro/condinst_tensorrt
| 22063a75e015bba45b588cdb6ebf1ac663ff1924
| ["MIT"] | 3
| 2021-11-14T14:11:10.000Z
| 2022-02-16T11:42:40.000Z
| scripts/condinst.py
| datomi79/condinst_tensorrt
| 22063a75e015bba45b588cdb6ebf1ac663ff1924
| ["MIT"] | null | null | null
| scripts/condinst.py
| datomi79/condinst_tensorrt
| 22063a75e015bba45b588cdb6ebf1ac663ff1924
| ["MIT"] | 1
| 2022-02-14T21:47:55.000Z
| 2022-02-14T21:47:55.000Z
|
import math
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.utils.data import DataLoader, random_split
from torch.utils.tensorboard import SummaryWriter
import torchvision
# from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
# from torchvision.ops.focal_loss import sigmoid_focal_loss
from loss import heatmap_focal_loss
from loss import dice_loss
def get_centroid_indices(masks):
"""
Params:
masks: Tensor[num_objects, height, width]
Returns:
centroids: Tensor[num_objects, (x, y)]
"""
_, height, width = masks.shape
dtype = masks.dtype
device = masks.device
location_x = torch.arange(0, width, 1, dtype=dtype, device=device) # Tensor[width]
location_y = torch.arange(0, height, 1, dtype=dtype, device=device) # Tensor[height]
total_area = masks.sum(dim=(1,2)) + 1e-9
centroids_x = torch.sum(masks.sum(dim=1) * location_x[None,:], dim=1) / total_area # Tensor[num_objects]
centroids_y = torch.sum(masks.sum(dim=2) * location_y[None,:], dim=1) / total_area # Tensor[num_objects]
centroids = torch.stack((centroids_x, centroids_y), dim=1) # Tensor[num_objects, (x, y)]
centroids = centroids.to(torch.int64)
return centroids
def generate_heatmap(gt_labels, gt_masks, num_classes):
"""
Params:
gt_labels: Tensor[num_objects]
gt_masks: Tensor[num_objects, height, width]
num_classes:
Returns:
heatmap: Tensor[num_classes, height, width]
centroids: Tensor[num_objects, (x, y)]
"""
num_objects, height, width = gt_masks.shape
dtype = gt_masks.dtype
device = gt_masks.device
centroids = get_centroid_indices(gt_masks) # Tensor[num_objects, (x, y)]
radius2 = torch.sum(gt_masks, dim=(1, 2)) / height / width * 10 + 1
location_x = torch.arange(0, width, 1, dtype=dtype, device=device) # Tensor[width]
location_y = torch.arange(0, height, 1, dtype=dtype, device=device) # Tensor[height]
location_y, location_x = torch.meshgrid(location_y, location_x) # [height, width], [height, width]
heatmap = torch.zeros(size=(num_classes, height, width), dtype=dtype, device=device)
for i in range(num_objects):
label = gt_labels[i]
px = centroids[i][0]
py = centroids[i][1]
single_heatmap = torch.exp(-((location_x-px)**2 + (location_y-py)**2) / (2. * radius2[i]))
# Take element-wise maximum in case of overlapping objects
heatmap[label,:,:] = torch.maximum(heatmap[label,:,:], single_heatmap)
return heatmap, centroids
def get_heatmap_peaks(cls_logits, topk, kernel=3):
"""
Params:
cls_logits: Tensor[num_batch, num_classes, height, width]
topk: Int
kernel: Int
Returns:
labels: Tensor[num_batch, topk]
cls_preds: Tensor[num_batch, topk]
points: Tensor[num_batch, topk, (x, y)]
"""
num_batch, num_classes, height, width = cls_logits.shape
device = cls_logits.device
# Get peak maps
heatmap_preds = cls_logits.sigmoid() # Tensor[num_batch, num_classes, height, width]
pad = (kernel - 1) // 2
heatmap_max = F.max_pool2d(heatmap_preds, (kernel, kernel), stride=1, padding=pad) # Tensor[num_batch, num_classes, height, width]
peak_map = (heatmap_max == heatmap_preds).to(dtype=heatmap_preds.dtype)
peak_map = peak_map * heatmap_preds
peak_map = peak_map.view(num_batch, -1) # Tensor[num_batch, (num_classes*height*width)]
# Get properties of each peak
    # NOTE: TensorRT7 does not support rounding_mode='floor' for torch.div()
cls_preds, keep_idx = torch.topk(peak_map, k=topk, dim=1) # [num_batch, topk], [num_batch, topk]
labels = torch.div(keep_idx, height*width).long() # [num_batch, topk]
yx_idx = torch.remainder(keep_idx, height*width).long() # [num_batch, topk]
ys = torch.div(yx_idx, width).long() # [num_batch, topk]
xs = torch.remainder(yx_idx, width).long() # [num_batch, topk]
points = torch.stack([xs, ys], dim=2) # Tensor[num_batch, topk, (x,y)]
return labels, cls_preds, points
class CondInst(nn.Module):
def __init__(self, mode, input_channels, num_classes, topk):
super().__init__()
assert mode in ['training', 'inference']
self.mode = mode
self.topk = topk
self.num_filters = 8
self.conv1_w = (self.num_filters + 2) * self.num_filters
self.conv2_w = self.conv1_w + self.num_filters * self.num_filters
self.conv3_w = self.conv2_w + self.num_filters * 1
self.conv1_b = self.conv3_w + self.num_filters
self.conv2_b = self.conv1_b + self.num_filters
self.conv3_b = self.conv2_b + 1
num_channels = self.conv3_b
# self.backbone = resnet_fpn_backbone('resnet50', pretrained=True, trainable_layers=0)
# self.backbone = resnet_fpn_backbone('resnet34', pretrained=True, trainable_layers=5)
self.backbone = torchvision.models.resnet50(pretrained=True)
self.lateral_conv2 = nn.Sequential(
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=1, padding=0, bias=False),
nn.BatchNorm2d(num_features=256)
)
self.lateral_conv3 = nn.Sequential(
nn.Conv2d(in_channels=512, out_channels=256, kernel_size=1, padding=0, bias=False),
nn.BatchNorm2d(num_features=256)
)
self.lateral_conv4 = nn.Sequential(
nn.Conv2d(in_channels=1024, out_channels=256, kernel_size=1, padding=0, bias=False),
nn.BatchNorm2d(num_features=256)
)
self.lateral_conv5 = nn.Sequential(
nn.Conv2d(in_channels=2048, out_channels=256, kernel_size=1, padding=0, bias=False),
nn.BatchNorm2d(num_features=256)
)
self.cls_head = nn.Sequential(
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(num_features=256),
nn.ReLU(),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(num_features=256),
nn.ReLU(),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(num_features=256),
nn.ReLU(),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(num_features=256),
nn.ReLU(),
nn.Conv2d(in_channels=256, out_channels=num_classes, kernel_size=1, padding=0)
)
self.ctr_head = nn.Sequential(
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(num_features=256),
nn.ReLU(),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(num_features=256),
nn.ReLU(),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(num_features=256),
nn.ReLU(),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(num_features=256),
nn.ReLU(),
nn.Conv2d(in_channels=256, out_channels=num_channels, kernel_size=1, padding=0)
)
self.mask_head = nn.Sequential(
nn.Conv2d(in_channels=256, out_channels=128, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(num_features=128),
nn.ReLU(),
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(num_features=128),
nn.ReLU(),
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(num_features=128),
nn.ReLU(),
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(num_features=128),
nn.ReLU(),
nn.Conv2d(in_channels=128, out_channels=self.num_filters, kernel_size=1, padding=0)
)
# Initialize
def initialize(m):
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose2d):
nn.init.normal_(m.weight, std=0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
self.lateral_conv2.apply(initialize)
self.lateral_conv3.apply(initialize)
self.lateral_conv4.apply(initialize)
self.lateral_conv5.apply(initialize)
self.cls_head.apply(initialize)
self.ctr_head.apply(initialize)
self.mask_head.apply(initialize)
# Initialize last layer of class head
# NOTE: see Focal Loss paper for detail https://arxiv.org/abs/1708.02002
pi = 0.01
bias = -math.log((1 - pi) / pi)
nn.init.constant_(self.cls_head[-1].bias, bias)
# Change number of input channels
if input_channels != 3:
output_channels, _, h, w = self.backbone.conv1.weight.shape
weight = torch.zeros(output_channels, input_channels, h, w)
nn.init.normal_(weight, std=0.01)
weight[:, :3, :, :] = self.backbone.conv1.weight
self.backbone.conv1.weight = nn.Parameter(weight, requires_grad=True)
# self.backbone.conv1.apply(initialize)
def forward(self, images):
# Convert input images to FP32 or FP16 depending on backbone dtype
images = images.to(dtype=self.backbone.conv1.weight.dtype)
# Backbone
x = self.backbone.conv1(images)
x = self.backbone.bn1(x)
x = self.backbone.relu(x)
x = self.backbone.maxpool(x)
c2 = self.backbone.layer1(x) # 1/4
c3 = self.backbone.layer2(c2) # 1/8
c4 = self.backbone.layer3(c3) # 1/16
c5 = self.backbone.layer4(c4) # 1/32
# FPN
p5 = self.lateral_conv5(c5)
p4 = self.lateral_conv4(c4) + F.interpolate(p5, scale_factor=2, mode='bilinear', align_corners=False)
p3 = self.lateral_conv3(c3) + F.interpolate(p4, scale_factor=2, mode='bilinear', align_corners=False)
p2 = self.lateral_conv2(c2) + F.interpolate(p3, scale_factor=2, mode='bilinear', align_corners=False)
x = p3
cls_logits = self.cls_head(x) # [num_batch, num_classes, feature_height, feature_width]
ctr_logits = self.ctr_head(x) # [num_batch, num_channels, feature_height, feature_width]
x = p2
mask_logits = self.mask_head(x) # [num_batch, num_filters, mask_height, mask_width]
if self.mode == 'training':
return cls_logits, ctr_logits, mask_logits
else:
labels, scores, points = get_heatmap_peaks(cls_logits, topk=self.topk)
num_batch, num_objects, _ = points.shape
masks = []
for i in range(num_batch):
mask = self.generate_mask(ctr_logits[i], mask_logits[i], points[i])
masks.append(mask)
masks = torch.stack(masks, dim=0)
return labels.int(), scores.float(), masks.float()
def generate_mask(self, ctr_logits, mask_logits, centroids):
"""
Params:
ctr_logits: Tensor[num_channels, feature_height, feature_width]
mask_logits: Tensor[num_filters, mask_height, mask_width]
centroids: Tensor[num_objects, (x, y)]
Returns:
masks: Tensor[num_objects, mask_height, mask_width]
"""
_, feature_height, feature_width = ctr_logits.shape
_, mask_height, mask_width = mask_logits.shape
num_objects, _ = centroids.shape
dtype = ctr_logits.dtype
device = ctr_logits.device
# Absolute coordinates
# NOTE: TensorRT7 does not support float range operation. Use cast instead.
location_x = torch.arange(0, mask_width, 1, dtype=torch.int32, device=device) # Tensor[mask_width]
location_y = torch.arange(0, mask_height, 1, dtype=torch.int32, device=device) # Tensor[mask_height]
location_x = location_x.to(dtype)
location_y = location_y.to(dtype)
location_y, location_x = torch.meshgrid(location_y, location_x) # Tensor[mask_height, mask_width] each; 'ij' indexing (newer PyTorch expects an explicit indexing='ij')
location_xs = location_x[None,:,:].repeat(num_objects, 1, 1) # Tensor[num_objects, mask_height, mask_width]
location_ys = location_y[None,:,:].repeat(num_objects, 1, 1) # Tensor[num_objects, mask_height, mask_width]
# Relative coordinates
location_xs -= centroids[:, 0].view(-1, 1, 1) * (mask_width // feature_width) # Tensor[num_objects, mask_height, mask_width]
location_ys -= centroids[:, 1].view(-1, 1, 1) * (mask_height // feature_height) # Tensor[num_objects, mask_height, mask_width]
# location_xs /= mask_width
# location_ys /= mask_height
# Add relative coordinates to mask features
mask_logits = mask_logits[None,:,:,:].expand(num_objects, self.num_filters, mask_height, mask_width) # Tensor[num_objects, num_filters, mask_height, mask_width]
mask_logits = torch.cat([mask_logits, location_xs[:,None,:,:], location_ys[:,None,:,:]], dim=1) # Tensor[num_objects, num_filters+2, mask_height, mask_width]
# Create instance-aware mask head
px = centroids[:,0] # Tensor[num_objects]
py = centroids[:,1] # Tensor[num_objects]
weights1 = ctr_logits[:self.conv1_w, py, px].view(self.num_filters, self.num_filters+2, num_objects, 1)
weights2 = ctr_logits[self.conv1_w:self.conv2_w, py, px].view(self.num_filters, self.num_filters, num_objects, 1)
weights3 = ctr_logits[self.conv2_w:self.conv3_w, py, px].view(1, self.num_filters, num_objects, 1)
biases1 = ctr_logits[self.conv3_w:self.conv1_b, py, px]
biases2 = ctr_logits[self.conv1_b:self.conv2_b, py, px]
biases3 = ctr_logits[self.conv2_b:self.conv3_b, py, px]
# Apply mask head to mask features with relative coordinates
# NOTE: TensorRT7 does not support dynamic filter for conv2d. Use matmul instead.
# NOTE: matmul is used in the following way: [N, H*W, 1, C1] * [N, 1, C1, C2] = [N, H*W, 1, C2]
x = mask_logits.view(num_objects, self.num_filters+2, -1, 1) # Tensor[num_objects, num_filters+2, mask_height*mask_width, 1]
x = x.permute(0, 2, 3, 1) # Tensor[num_objects, mask_height*mask_width, 1, num_filters+2]
weights1 = weights1.permute(2, 3, 1, 0) # Tensor[num_objects, 1, num_filters+2, num_filters]
x = torch.matmul(x, weights1) # Tensor[num_objects, mask_height*mask_width, 1, num_filters]
biases1 = biases1[:, None, None, :].permute(3, 1, 2, 0) # Tensor[num_object, 1, 1, num_filters]
x = x + biases1
x = F.relu(x)
weights2 = weights2.permute(2, 3, 1, 0) # Tensor[num_objects, 1, num_filters, num_filters]
x = torch.matmul(x, weights2) # Tensor[num_objects, mask_height*mask_width, 1, num_filters]
biases2 = biases2[:, None, None, :].permute(3, 1, 2, 0) # Tensor[num_object, 1, 1, num_filters]
x = x + biases2
x = F.relu(x)
weights3 = weights3.permute(2, 3, 1, 0) # Tensor[num_objects, 1, num_filters, 1]
x = torch.matmul(x, weights3) # Tensor[num_objects, mask_height*mask_width, 1, 1]
biases3 = biases3[:, None, None, :].permute(3, 1, 2, 0) # Tensor[num_objects, 1, 1, 1]
x = x + biases3
x = x.view(num_objects, mask_height, mask_width) # Tensor[num_objects, mask_height, mask_width]
masks = torch.sigmoid(x)
return masks
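# Minimal self-contained check (illustrative shapes, not part of the model)
# that the batched-matmul formulation above is equivalent to a per-object
# dynamic 1x1 convolution done with F.conv2d and groups:
#
# N, C_in, C_out, H, W = 2, 10, 8, 4, 5
# feats = torch.randn(N, C_in, H, W)
# weights = torch.randn(N, C_out, C_in, 1, 1)
# biases = torch.randn(N, C_out)
# ref = F.conv2d(feats.reshape(1, N * C_in, H, W),
#                weights.reshape(N * C_out, C_in, 1, 1),
#                bias=biases.reshape(-1),
#                groups=N).reshape(N, C_out, H, W)
# x = feats.view(N, C_in, H * W, 1).permute(0, 2, 3, 1)  # [N, H*W, 1, C_in]
# w = weights.view(N, C_out, C_in).permute(0, 2, 1)      # [N, C_in, C_out]
# out = torch.matmul(x, w[:, None, :, :]) + biases[:, None, None, :]
# out = out.permute(0, 3, 1, 2).reshape(N, C_out, H, W)
# assert torch.allclose(ref, out, atol=1e-5)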
def loss(self, cls_logits, ctr_logits, mask_logits, targets):
"""
Params:
cls_logits: Tensor[num_batch, num_classes, feature_height, feature_width]
ctr_logits: Tensor[num_batch, num_channels, feature_height, feature_width]
mask_logits: Tensor[num_batch, num_filters, mask_height, mask_width]
targets: List[List[Dict{'class_labels': int, 'segmentation': Tensor[image_height, image_width]}]]
Returns:
heatmap_loss: Tensor[]
mask_loss: Tensor[]
"""
num_batch, num_classes, feature_height, feature_width = cls_logits.shape
num_batch, num_filters, mask_height, mask_width = mask_logits.shape
dtype = cls_logits.dtype
device = cls_logits.device
# Assign each GT mask to one point in feature map, then calculate loss
heatmap_losses = []
mask_losses = []
for i in range(num_batch):
num_objects = len(targets[i])
# # Skip if no object in targets
# if len(targets[i]) == 0:
# heatmap_losses.append(torch.tensor(0, dtype=dtype, device=device))
# mask_losses.append(torch.tensor(0, dtype=dtype, device=device))
# continue
if num_objects > 0:
# Convert list of dicts to Tensors
gt_labels = torch.as_tensor([obj['class_labels'] for obj in targets[i]], dtype=torch.int64, device=device) # Tensor[num_objects]
gt_masks = torch.stack([torch.as_tensor(obj['segmentation'], dtype=dtype, device=device) for obj in targets[i]], dim=0) # Tensor[num_objects, image_height, image_width]
# Downsample GT masks
gt_masks_size_feature = F.interpolate(gt_masks[None,...], size=(feature_height, feature_width)) # Tensor[1, num_objects, feature_height, feature_width]
gt_masks_size_feature = gt_masks_size_feature[0,...] # Tensor[num_objects, feature_height, feature_width]
# Generate GT heatmap
gt_heatmap, gt_centroids = generate_heatmap(gt_labels, gt_masks_size_feature, num_classes) # Tensor[num_classes, feature_height, feature_width], Tensor[num_objects, (x, y)]
# Generate mask for each object
masks = self.generate_mask(ctr_logits[i], mask_logits[i], gt_centroids) # Tensor[num_objects, mask_height, mask_width]
# Calculate loss
heatmap_loss = heatmap_focal_loss(cls_logits[i].sigmoid(), gt_heatmap, alpha=2, gamma=4) / num_objects
gt_masks_size_mask = F.adaptive_avg_pool2d(gt_masks[None,...], output_size=(mask_height, mask_width))
mask_loss = dice_loss(masks, gt_masks_size_mask)
else:
# No GT objects
gt_heatmap = torch.zeros_like(cls_logits[i])
heatmap_loss = heatmap_focal_loss(cls_logits[i].sigmoid(), gt_heatmap, alpha=2, gamma=4)
mask_loss = torch.tensor(0, dtype=dtype, device=device, requires_grad=True)
heatmap_losses.append(heatmap_loss)
mask_losses.append(mask_loss)
heatmap_loss = torch.stack(heatmap_losses, dim=0).mean()
mask_loss = torch.stack(mask_losses, dim=0).mean()
return heatmap_loss, mask_loss
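# heatmap_focal_loss (likely a CornerNet/CenterNet-style penalty-reduced
# focal loss, given alpha=2, gamma=4) and dice_loss are defined elsewhere in
# the repository; a common soft-Dice formulation, as a hedged sketch (the
# repo's version may differ in smoothing or reduction):
def dice_loss_sketch(pred, target, eps=1e-5):
    # pred, target: Tensor[..., H, W] with values in [0, 1]
    pred = pred.flatten(start_dim=-2)
    target = target.flatten(start_dim=-2)
    inter = (pred * target).sum(dim=-1)
    union = (pred * pred).sum(dim=-1) + (target * target).sum(dim=-1)
    return (1.0 - (2.0 * inter + eps) / (union + eps)).mean()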
| 48.431078
| 188
| 0.644328
| 2,635
| 19,324
| 4.503605
| 0.113472
| 0.039437
| 0.045841
| 0.038426
| 0.531558
| 0.469369
| 0.386534
| 0.350805
| 0.29561
| 0.262914
| 0
| 0.035702
| 0.23758
| 19,324
| 398
| 189
| 48.552764
| 0.769769
| 0.247413
| 0
| 0.229885
| 0
| 0
| 0.005142
| 0
| 0
| 0
| 0
| 0
| 0.003831
| 1
| 0.030651
| false
| 0
| 0.038314
| 0
| 0.099617
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3add20f0867b88372260c1d964a87cf06a4f4b64
| 1,621
|
py
|
Python
|
verification/conf.py
|
HosseinMohammadii/django-rest-verification
|
0e0d4633f4420896fbfa0005f9df49eb4ed68f88
|
[
"MIT"
] | 1
|
2020-10-23T08:20:59.000Z
|
2020-10-23T08:20:59.000Z
|
verification/conf.py
|
HosseinMohammadii/django-rest-verification
|
0e0d4633f4420896fbfa0005f9df49eb4ed68f88
|
[
"MIT"
] | null | null | null |
verification/conf.py
|
HosseinMohammadii/django-rest-verification
|
0e0d4633f4420896fbfa0005f9df49eb4ed68f88
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.utils import timezone
from .base import default_config, numeric, lowercase_alphabetic, uppercase_alphabetic
config = default_config
config.update(settings.VERIFICATION)
VERIFICATION_CODE_FIELD = 'verification_code'
VERIFICATIONS = config.get('VERIFICATIONS')
CODE_LENGTH = config.get('CODE_LENGTH')
LIFE_TIME_SECOND = config.get('LIFE_TIME_SECOND')
LIFE_TIME_MINUTE = config.get('LIFE_TIME_MINUTE')
LIFE_TIME_HOUR = config.get('LIFE_TIME_HOUR')
LIFE_TIME_DAY = config.get('LIFE_TIME_DAY')
LIFE_TIME_PENALTY_SECOND = config.get('LIFE_TIME_PENALTY_SECOND')
CODE_LIFE_TIME = timezone.timedelta(
seconds=LIFE_TIME_SECOND + LIFE_TIME_PENALTY_SECOND,
minutes=LIFE_TIME_MINUTE,
hours=LIFE_TIME_HOUR,
days=LIFE_TIME_DAY,
)
ALLOWED_CODE_LETTERS = ''
if config.get('CONTAINS_NUMERIC'):
ALLOWED_CODE_LETTERS += numeric
if config.get('CONTAINS_UPPER_ALPHABETIC'):
ALLOWED_CODE_LETTERS += uppercase_alphabetic
if config.get('CONTAINS_LOWER_ALPHABETIC'):
ALLOWED_CODE_LETTERS += lowercase_alphabetic
if len(ALLOWED_CODE_LETTERS) == 0:
raise Exception("No letters are allowed for code generation")
VERIFICATIONS_DICT = {}
VERIFICATIONS_TYPES = []
VERIFICATIONS_USER_MODEL_FIELDS = []
for verification in VERIFICATIONS:
VERIFICATIONS_TYPES.append(verification.get('type'))
VERIFICATIONS_USER_MODEL_FIELDS.append(verification.get('user_model_field'))
VERIFICATIONS_DICT[verification.get('type')] = verification
def get_user_model_field(verification_type):
return VERIFICATIONS_DICT.get(verification_type, {}).get('user_model_field') # default to {} so unknown types return None instead of raising AttributeError
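# Hypothetical shape of the settings.VERIFICATION dict consumed above
# (key names inferred from the config.get(...) calls; values illustrative,
# not the package's actual defaults):
# VERIFICATION = {
#     'CODE_LENGTH': 6,
#     'LIFE_TIME_SECOND': 0,
#     'LIFE_TIME_MINUTE': 5,
#     'LIFE_TIME_HOUR': 0,
#     'LIFE_TIME_DAY': 0,
#     'LIFE_TIME_PENALTY_SECOND': 30,
#     'CONTAINS_NUMERIC': True,
#     'CONTAINS_UPPER_ALPHABETIC': False,
#     'CONTAINS_LOWER_ALPHABETIC': False,
#     'VERIFICATIONS': [
#         {'type': 'email', 'user_model_field': 'email'},
#     ],
# }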
| 33.770833
| 85
| 0.805676
| 210
| 1,621
| 5.833333
| 0.280952
| 0.097959
| 0.053061
| 0.069388
| 0.066939
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000686
| 0.101172
| 1,621
| 47
| 86
| 34.489362
| 0.840082
| 0
| 0
| 0
| 0
| 0
| 0.167901
| 0.045679
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0
| 0.081081
| 0.027027
| 0.135135
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3adedc706a3e7e55655cf8dce7e7f4c1476ebd61
| 13,078
|
py
|
Python
|
nxbt/controller/server.py
|
Yamakaky/nxbt
|
0fe9acaaf0fac8014f9aaee53943711a106b572c
|
[
"MIT"
] | null | null | null |
nxbt/controller/server.py
|
Yamakaky/nxbt
|
0fe9acaaf0fac8014f9aaee53943711a106b572c
|
[
"MIT"
] | null | null | null |
nxbt/controller/server.py
|
Yamakaky/nxbt
|
0fe9acaaf0fac8014f9aaee53943711a106b572c
|
[
"MIT"
] | null | null | null |
import socket
import fcntl
import os
import time
import queue
import logging
import traceback
from .controller import Controller, ControllerTypes
from ..bluez import BlueZ
from .protocol import ControllerProtocol
from .input import InputParser
from .utils import format_msg_controller, format_msg_switch
class ControllerServer():
def __init__(self, controller_type, adapter_path="/org/bluez/hci0",
state=None, task_queue=None, lock=None, colour_body=None,
colour_buttons=None):
self.logger = logging.getLogger('nxbt')
# Cache logging level to increase performance on checks
self.logger_level = self.logger.level
if state:
self.state = state
else:
self.state = {
"state": "",
"finished_macros": [],
"errors": None,
"direct_input": None
}
self.task_queue = task_queue
self.controller_type = controller_type
self.colour_body = colour_body
self.colour_buttons = colour_buttons
# Store the lock unconditionally (it may be None); run() and
# save_connection() check `if self.lock:` and would otherwise hit an
# AttributeError when no lock is passed
self.lock = lock
self.reconnect_counter = 0
# Initializing Bluetooth
self.bt = BlueZ(adapter_path=adapter_path)
self.controller = Controller(self.bt, self.controller_type)
self.protocol = ControllerProtocol(
self.controller_type,
self.bt.address,
colour_body=self.colour_body,
colour_buttons=self.colour_buttons)
self.input = InputParser(self.protocol)
self.slow_input_frequency = False
def run(self, reconnect_address=None):
"""Runs the mainloop of the controller server.
:param reconnect_address: The Bluetooth MAC address of a
previously connected to Nintendo Switch, defaults to None
:type reconnect_address: string or list, optional
"""
self.state["state"] = "initializing"
try:
# If we have a lock, prevent other controllers
# from initializing at the same time and saturating the DBus,
# potentially causing a kernel panic.
if self.lock:
self.lock.acquire()
try:
self.controller.setup()
if reconnect_address:
itr, ctrl = self.reconnect(reconnect_address)
else:
itr, ctrl = self.connect()
finally:
if self.lock:
self.lock.release()
self.switch_address = itr.getsockname()[0]
self.state["state"] = "connected"
self.mainloop(itr, ctrl)
except KeyboardInterrupt:
pass
except Exception:
self.state["state"] = "crashed"
self.state["errors"] = traceback.format_exc()
return self.state
def mainloop(self, itr, ctrl):
# Mainloop
while True:
# Start timing the command processing
timer_start = time.perf_counter()
# Attempt to get output from Switch
try:
reply = itr.recv(50)
if self.logger_level <= logging.DEBUG and len(reply) > 40:
self.logger.debug(format_msg_switch(reply))
except BlockingIOError:
reply = None
# Getting any inputs from the task queue
if self.task_queue:
try:
while True:
msg = self.task_queue.get_nowait()
if msg and msg["type"] == "macro":
self.input.buffer_macro(
msg["macro"], msg["macro_id"])
elif msg and msg["type"] == "stop":
self.input.stop_macro(
msg["macro_id"], state=self.state)
elif msg and msg["type"] == "clear":
self.input.clear_macros()
except queue.Empty:
pass
# Set Direct Input
if self.state["direct_input"]:
self.input.set_controller_input(self.state["direct_input"])
self.protocol.process_commands(reply)
self.input.set_protocol_input(state=self.state)
msg = self.protocol.get_report()
if self.logger_level <= logging.DEBUG and reply and len(reply) > 45:
self.logger.debug(format_msg_controller(msg))
try:
itr.sendall(msg)
except BlockingIOError:
continue
except OSError as e:
# Attempt to reconnect to the Switch
itr, ctrl = self.save_connection(e)
# Figure out how long it took to process commands
timer_end = time.perf_counter()
elapsed_time = (timer_end - timer_start)
if self.slow_input_frequency:
# Check if we can switch out of slow frequency input
if self.input.exited_grip_order_menu:
self.slow_input_frequency = False
if elapsed_time < 1/15:
time.sleep(1/15 - elapsed_time)
else:
# Respond at 120Hz for Pro Controller
# or 60Hz for Joy-Cons.
# Sleep timers are compensated with the elapsed command
# processing time.
if self.controller_type == ControllerTypes.PRO_CONTROLLER:
if elapsed_time < 1/120:
time.sleep(1/120 - elapsed_time)
else:
if elapsed_time < 1/60:
time.sleep(1/60 - elapsed_time)
def save_connection(self, error, state=None):
while self.reconnect_counter < 2:
try:
self.logger.debug("Attempting to reconnect")
# Reinitialize the protocol
self.protocol = ControllerProtocol(
self.controller_type,
self.bt.address,
colour_body=self.colour_body,
colour_buttons=self.colour_buttons)
if self.lock:
self.lock.acquire()
try:
itr, ctrl = self.reconnect(self.switch_address)
return itr, ctrl
finally:
if self.lock:
self.lock.release()
except OSError:
self.reconnect_counter += 1
self.logger.exception(error)
time.sleep(0.5)
# If we can't reconnect, transition to attempting
# to connect to any Switch.
self.logger.debug("Connecting to any Switch")
self.reconnect_counter = 0
# Reinitialize the protocol
self.protocol = ControllerProtocol(
self.controller_type,
self.bt.address,
colour_body=self.colour_body,
colour_buttons=self.colour_buttons)
self.input.reassign_protocol(self.protocol)
# Since we were forced to attempt a reconnection
# we need to press the L/SL and R/SR buttons before
# we can proceed with any input.
if self.controller_type == ControllerTypes.PRO_CONTROLLER:
self.input.current_macro_commands = "L R 0.0s".strip(" ").split(" ")
elif self.controller_type == ControllerTypes.JOYCON_L:
self.input.current_macro_commands = "JCL_SL JCL_SR 0.0s".strip(" ").split(" ")
elif self.controller_type == ControllerTypes.JOYCON_R:
self.input.current_macro_commands = "JCR_SL JCR_SR 0.0s".strip(" ").split(" ")
if self.lock:
self.lock.acquire()
try:
itr, ctrl = self.connect()
finally:
if self.lock:
self.lock.release()
self.state["state"] = "connected"
self.switch_address = itr.getsockname()[0]
return itr, ctrl
def connect(self):
"""Configures as a specified controller, pairs with a Nintendo Switch,
and creates/accepts sockets for communication with the Switch.
"""
self.state["state"] = "connecting"
# Creating control and interrupt sockets
s_ctrl = socket.socket(
family=socket.AF_BLUETOOTH,
type=socket.SOCK_SEQPACKET,
proto=socket.BTPROTO_L2CAP)
s_itr = socket.socket(
family=socket.AF_BLUETOOTH,
type=socket.SOCK_SEQPACKET,
proto=socket.BTPROTO_L2CAP)
# Setting up HID interrupt/control sockets
try:
s_ctrl.bind((self.bt.address, 17))
s_itr.bind((self.bt.address, 19))
except OSError:
s_ctrl.bind((socket.BDADDR_ANY, 17))
s_itr.bind((socket.BDADDR_ANY, 19))
s_itr.listen(1)
s_ctrl.listen(1)
self.bt.set_discoverable(True)
ctrl, ctrl_address = s_ctrl.accept()
itr, itr_address = s_itr.accept()
# Send an empty input report to the Switch to prompt a reply
self.protocol.process_commands(None)
msg = self.protocol.get_report()
itr.sendall(msg)
# Setting interrupt connection as non-blocking.
# In this case, non-blocking means it throws a "BlockingIOError"
# for sending and receiving, instead of blocking.
fcntl.fcntl(itr, fcntl.F_SETFL, os.O_NONBLOCK)
# Mainloop
while True:
# Attempt to get output from Switch
try:
reply = itr.recv(50)
if self.logger_level <= logging.DEBUG and len(reply) > 40:
self.logger.debug(format_msg_switch(reply))
except BlockingIOError:
reply = None
self.protocol.process_commands(reply)
msg = self.protocol.get_report()
if self.logger_level <= logging.DEBUG and reply:
self.logger.debug(format_msg_controller(msg))
try:
itr.sendall(msg)
except BlockingIOError:
continue
# Exit pairing loop when player lights have been set and
# vibration has been enabled
if (reply and len(reply) > 45 and
self.protocol.vibration_enabled and self.protocol.player_number):
break
# Switch responds to packets slower during pairing
# Pairing cycle responds optimally on a 15Hz loop
time.sleep(1/15)
self.slow_input_frequency = True
self.input.exited_grip_order_menu = False
return itr, ctrl
def reconnect(self, reconnect_address):
"""Attempts to reconnect with a Switch at the given address.
:param reconnect_address: The Bluetooth MAC address of the Switch
:type reconnect_address: string or list
"""
def recreate_sockets():
# Creating control and interrupt sockets
ctrl = socket.socket(
family=socket.AF_BLUETOOTH,
type=socket.SOCK_SEQPACKET,
proto=socket.BTPROTO_L2CAP)
itr = socket.socket(
family=socket.AF_BLUETOOTH,
type=socket.SOCK_SEQPACKET,
proto=socket.BTPROTO_L2CAP)
return itr, ctrl
self.state["state"] = "reconnecting"
itr = None
ctrl = None
if isinstance(reconnect_address, list):
for address in reconnect_address:
test_itr, test_ctrl = recreate_sockets()
try:
# Setting up HID interrupt/control sockets
test_ctrl.connect((address, 17))
test_itr.connect((address, 19))
itr = test_itr
ctrl = test_ctrl
break # use the first address that accepts both connections
except OSError:
test_itr.close()
test_ctrl.close()
elif isinstance(reconnect_address, str):
test_itr, test_ctrl = recreate_sockets()
# Setting up HID interrupt/control sockets
test_ctrl.connect((reconnect_address, 17))
test_itr.connect((reconnect_address, 19))
itr = test_itr
ctrl = test_ctrl
if not itr and not ctrl:
raise OSError("Unable to reconnect to sockets at the given address(es)",
reconnect_address)
# Setting interrupt connection as non-blocking: sends and receives raise
# "BlockingIOError" instead of blocking
fcntl.fcntl(itr, fcntl.F_SETFL, os.O_NONBLOCK)
# Send an empty input report to the Switch to prompt a reply
self.protocol.process_commands(None)
msg = self.protocol.get_report()
itr.sendall(msg)
return itr, ctrl
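# Hedged usage sketch (requires BlueZ and typically root; values illustrative):
# server = ControllerServer(ControllerTypes.PRO_CONTROLLER,
#                           adapter_path="/org/bluez/hci0")
# state = server.run()  # blocks in the mainloop until interrupted or crashed
# state = server.run(reconnect_address="AA:BB:CC:DD:EE:FF")  # skip pairing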
| 34.415789
| 90
| 0.558878
| 1,417
| 13,078
| 5.018349
| 0.201129
| 0.0135
| 0.025313
| 0.011813
| 0.476586
| 0.407538
| 0.368865
| 0.346927
| 0.325552
| 0.306567
| 0
| 0.009512
| 0.364964
| 13,078
| 379
| 91
| 34.506596
| 0.846719
| 0.170362
| 0
| 0.485944
| 0
| 0
| 0.034991
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028112
| false
| 0.012048
| 0.048193
| 0
| 0.104418
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3adefdd3fa592b7c580b21e49f739074251c1d6b
| 391
|
py
|
Python
|
mfi_customization/mfi/patch/set_first_responded_on_issue.py
|
anuradha-88/mfi_customization
|
eb19ed43d0178b461f1d9914d2f7b6b55c9d030c
|
[
"MIT"
] | null | null | null |
mfi_customization/mfi/patch/set_first_responded_on_issue.py
|
anuradha-88/mfi_customization
|
eb19ed43d0178b461f1d9914d2f7b6b55c9d030c
|
[
"MIT"
] | null | null | null |
mfi_customization/mfi/patch/set_first_responded_on_issue.py
|
anuradha-88/mfi_customization
|
eb19ed43d0178b461f1d9914d2f7b6b55c9d030c
|
[
"MIT"
] | null | null | null |
import frappe
from datetime import datetime
# bench execute mfi_customization.mfi.patch.set_first_responded_on_issue.execute
def execute():
for d in frappe.get_all("Issue"):
for tk in frappe.get_all("Task",{"issue": d.name}, ['attended_date_time', 'status']):
if tk.attended_date_time:
frappe.db.set_value("Issue", {"name": d.name},"first_responded_on",tk.attended_date_time)
| 35.545455
| 93
| 0.749361
| 61
| 391
| 4.540984
| 0.491803
| 0.129964
| 0.173285
| 0.101083
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109974
| 391
| 10
| 94
| 39.1
| 0.795977
| 0.199488
| 0
| 0
| 0
| 0
| 0.209003
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.285714
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3adf530cc79f1ef10e5ff6f32271340c43c7203b
| 3,769
|
py
|
Python
|
main.py
|
aHeraud/cgp-tetris
|
a3483b279bf0bc53edcb3a871873dd576a33c01c
|
[
"MIT"
] | null | null | null |
main.py
|
aHeraud/cgp-tetris
|
a3483b279bf0bc53edcb3a871873dd576a33c01c
|
[
"MIT"
] | null | null | null |
main.py
|
aHeraud/cgp-tetris
|
a3483b279bf0bc53edcb3a871873dd576a33c01c
|
[
"MIT"
] | null | null | null |
#!/usr/local/bin/python3
import sys
from multiprocessing import Pool
from timeit import default_timer as timer
from config import Config
from cgp.functionset import FunctionSet
from cgp.genome import Genome
import numpy as np
import numpy.random
from random import randint
from tetris_learning_environment import Environment
from tetris_learning_environment import Key
import tetris_learning_environment.gym as gym
from cgp import functional_graph
import signal
import time
FRAME_SKIP = 120
DOWNSAMPLE = 8
PROCESSES = 3
CONFIG = Config()
FUNCTION_SET = FunctionSet()
def worker_init(rom_path):
global env
env = gym.TetrisEnvironment(rom_path, frame_skip=FRAME_SKIP)
def run_episode(genome):
pixels = env.reset()
done = False
rewardSum = 0
while not done:
grayscale = np.sum(pixels, axis=2) / 3.0 / 255.0 # constrained to range [0, 1]
#rPixels = pixels[::DOWNSAMPLE,::DOWNSAMPLE,0] +
#gPixels = pixels[::DOWNSAMPLE,::DOWNSAMPLE,1] / 255.0
#bPixels = pixels[::DOWNSAMPLE,::DOWNSAMPLE,2] / 255.0
output = genome.evaluate(grayscale)
action = np.argmax(output)
pixels, reward, done, info = env.step(action)
rewardSum += reward + 1
return (genome, rewardSum)
def render(env, genome):
pixels = env.reset()
import pygame
pygame.init()
size = (pixels.shape[1], pixels.shape[0])
display = pygame.display.set_mode(size)
pygame.display.set_caption('Tetris')
carryOn = True
clock = pygame.time.Clock()
done = False
while not done and carryOn:
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
carryOn = False
pygame.surfarray.blit_array(display, np.flip(np.rot90(pixels), axis=0))
pygame.display.flip()
rPixels = pixels[::DOWNSAMPLE,::DOWNSAMPLE,0] / 255.0
gPixels = pixels[::DOWNSAMPLE,::DOWNSAMPLE,1] / 255.0
bPixels = pixels[::DOWNSAMPLE,::DOWNSAMPLE,2] / 255.0
output = genome.evaluate(rPixels, gPixels, bPixels)
action = np.argmax(output)
pixels, reward, done, info = env.step(action)
clock.tick(60)
pygame.quit()
def main():
if len(sys.argv) < 2:
print("Missing rom path argument.")
return
tetris_rom_path = sys.argv[1]
bestScore = 0
global elite
elite = Genome(CONFIG, FUNCTION_SET)
print('Starting CGP for ' + str(CONFIG.generations) + ' generations...')
with Pool(processes=PROCESSES, initializer=worker_init, initargs=(tetris_rom_path,)) as pool:
for generation in range(CONFIG.generations):
start = timer()
children = [elite.get_child() for _ in range(CONFIG.childrenPerGeneration)]
results = [pool.apply_async(run_episode, args=(child,)) for child in children]
results = [result.get() for result in results]
for (genome, score) in results:
if score >= bestScore:
bestScore = score
elite = genome
elite.save_to_file('elite.out')
end = timer()
timeElapsed = end - start
estimatedTimeSec = timeElapsed * (CONFIG.generations + 1 - generation)
estimatedTimeMin = estimatedTimeSec / 60.0
print('Generation ' + str(generation + 1) + ' of ' + str(CONFIG.generations) + ' complete, current best score = ', bestScore)
print('Est. minutes remaining: ' + str(estimatedTimeMin))
print("FINISHED")
print('Best Score: ', bestScore)
env = gym.TetrisEnvironment(tetris_rom_path, frame_skip=FRAME_SKIP)
while True:
render(env, elite)
if __name__ == '__main__':
main()
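# The Pool(initializer=..., initargs=...) pattern above is used because the
# emulator handle is not picklable: each worker process builds its own copy
# and keeps it in a module-level global. Minimal illustration of the pattern
# (names illustrative, not part of this project):
#
# from multiprocessing import Pool
#
# def _init(tag):
#     global _resource
#     _resource = {'tag': tag}  # stand-in for gym.TetrisEnvironment(...)
#
# def _work(x):
#     return (x, _resource['tag'])
#
# if __name__ == '__main__':
#     with Pool(processes=2, initializer=_init, initargs=('rom',)) as pool:
#         print(pool.map(_work, range(4)))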
| 30.395161
| 137
| 0.644733
| 450
| 3,769
| 5.304444
| 0.328889
| 0.017595
| 0.065354
| 0.024298
| 0.204022
| 0.146628
| 0.125681
| 0.125681
| 0.125681
| 0.125681
| 0
| 0.020487
| 0.248872
| 3,769
| 123
| 138
| 30.642276
| 0.822678
| 0.065004
| 0
| 0.087912
| 0
| 0
| 0.048905
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043956
| false
| 0
| 0.175824
| 0
| 0.241758
| 0.065934
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ae33afc12f8987d0c85ee05a95ac1ec3a4be0c6
| 3,726
|
py
|
Python
|
image_classifier/model_lib.py
|
JMarcan/computer_vision_perception
|
a5aa7bfb316e7b45596d8c5916638f5ce2b6d654
|
[
"MIT"
] | null | null | null |
image_classifier/model_lib.py
|
JMarcan/computer_vision_perception
|
a5aa7bfb316e7b45596d8c5916638f5ce2b6d654
|
[
"MIT"
] | null | null | null |
image_classifier/model_lib.py
|
JMarcan/computer_vision_perception
|
a5aa7bfb316e7b45596d8c5916638f5ce2b6d654
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Function that loads a checkpoint and rebuilds the model
import torch
from torch import nn
from collections import OrderedDict
from torchvision import datasets, transforms, models
def save_checkpoint(model, checkpoint_path, output_categories):
'''
Save the trained deep learning model
Args:
model: trained deep learning model to be saved
checkpoint_path(str): file path where model will be saved
output_categories(int): number of output categories recognized by the model
Returns:
None
'''
model.cpu()
torch.save({'arch': 'vgg16',
'state_dict': model.state_dict(),
'class_to_idx': model.class_to_idx,
'output_categories': output_categories
},checkpoint_path)
def load_checkpoint(checkpoint_path, device='cuda'):
'''
Loads trained deep learning model
Args:
checkpoint_path(str): file path where model will be saved
Returns:
model: loaded deep learning model
'''
check = torch.load(checkpoint_path, map_location=device)
if check['arch'] == 'vgg16':
model = models.vgg16(pretrained = True)
elif check['arch'] == 'vgg13':
model = models.vgg13(pretrained = True)
else:
print("Error: LoadCheckpoint - Model not recognized")
return 0
output_categories = 2
try:
if check['output_categories'] >= 2:
output_categories = check['output_categories']
else:
print("Error: LoadCheckpoint - Saved model output categories has invalid value ({0}). Value needs to be 2 or higher.".format(check['output_categories']))
return 0
except KeyError as e: # when 'output_categories' is missing from the saved model
print("Error: LoadCheckpoint - Saved model does not contain information about output categories: {0}".format(e))
return 0
for param in model.parameters():
param.requires_grad = False
model.class_to_idx = check['class_to_idx']
model.classifier = load_classifier(model, output_categories)
model.load_state_dict(check['state_dict'])
return model
def load_classifier(model, output_categories):
'''
Loads the classifier that we will train
Args:
model: deep learning model for which we create the classifier
output_categories(int): number of output categories
recognized by the model
Returns:
classifier: loaded classifier for a given model
'''
'''
# VGG16 classifier structure:
(classifier): Sequential(
(0): Linear(in_features=25088, out_features=4096, bias=True)
(1): ReLU(inplace)
(2): Dropout(p=0.5)
(3): Linear(in_features=4096, out_features=4096, bias=True)
(4): ReLU(inplace)
(5): Dropout(p=0.5)
(6): Linear(in_features=4096, out_features=1000, bias=True)
'''
#Classifier parameters
classifier_input = model.classifier[0].in_features # vgg16's first classifier layer takes 25088 in_features
classifier_hidden_units = 4096 # 4096 default model value
classifier = nn.Sequential(
nn.Linear(classifier_input, classifier_hidden_units, bias=True),
nn.ReLU(),
nn.Dropout(p=0.5),
nn.Linear(classifier_hidden_units, output_categories),
nn.LogSoftmax(dim=1)
# LogSoftmax outputs log-probabilities; exponentiating them gives class
# probabilities that sum to 1, i.e. the model's confidence per class (0-100%)
)
return classifier
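# Hedged round-trip sketch for the helpers above (path and the two-class
# mapping are illustrative):
# from torchvision import models
# model = models.vgg16(pretrained=True)
# model.class_to_idx = {'cat': 0, 'dog': 1}
# model.classifier = load_classifier(model, output_categories=2)
# save_checkpoint(model, 'checkpoint.pth', output_categories=2)
# restored = load_checkpoint('checkpoint.pth', device='cpu')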
| 31.846154
| 165
| 0.630972
| 442
| 3,726
| 5.196833
| 0.319005
| 0.125381
| 0.037005
| 0.031345
| 0.227688
| 0.127993
| 0.101001
| 0.101001
| 0.101001
| 0.101001
| 0
| 0.030246
| 0.281267
| 3,726
| 116
| 166
| 32.12069
| 0.827483
| 0.285829
| 0
| 0.106383
| 0
| 0.021277
| 0.184798
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06383
| false
| 0
| 0.085106
| 0
| 0.255319
| 0.06383
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3ae4b2634727a7a3c18f2473fe0c51212182326b
| 7,689
|
py
|
Python
|
pytorch/utils/utils.py
|
XinyiYS/CollaborativeFairFederatedLearning
|
1372f74230b366a41243f809ce0fc15586cd40fe
|
[
"MIT"
] | 25
|
2020-07-29T03:46:12.000Z
|
2022-03-23T07:15:53.000Z
|
pytorch/utils/utils.py
|
lingjuanlv/CollaborativeFairFederatedLearning
|
1372f74230b366a41243f809ce0fc15586cd40fe
|
[
"MIT"
] | null | null | null |
pytorch/utils/utils.py
|
lingjuanlv/CollaborativeFairFederatedLearning
|
1372f74230b366a41243f809ce0fc15586cd40fe
|
[
"MIT"
] | 7
|
2020-09-15T19:06:27.000Z
|
2022-02-22T06:51:52.000Z
|
import copy
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchtext.data import Batch
def average_models(models, device=None):
final_model = copy.deepcopy(models[0])
if device:
models = [model.to(device) for model in models]
final_model = final_model.to(device)
averaged_parameters = aggregate_gradient_updates([list(model.parameters()) for model in models], R=list(range(len(models))), mode='mean') # R selects every model for aggregation
for param, avg_param in zip(final_model.parameters(), averaged_parameters):
param.data = avg_param.data
return final_model
def compute_grad_update(old_model, new_model, device=None):
# maybe later to implement on selected layers/parameters
if device:
old_model, new_model = old_model.to(device), new_model.to(device)
return [(new_param.data - old_param.data) for old_param, new_param in zip(old_model.parameters(), new_model.parameters())]
def add_gradient_updates(grad_update_1, grad_update_2, weight = 1.0):
assert len(grad_update_1) == len(
grad_update_2), "Lengths of the two grad_updates not equal"
for param_1, param_2 in zip(grad_update_1, grad_update_2):
param_1.data += param_2.data * weight
def aggregate_gradient_updates(grad_updates, R, device=None, mode='sum', credits=None, shard_sizes=None):
if grad_updates:
len_first = len(grad_updates[0])
assert all(len(i) == len_first for i in grad_updates), "Different shapes of parameters. Cannot aggregate."
else:
return
grad_updates_ = [copy.deepcopy(grad_update) for i, grad_update in enumerate(grad_updates) if i in R]
if device:
for i, grad_update in enumerate(grad_updates_):
grad_updates_[i] = [param.to(device) for param in grad_update]
if credits is not None:
credits = [credit for i, credit in enumerate(credits) if i in R]
if shard_sizes is not None:
shard_sizes = [shard_size for i,shard_size in enumerate(shard_sizes) if i in R]
aggregated_gradient_updates = []
if mode == 'mean':
# default mean is FL-avg: weighted avg according to nk/n
if shard_sizes is None:
shard_sizes = torch.ones(len(grad_updates))
for i, (grad_update, shard_size) in enumerate(zip(grad_updates_, shard_sizes)):
grad_updates_[i] = [(shard_size * update) for update in grad_update]
for i in range(len(grad_updates_[0])):
aggregated_gradient_updates.append(torch.stack(
[grad_update[i] for grad_update in grad_updates_]).mean(dim=0))
elif mode == 'sum':
for i in range(len(grad_updates_[0])):
aggregated_gradient_updates.append(torch.stack(
[grad_update[i] for grad_update in grad_updates_]).sum(dim=0))
elif mode == 'credit-sum':
# first changes the grad_updates altogether
for i, (grad_update, credit) in enumerate(zip(grad_updates_, credits)):
grad_updates_[i] = [(credit * update) for update in grad_update]
# then compute the credit weight sum
for i in range(len(grad_updates_[0])):
aggregated_gradient_updates.append(torch.stack(
[grad_update[i] for grad_update in grad_updates_]).sum(dim=0))
return aggregated_gradient_updates
def add_update_to_model(model, update, weight=1.0, device=None):
if not update: return model
if device:
model = model.to(device)
update = [param.to(device) for param in update]
for param_model, param_update in zip(model.parameters(), update):
param_model.data += weight * param_update.data
return model
def compare_models(model1, model2):
for p1, p2 in zip(model1.parameters(), model2.parameters()):
if p1.data.ne(p2.data).sum() > 0:
return False # two models have different weights
return True
def flatten(grad_update):
return torch.cat([update.data.view(-1) for update in grad_update])
def unflatten(flattened, normal_shape):
grad_update = []
for param in normal_shape:
n_params = len(param.view(-1))
grad_update.append( torch.as_tensor(flattened[:n_params]).reshape(param.size()) )
flattened = flattened[n_params:]
return grad_update
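# Round-trip sanity check for flatten/unflatten (shapes illustrative):
# params = [torch.randn(3, 4), torch.randn(5)]
# flat = flatten(params)              # 1-D tensor of length 17
# restored = unflatten(flat, params)  # same shapes as params
# assert all(torch.equal(a, b) for a, b in zip(params, restored))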
def evaluate(model, eval_loader, device, loss_fn=None, verbose=True):
model.eval()
model = model.to(device)
correct = 0
total = 0
with torch.no_grad():
for i, batch in enumerate(eval_loader):
if isinstance(batch, Batch):
batch_data, batch_target = batch.text, batch.label
# batch_data.data.t_(), batch_target.data.sub_(1) # batch first, index align
batch_data = batch_data.permute(1, 0)
else:
batch_data, batch_target = batch[0], batch[1]
batch_data, batch_target = batch_data.to(device), batch_target.to(device)
outputs = model(batch_data)
if loss_fn:
loss = loss_fn(outputs, batch_target)
else:
loss = None
correct += (torch.max(outputs, 1)[1].view(batch_target.size()).data == batch_target.data).sum()
total += len(batch_target)
accuracy = correct.float() / total
if verbose and loss is not None:
print("Loss: {:.6f}. Accuracy: {:.4%}.".format(loss, accuracy)) # loss reflects the last batch only
elif verbose:
print("Accuracy: {:.4%}.".format(accuracy)) # no loss_fn given, so no loss to report
return loss, accuracy
'''
def one_on_one_evaluate(participants, federated_model, grad_updates, unfiltererd_grad_updates, eval_loader, device):
val_accs = []
for i, participant in enumerate(participants):
if participant.theta == 1:
model_to_eval = copy.deepcopy(participant.model)
add_update_to_model(model_to_eval, unfiltererd_grad_updates[i], device=device)
else:
model_to_eval = copy.deepcopy(federated_model)
add_update_to_model(model_to_eval, grad_updates[i], device=device)
_, val_acc = evaluate(model_to_eval, eval_loader, device, verbose=False)
del model_to_eval
val_accs.append(val_acc)
return val_accs
def leave_one_out_evaluate(federated_model, grad_updates, eval_loader, device):
loo_model = copy.deepcopy(federated_model)
loo_losses, loo_val_accs = [], []
for grad_update in grad_updates:
loo_model = add_update_to_model(loo_model, grad_update, weight = -1.0, device=device)
loss, val_acc = evaluate(loo_model, eval_loader, device, verbose=False)
loo_losses.append(loss)
loo_val_accs.append(val_acc)
loo_model = add_update_to_model(loo_model, grad_update, weight = 1.0, device=device)
# scalar - 1D torch tensor subtraction -> 1D torch tensor
# marginal_contributions = curr_val_acc - torch.tensor(loo_val_accs)
return loo_val_accs
'''
import numpy as np
np.random.seed(1111)
def random_split(sample_indices, m_bins, equal=True):
sample_indices = np.asarray(sample_indices)
if equal:
indices_list = np.array_split(sample_indices, m_bins)
else:
split_points = np.random.choice(
len(sample_indices) - 2, m_bins - 1, replace=False) + 1
split_points.sort()
indices_list = np.split(sample_indices, split_points)
return indices_list
import random
from itertools import permutations
def compute_shapley(grad_updates, federated_model, test_loader, device, Max_num_sequences=50):
num_participants = len(grad_updates)
all_sequences = list(permutations(range(num_participants)))
if len(all_sequences) > Max_num_sequences:
random.shuffle(all_sequences)
all_sequences = all_sequences[:Max_num_sequences]
test_loss_prev, test_acc_prev = evaluate(federated_model, test_loader, device, verbose=False)
prev_contribution = test_acc_prev.data
marginal_contributions = torch.zeros((num_participants))
for sequence in all_sequences:
running_model = copy.deepcopy(federated_model)
curr_contributions = []
for participant_id in sequence:
running_model = add_update_to_model(running_model, grad_updates[participant_id])
test_loss, test_acc = evaluate(running_model, test_loader, device, verbose=False)
contribution = test_acc.data
if not curr_contributions:
marginal_contributions[participant_id] += contribution - prev_contribution
else:
marginal_contributions[participant_id] += contribution - curr_contributions[-1]
curr_contributions.append(contribution)
return marginal_contributions / len(all_sequences)
| 35.109589
| 123
| 0.757836
| 1,158
| 7,689
| 4.764249
| 0.163212
| 0.059815
| 0.015226
| 0.017401
| 0.287656
| 0.150082
| 0.107305
| 0.107305
| 0.082654
| 0.082654
| 0
| 0.009201
| 0.137729
| 7,689
| 219
| 124
| 35.109589
| 0.822926
| 0.038367
| 0
| 0.136691
| 0
| 0
| 0.023716
| 0
| 0
| 0
| 0
| 0
| 0.014388
| 1
| 0.079137
| false
| 0
| 0.057554
| 0.007194
| 0.223022
| 0.007194
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3aea02ea9b227434bbb8b250a3ff42f885040255
| 17,501
|
py
|
Python
|
Interface/Windows/WildernessTravelManagerWindow.py
|
Snackhole/SerpentRPG
|
9e5ae019893592a46dd7681daba56af8e8e29744
|
[
"MIT"
] | 1
|
2021-02-27T16:33:53.000Z
|
2021-02-27T16:33:53.000Z
|
Interface/Windows/WildernessTravelManagerWindow.py
|
Snackhole/SerpentRPG
|
9e5ae019893592a46dd7681daba56af8e8e29744
|
[
"MIT"
] | null | null | null |
Interface/Windows/WildernessTravelManagerWindow.py
|
Snackhole/SerpentRPG
|
9e5ae019893592a46dd7681daba56af8e8e29744
|
[
"MIT"
] | null | null | null |
import math
import os
from PyQt5 import QtCore
from PyQt5.QtWidgets import QGridLayout, QLabel, QPushButton, QFrame, QTextEdit, QInputDialog, QSizePolicy, QAction, QMessageBox
from Core.DieClock import DieClock
from Core.WildernessTravelManager import WildernessTravelManager
from Interface.Widgets.LineEditMouseWheelExtension import LineEditMouseWheelExtension
from Interface.Windows.Window import Window
from SaveAndLoad.SaveAndOpenMixin import SaveAndOpenMixin
class WildernessTravelManagerWindow(Window, SaveAndOpenMixin):
def __init__(self, ScriptName, AbsoluteDirectoryPath):
# Store Absolute Directory Path for SaveAndOpenMixin
self.AbsoluteDirectoryPath = AbsoluteDirectoryPath
# Initialize
super().__init__(ScriptName, AbsoluteDirectoryPath)
# Create Wilderness Travel Manager
self.WildernessTravelManager = WildernessTravelManager()
# Set Up Save and Open
self.SetUpSaveAndOpen(".wildtrvl", "Wilderness Travel Manager", (WildernessTravelManager, DieClock))
# Update Display
self.UpdateDisplay()
def CreateInterface(self):
# Styles
self.LabelStyle = "QLabel {font-size: 20pt;}"
self.LineEditStyle = "QLineEdit {font-size: 20pt;}"
self.LineEditStyleYellow = "QLineEdit {font-size: 20pt; color: goldenrod;}"
self.LineEditStyleRed = "QLineEdit {font-size: 20pt; color: red;}"
self.PoolAndClockButtonStyle = "QPushButton {font-size: 20pt;}"
# Button and Line Edit Size Policy
self.ButtonAndLineEditSizePolicy = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
# Pool and Clock Width
self.PoolAndClockWidth = 160
# Travel Actions Label
self.TravelActionsLabel = QLabel("Travel Actions")
self.TravelActionsLabel.setStyleSheet(self.LabelStyle)
self.TravelActionsLabel.setAlignment(QtCore.Qt.AlignCenter)
# Travel Action Buttons
self.MoveButton = QPushButton("Move")
self.MoveButton.clicked.connect(self.Move)
self.MoveButton.setSizePolicy(self.ButtonAndLineEditSizePolicy)
self.ForageButton = QPushButton("Forage")
self.ForageButton.clicked.connect(self.Forage)
self.ForageButton.setSizePolicy(self.ButtonAndLineEditSizePolicy)
self.SpendDaysButton = QPushButton("Spend Days")
self.SpendDaysButton.clicked.connect(self.SpendDays)
self.SpendDaysButton.setSizePolicy(self.ButtonAndLineEditSizePolicy)
# Wilderness Clock Label
self.WildernessClockLabel = QLabel("Wilderness Clock")
self.WildernessClockLabel.setStyleSheet(self.LabelStyle)
self.WildernessClockLabel.setAlignment(QtCore.Qt.AlignCenter)
# Wilderness Clock Current Value Line Edit
self.WildernessClockCurrentValueLineEdit = LineEditMouseWheelExtension(lambda event: self.ModifyWildernessClockCurrentValue(1 if event.angleDelta().y() > 0 else -1))
self.WildernessClockCurrentValueLineEdit.setReadOnly(True)
self.WildernessClockCurrentValueLineEdit.setAlignment(QtCore.Qt.AlignCenter)
self.WildernessClockCurrentValueLineEdit.setStyleSheet(self.LineEditStyle)
self.WildernessClockCurrentValueLineEdit.setSizePolicy(self.ButtonAndLineEditSizePolicy)
self.WildernessClockCurrentValueLineEdit.setFixedWidth(self.PoolAndClockWidth)
# Wilderness Clock Current Value Buttons
self.WildernessClockCurrentValueIncreaseButton = QPushButton("+")
self.WildernessClockCurrentValueIncreaseButton.clicked.connect(lambda: self.ModifyWildernessClockCurrentValue(1))
self.WildernessClockCurrentValueIncreaseButton.setSizePolicy(self.ButtonAndLineEditSizePolicy)
self.WildernessClockCurrentValueIncreaseButton.setStyleSheet(self.PoolAndClockButtonStyle)
self.WildernessClockCurrentValueDecreaseButton = QPushButton("-")
self.WildernessClockCurrentValueDecreaseButton.clicked.connect(lambda: self.ModifyWildernessClockCurrentValue(-1))
self.WildernessClockCurrentValueDecreaseButton.setSizePolicy(self.ButtonAndLineEditSizePolicy)
self.WildernessClockCurrentValueDecreaseButton.setStyleSheet(self.PoolAndClockButtonStyle)
# Wilderness Clock Divider Label
self.WildernessClockDividerLabel = QLabel("/")
self.WildernessClockDividerLabel.setStyleSheet(self.LabelStyle)
self.WildernessClockDividerLabel.setAlignment(QtCore.Qt.AlignCenter)
# Wilderness Clock Maximum Value Line Edit
self.WildernessClockMaximumValueLineEdit = LineEditMouseWheelExtension(lambda event: self.ModifyWildernessClockMaximumValue(1 if event.angleDelta().y() > 0 else -1))
self.WildernessClockMaximumValueLineEdit.setReadOnly(True)
self.WildernessClockMaximumValueLineEdit.setAlignment(QtCore.Qt.AlignCenter)
self.WildernessClockMaximumValueLineEdit.setStyleSheet(self.LineEditStyle)
self.WildernessClockMaximumValueLineEdit.setSizePolicy(self.ButtonAndLineEditSizePolicy)
self.WildernessClockMaximumValueLineEdit.setFixedWidth(self.PoolAndClockWidth)
# Wilderness Clock Maximum Value Buttons
self.WildernessClockMaximumValueIncreaseButton = QPushButton("+")
self.WildernessClockMaximumValueIncreaseButton.clicked.connect(lambda: self.ModifyWildernessClockMaximumValue(1))
self.WildernessClockMaximumValueIncreaseButton.setSizePolicy(self.ButtonAndLineEditSizePolicy)
self.WildernessClockMaximumValueIncreaseButton.setStyleSheet(self.PoolAndClockButtonStyle)
self.WildernessClockMaximumValueDecreaseButton = QPushButton("-")
self.WildernessClockMaximumValueDecreaseButton.clicked.connect(lambda: self.ModifyWildernessClockMaximumValue(-1))
self.WildernessClockMaximumValueDecreaseButton.setSizePolicy(self.ButtonAndLineEditSizePolicy)
self.WildernessClockMaximumValueDecreaseButton.setStyleSheet(self.PoolAndClockButtonStyle)
# Wilderness Clock Threshold Label
self.WildernessClockThresholdLabel = QLabel("Threshold")
self.WildernessClockThresholdLabel.setAlignment(QtCore.Qt.AlignCenter)
# Wilderness Clock Threshold Line Edit
self.WildernessClockThresholdLineEdit = LineEditMouseWheelExtension(lambda event: self.ModifyWildernessClockThreshold(1 if event.angleDelta().y() > 0 else -1))
self.WildernessClockThresholdLineEdit.setReadOnly(True)
self.WildernessClockThresholdLineEdit.setAlignment(QtCore.Qt.AlignCenter)
self.WildernessClockThresholdLineEdit.setStyleSheet(self.LineEditStyle)
self.WildernessClockThresholdLineEdit.setSizePolicy(self.ButtonAndLineEditSizePolicy)
self.WildernessClockThresholdLineEdit.setFixedWidth(self.PoolAndClockWidth)
# Wilderness Clock Threshold Buttons
self.WildernessClockThresholdIncreaseButton = QPushButton("+")
self.WildernessClockThresholdIncreaseButton.clicked.connect(lambda: self.ModifyWildernessClockThreshold(1))
self.WildernessClockThresholdIncreaseButton.setSizePolicy(self.ButtonAndLineEditSizePolicy)
self.WildernessClockThresholdIncreaseButton.setStyleSheet(self.PoolAndClockButtonStyle)
self.WildernessClockThresholdDecreaseButton = QPushButton("-")
self.WildernessClockThresholdDecreaseButton.clicked.connect(lambda: self.ModifyWildernessClockThreshold(-1))
self.WildernessClockThresholdDecreaseButton.setSizePolicy(self.ButtonAndLineEditSizePolicy)
self.WildernessClockThresholdDecreaseButton.setStyleSheet(self.PoolAndClockButtonStyle)
# Wilderness Log Label
self.WildernessLogLabel = QLabel("Wilderness Log")
self.WildernessLogLabel.setStyleSheet(self.LabelStyle)
self.WildernessLogLabel.setAlignment(QtCore.Qt.AlignCenter)
# Wilderness Log Text Edit
self.WildernessLogTextEdit = QTextEdit()
self.WildernessLogTextEdit.setReadOnly(True)
# Create Layout
self.Layout = QGridLayout()
# Travel Action Widgets in Layout
self.TravelActionsFrame = QFrame()
self.TravelActionsFrame.setFrameStyle(QFrame.Panel | QFrame.Plain)
self.TravelActionsLayout = QGridLayout()
self.TravelActionsLayout.addWidget(self.TravelActionsLabel, 0, 0)
self.TravelActionsLayout.addWidget(self.MoveButton, 1, 0)
self.TravelActionsLayout.addWidget(self.ForageButton, 2, 0)
self.TravelActionsLayout.addWidget(self.SpendDaysButton, 3, 0)
for Row in range(1, 4):
self.TravelActionsLayout.setRowStretch(Row, 1)
self.TravelActionsFrame.setLayout(self.TravelActionsLayout)
self.Layout.addWidget(self.TravelActionsFrame, 0, 0)
# Add Wilderness Clock Widgets to Layout
self.WildernessClockFrame = QFrame()
self.WildernessClockFrame.setFrameStyle(QFrame.Panel | QFrame.Plain)
self.WildernessClockLayout = QGridLayout()
self.WildernessClockLayout.addWidget(self.WildernessClockLabel, 0, 0, 1, 3)
self.WildernessClockLayout.addWidget(self.WildernessClockCurrentValueIncreaseButton, 1, 0)
self.WildernessClockLayout.addWidget(self.WildernessClockCurrentValueLineEdit, 2, 0)
self.WildernessClockLayout.addWidget(self.WildernessClockCurrentValueDecreaseButton, 3, 0)
self.WildernessClockLayout.addWidget(self.WildernessClockDividerLabel, 2, 1)
self.WildernessClockLayout.addWidget(self.WildernessClockMaximumValueIncreaseButton, 1, 2)
self.WildernessClockLayout.addWidget(self.WildernessClockMaximumValueLineEdit, 2, 2)
self.WildernessClockLayout.addWidget(self.WildernessClockMaximumValueDecreaseButton, 3, 2)
self.WildernessClockThresholdFrame = QFrame()
self.WildernessClockThresholdLayout = QGridLayout()
self.WildernessClockThresholdLayout.addWidget(self.WildernessClockThresholdLabel, 0, 0, 1, 3)
self.WildernessClockThresholdLayout.addWidget(self.WildernessClockThresholdDecreaseButton, 1, 0)
self.WildernessClockThresholdLayout.addWidget(self.WildernessClockThresholdLineEdit, 1, 1)
self.WildernessClockThresholdLayout.addWidget(self.WildernessClockThresholdIncreaseButton, 1, 2)
self.WildernessClockThresholdFrame.setLayout(self.WildernessClockThresholdLayout)
self.WildernessClockLayout.addWidget(self.WildernessClockThresholdFrame, 4, 0, 1, 3)
self.WildernessClockLayout.setRowStretch(1, 1)
self.WildernessClockLayout.setRowStretch(2, 2)
self.WildernessClockLayout.setRowStretch(3, 1)
self.WildernessClockFrame.setLayout(self.WildernessClockLayout)
self.Layout.addWidget(self.WildernessClockFrame, 0, 1)
# Add Wilderness Log Widgets to Layout
self.WildernessLogFrame = QFrame()
self.WildernessLogFrame.setFrameStyle(QFrame.Panel | QFrame.Plain)
self.WildernessLogLayout = QGridLayout()
self.WildernessLogLayout.addWidget(self.WildernessLogLabel, 0, 0)
self.WildernessLogLayout.addWidget(self.WildernessLogTextEdit, 1, 0)
self.WildernessLogFrame.setLayout(self.WildernessLogLayout)
self.Layout.addWidget(self.WildernessLogFrame, 0, 2)
# Set and Configure Layout
self.Layout.setColumnStretch(2, 1)
self.Frame.setLayout(self.Layout)
# Create Menu Actions
self.NewAction = QAction("New")
self.NewAction.setShortcut("Ctrl+N")
self.NewAction.triggered.connect(self.NewActionTriggered)
self.OpenAction = QAction("Open")
self.OpenAction.setShortcut("Ctrl+O")
self.OpenAction.triggered.connect(self.OpenActionTriggered)
self.SaveAction = QAction("Save")
self.SaveAction.setShortcut("Ctrl+S")
self.SaveAction.triggered.connect(self.SaveActionTriggered)
self.SaveAsAction = QAction("Save As")
self.SaveAsAction.setShortcut("Ctrl+Shift+S")
self.SaveAsAction.triggered.connect(self.SaveAsActionTriggered)
self.QuitAction = QAction("Quit")
self.QuitAction.setShortcut("Ctrl+Q")
self.QuitAction.triggered.connect(self.close)
self.AddToLogAction = QAction("Add to Log")
self.AddToLogAction.triggered.connect(self.AddToLog)
self.RemoveLastLogEntryAction = QAction("Remove Last Log Entry")
self.RemoveLastLogEntryAction.triggered.connect(self.RemoveLastLogEntry)
self.ClearLogAction = QAction("Clear Log")
self.ClearLogAction.triggered.connect(self.ClearLog)
# Menu Bar
self.MenuBar = self.menuBar()
self.FileMenu = self.MenuBar.addMenu("File")
self.FileMenu.addAction(self.NewAction)
self.FileMenu.addAction(self.OpenAction)
self.FileMenu.addSeparator()
self.FileMenu.addAction(self.SaveAction)
self.FileMenu.addAction(self.SaveAsAction)
self.FileMenu.addSeparator()
self.FileMenu.addAction(self.QuitAction)
self.LogMenu = self.MenuBar.addMenu("Log")
self.LogMenu.addAction(self.AddToLogAction)
self.LogMenu.addAction(self.RemoveLastLogEntryAction)
self.LogMenu.addAction(self.ClearLogAction)
# Modify Values Methods
def ModifyWildernessClockCurrentValue(self, Delta):
self.WildernessTravelManager.ModifyWildernessClockCurrentValue(Delta)
self.UpdateUnsavedChangesFlag(True)
def ModifyWildernessClockMaximumValue(self, Delta):
self.WildernessTravelManager.ModifyWildernessClockMaximumValue(Delta)
self.UpdateUnsavedChangesFlag(True)
def ModifyWildernessClockThreshold(self, Delta):
self.WildernessTravelManager.ModifyWildernessClockThreshold(Delta)
self.UpdateUnsavedChangesFlag(True)
# Travel Action Methods
def Move(self):
TravelTime, OK = QInputDialog.getInt(self, "Travel Time", "Travel time of movement:", 1, 1)
if OK:
self.WildernessTravelManager.Move(TravelTime)
self.UpdateUnsavedChangesFlag(True)
def Forage(self):
self.WildernessTravelManager.Forage()
self.UpdateUnsavedChangesFlag(True)
def SpendDays(self):
DaysSpent, DaysSpentOK = QInputDialog.getInt(self, "Spend Days", "Days spent:", 1, 1)
if DaysSpentOK:
Activity, ActivityOK = QInputDialog.getText(self, "Activity", "Spent " + str(DaysSpent) + " days...")
if ActivityOK:
if Activity == "":
Activity = None
self.WildernessTravelManager.SpendDays(DaysSpent, Activity=Activity, Log=True)
self.UpdateUnsavedChangesFlag(True)
# File Menu Action Methods
def NewActionTriggered(self):
if self.New(self.WildernessTravelManager):
self.WildernessTravelManager = WildernessTravelManager()
self.UpdateDisplay()
def OpenActionTriggered(self):
OpenData = self.Open(self.WildernessTravelManager)
if OpenData is not None:
self.WildernessTravelManager = OpenData
self.UpdateDisplay()
def SaveActionTriggered(self):
self.Save(self.WildernessTravelManager)
self.UpdateDisplay()
def SaveAsActionTriggered(self):
self.Save(self.WildernessTravelManager, SaveAs=True)
self.UpdateDisplay()
# Log Menu Action Methods
def AddToLog(self):
LogString, OK = QInputDialog.getText(self, "Add to Log", "Add this to the Wilderness Log:")
if OK:
self.WildernessTravelManager.Log(LogString)
self.UpdateUnsavedChangesFlag(True)
def RemoveLastLogEntry(self):
if self.DisplayMessageBox("Are you sure you want to remove the last log entry? This cannot be undone.", Icon=QMessageBox.Question, Buttons=(QMessageBox.Yes | QMessageBox.No)) == QMessageBox.Yes:
self.WildernessTravelManager.RemoveLastLogEntry()
self.UpdateUnsavedChangesFlag(True)
def ClearLog(self):
if self.DisplayMessageBox("Are you sure you want to clear the log? This cannot be undone.", Icon=QMessageBox.Question, Buttons=(QMessageBox.Yes | QMessageBox.No)) == QMessageBox.Yes:
self.WildernessTravelManager.ClearLog()
self.UpdateUnsavedChangesFlag(True)
# Display Update Methods
def UpdateDisplay(self):
# Wilderness Clock Display
self.WildernessClockCurrentValueLineEdit.setText(str(self.WildernessTravelManager.WildernessClock.Value))
self.WildernessClockMaximumValueLineEdit.setText(str(self.WildernessTravelManager.WildernessClock.MaximumValue))
self.WildernessClockThresholdLineEdit.setText(str(self.WildernessTravelManager.WildernessClock.ComplicationThreshold))
# Wilderness Log Display
WildernessLogString = ""
for LogEntry in reversed(self.WildernessTravelManager.WildernessLog):
WildernessLogString += LogEntry + "\n\n---\n\n"
self.WildernessLogTextEdit.setPlainText(WildernessLogString[:-7])
# Update Window Title
self.UpdateWindowTitle()
def UpdateWindowTitle(self):
CurrentFileTitleSection = " [" + os.path.basename(self.CurrentOpenFileName) + "]" if self.CurrentOpenFileName != "" else ""
UnsavedChangesIndicator = " *" if self.UnsavedChanges else ""
self.setWindowTitle("Wilderness Travel Manager - " + self.ScriptName + CurrentFileTitleSection + UnsavedChangesIndicator)
def UpdateUnsavedChangesFlag(self, UnsavedChanges):
self.UnsavedChanges = UnsavedChanges
self.UpdateDisplay()
| 52.241791
| 203
| 0.746186
| 1,415
| 17,501
| 9.223322
| 0.180919
| 0.021914
| 0.040457
| 0.040457
| 0.173933
| 0.086583
| 0.067045
| 0.031492
| 0.031492
| 0.024826
| 0
| 0.006633
| 0.172962
| 17,501
| 334
| 204
| 52.398204
| 0.895053
| 0.054797
| 0
| 0.0875
| 0
| 0
| 0.041745
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075
| false
| 0
| 0.0375
| 0
| 0.116667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3aec38a277d65b28e5bc3af20165517902bdb701
| 1,189
|
py
|
Python
|
day03/part1.py
|
mtn/advent19
|
15d4ae84d248fcf66cb5ebdefee7cad4e6c4a9c2
|
[
"MIT"
] | null | null | null |
day03/part1.py
|
mtn/advent19
|
15d4ae84d248fcf66cb5ebdefee7cad4e6c4a9c2
|
[
"MIT"
] | null | null | null |
day03/part1.py
|
mtn/advent19
|
15d4ae84d248fcf66cb5ebdefee7cad4e6c4a9c2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
def getpts(path):
pts = set()
loc = [0, 0]
for step in path:
direction = step[0]
distance = int(step[1:])
if direction == "R":
for s in range(distance):
pts.add((loc[0] + s + 1, loc[1]))
loc[0] += distance
elif direction == "L":
for s in range(distance):
pts.add((loc[0] - s - 1, loc[1]))
loc[0] -= distance
elif direction == "U":
for s in range(distance):
pts.add((loc[0], loc[1] - s - 1))
loc[1] -= distance
elif direction == "D":
for s in range(distance):
pts.add((loc[0], loc[1] + s + 1))
loc[1] += distance
return pts
with open("input.txt") as f:
directions = f.read()
path1, path2 = map(lambda x: x.split(","), directions.strip().split("\n"))
pts1 = getpts(path1)
pts2 = getpts(path2)
intersections = pts1.intersection(pts2)
min_dist = None
closest = None
for i in intersections:
dist = abs(i[0]) + abs(i[1])
if min_dist is None or dist < min_dist:
closest = i
min_dist = dist
print(min_dist)
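A quick sanity check (my own addition, assuming the commonly cited example wire paths rather than anything in input.txt): the closest intersection of the two sample wires below lies at Manhattan distance 6.
# Self-check with hypothetical sample paths; safe to run after the script,
# since getpts() is already defined above.
sample1 = getpts("R8,U5,L5,D3".split(","))
sample2 = getpts("U7,R6,D4,L4".split(","))
assert min(abs(x) + abs(y) for x, y in sample1.intersection(sample2)) == 6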
| 23.78
| 78
| 0.502103
| 164
| 1,189
| 3.609756
| 0.347561
| 0.047297
| 0.040541
| 0.074324
| 0.361486
| 0.361486
| 0.361486
| 0.361486
| 0.361486
| 0.361486
| 0
| 0.039846
| 0.345669
| 1,189
| 49
| 79
| 24.265306
| 0.72108
| 0.017662
| 0
| 0.108108
| 0
| 0
| 0.01371
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0
| 0
| 0
| 0.054054
| 0.027027
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3aece6336549f29b4f897302ef630eedb4bbc785
| 609
|
py
|
Python
|
tests/plugins/test_openrectv.py
|
hymer-up/streamlink
|
f09bf6e04cddc78eceb9ded655f716ef3ee4b84f
|
[
"BSD-2-Clause"
] | 5
|
2017-03-21T19:43:17.000Z
|
2018-10-03T14:04:29.000Z
|
tests/plugins/test_openrectv.py
|
hymer-up/streamlink
|
f09bf6e04cddc78eceb9ded655f716ef3ee4b84f
|
[
"BSD-2-Clause"
] | 7
|
2016-10-13T23:29:31.000Z
|
2018-06-28T14:04:32.000Z
|
tests/plugins/test_openrectv.py
|
bumplzz69/streamlink
|
34abc43875d7663ebafa241573dece272e93d88b
|
[
"BSD-2-Clause"
] | 2
|
2016-11-24T18:37:33.000Z
|
2017-03-21T19:43:49.000Z
|
import unittest
from streamlink.plugins.openrectv import OPENRECtv
class TestPluginOPENRECtv(unittest.TestCase):
def test_can_handle_url(self):
should_match = [
'https://www.openrec.tv/live/DXRLAPSGTpx',
'https://www.openrec.tv/movie/JsDw3rAV2Rj',
]
for url in should_match:
self.assertTrue(OPENRECtv.can_handle_url(url))
def test_can_handle_url_negative(self):
should_not_match = [
'https://www.openrec.tv/',
]
for url in should_not_match:
self.assertFalse(OPENRECtv.can_handle_url(url))
| 29
| 59
| 0.648604
| 71
| 609
| 5.323944
| 0.43662
| 0.095238
| 0.126984
| 0.134921
| 0.343915
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004386
| 0.251232
| 609
| 20
| 60
| 30.45
| 0.824561
| 0
| 0
| 0
| 0
| 0
| 0.167488
| 0
| 0
| 0
| 0
| 0
| 0.125
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.3125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3aedf299041cb35f549d54f7d93a4515346863a2
| 6,474
|
py
|
Python
|
paginas/insights.py
|
Campos1989/AssureNextDataApp
|
65023e3e34a8bd8f80d53fce46778d2f4cf9b640
|
[
"MIT"
] | 1
|
2021-06-25T08:53:31.000Z
|
2021-06-25T08:53:31.000Z
|
paginas/insights.py
|
Campos1989/AssureNextDataApp
|
65023e3e34a8bd8f80d53fce46778d2f4cf9b640
|
[
"MIT"
] | null | null | null |
paginas/insights.py
|
Campos1989/AssureNextDataApp
|
65023e3e34a8bd8f80d53fce46778d2f4cf9b640
|
[
"MIT"
] | null | null | null |
# Dashboard creation script
# https://dash.plotly.com/dash-html-components
# Imports
import traceback
import pandas as pd
import plotly.express as px
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
from dash.dependencies import Input, Output
# Custom modules
from app import app
from modulos import data_operations, constant
# Generates the layout
def get_layout():
try:
        # Generates the container
layout = dbc.Container([
dbc.Row([
dbc.Col([
dbc.Card([dbc.CardHeader("Ano"),
dbc.CardBody([html.H5(data_operations.Ano2016, className = "card-text")]),], className = "shadow p-3 bg-light rounded")], width = 3),
dbc.Col([
dbc.Card([dbc.CardHeader("Seguros Previstos"),
dbc.CardBody([html.H5(data_operations.TotalNewPolicies2016, className = "card-text")]),], className = "shadow p-3 bg-light rounded")], width = 3),
dbc.Col([
dbc.Card([dbc.CardHeader("Maquinas Previstas"),
dbc.CardBody([html.H5(data_operations.MachinesInstalled2016, className = "card-text")]),], className = "shadow p-3 bg-light rounded")], width = 3),
dbc.Col([
dbc.Card([dbc.CardHeader("Lucro Medio"),
dbc.CardBody([html.H5(data_operations.LucroMedio2016, className = "card-text")]),], className = "shadow p-3 bg-light rounded")], width = 3)],
className= "pb-3"),
dbc.Row([
dbc.Col([
dbc.Card([dbc.CardHeader("Ano"),
dbc.CardBody([html.H5(data_operations.Ano2017, className = "card-text")]),], className = "shadow p-3 bg-light rounded")], width = 3),
dbc.Col([
dbc.Card([dbc.CardHeader("Seguros Previstos"),
dbc.CardBody([html.H5(data_operations.TotalNewPolicies2017, className = "card-text")]),], className = "shadow p-3 bg-light rounded")], width = 3),
dbc.Col([
dbc.Card([dbc.CardHeader("Maquinas Previstas"),
dbc.CardBody([html.H5(data_operations.MachinesInstalled2017, className = "card-text")]),], className = "shadow p-3 bg-light rounded")], width = 3),
dbc.Col([
dbc.Card([dbc.CardHeader("Lucro Medio"),
dbc.CardBody([html.H5(data_operations.LucroMedio2017, className = "card-text")]),], className = "shadow p-3 bg-light rounded")], width = 3)],
className= "pb-3"),
dbc.Row([
dbc.Card([dbc.CardBody([html.H6("Na página visão geral temos o total de seguros vendidos, maquinas instaladas e lucro médio ao longo dos anos 2009 a 2015 nos cardes. No gráfico das contratações de seguros, percebe-se uma tendência, crescente de novas aquisições até o ano de 2013, depois uma leve queda entre os anos 2015 e 2016, porem algo interessante a se notar é que em todos os anos os picos de contratações ocorrem em março, seria interessante a empresa investigar o porquê. Em relação a instalação de máquinas seguem também um padrão quase constante, onde podemos notar picos de instalações maiores nos meses de dezembro. Embora nos últimos anos (2014,2015) a empresa tenho tido menos contratações, assim como instalações de máquinas, o seu lucro médio anual não caiu, aumenta a cada ano, isso mostra uma eficiência da empresa em manter clientes antigos.", className = "card-text")]),],className = "shadow p-3 bg-light rounded"),],
className= "pb-3"),
dbc.Row([
dbc.Card([dbc.CardBody([html.H6("Na página previsões, temos o primeiro gráfico mostrando as previsões (tendências) para aquisição de novas apólices, podemos ver as previsões do modelo para todos os anos, e os pontos pretos sendo os dados atuais, pode-se notar que o modelo fez um bom trabalho, levando em consideração que as previsões estão dentro da margem de erro que é a parte sombreada, já o segundo gráfico mostra apenas os valores para os anos a serem previstos. O mesmo ocorre nos gráficos 3 e 4, esses já com relação a instalações de novas maquinas. Com essas previsões os gestores podem se preparar para os próximos dois anos se baseando no que o modelo previu como tendência. ", className = "card-text")]),],className = "shadow p-3 bg-light rounded"),],
className= "pb-3"),
dbc.Row([
dbc.Card([dbc.CardBody([html.H6("Nessa página de insights, é mostrado resumidamente o total, de novas contratações e novas instalações de maquinas assim como o lucro médio dos anos previstos, todas as essas previsões com visto na página previsões seguem um padrão, identificado pelo modelo com relação aos anos anteriores, embora a previsão para novas contratações para 2017 não esteja tão alto, o lucro médio não caiu tanto, o modelo levou em consideração a tendência que vem ocorrendo em que a empresa tem uma boa qualidade de serviço fazendo com que os clientes antigos permaneçam com os serviços a cada ano. Todas as informações acima e os gráficos são valiosas, pois os gestores conseguem agora identificar padrões e possivelmente algumas falhas, e com isso entender o que pode vir a ocorrer, se manter o trabalho que vem feito, e até buscar melhorias para que atinja valores acima do previsto.", className = "card-text")]),],className = "shadow p-3 bg-light rounded"),],
className= "pb-3")
],
fluid = True)
return layout
    except Exception:
layout = dbc.Jumbotron(
[
html.Div([
html.H1("500: Internal Server Error", className = "text-danger"),
html.Hr(),
html.P(f"Following Exception Occured: "),
html.Code(traceback.format_exc())
],
style = constant.NAVITEM_STYLE)
]
)
return layout
| 78.95122
| 998
| 0.605653
| 796
| 6,474
| 4.903266
| 0.344221
| 0.019728
| 0.028183
| 0.073277
| 0.33359
| 0.33359
| 0.33359
| 0.33359
| 0.33359
| 0.33359
| 0
| 0.023354
| 0.30553
| 6,474
| 81
| 999
| 79.925926
| 0.844751
| 0.020853
| 0
| 0.446154
| 0
| 0.046154
| 0.462255
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015385
| false
| 0
| 0.138462
| 0
| 0.184615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3af4df280e903825cd489383b9d45d6281eb6687
| 491
|
py
|
Python
|
test/log_check.py
|
talareq/selenium
|
302804aa34149ea38b42fe7b55d806211e9e4435
|
[
"Apache-2.0"
] | null | null | null |
test/log_check.py
|
talareq/selenium
|
302804aa34149ea38b42fe7b55d806211e9e4435
|
[
"Apache-2.0"
] | null | null | null |
test/log_check.py
|
talareq/selenium
|
302804aa34149ea38b42fe7b55d806211e9e4435
|
[
"Apache-2.0"
] | null | null | null |
def test_example(app):
app.login_admin()
app.get("http://localhost/litecart/admin/?app=catalog&doc=catalog&category_id=1")
    menu = app.driver.find_elements_by_css_selector("tr .row")
    for n in range(len(menu)):
element = app.driver.find_elements_by_css_selector("tr .row")
element[n].click()
for l in app.driver.get_log("browser"):
print(l)
app.driver.get("http://localhost/litecart/admin/?app=catalog&doc=catalog&category_id=1")
| 35.071429
| 96
| 0.672098
| 74
| 491
| 4.283784
| 0.486486
| 0.113565
| 0.100946
| 0.15142
| 0.624606
| 0.624606
| 0.624606
| 0.624606
| 0.624606
| 0.378549
| 0
| 0.007371
| 0.171079
| 491
| 14
| 96
| 35.071429
| 0.771499
| 0
| 0
| 0
| 0
| 0
| 0.328571
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.1
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3af692d7975c4dc7bec14dbb6e213b560c3130d8
| 7,526
|
py
|
Python
|
harvest/detailedreports.py
|
rzuris/python-harvest_apiv2
|
1a4915c2772aa9d27b74a545b14138d418566832
|
[
"MIT"
] | null | null | null |
harvest/detailedreports.py
|
rzuris/python-harvest_apiv2
|
1a4915c2772aa9d27b74a545b14138d418566832
|
[
"MIT"
] | null | null | null |
harvest/detailedreports.py
|
rzuris/python-harvest_apiv2
|
1a4915c2772aa9d27b74a545b14138d418566832
|
[
"MIT"
] | 1
|
2022-03-28T10:47:37.000Z
|
2022-03-28T10:47:37.000Z
|
# Copyright 2020 Bradbase
import itertools
from datetime import datetime, timedelta, date
from calendar import monthrange
from harvest import Harvest
from .harvestdataclasses import *
class DetailedReports(Harvest):
def __init__(self, uri, auth):
super().__init__(uri, auth)
self.client_cache = {}
self.project_cache = {}
self.task_cache = {}
self.user_cache = {}
def timeframe(self, timeframe, from_date=None, to_date=None):
quarters = [None,
[1, 3], [1, 3], [1, 3],
[4, 6], [4, 6], [4, 6],
[7, 9], [7, 9], [7, 9],
[10, 12], [10, 12], [10, 12]]
today = datetime.now().date()
timeframe_upper = timeframe.upper()
if timeframe_upper == 'THIS WEEK':
start_date = today - timedelta(days=today.weekday())
end_date = start_date + timedelta(days=6)
elif timeframe_upper == 'LAST WEEK':
today = today - timedelta(days=7)
start_date = today - timedelta(days=today.weekday())
end_date = start_date + timedelta(days=6)
elif timeframe_upper == 'THIS SEMIMONTH':
if today.day <= 15:
start_date = today.replace(day=1)
end_date = today.replace(day=15)
else:
start_date = today.replace(day=16)
end_date = today.replace(
day=monthrange(today.year, today.month)[1])
elif timeframe_upper == 'LAST SEMIMONTH':
if today.day <= 15:
if today.month == 1:
start_date = today.replace(
year=today.year-1, month=12, day=16)
end_date = today.replace(
year=today.year-1,
month=12,
day=monthrange(today.year-1, 12)[1])
else:
start_date = today.replace(month=today.month-1, day=16)
end_date = today.replace(
month=today.month-1,
day=monthrange(today.year, today.month-1)[1])
else:
start_date = today.replace(day=1)
end_date = today.replace(day=15)
elif timeframe_upper == 'THIS MONTH':
start_date = today.replace(day=1)
end_date = today.replace(
day=monthrange(today.year, today.month)[1])
elif timeframe_upper == 'LAST MONTH':
if today.month == 1:
start_date = today.replace(year=today.year-1, month=12, day=1)
end_date = today.replace(
year=today.year-1,
month=12,
day=monthrange(today.year-1, 12)[1])
else:
start_date = today.replace(month=today.month-1, day=1)
end_date = today.replace(
month=today.month-1,
day=monthrange(today.year, today.month-1)[1])
elif timeframe_upper == 'THIS QUARTER':
quarter = quarters[today.month]
start_date = date(today.year, quarter[0], 1)
end_date = date(
today.year,
quarter[1],
monthrange(today.year, quarter[1])[1])
elif timeframe_upper == 'LAST QUARTER':
if today.month <= 3:
quarter = [10, 12]
today = today.replace(year=today.year-1)
else:
quarter = quarters[today.month-3]
start_date = date(today.year, quarter[0], 1)
end_date = date(
today.year,
quarter[1],
monthrange(today.year, quarter[1])[1])
elif timeframe_upper == 'THIS YEAR':
start_date = date(today.year, 1, 1)
end_date = date(today.year, 12, 31)
elif timeframe_upper == 'LAST YEAR':
start_date = date(today.year-1, 1, 1)
end_date = date(today.year-1, 12, 31)
elif timeframe_upper == 'ALL TIME':
return {}
# Not currently supported
elif timeframe_upper == 'CUSTOM':
raise ValueError("Custom timeframe not currently supported.")
else:
raise ValueError(
"unknown argument \'timeframe\': \'%s\'" % timeframe_upper)
return {'from_date': start_date, 'to_date': end_date}
    # "team" is a list of user ids (each entry maps to the user_id filter below)
def detailed_time(self, time_frame='All Time', clients=[None], projects=[None], tasks=[None], team=[None], include_archived_items=False, group_by='Date', activeProject_only=False):
arg_configs = []
time_entry_results = DetailedTimeReport([])
for element in itertools.product(clients, projects, team):
kwargs = {}
            if element[0] is not None:
                kwargs['client_id'] = element[0]
            if element[1] is not None:
                kwargs['project_id'] = element[1]
            if element[2] is not None:
                kwargs['user_id'] = element[2]
kwargs = dict(self.timeframe(time_frame), **kwargs)
arg_configs.append(kwargs)
tmp_time_entry_results = []
if arg_configs == []:
time_entries = self.time_entries()
tmp_time_entry_results.extend(time_entries.time_entries)
if time_entries.total_pages > 1:
for page in range(2, time_entries.total_pages + 1):
time_entries = self.time_entries(page=page)
tmp_time_entry_results.extend(time_entries.time_entries)
        else:
            for config in arg_configs:
                time_entries = self.time_entries(**config)
                tmp_time_entry_results.extend(time_entries.time_entries)
                if time_entries.total_pages > 1:
                    for page in range(2, time_entries.total_pages + 1):
                        time_entries = self.time_entries(page=page, **config)
                        tmp_time_entry_results.extend(time_entries.time_entries)
for time_entry in tmp_time_entry_results:
user = None
if time_entry.user.id not in self.user_cache.keys():
user = self.get_user(time_entry.user.id)
self.user_cache[time_entry.user.id] = user
else:
user = self.user_cache[time_entry.user.id]
hours = time_entry.hours
billable_amount = 0.0
cost_amount = 0.0
billable_rate = time_entry.billable_rate
cost_rate = time_entry.cost_rate
if hours is not None:
if billable_rate is not None:
billable_amount = billable_rate * hours
if cost_rate is not None:
cost_amount = cost_rate * hours
time_entry_results.detailed_time_entries.append( DetailedTimeEntry(date=time_entry.spent_date, client=time_entry.client.name, project=time_entry.project.name, project_code=time_entry.project.code, task=time_entry.task.name, notes=time_entry.notes, hours=hours, billable=str(time_entry.billable), invoiced='', approved='', first_name=user.first_name, last_name=user.last_name, roles=user.roles, employee='Yes', billable_rate=billable_rate, billable_amount=billable_amount, cost_rate=cost_rate, cost_amount=cost_amount, currency=time_entry.client.currency, external_reference_url=time_entry.external_reference) )
return time_entry_results
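For orientation, a hedged usage sketch follows. The endpoint and the auth object are placeholders I am assuming from the package layout, not the library's confirmed API, so the lines are kept as comments.
# reports = DetailedReports(
#     "https://api.harvestapp.com/api/v2",   # assumed Harvest v2 endpoint
#     auth,                                  # any auth object accepted by Harvest
# )
# reports.timeframe("LAST MONTH")   # -> {'from_date': date(...), 'to_date': date(...)}
# report = reports.detailed_time(time_frame="This Month")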
| 40.245989
| 622
| 0.558464
| 875
| 7,526
| 4.602286
| 0.152
| 0.058108
| 0.063571
| 0.041718
| 0.484728
| 0.455923
| 0.433077
| 0.39111
| 0.377204
| 0.365533
| 0
| 0.027032
| 0.336434
| 7,526
| 186
| 623
| 40.462366
| 0.779335
| 0.007972
| 0
| 0.385135
| 0
| 0
| 0.03458
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02027
| false
| 0
| 0.033784
| 0
| 0.081081
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3af6f1205b131d37a985d1c51f9e6d5d18cb4383
| 328
|
py
|
Python
|
bibliopixel/commands/kill.py
|
rec/leds
|
ed5fd11ed155e7008d4ef6d5b3d82cd7f8b3ed6a
|
[
"MIT"
] | 253
|
2015-01-03T23:17:57.000Z
|
2021-12-14T02:31:08.000Z
|
bibliopixel/commands/kill.py
|
rec/leds
|
ed5fd11ed155e7008d4ef6d5b3d82cd7f8b3ed6a
|
[
"MIT"
] | 879
|
2015-01-11T16:07:25.000Z
|
2021-12-10T16:24:31.000Z
|
bibliopixel/commands/kill.py
|
rec/leds
|
ed5fd11ed155e7008d4ef6d5b3d82cd7f8b3ed6a
|
[
"MIT"
] | 71
|
2015-01-04T01:02:47.000Z
|
2022-03-25T18:30:10.000Z
|
"""
Send a kill signal to a BiblioPixel process running on this
machine to abruptly kill it
DEPRECATED: use
.. code-block:: bash
$ kill -kill `bpa-pid`
"""
DESCRIPTION = """
Example:
.. code-block:: bash
$ bp kill
"""
from .. util.signal_handler import make_command
add_arguments, run = make_command('SIGKILL')
| 13.666667
| 59
| 0.682927
| 45
| 328
| 4.888889
| 0.733333
| 0.081818
| 0.118182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.195122
| 328
| 23
| 60
| 14.26087
| 0.833333
| 0.469512
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3af703dc54a66f683dbd47d8d1a850161cd49620
| 5,645
|
py
|
Python
|
DataProcessing.py
|
manohar9600/The-Movies-Recommendation
|
138587220ff6bef4c856ea905af5b7e9574e5964
|
[
"MIT"
] | null | null | null |
DataProcessing.py
|
manohar9600/The-Movies-Recommendation
|
138587220ff6bef4c856ea905af5b7e9574e5964
|
[
"MIT"
] | null | null | null |
DataProcessing.py
|
manohar9600/The-Movies-Recommendation
|
138587220ff6bef4c856ea905af5b7e9574e5964
|
[
"MIT"
] | null | null | null |
# Standard imports
import json
import ast
# Third party imports
import pandas as pd
from tabulate import tabulate
# Local application imports
from utils.logger import logger
class Dataloding:
"""Loads movie lens and TMDB data from data folder.
"""
def __init__(self, data_folder='data'):
self.data_folder = data_folder.strip('/')
self.load_data()
def load_data(self) -> None:
ratings_file_path = self.data_folder + '/ratings.csv'
self.ratings_df = pd.read_csv(ratings_file_path)
logger.info("ratings: ")
logger.info(tabulate(self.ratings_df.head(), headers='keys',
tablefmt='pretty'))
logger.info("successfully loaded ratings. entries: %s" % \
self.ratings_df.shape[0])
movies_data_path = self.data_folder + '/movies_metadata.csv'
self.movies_df = pd.read_csv(movies_data_path)
self.movies_df = self.transform_movies_df(self.movies_df)
logger.info("successfully loaded movies metadata. entries: %s" % \
self.movies_df.shape[0])
keywords_data_path = self.data_folder + '/keywords.csv'
self.keywords_df = pd.read_csv(keywords_data_path)
self.keywords_df = self.transform_keywords_df(self.keywords_df)
logger.info("successfully loaded movie keywords data. entries: %s" \
% self.keywords_df.shape[0])
links_data_path = self.data_folder + '/links.csv'
self.links_df = pd.read_csv(links_data_path)
logger.info("movie links: ")
logger.info(tabulate(self.links_df.head(), headers='keys',
tablefmt='pretty'))
logger.info("successfully loaded movie links data. entries: %s" \
% self.links_df.shape[0])
credits_data_path = self.data_folder + '/credits.csv'
self.credits_df = pd.read_csv(credits_data_path)
self.credits_df = self.transform_credits_df(self.credits_df)
logger.info("successfully loaded credits data. entries: %s" \
% self.credits_df.shape[0])
logger.info("successfully loaded all data")
def transform_movies_df(self, movies_df) -> pd.DataFrame:
"""Converts non strings like jsons or other data types to string or list.
and also minimizes data size.
Args:
movies (DataFrame): movies data in df format
Returns:
DataFrame: dataframe with better data structures.
"""
self.id_collection = {}
self.id_genre = {}
for index, row in movies_df.iterrows():
if not pd.isna(row['belongs_to_collection']) and \
row['belongs_to_collection'].strip():
collection_str = row['belongs_to_collection']
collection_json = ast.literal_eval(collection_str)
movies_df.loc[index, 'belongs_to_collection'] = \
collection_json['id']
self.id_collection[collection_json['id']] = \
collection_json['name']
else:
movies_df.loc[index, 'belongs_to_collection'] = -1
if not pd.isna(row['genres']) and \
row['genres'].strip():
genres_str = row['genres']
genres_list = ast.literal_eval(genres_str)
movies_df.at[index, 'genres'] = [g['id'] for g in genres_list]
for genre in genres_list:
self.id_genre[genre['id']] = genre['name']
else:
                movies_df.at[index, 'genres'] = []
return movies_df
def transform_keywords_df(self, keywords_df) -> pd.DataFrame:
"""Converts keywords data in json format to list format.
storing only ids in keywords_df and separate dictionary for mappings
Args:
keywords_df (pd.DataFrame): raw keywords data
Returns:
pd.DataFrame: transformed dataframe
"""
self.id_keyword = {}
for index, row in keywords_df.iterrows():
keywords_json = row['keywords']
keyword_ids = []
if keywords_json.strip():
keywords_json = ast.literal_eval(keywords_json)
for key in keywords_json:
keyword_ids.append(key['id'])
self.id_keyword[key['id']] = key['name']
keywords_df.at[index, 'keywords'] = keyword_ids
return keywords_df
def transform_credits_df(self, credits_df) -> pd.DataFrame:
"""Converts json format in df to list format. Stores only ids in df
and ids mapping will be self.id_credit(dict)
Args:
credits_df (pd.DataFrame): raw credits data
Returns:
pd.DataFrame: transformed data
"""
self.id_credit = {}
for index, row in credits_df.iterrows():
cast_json = row['cast']
if cast_json.strip():
cast_json = ast.literal_eval(cast_json)
cast_ids = []
for cast in cast_json:
self.id_credit[cast['id']] = cast['name']
cast_ids.append(cast['id'])
credits_df.at[index, 'cast'] = cast_ids
crew_json = row['crew']
if crew_json.strip():
crew_json = ast.literal_eval(crew_json)
credits_df.at[index, 'crew'] = crew_json
return credits_df
class DataProcessing:
def __init__(self) -> None:
pass
if __name__ == '__main__':
data = Dataloding()
| 37.384106
| 81
| 0.583702
| 665
| 5,645
| 4.718797
| 0.183459
| 0.033142
| 0.03123
| 0.053537
| 0.232314
| 0.129382
| 0.059911
| 0.037604
| 0.037604
| 0.037604
| 0
| 0.001551
| 0.314615
| 5,645
| 150
| 82
| 37.633333
| 0.809512
| 0.137998
| 0
| 0.041237
| 0
| 0
| 0.124148
| 0.022359
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061856
| false
| 0.010309
| 0.051546
| 0
| 0.164948
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aae6433bbacb013e1d4734b577daca4627358efe
| 421
|
py
|
Python
|
easy/1710-maximum-units-on-a-truck.py
|
changmeng72/leecode_python3
|
8384f52f0dd74b06b1b6aefa277dde6a228ff5f3
|
[
"MIT"
] | null | null | null |
easy/1710-maximum-units-on-a-truck.py
|
changmeng72/leecode_python3
|
8384f52f0dd74b06b1b6aefa277dde6a228ff5f3
|
[
"MIT"
] | null | null | null |
easy/1710-maximum-units-on-a-truck.py
|
changmeng72/leecode_python3
|
8384f52f0dd74b06b1b6aefa277dde6a228ff5f3
|
[
"MIT"
] | null | null | null |
from typing import List


class Solution:
def maximumUnits(self, boxTypes: List[List[int]], truckSize: int) -> int:
boxTypes.sort(key=lambda x: x[1],reverse = True)
r = 0
remaining = truckSize
for boxType in boxTypes:
b = min(remaining,boxType[0])
r += b * boxType[1]
remaining -= b
if remaining==0:
break
return r
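A minimal check of the greedy (my own examples, matching the standard problem statement): with truckSize 4 and box types [[1,3],[2,2],[3,1]], taking 1 box of 3 units, 2 boxes of 2 units and 1 box of 1 unit gives 1*3 + 2*2 + 1*1 = 8.
# Usage sketch: the sort-by-units-descending greedy fills the truck with the
# densest boxes first.
assert Solution().maximumUnits([[1, 3], [2, 2], [3, 1]], 4) == 8
assert Solution().maximumUnits([[5, 10], [2, 5], [4, 7], [3, 9]], 10) == 91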
| 32.384615
| 78
| 0.489311
| 46
| 421
| 4.478261
| 0.586957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020161
| 0.410926
| 421
| 13
| 79
| 32.384615
| 0.810484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aae69a1c9858fa2062e072c3ac6fac72ce0dc685
| 334
|
py
|
Python
|
OpenCV/assign1_2.py
|
Aanal2901/Autumn-of-Automation
|
c6ea432d3608652254b841c392dde6aa466b2df4
|
[
"MIT"
] | null | null | null |
OpenCV/assign1_2.py
|
Aanal2901/Autumn-of-Automation
|
c6ea432d3608652254b841c392dde6aa466b2df4
|
[
"MIT"
] | null | null | null |
OpenCV/assign1_2.py
|
Aanal2901/Autumn-of-Automation
|
c6ea432d3608652254b841c392dde6aa466b2df4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 24 00:44:49 2020
@author: Aanal Sonara
"""
import cv2
cap = cv2.VideoCapture(0)
while cap.isOpened():
_, frame = cap.read()
cv2.imshow("live video", frame)
    k = cv2.waitKey(1) & 0xFF
if k==27:
break
cap.release()
cv2.destroyAllWindows()
| 17.578947
| 36
| 0.577844
| 46
| 334
| 4.173913
| 0.804348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094262
| 0.269461
| 334
| 19
| 37
| 17.578947
| 0.692623
| 0.242515
| 0
| 0
| 0
| 0
| 0.04386
| 0
| 0
| 0
| 0.017544
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aae6ab08212b4b7afe1925bc3ddbf0db7587516e
| 5,515
|
py
|
Python
|
site_parser/site_parser.py
|
TheStalkerDen/Comp-Architecture-Lab1
|
ad92aed0c639cb223adc033aba5f79cc6a8f5344
|
[
"MIT"
] | null | null | null |
site_parser/site_parser.py
|
TheStalkerDen/Comp-Architecture-Lab1
|
ad92aed0c639cb223adc033aba5f79cc6a8f5344
|
[
"MIT"
] | null | null | null |
site_parser/site_parser.py
|
TheStalkerDen/Comp-Architecture-Lab1
|
ad92aed0c639cb223adc033aba5f79cc6a8f5344
|
[
"MIT"
] | null | null | null |
import configparser
import os
import tempfile
import urllib.request
import xml.dom.minidom
import xml.etree.ElementTree as ET
from urllib.error import HTTPError, URLError
from urllib.parse import urlparse
from bs4 import BeautifulSoup
from tinytag import TinyTag
import gevent
dir_path = os.path.dirname(os.path.realpath(__file__))
CONFIG = configparser.ConfigParser()
CONFIG.read(os.path.join(dir_path, '../setting.cfg'))
USE_GEVENT = CONFIG['common'].getboolean('use_gevent')
def get_site_list_from_file(file_name):
root = ET.parse(file_name).getroot()
site_list = []
for child in root:
if child.tag == "site":
site_list.append(child.text)
return site_list
def get_mp3_genre_and_title(mp3_filename):
audio_tag = TinyTag.get(mp3_filename)
if audio_tag.genre is None:
audio_tag.genre = "Undefined"
if audio_tag.title is None:
audio_tag.title = "No-title"
return audio_tag.genre, audio_tag.title
def collect_all_links_from_html(html_page):
soup = BeautifulSoup(html_page, 'html.parser')
return [x.get('href') for x in soup.find_all('a')]
def get_all_links_from_url(url):
try:
main_page_req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
html_page = urllib.request.urlopen(main_page_req)
return collect_all_links_from_html(html_page)
except urllib.error.HTTPError:
return []
def convert_link_to_absolute(base_url, link):
url = urllib.parse.urljoin(base_url, link)
    parsed_url = urllib.parse.urlparse(url)
if parsed_url.scheme != "file":
return parsed_url.scheme + "://" + parsed_url.netloc + urllib.parse.quote(parsed_url.path)
else:
return url
def convert_links_to_absolute(base_url, links):
return [convert_link_to_absolute(base_url, link) for link in links]
def get_mp3_links(links, digest_level, *, use_gevent):
visited_links = set()
mp3_links = []
def _get_mp3_links(url, level):
visited_links.add(url)
_links = convert_links_to_absolute(url, get_all_links_from_url(url))
links_to_visit = []
for link in _links:
if link.endswith(".mp3"):
mp3_links.append(link)
elif level > 1:
req = urllib.request.Request(url, method="HEAD", headers={'User-Agent': 'Mozilla/5.0'})
response = urllib.request.urlopen(req)
if link.endswith("html") or response.getheader("Content-Type").startswith("text/html"):
links_to_visit.append(link)
if level > 1:
for link in links_to_visit:
if link not in visited_links:
_get_mp3_links(link, level - 1)
if use_gevent:
jobs = [gevent.spawn(_get_mp3_links, url, digest_level) for url in links]
gevent.joinall(jobs)
else:
for url in links:
_get_mp3_links(url, digest_level)
return mp3_links
def analyze_mp3_from_links(mp3_links, *, use_gevent):
analyzed_mp3_sorted_by_genre = {}
tmp_dir = tempfile.TemporaryDirectory(suffix='mp3')
def _analyze_mp3(mp3_link):
file_name = os.path.basename(urllib.parse.urlparse(mp3_link).path)
try:
print(f"Load {file_name}")
            req = urllib.request.Request(mp3_link, headers={'User-Agent': 'Mozilla/5.0', "Range": "bytes=0-4000"})
with urllib.request.urlopen(req) as response, \
tempfile.NamedTemporaryFile(mode="w+b", delete=False, dir=tmp_dir.name) as out_file:
data = response.read()
out_file.write(data)
tmp_filename = out_file.name
genre, title = get_mp3_genre_and_title(tmp_filename)
if genre not in analyzed_mp3_sorted_by_genre:
analyzed_mp3_sorted_by_genre[genre] = []
analyzed_mp3_sorted_by_genre[genre].append({"filename": file_name, "title": title, "link": mp3_link})
except URLError:
pass
if use_gevent:
jobs = [gevent.spawn(_analyze_mp3, mp3_link) for mp3_link in mp3_links]
gevent.joinall(jobs)
else:
for mp3_link in mp3_links:
_analyze_mp3(mp3_link)
tmp_dir.cleanup()
return analyzed_mp3_sorted_by_genre
def generate_xml_res_string(sorted_by_genre_mp3):
root = ET.Element('Playlist')
for key, value in sorted_by_genre_mp3.items():
genre_node = ET.SubElement(root, 'Genre', {'name': key})
for mp3_info in value:
mp3_info_node = ET.SubElement(genre_node, 'music')
ET.SubElement(mp3_info_node, 'filename').text = mp3_info['filename']
ET.SubElement(mp3_info_node, 'title').text = mp3_info['title']
ET.SubElement(mp3_info_node, 'link').text = mp3_info['link']
mydata = ET.tostring(root, encoding="unicode")
preparsed = xml.dom.minidom.parseString(mydata)
return preparsed.toprettyxml().encode("utf-8")
def generate_xml_result_in_result_file(sorted_by_genre_mp3, result_file):
final_res = generate_xml_res_string(sorted_by_genre_mp3)
result_file.write(final_res)
def scrape_mp3_from_sites(input_filename, digest_level):
site_list = get_site_list_from_file(input_filename)
mp3_links = get_mp3_links(site_list, digest_level, use_gevent=USE_GEVENT)
analyzed_res = analyze_mp3_from_links(mp3_links, use_gevent=USE_GEVENT)
with open("../result.xml", "wb") as res_file:
generate_xml_result_in_result_file(analyzed_res, res_file)
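A hypothetical entry point, kept commented because the file name is my placeholder and the call performs real network requests.
# sites.xml is assumed to follow the shape get_site_list_from_file() expects:
# a root element whose <site> children hold start URLs.
# if __name__ == '__main__':
#     scrape_mp3_from_sites('sites.xml', digest_level=2)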
| 36.282895
| 114
| 0.677244
| 766
| 5,515
| 4.562663
| 0.223238
| 0.032046
| 0.033476
| 0.027182
| 0.296996
| 0.210587
| 0.095279
| 0.041202
| 0
| 0
| 0
| 0.015527
| 0.217588
| 5,515
| 151
| 115
| 36.523179
| 0.794438
| 0
| 0
| 0.07377
| 0
| 0
| 0.056029
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.106557
| false
| 0.008197
| 0.090164
| 0.008197
| 0.286885
| 0.008197
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aae7a8e4bea1bfaeb77207d29d33884bce510446
| 4,341
|
py
|
Python
|
project/encoder_toy.py
|
tkosht/wikiencoder
|
c1744e60e902949e1926c9efe0c24eb3ac5f00fd
|
[
"MIT"
] | null | null | null |
project/encoder_toy.py
|
tkosht/wikiencoder
|
c1744e60e902949e1926c9efe0c24eb3ac5f00fd
|
[
"MIT"
] | null | null | null |
project/encoder_toy.py
|
tkosht/wikiencoder
|
c1744e60e902949e1926c9efe0c24eb3ac5f00fd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import numpy
import torch
import torchnet
from tqdm import tqdm
from torchnet.engine import Engine
from torchnet.logger import VisdomPlotLogger, VisdomLogger
import project.deco as deco
from project.sequoder import SequenceEncoder, get_loss
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--debug", action="store_true", default=False,
help="if you specified, execute as debug mode. default: 'False'")
parser.add_argument("--trace", action="store_true", default=False,
help="if you specified, execute as trace mode. default: 'False'")
# parser.add_argument("-i", "--indir", type=str, default="data/parsed",
# help="you can specify the string of the input directory"
# " must includes subdir 'doc/', and 'title/'. default: 'data/parsed'")
parser.add_argument("--epochs", type=int, default="500")
parser.add_argument("--lr", type=float, default="0.001")
parser.add_argument("--weight-decay", type=float, default="0")
args = parser.parse_args()
return args
def get_toydata(n_data, device):
toydata = []
for _n in range(n_data):
t = numpy.random.randint(5) + 2
        seq = [torch.randn(1, 3) for _t in range(t)]  # make a sequence of random length t (2 to 6)
seq = torch.stack(seq)
seq = seq.to(device)
toydata.append(seq)
return toydata
def reverse_tensor(tensor, device=torch.device("cpu")):
indices = [i for i in range(tensor.size(0)-1, -1, -1)]
indices = torch.LongTensor(indices).to(device)
rev_tensor = tensor.index_select(0, indices)
return rev_tensor
@deco.trace
@deco.excep(return_code=True)
def main():
args = get_args()
device = torch.device("cuda:1")
# device = torch.device("cpu")
model = SequenceEncoder(3, 2, device)
n_data = 10
data = get_toydata(n_data, device)
teacher = [reverse_tensor(seq, device) for seq in data]
training_data = (data, teacher)
optim_params = {
"params": model.parameters(),
"weight_decay": args.weight_decay,
"lr": args.lr,
}
optimizer = torch.optim.Adam(**optim_params)
meter_loss = torchnet.meter.AverageValueMeter()
port = 8097
train_loss_logger = VisdomPlotLogger(
'line', port=port, opts={'title': 'encoder_toy - train loss'})
def network(sample):
x = sample[0] # sequence
t = sample[1] # target sequence
y, mu, logvar = model(x)
loss = get_loss(y, t, mu, logvar)
o = y, mu, logvar
return loss, o
def reset_meters():
meter_loss.reset()
def on_sample(state):
state['sample'] = list(state['sample'])
state['sample'].append(state['train'])
model.zero_grad()
model.init_hidden()
    def on_forward(state):
        meter_loss.add(state['loss'].data)
def on_start_epoch(state):
reset_meters()
if 'dataset' not in state:
dataset = state['iterator']
state['dataset'] = dataset
dataset = state['dataset']
state['iterator'] = tqdm(zip(*dataset))
def on_end_epoch(state):
loss_value = meter_loss.value()[0]
epoch = state['epoch']
print(f'loss[{epoch}]: {loss_value:.4f}')
train_loss_logger.log(epoch, loss_value)
dataset = state['dataset']
state['iterator'] = tqdm(zip(*dataset))
engine = Engine()
engine.hooks['on_sample'] = on_sample
engine.hooks['on_forward'] = on_forward
engine.hooks['on_start_epoch'] = on_start_epoch
engine.hooks['on_end_epoch'] = on_end_epoch
engine.train(network, training_data, maxepoch=args.epochs, optimizer=optimizer)
# loss_records = model.do_train(training_data, args.epochs, optimizer)
# def save_fig(x, img_file):
# pyplot.plot(range(len(x)), x)
# pathlib.Path(img_file).parent.mkdir(parents=True, exist_ok=True)
# pyplot.savefig(img_file)
# save_fig(loss_records, "results/loss_toydata.png")
if __name__ == '__main__':
r = main()
if r != 0:
logfile = deco.logger.logger.handlers[0].baseFilename
print(f"Abort with error. see logfile '{logfile}'")
exit(r)
| 31.919118
| 95
| 0.628657
| 562
| 4,341
| 4.701068
| 0.33452
| 0.020439
| 0.038607
| 0.028388
| 0.116578
| 0.100681
| 0.0757
| 0.0757
| 0.040878
| 0.040878
| 0
| 0.010235
| 0.234739
| 4,341
| 135
| 96
| 32.155556
| 0.785069
| 0.147431
| 0
| 0.041237
| 0
| 0
| 0.124254
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103093
| false
| 0
| 0.092784
| 0
| 0.237113
| 0.020619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aae827e1c08cf7a4934daf6680f0a298b8d6f043
| 18,420
|
py
|
Python
|
families/supplychain_python/sawtooth_supplychain/processor/handler.py
|
trust-tech/sawtooth-core
|
fcd66ff2f13dba51d7642049e0c0306dbee3b07d
|
[
"Apache-2.0"
] | null | null | null |
families/supplychain_python/sawtooth_supplychain/processor/handler.py
|
trust-tech/sawtooth-core
|
fcd66ff2f13dba51d7642049e0c0306dbee3b07d
|
[
"Apache-2.0"
] | null | null | null |
families/supplychain_python/sawtooth_supplychain/processor/handler.py
|
trust-tech/sawtooth-core
|
fcd66ff2f13dba51d7642049e0c0306dbee3b07d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import logging
import json
from sawtooth_sdk.processor.state import StateEntry
from sawtooth_sdk.processor.exceptions import InvalidTransaction
from sawtooth_sdk.processor.exceptions import InternalError
from sawtooth_sdk.protobuf.transaction_pb2 import TransactionHeader
import sawtooth_supplychain.addressing as addressing
LOGGER = logging.getLogger(__name__)
SUPPLYCHAIN_VERSION = '0.5'
SUPPLYCHAIN_NAMESPACE = 'Supplychain'
def state_get_single(state, uid):
entries_list = state.get([uid])
if entries_list:
return json.loads(entries_list[0].data.decode())
return None
def state_put_single(state, uid, data):
addresses = state.set(
[StateEntry(address=uid,
data=json.dumps(data, sort_keys=True).encode())])
    if not addresses or uid not in addresses:
        raise InternalError("Error setting state, addresses returned: %s"
                            % addresses)
class SupplychainHandler(object):
def __init__(self):
pass
@property
def family_name(self):
return 'sawtooth_supplychain'
@property
def family_versions(self):
return ['1.0']
@property
def encodings(self):
return ['application/json']
@property
def namespaces(self):
return [addressing.get_namespace()]
def apply(self, transaction, state):
payload = json.loads(transaction.payload.decode())
LOGGER.debug("SupplychainHandler.apply: %s", repr(payload))
if payload['MessageType'] == 'Record':
RecordHandler.apply(transaction, state)
elif payload['MessageType'] == 'Agent':
AgentHandler.apply(transaction, state)
class RecordHandler(object):
@classmethod
def apply(cls, transaction, state):
payload = json.loads(transaction.payload.decode())
LOGGER.debug("apply payload: %s", repr(payload))
tnx_action = payload.get('Action', None)
txnrecord_id = payload.get('RecordId', None)
header = TransactionHeader()
header.ParseFromString(transaction.header)
tnx_originator = addressing.get_agent_id(header.signer_pubkey)
# Retrieve the stored record data if an ID is provided.
record_id = txnrecord_id
record_store_key = record_id
record_store = state_get_single(state, record_store_key)
# Check Action
if tnx_action == 'Create':
if txnrecord_id is None:
raise InvalidTransaction(
'Record id expected for CreateRecord')
record_store = {}
cls.create_record(tnx_originator, record_id, payload,
state, record_store)
elif tnx_action == "CreateApplication":
if txnrecord_id is None:
raise InvalidTransaction(
'Record id expected for create_application')
cls.create_application(tnx_originator, record_id, payload,
state, record_store)
elif tnx_action == "AcceptApplication":
if txnrecord_id is None:
raise InvalidTransaction(
'Record id expected for accept_application')
cls.accept_application(tnx_originator, record_id, payload,
state, record_store)
elif tnx_action == "RejectApplication":
if txnrecord_id is None:
raise InvalidTransaction(
'Record id expected for reject_application')
cls.reject_application(tnx_originator, record_id, payload,
state, record_store)
elif tnx_action == "CancelApplication":
if txnrecord_id is None:
raise InvalidTransaction(
'Record id expected for cancel_application')
cls.cancel_application(tnx_originator, record_id, payload,
state, record_store)
elif tnx_action == "Finalize":
if txnrecord_id is None:
raise InvalidTransaction(
'Record id expected for Finalize')
cls.finalize_record(tnx_originator, record_id, payload,
state, record_store)
else:
raise InvalidTransaction('Action {} is not valid'.
format(tnx_action))
# Store the record data back
state_put_single(state, record_store_key, record_store)
@classmethod
def create_record(cls, originator, record_id, payload, state, my_store):
sensor_id = payload.get('Sensor', None)
sensor_idx = None
if sensor_id is not None:
sensor_idx = addressing.get_sensor_id(sensor_id)
record_info = {}
# Owner set below
record_info['CurrentHolder'] = originator
# Custodians set below
record_info['Parents'] = payload.get('Parents', None)
record_info['Timestamp'] = payload.get('Timestamp')
record_info['Sensor'] = sensor_idx
record_info['Final'] = False
record_info['ApplicationFrom'] = None
record_info['ApplicationType'] = None
record_info['ApplicationTerms'] = None
record_info['ApplicationStatus'] = None
record_info['EncryptedConsumerAcccessible'] = None
record_info['EncryptedOwnerAccessible'] = None
my_store['RecordInfo'] = record_info
my_store['StoredTelemetry'] = payload.get('Telemetry', {})
my_store['DomainAttributes'] = payload.get('DomainAttributes', {})
# Determine if this record has parents
has_parents = record_info['Parents'] is not None and \
len(record_info['Parents']) > 0
# If there are parents update Owner and Custodian depending on the
# ApplicationType
if has_parents:
# Use the first parent
parent_id = record_info['Parents'][0]
parent_store = state_get_single(state, parent_id)
if parent_store['RecordInfo']['ApplicationType'] == "Owner":
# Transfer ownership - in this case there should be
# no custodians.
                if parent_store['RecordInfo']['Custodians']:
                    raise InvalidTransaction(
                        "Cannot transfer ownership when a custodian is present")
record_info['Owner'] = originator
record_info['Custodians'] = []
else:
# Transfer custodianship
record_info['Owner'] = \
parent_store['RecordInfo']['Owner']
record_info['Custodians'] = \
list(parent_store['RecordInfo']['Custodians'])
# Check the next to last element of the Custodians array. If it
# is the new holder, then this is a 'pop' operation. It's also
# a pop if here is one custodian and the applicant is the
# owner.
is_pop = False
if len(record_info['Custodians']) > 1 and \
record_info['Custodians'][-2] == originator:
is_pop = True
elif len(record_info['Custodians']) == 1 and \
record_info['Owner'] == originator:
is_pop = True
if is_pop:
record_info['Custodians'].pop()
else:
record_info['Custodians'].append(originator)
else:
# No parents, just create a new record
record_info['Owner'] = originator
record_info['Custodians'] = []
# If there are parents mark them as final.
if has_parents:
for parent in record_info['Parents']:
parent_store = state_get_single(state, parent)
parent_store['RecordInfo']['Final'] = True
state_put_single(state, parent, parent_store)
# Remove the record from the former owner - even if this
# is a custodian transfer we need to store the new
# record ID with the owner.
AgentHandler.remove_record_owner(
state,
parent_store['RecordInfo']["Owner"],
parent)
# Remove the previous holder
AgentHandler.remove_record_holder(
state,
parent_store['RecordInfo']["CurrentHolder"],
parent)
# Remove the accepted application from the new owner
AgentHandler.remove_accepted_application(
state,
parent_store['RecordInfo']['ApplicationFrom'],
parent)
# Record the owner of the new record in the agent
AgentHandler.add_record_owner(
state, record_info["Owner"], record_id,
record_info["Owner"] == record_info["CurrentHolder"])
# Record the new record holder in the agent
AgentHandler.add_record_holder(
state, record_info["CurrentHolder"], record_id)
# Register the sensor
if sensor_id is not None:
if state_get_single(state, sensor_idx) is not None:
sensor_store = state_get_single(state, sensor_idx)
else:
sensor_store = {}
sensor_store["Record"] = record_id
sensor_store["Name"] = sensor_id
state_put_single(state, sensor_idx, sensor_store)
@classmethod
def create_application(cls, originator, record_id,
payload, state, my_store):
LOGGER.debug('create_application: %s', my_store)
record_info = my_store['RecordInfo']
LOGGER.debug(record_info)
# Agent ID who initiated the application
record_info['ApplicationFrom'] = originator
# custodian or owner
record_info['ApplicationType'] = payload['ApplicationType']
# Should be encrypted?
record_info['ApplicationTerms'] = payload['ApplicationTerms']
# To indicate acceptance (or not) of the application.
record_info['ApplicationStatus'] = "Open"
LOGGER.debug(record_info)
# Record the new application in the current holder
AgentHandler.add_open_application(state,
record_info['ApplicationFrom'],
record_info['CurrentHolder'],
record_id)
@classmethod
def accept_application(cls, originator, record_id, payload, state,
my_store):
# Mark the application as accepted. After this the new
# owner/custodian is able to make a new record with this
# record as the parent.
record_info = my_store['RecordInfo']
record_info['ApplicationStatus'] = "Accepted"
# Record the accepted application in the new holder
AgentHandler.remove_open_application(state,
record_info['ApplicationFrom'],
record_info['CurrentHolder'],
record_id)
AgentHandler.add_accepted_application(state,
record_info['ApplicationFrom'],
record_id,
record_info['Sensor'])
@classmethod
def reject_application(cls, originator, record_id, payload, state,
my_store):
# Mark the application as rejected.
record_info = my_store['RecordInfo']
record_info['ApplicationStatus'] = "Rejected"
# Record the rejected application in the agent
AgentHandler.remove_open_application(state,
record_info['ApplicationFrom'],
record_info['CurrentHolder'],
record_id)
@classmethod
def cancel_application(cls, originator, record_id, payload, state,
my_store):
# Mark the application as cancelled.
record_info = my_store['RecordInfo']
record_info['ApplicationStatus'] = "Cancelled"
# Record the cancelled application in the agent
AgentHandler.remove_open_application(state,
record_info['ApplicationFrom'],
record_info['CurrentHolder'],
record_id)
@classmethod
def finalize_record(cls, originator, record_id, payload, state, my_store):
record_info = my_store['RecordInfo']
record_info['Final'] = True
# Remove the record from the agent
if record_info['Owner'] != originator:
raise InvalidTransaction('Only the current owner can finalize')
if record_info['CurrentHolder'] != originator:
raise InvalidTransaction('Only the current holder can finalize')
AgentHandler.remove_record_owner(state, originator, record_id)
AgentHandler.remove_record_holder(state, originator, record_id)
class AgentHandler(object):
@classmethod
def apply(cls, transaction, state):
payload = json.loads(transaction.payload.decode())
LOGGER.debug("AgentHandler.apply payload: %s", repr(payload))
tnx_action = payload.get('Action', None)
tnx_name = payload.get('Name', None)
tnx_type = payload.get('Type', None)
tnx_url = payload.get('Url', None)
header = TransactionHeader()
header.ParseFromString(transaction.header)
uid = addressing.get_agent_id(header.signer_pubkey)
if tnx_name is None or tnx_name == '':
raise InvalidTransaction('Name not set')
if tnx_action == "Create":
LOGGER.debug("AgentHandler.apply CREATE")
if state_get_single(state, uid) is not None:
raise InvalidTransaction('Agent ID already registered')
my_store = {}
my_store['Name'] = tnx_name
my_store['Type'] = tnx_type
my_store['Url'] = tnx_url
my_store['OwnRecords'] = {}
my_store['HoldRecords'] = {}
my_store['OpenApplications'] = {}
my_store['AcceptedApplications'] = {}
state_put_single(state, uid, my_store)
else:
raise InvalidTransaction('Action {} is not valid'.
format(tnx_action))
@classmethod
def update_record_tracking(cls, state, agent_id, updates):
state_id = agent_id
my_store = state_get_single(state, state_id)
if my_store is None:
raise InvalidTransaction("Identifer {} is not present in store".
format(state_id))
for update in updates:
(field, record_id, value, exists_is_ok) = update
if value == "del":
if record_id not in my_store[field]:
raise InvalidTransaction(
"Record {} is not present in state".format(record_id))
del my_store[field][record_id]
else:
if not exists_is_ok and record_id in my_store[field]:
raise InvalidTransaction(
"Record {} is already present in state".
format(record_id))
my_store[field][record_id] = value
state_put_single(state, state_id, my_store)
@classmethod
def add_record_owner(cls, state, identifier, record_id, own_and_hold):
value = 1 if own_and_hold else 0
AgentHandler.update_record_tracking(
state, identifier, [("OwnRecords", record_id, value, True)])
@classmethod
def remove_record_owner(cls, state, identifier, record_id):
AgentHandler.update_record_tracking(
state, identifier, [("OwnRecords", record_id, "del", False)])
@classmethod
def add_record_holder(cls, state, identifier, record_id):
AgentHandler.update_record_tracking(
state, identifier, [("HoldRecords", record_id, 0, False)])
@classmethod
def remove_record_holder(cls, state, identifier, record_id):
AgentHandler.update_record_tracking(
state, identifier, [("HoldRecords", record_id, "del", False)])
@classmethod
def add_open_application(cls, state, applier_id, holder_id, record_id):
AgentHandler.update_record_tracking(
state, applier_id, [("OpenApplications", record_id, 1, False)])
AgentHandler.update_record_tracking(
state, holder_id, [("HoldRecords", record_id, 1, True)])
@classmethod
def remove_open_application(cls, state, applier_id, holder_id, record_id):
AgentHandler.update_record_tracking(
state, applier_id,
[("OpenApplications", record_id, "del", False)])
AgentHandler.update_record_tracking(
state, holder_id,
[("HoldRecords", record_id, 0, True)])
@classmethod
def add_accepted_application(cls, state, identifier, record_id, sensor_id):
AgentHandler.update_record_tracking(
state, identifier,
[("AcceptedApplications", record_id, sensor_id, False)])
@classmethod
def remove_accepted_application(cls, state, identifier, record_id):
AgentHandler.update_record_tracking(
state, identifier,
[("AcceptedApplications", record_id, "del", False)])
| 40.394737
| 80
| 0.591205
| 1,862
| 18,420
| 5.640172
| 0.145542
| 0.057132
| 0.023995
| 0.028566
| 0.458198
| 0.410303
| 0.369073
| 0.311845
| 0.280804
| 0.254428
| 0
| 0.002002
| 0.321987
| 18,420
| 455
| 81
| 40.483516
| 0.838898
| 0.115418
| 0
| 0.360856
| 0
| 0
| 0.128764
| 0.004742
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076453
| false
| 0.003058
| 0.021407
| 0.012232
| 0.125382
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aaebd6d86a473c46810168a0f679eb02f758b767
| 875
|
py
|
Python
|
pretoinf.py
|
nirmalya8/CalculateAndConvert
|
07eb954e2ac5960363637079bc8c179edec37a69
|
[
"CC-BY-3.0"
] | 1
|
2021-01-11T09:01:51.000Z
|
2021-01-11T09:01:51.000Z
|
pretoinf.py
|
nirmalya8/CalculateAndConvert
|
07eb954e2ac5960363637079bc8c179edec37a69
|
[
"CC-BY-3.0"
] | null | null | null |
pretoinf.py
|
nirmalya8/CalculateAndConvert
|
07eb954e2ac5960363637079bc8c179edec37a69
|
[
"CC-BY-3.0"
] | 1
|
2021-01-10T09:25:45.000Z
|
2021-01-10T09:25:45.000Z
|
class prefixtoinfix:
    def prefixToInfix(self, prefix):
stack = []
l = []
# read prefix in reverse order
i = len(prefix) - 1
for j in prefix:
if j == ' ':
return [],False
while i >= 0:
if not self.isOperator(prefix[i]):
# symbol is operand
stack.append(prefix[i])
i -= 1
else:
# symbol is operator
str = "(" + stack.pop() + prefix[i] + stack.pop() + ")"
l.append(str)
stack.append(str)
i -= 1
return l,stack.pop()
    def isOperator(self, c):
        return c in "*+-/^()"
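A short worked example (my own illustration): converting the prefix expression "*-A/BC-/AKL" yields the infix form "((A-(B/C))*((A/K)-L))", with the first return value listing the intermediate sub-expressions built along the way.
# Worked example: reading the prefix string right-to-left builds up the
# parenthesized infix expression on the stack.
steps, infix = prefixtoinfix().prefixToInfix("*-A/BC-/AKL")
assert infix == "((A-(B/C))*((A/K)-L))"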
| 29.166667
| 92
| 0.377143
| 89
| 875
| 3.707865
| 0.382022
| 0.054545
| 0.072727
| 0.090909
| 0.057576
| 0.057576
| 0.057576
| 0.057576
| 0.057576
| 0.057576
| 0
| 0.008969
| 0.490286
| 875
| 30
| 93
| 29.166667
| 0.730942
| 0.074286
| 0
| 0.173913
| 0
| 0
| 0.012392
| 0
| 0.043478
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0
| 0
| 0.304348
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aaf0aa3cbdbb1c81c191ac04d6a56e4b822a4b99
| 980
|
py
|
Python
|
src/weapon.py
|
gcairesdev/zelda
|
33fce4196c306d0a840aa189b0213f2879058090
|
[
"MIT"
] | 2
|
2022-03-10T22:22:19.000Z
|
2022-03-24T14:42:55.000Z
|
src/weapon.py
|
gcairesdev/zelda
|
33fce4196c306d0a840aa189b0213f2879058090
|
[
"MIT"
] | null | null | null |
src/weapon.py
|
gcairesdev/zelda
|
33fce4196c306d0a840aa189b0213f2879058090
|
[
"MIT"
] | null | null | null |
import pygame
class Weapon(pygame.sprite.Sprite):
def __init__(self, player, groups):
super().__init__(groups)
self.spriteType = 'weapon'
direction = player.status.split('_')[0]
# graphic
fullPath = f'./src/img/weapons/{player.weapon}/{direction}.png'
self.image = pygame.image.load(fullPath).convert_alpha()
# placement
if direction == 'right':
self.rect = self.image.get_rect(
midleft=player.rect.midright + pygame.math.Vector2(0, 16))
elif direction == 'left':
self.rect = self.image.get_rect(
midright=player.rect.midleft + pygame.math.Vector2(0, 16))
elif direction == 'down':
self.rect = self.image.get_rect(
midtop=player.rect.midbottom + pygame.math.Vector2(0, 0))
else:
self.rect = self.image.get_rect(
midbottom=player.rect.midtop + pygame.math.Vector2(-10, 0))
| 36.296296
| 75
| 0.586735
| 112
| 980
| 5.008929
| 0.392857
| 0.080214
| 0.085562
| 0.121212
| 0.28877
| 0.28877
| 0.117647
| 0
| 0
| 0
| 0
| 0.022663
| 0.279592
| 980
| 26
| 76
| 37.692308
| 0.771955
| 0.017347
| 0
| 0.2
| 0
| 0
| 0.071875
| 0.051042
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.05
| 0
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aaf368c0cbb0ab66f42b16908ff73d7af84048da
| 1,224
|
py
|
Python
|
humans-in-the-loop-files/machine-learning-scripts/ImageDownloader.py
|
LibraryOfCongress/hitl
|
8b054f1433b2129bfbaf16fcb09df637335a04a0
|
[
"MIT"
] | 3
|
2021-12-06T16:44:16.000Z
|
2022-03-30T05:45:48.000Z
|
humans-in-the-loop-files/machine-learning-scripts/ImageDownloader.py
|
LibraryOfCongress/hitl
|
8b054f1433b2129bfbaf16fcb09df637335a04a0
|
[
"MIT"
] | 8
|
2022-02-14T22:39:19.000Z
|
2022-03-31T01:54:06.000Z
|
humans-in-the-loop-files/machine-learning-scripts/ImageDownloader.py
|
LibraryOfCongress/hitl
|
8b054f1433b2129bfbaf16fcb09df637335a04a0
|
[
"MIT"
] | 1
|
2022-02-15T18:59:44.000Z
|
2022-02-15T18:59:44.000Z
|
#
# Download images from the LOC IIIF server and store them locally
#
import requests
from pathlib import Path
import shutil
import time
base = 'https://www.loc.gov/'
iiifbase = 'https://tile.loc.gov/image-services/iiif/'
def getImages(item, dest_dir):
downloaded_images = list()
Path(dest_dir).mkdir(parents=True, exist_ok=True)
imagenum = item['start']
while imagenum <= item['end']:
imgurl = iiifbase + item['service'].format(str(imagenum).zfill(4))
r = requests.get(imgurl, stream=True)
if r.status_code == 200:
imgname = item['lc_id'] + '_' + str(imagenum).zfill(4) + '.jpg'
imgpath = dest_dir + '/' + imgname
image_info = {
"image_name": imgname,
"image_location": dest_dir,
"source": imgurl,
"image_url": "https://www.loc.gov/resource/{}/?sp={}".format(item['lc_id'], str(imagenum).zfill(4)).replace("gdcustel", "usteledirec")
}
downloaded_images.append(image_info)
with open(imgpath, 'wb') as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
time.sleep(1)
imagenum += 1
print(imgurl)
return downloaded_images
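An illustrative call showing the item fields getImages reads; the values are placeholders I am assuming from the function body, not real Library of Congress identifiers, and the lines stay commented because the call downloads files.
# item = {
#     'lc_id': 'example.item',                                   # hypothetical
#     'start': 1,
#     'end': 3,
#     'service': 'public:example:{}/full/pct:25/0/default.jpg',  # hypothetical template
# }
# images = getImages(item, 'downloads/example.item')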
| 36
| 146
| 0.607843
| 152
| 1,224
| 4.776316
| 0.546053
| 0.038567
| 0.066116
| 0.070248
| 0.068871
| 0.068871
| 0.068871
| 0
| 0
| 0
| 0
| 0.008677
| 0.246732
| 1,224
| 34
| 147
| 36
| 0.778742
| 0.051471
| 0
| 0
| 0
| 0
| 0.164076
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0.133333
| 0
| 0.2
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aaf4cca94bb840d24c6ed43cf92a6175ba126324
| 1,291
|
py
|
Python
|
Task2E.py
|
bendomb/IA-Flood-Warning-System
|
8e476010e83b64aca8a05dc31f88fe2d6fbd3c9f
|
[
"MIT"
] | null | null | null |
Task2E.py
|
bendomb/IA-Flood-Warning-System
|
8e476010e83b64aca8a05dc31f88fe2d6fbd3c9f
|
[
"MIT"
] | null | null | null |
Task2E.py
|
bendomb/IA-Flood-Warning-System
|
8e476010e83b64aca8a05dc31f88fe2d6fbd3c9f
|
[
"MIT"
] | null | null | null |
from floodsystem.datafetcher import fetch_measure_levels
from floodsystem.stationdata import build_station_list, update_water_levels
import floodsystem.flood as flood
import floodsystem.plot as plot
from datetime import timedelta
stations = build_station_list()
update_water_levels(stations)
# plots the water levels over the past 10 days for the 5 stations at which the current relative water level is greatest.
def run():
"""Requirements for Task 2E"""
# makes a list of the 5 stations with the highest relative water level in descending order
top_five = flood.stations_highest_rel_level(stations, 5)
for i in range(5):
station_name = top_five[i][0].name
station_check = None
for station in stations:
if station.name == station_name:
station_check = station
break
        if not station_check:
            print("Station {} could not be found".format(station_name))
            continue
dt = 10
        dates, levels = fetch_measure_levels(station_check.measure_id, dt=timedelta(days=dt))
        plot.plot_water_levels(station_check, dates, levels)
if __name__ == "__main__":
print("*** Task 2E: CUED Part IA Flood Warning System ***")
run()
| 32.275
| 120
| 0.691712
| 172
| 1,291
| 4.988372
| 0.424419
| 0.051282
| 0.041958
| 0.051282
| 0.076923
| 0.076923
| 0
| 0
| 0
| 0
| 0
| 0.011202
| 0.239349
| 1,291
| 39
| 121
| 33.102564
| 0.862525
| 0.18048
| 0
| 0
| 0
| 0
| 0.082778
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.24
| 0
| 0.28
| 0.08
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aaf844dccdd148febc58c248704051fea8ef7efb
| 1,256
|
py
|
Python
|
gui_components/cell.py
|
FilipRistic2922/SudokuPy
|
7098530d2fd9d82cc2e66649c993630ef6e5774a
|
[
"MIT"
] | null | null | null |
gui_components/cell.py
|
FilipRistic2922/SudokuPy
|
7098530d2fd9d82cc2e66649c993630ef6e5774a
|
[
"MIT"
] | null | null | null |
gui_components/cell.py
|
FilipRistic2922/SudokuPy
|
7098530d2fd9d82cc2e66649c993630ef6e5774a
|
[
"MIT"
] | null | null | null |
import pygame
from gui_components.gui_util import get_font, BLACK, BLUE, GRAY
class Cell:
def __init__(self, value, row, col, width, height):
self.value = value
self.temp = 0
self.row = row
self.col = col
self.width = width
self.height = height
self.set_by_user = False
self.selected = False
def draw(self, win):
font = get_font("arial", 40)
gap = self.width / 9
x = self.col * gap
y = self.row * gap
if self.temp != 0 and self.value == 0:
text = font.render(str(self.temp), 1, GRAY)
win.blit(text, (x + 45, y + 5))
elif not (self.value == 0):
color = BLACK
if self.set_by_user:
color = BLUE
text = font.render(str(self.value), 1, color)
win.blit(text, (x + (gap / 2 - text.get_width() / 2), y + (gap / 2 - text.get_height() / 2)))
if self.selected:
pygame.draw.rect(win, BLUE, (x, y, gap, gap), 5)
def set_value(self, val, set_by_user: bool = False):
self.value = val
self.temp = 0
self.set_by_user = set_by_user
def set_temp(self, val):
self.value = 0
self.temp = val
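# Minimal usage sketch (assumes a 450x450 board surface so that gap = width / 9
# above lines up with a 9x9 grid; get_font comes from gui_components.gui_util
# imported at the top of this file).
if __name__ == '__main__':
    pygame.init()
    win = pygame.display.set_mode((450, 450))
    cell = Cell(0, row=2, col=3, width=450, height=450)
    cell.set_temp(7)   # pencil-mark value, rendered in gray by draw()
    cell.draw(win)
    pygame.display.flip()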
| 27.304348
| 105
| 0.527866
| 179
| 1,256
| 3.581006
| 0.290503
| 0.098284
| 0.070203
| 0.060842
| 0.065523
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023313
| 0.351115
| 1,256
| 45
| 106
| 27.911111
| 0.76319
| 0
| 0
| 0.057143
| 0
| 0
| 0.003981
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114286
| false
| 0
| 0.057143
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aaf88ec65f719215938c906b09236be307dd6034
| 2,246
|
py
|
Python
|
chapter2/remove_dups.py
|
MubashirullahD/cracking-the-coding-interview
|
f9595886967e7c63cec19028239e4289e9cd1f9e
|
[
"MIT"
] | 1
|
2021-12-01T13:26:10.000Z
|
2021-12-01T13:26:10.000Z
|
chapter2/remove_dups.py
|
MubashirullahD/cracking-the-coding-interview
|
f9595886967e7c63cec19028239e4289e9cd1f9e
|
[
"MIT"
] | null | null | null |
chapter2/remove_dups.py
|
MubashirullahD/cracking-the-coding-interview
|
f9595886967e7c63cec19028239e4289e9cd1f9e
|
[
"MIT"
] | null | null | null |
"""
Remove Dups: Write code to remove duplicates from an unsorted linked list.
FOLLOW UP
How would you solve this problem if a temporary buffer is not allowed?
"""
from linkedlist import linkedlist
def remove_dup(linked_list):
placeholder = dict()
    pointer1 = linked_list.top  # this pointer unlinks the duplicate nodes
    pointer2 = linked_list.top.next  # this pointer finds the nodes to delete
    if pointer2 is None:  # only one node in the list
return
placeholder[pointer1.data] = 1
while(pointer2.next is not None):
placeholder[pointer2.data] = placeholder.get(pointer2.data, 0) + 1
if placeholder[pointer2.data] > 1:
pointer1.next = pointer2.next
pointer2 = pointer2.next
else:
pointer1 = pointer2
pointer2 = pointer2.next
# Last node case
placeholder[pointer2.data] = placeholder.get(pointer2.data, 0) + 1
if placeholder[pointer2.data] > 1:
pointer1.next = pointer2.next
def _sort(linked_list):
#bubble sort
sorted = False
while(not sorted):
node = linked_list.top
sorted = True
while(node.next is not None):
if node.data > node.next.data:
sorted = False
tmp = node.data
node.data = node.next.data
node.next.data = tmp
node = node.next
def remove_dub_no_buff(linked_list):
# We may have to sort
_sort(linked_list)
pointer1 = linked_list.top
while (pointer1.next is not None):
if (pointer1.data == pointer1.next.data):
pointer1.next = pointer1.next.next
else:
pointer1 = pointer1.next
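# A common buffer-free alternative that preserves node order (illustrative
# sketch; assumes the same .top/.next/.data node API used above). Unlike the
# sort-based approach, it keeps the list order at O(n^2) time, O(1) space.
def remove_dup_runner(linked_list):
    current = linked_list.top
    while current is not None:
        runner = current
        # the runner scans ahead and unlinks later duplicates of current
        while runner.next is not None:
            if runner.next.data == current.data:
                runner.next = runner.next.next
            else:
                runner = runner.next
        current = current.next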
if __name__ == "__main__":
test_list = linkedlist(10)
test_list.top.append_to_tail(20)
test_list.top.append_to_tail(30)
test_list.top.append_to_tail(20) #
test_list.top.append_to_tail(40)
test_list.top.append_to_tail(20) #
test_list.top.append_to_tail(50)
test_list.top.append_to_tail(40) #
test_list.top.append_to_tail(50) #
print("Before removing ")
test_list.print_all()
remove_dub_no_buff(test_list)
print("After removing ")
test_list.print_all()
| 27.060241
| 77
| 0.629564
| 295
| 2,246
| 4.60678
| 0.284746
| 0.06181
| 0.064754
| 0.100074
| 0.381163
| 0.294334
| 0.294334
| 0.291391
| 0.291391
| 0.291391
| 0
| 0.033582
| 0.284061
| 2,246
| 82
| 78
| 27.390244
| 0.811567
| 0.130899
| 0
| 0.425926
| 0
| 0
| 0.020176
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.018519
| 0
| 0.092593
| 0.074074
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aafaed7bd2fdeb1c2bbe286e7b1293532edfc8c8
| 2,516
|
py
|
Python
|
api/models.py
|
mz-techops/banhammer
|
02476db3d2bb617dbe50827687065fbea7553caf
|
[
"BSD-3-Clause"
] | 3
|
2018-03-09T23:29:25.000Z
|
2020-11-25T15:34:13.000Z
|
api/models.py
|
whyallyn/banhammer
|
59fc81b15d9950a7a40279a9d1df8101c58df569
|
[
"BSD-3-Clause"
] | 3
|
2018-05-08T01:10:43.000Z
|
2021-03-19T21:56:36.000Z
|
api/models.py
|
whyallyn/banhammer
|
59fc81b15d9950a7a40279a9d1df8101c58df569
|
[
"BSD-3-Clause"
] | 2
|
2018-05-10T15:07:24.000Z
|
2018-06-20T16:24:00.000Z
|
"""API Django models."""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Target(models.Model):
"""Definition of a Target."""
BAN = 'ban'
ALLOW = 'allow'
TARGET_ACTION_CHOICES = (
(BAN, "Ban"),
(ALLOW, "Allow"),
)
target_action = models.CharField(
max_length=5,
choices=TARGET_ACTION_CHOICES,
)
IPADDR = 'ip'
DOMAIN = 'domain'
URL = 'url'
HASH = 'hash'
USER = 'user'
TARGET_TYPE_CHOICES = (
(IPADDR, 'IP Address'),
(DOMAIN, 'Domain'),
(URL, 'URL'),
(HASH, 'Hash'),
(USER, 'User'),
)
target_type = models.CharField(
max_length=6,
choices=TARGET_TYPE_CHOICES,
)
target = models.CharField(max_length=900)
reason = models.CharField(max_length=50)
method = models.CharField(max_length=50)
user = models.CharField(max_length=255)
date_created = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now=True)
class Meta:
permissions = (
('target_all_read', 'Read access for all Target types'),
('target_all_write', 'Write access for all Target types'),
('target_ipaddr_read', 'Read access for IP Target types'),
('target_ipaddr_write', 'Write access for IP Target types'),
('target_domain_read', 'Read access for Domain Target types'),
('target_domain_write', 'Write access for Domain Target types'),
('target_url_read', 'Read access for URL Target types'),
('target_url_write', 'Write access for URL Target types'),
('target_hash_read', 'Read access for Hash Target types'),
('target_hash_write', 'Write access for Hash Target types'),
('target_user_read', 'Read access for User Target types'),
('target_user_write', 'Write access for User Target types'),
)
def __str__(self):
return self.target
@python_2_unicode_compatible
class TargetIpAddr(models.Model):
"""Definition of an IP Address Target."""
ipaddr = models.CharField(max_length=45, unique=True)
ipaddr_action = models.CharField(
max_length=5,
choices=Target.TARGET_ACTION_CHOICES,
)
target = models.ManyToManyField(Target)
method = models.CharField(max_length=50)
def __str__(self):
return self.ipaddr
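# Usage sketch (hypothetical values; requires a configured Django project with
# these models migrated, so it is shown commented out):
#
#   t = Target.objects.create(
#       target_action=Target.BAN,
#       target_type=Target.IPADDR,
#       target='198.51.100.7',
#       reason='scanning',
#       method='api',
#       user='analyst',
#   )
#   str(t)  # returns '198.51.100.7', via __str__ above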
| 31.848101
| 76
| 0.636328
| 297
| 2,516
| 5.144781
| 0.228956
| 0.070681
| 0.122382
| 0.141361
| 0.496073
| 0.383508
| 0.115183
| 0.115183
| 0.057592
| 0.057592
| 0
| 0.010621
| 0.25159
| 2,516
| 78
| 77
| 32.25641
| 0.80085
| 0.031002
| 0
| 0.123077
| 0
| 0
| 0.273328
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030769
| false
| 0
| 0.046154
| 0.030769
| 0.476923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aafb2505abc576fb91ec98a082757451092677b3
| 1,795
|
py
|
Python
|
src/ranking_utils/scripts/preprocess.py
|
fknauf/ranking-utils
|
ce1a0be4e560d5f156a76cb5c0e3751793c67648
|
[
"MIT"
] | null | null | null |
src/ranking_utils/scripts/preprocess.py
|
fknauf/ranking-utils
|
ce1a0be4e560d5f156a76cb5c0e3751793c67648
|
[
"MIT"
] | null | null | null |
src/ranking_utils/scripts/preprocess.py
|
fknauf/ranking-utils
|
ce1a0be4e560d5f156a76cb5c0e3751793c67648
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
import argparse
from pathlib import Path
from pytorch_lightning import seed_everything
from ranking_utils.datasets.antique import ANTIQUE
from ranking_utils.datasets.fiqa import FiQA
from ranking_utils.datasets.insuranceqa import InsuranceQA
from ranking_utils.datasets.trecdl import TRECDL2019Passage, TRECDL2019Document
from ranking_utils.datasets.trec import TREC
def main():
ap = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
ap.add_argument("SAVE", help="Where to save the results")
ap.add_argument(
"--num_neg_point",
type=int,
default=1,
help="Number of negatives per positive (pointwise training)",
)
ap.add_argument(
"--num_neg_pair",
type=int,
default=16,
help="Number of negatives per positive (pairwise training)",
)
ap.add_argument(
"--query_limit_pair",
type=int,
default=64,
help="Maximum number of training examples per query (pairwise training)",
)
ap.add_argument("--random_seed", type=int, default=123, help="Random seed")
subparsers = ap.add_subparsers(help="Choose a dataset", dest="dataset")
subparsers.required = True
DATASETS = [ANTIQUE, FiQA, InsuranceQA, TRECDL2019Passage, TRECDL2019Document, TREC]
for c in DATASETS:
c.add_subparser(subparsers, c.__name__.lower())
args = ap.parse_args()
if args.random_seed:
seed_everything(args.random_seed)
ds = None
for c in DATASETS:
if args.dataset == c.__name__.lower():
ds = c(args)
break
save_path = Path(args.SAVE)
ds.save(save_path, args.num_neg_point, args.num_neg_pair, args.query_limit_pair)
if __name__ == "__main__":
main()
| 29.42623
| 88
| 0.690808
| 224
| 1,795
| 5.3125
| 0.361607
| 0.02521
| 0.067227
| 0.10084
| 0.134454
| 0.053782
| 0
| 0
| 0
| 0
| 0
| 0.017668
| 0.211699
| 1,795
| 60
| 89
| 29.916667
| 0.823322
| 0.012256
| 0
| 0.170213
| 0
| 0
| 0.169865
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021277
| false
| 0.042553
| 0.170213
| 0
| 0.191489
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aafcb95092c87ce209c68768cc38ba847d90f715
| 945
|
py
|
Python
|
lightcycle-backend/lightcycle/basebot.py
|
Onapsis/pytron
|
2ed0622ae13f010bcd8fdbbd2f1e9cba3d2e3d58
|
[
"MIT"
] | null | null | null |
lightcycle-backend/lightcycle/basebot.py
|
Onapsis/pytron
|
2ed0622ae13f010bcd8fdbbd2f1e9cba3d2e3d58
|
[
"MIT"
] | null | null | null |
lightcycle-backend/lightcycle/basebot.py
|
Onapsis/pytron
|
2ed0622ae13f010bcd8fdbbd2f1e9cba3d2e3d58
|
[
"MIT"
] | null | null | null |
# encoding=utf-8
import random
from collections import namedtuple
Point = namedtuple('Point', 'x y')
DIRECTIONS = {
'N': Point(0, -1),
'E': Point(1, 0),
'S': Point(0, 1),
'W': Point(-1, 0),
}
class LightCycleBaseBot(object):
def get_next_step(self, arena, x, y, direction):
raise NotImplementedError('Should return one Direction.')
class LightCycleRandomBot(LightCycleBaseBot):
def get_next_step(self, arena, x, y, direction):
possible_movements = [key for key, value in DIRECTIONS.items()
if 0 <= x + value.x < arena.shape[0]
and 0 <= y + value.y < arena.shape[1]
and not arena[x + value.x, y + value.y]]
        # print(possible_movements)
if direction in possible_movements:
return direction
else:
            return random.choice(possible_movements or list(DIRECTIONS))
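# Usage sketch (assuming a numpy occupancy grid where truthy cells are walls;
# the arena shape/indexing matches the checks in get_next_step above):
if __name__ == '__main__':
    import numpy as np
    arena = np.zeros((10, 10), dtype=bool)
    bot = LightCycleRandomBot()
    print(bot.get_next_step(arena, 5, 5, 'N'))  # 'N' is free, so it is kept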
| 27
| 73
| 0.583069
| 114
| 945
| 4.763158
| 0.438596
| 0.014733
| 0.025783
| 0.051565
| 0.12523
| 0.12523
| 0.12523
| 0.12523
| 0.12523
| 0
| 0
| 0.019727
| 0.302646
| 945
| 34
| 74
| 27.794118
| 0.804249
| 0.04127
| 0
| 0.090909
| 0
| 0
| 0.044297
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.090909
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aafd6a05995c3be1c704460d3a42582a855ce32e
| 6,629
|
py
|
Python
|
TD1/utils/extrinsic.py
|
AntoineOrgerit/Web-Scrapping
|
552f2f85d775ada9e85f897713d20de09c0919ed
|
[
"BSD-3-Clause"
] | null | null | null |
TD1/utils/extrinsic.py
|
AntoineOrgerit/Web-Scrapping
|
552f2f85d775ada9e85f897713d20de09c0919ed
|
[
"BSD-3-Clause"
] | null | null | null |
TD1/utils/extrinsic.py
|
AntoineOrgerit/Web-Scrapping
|
552f2f85d775ada9e85f897713d20de09c0919ed
|
[
"BSD-3-Clause"
] | null | null | null |
"""
This module allows to perform a specific extrinsic evaluation of files by a specified criteria.
Antoine Orgerit - François Gréau - Lisa Fougeron
La Rochelle Université - 2019-2020
"""
import langid
import json
import copy
import subprocess
from os import listdir, remove
from os.path import isfile, join
from utils.daniel.evaluate import get_results, get_dic
def print_TP_FP_FN_TN(tools_criterias_data):
"""
Outputs TP, FP, FN and TN results of the evaluated files.
"""
print("TOOLS\t\t|TP\t|FP\t|FN\t|TN")
print("------------------------------------------------")
for tool in tools_criterias_data:
if len(tool) > 7:
print(tool + "\t|", end="")
else:
print(tool + "\t\t|", end="")
print(str(tools_criterias_data[tool][0]["TP"]) + "\t|" + str(tools_criterias_data[tool][0]["FP"]) + "\t|" + str(tools_criterias_data[tool][0]["FN"]) + "\t|" + str(tools_criterias_data[tool][0]["TN"]))
print()
def print_FRP(tools_criterias_data, default_header_key):
"""
Outputs F-score, Recall and Precision results of the evaluated files.
"""
print("TOOLS\t\t|\t\tAll\t\t", end="")
add_spacing = []
for criteria in tools_criterias_data[default_header_key][2]:
if len(criteria) >= 24:
print("|" + criteria + "\t", end="")
if len(criteria) >= 31:
add_spacing.append(criteria)
elif len(criteria) >= 16:
print("|\t" + criteria + "\t", end="")
elif len(criteria) >= 8:
print("|\t" + criteria + "\t\t", end="")
else:
print("|\t\t" + criteria + "\t\t", end="")
print()
print("\t\t|\tF\tR\tP\t", end="")
for criteria in tools_criterias_data[default_header_key][2]:
print("|\tF\tR\tP\t", end="")
if criteria in add_spacing:
print("\t", end="")
print()
print("------------------------------------------------", end="")
for criteria in tools_criterias_data[default_header_key][2]:
print("--------------------------------", end="")
if criteria in add_spacing:
print("--------", end="")
print()
for tool in tools_criterias_data:
if len(tool) > 7:
print(tool + "\t", end="")
else:
print(tool + "\t\t", end="")
print("|\t" + str(format(tools_criterias_data[tool][1]["F1-measure"], ".2f")) + "\t" + str(format(tools_criterias_data[tool][1]["Recall"], ".2f")) + "\t" + str(format(tools_criterias_data[tool][1]["Precision"], ".2f")) + "\t", end="")
for criteria in tools_criterias_data[tool][2]:
print("|\t" + str(format(tools_criterias_data[tool][2][criteria]["F1-measure"], ".2f")) + "\t" + str(format(tools_criterias_data[tool][2][criteria]["Recall"], ".2f")) + "\t" + str(format(tools_criterias_data[tool][2][criteria]["Precision"], ".2f")) + "\t", end="")
if criteria in add_spacing:
print("\t", end="")
print()
print()
def detect_language(file_path):
"""
    Detects the language used in a file using the langid module.
"""
file = open(file_path, "r", encoding="utf8")
language = langid.classify(file.read())
file.close()
return language
def delete_unused_files(clean_repository_json_path, files_to_evaluate):
"""
    Removes entries from the JSON file at clean_repository_json_path whose files are not
    present in the JSON object files_to_evaluate.
"""
clean_repository = json.load(open(clean_repository_json_path, "r", encoding="utf8"))
for id in list(clean_repository):
if not clean_repository[id]["path"] in files_to_evaluate:
clean_repository.pop(id)
return clean_repository
def prepare_json(json_content, path):
"""
    Prepares a JSON object from the clean result json_content
    and a specific tool files path.
"""
prepared_json = {}
for id, infos in json_content.items():
new_infos = copy.copy(infos)
new_infos["document_path"] = path + new_infos["path"]
new_infos["language"] = new_infos["langue"]
new_infos.pop("langue")
prepared_json[id] = new_infos
return prepared_json
def process_corpus():
"""
    Processes the files listed in eval.json using Daniel's process_corpus.py script.
"""
out = subprocess.check_output(['python', '../utils/daniel/process_corpus.py', '-c ../../exo5/eval.json'])
composed_out = out.decode('ascii').split("\r\n")
composed_out = composed_out[len(composed_out) - 2].split("/")
return composed_out[len(composed_out) - 1]
def evaluate(processed_file, criteria_extraction):
"""
    Evaluates the eval.json results against the gold.json reference file
    using Daniel's evaluate.py script.
"""
gold = get_dic('./gold.json')
eval = get_dic('./' + processed_file)
return get_results(gold, eval, criteria_extraction)
def perform_extrinsic_evaluation(clean_repository_path_and_json, source_repositories_name_and_path, criteria_extraction, print_header_key=None):
"""
    Performs an extrinsic evaluation given the reference files path and JSON file clean_repository_path_and_json,
    the files to evaluate linked to their generator tool source_repositories_name_and_path, and an extraction
    criterion criteria_extraction.
"""
global_data = {}
for source_repository_name_and_path in source_repositories_name_and_path:
files_to_evaluate = [f for f in listdir(source_repository_name_and_path[1]) if isfile(join(source_repository_name_and_path[1], f))]
clean_repository = delete_unused_files(clean_repository_path_and_json[1], files_to_evaluate)
gold_json = prepare_json(clean_repository, clean_repository_path_and_json[0])
eval_json = prepare_json(clean_repository, source_repository_name_and_path[1])
gold_file = open("./gold.json", "w")
gold_file.write(json.dumps(gold_json))
gold_file.close()
eval_file = open("./eval.json", "w")
eval_file.write(json.dumps(eval_json))
eval_file.close()
processed_file = process_corpus()
global_data[source_repository_name_and_path[0]] = evaluate(processed_file, criteria_extraction)
remove("./gold.json")
remove("./eval.json")
remove("./test.out")
remove("./tmp")
remove("./" + processed_file)
print_TP_FP_FN_TN(global_data)
    if print_header_key is not None:
print_FRP(global_data, print_header_key)
return global_data
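# Usage sketch (hypothetical paths and criterion; the argument shapes follow
# the docstring above, and the call is commented out because it spawns the
# Daniel subprocess and touches the filesystem):
#
#   results = perform_extrinsic_evaluation(
#       ('corpus/clean/', 'corpus/clean.json'),
#       [('toolA', 'out/toolA/'), ('toolB', 'out/toolB/')],
#       criteria_extraction='language',
#       print_header_key='toolA')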
| 38.994118
| 276
| 0.629808
| 875
| 6,629
| 4.534857
| 0.185143
| 0.063508
| 0.081653
| 0.060988
| 0.416079
| 0.261593
| 0.225302
| 0.197329
| 0.178679
| 0.137097
| 0
| 0.009586
| 0.213154
| 6,629
| 169
| 277
| 39.224852
| 0.75115
| 0.160959
| 0
| 0.192661
| 0
| 0
| 0.102697
| 0.038604
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073395
| false
| 0
| 0.06422
| 0
| 0.192661
| 0.302752
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aafdf4b1d0c84500a3e64c7af727b3f3bc824d1a
| 5,002
|
py
|
Python
|
cinder/tests/unit/policies/test_quotas.py
|
arunvinodqmco/cinder
|
62cb72c6890e458427ba0601646b186b7b36dc01
|
[
"Apache-2.0"
] | 571
|
2015-01-01T17:47:26.000Z
|
2022-03-23T07:46:36.000Z
|
cinder/tests/unit/policies/test_quotas.py
|
arunvinodqmco/cinder
|
62cb72c6890e458427ba0601646b186b7b36dc01
|
[
"Apache-2.0"
] | 37
|
2015-01-22T23:27:04.000Z
|
2021-02-05T16:38:48.000Z
|
cinder/tests/unit/policies/test_quotas.py
|
arunvinodqmco/cinder
|
62cb72c6890e458427ba0601646b186b7b36dc01
|
[
"Apache-2.0"
] | 841
|
2015-01-04T17:17:11.000Z
|
2022-03-31T12:06:51.000Z
|
# Copyright 2021 Red Hat, Inc.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
from cinder.api.contrib import quotas
from cinder.api import microversions as mv
from cinder.policies import quotas as policy
from cinder.tests.unit.api import fakes as fake_api
from cinder.tests.unit.policies import base
@ddt.ddt
class QuotasPolicyTest(base.BasePolicyTest):
authorized_users = [
'legacy_admin',
'legacy_owner',
'system_admin',
'project_admin',
'project_member',
'project_reader',
'project_foo',
]
unauthorized_users = [
'system_member',
'system_reader',
'system_foo',
'other_project_member',
'other_project_reader',
]
authorized_admins = [
'legacy_admin',
'system_admin',
'project_admin',
]
unauthorized_admins = [
'legacy_owner',
'system_member',
'system_reader',
'system_foo',
'project_member',
'project_reader',
'project_foo',
'other_project_member',
'other_project_reader',
]
unauthorized_exceptions = []
# Basic policy test is without enforcing scope (which cinder doesn't
# yet support) and deprecated rules enabled.
def setUp(self, enforce_scope=False, enforce_new_defaults=False,
*args, **kwargs):
super().setUp(enforce_scope, enforce_new_defaults, *args, **kwargs)
self.controller = quotas.QuotaSetsController()
self.api_path = '/v3/os-quota-sets'
self.api_version = mv.BASE_VERSION
@ddt.data(*base.all_users)
def test_show_policy(self, user_id):
rule_name = policy.SHOW_POLICY
req = fake_api.HTTPRequest.blank(self.api_path,
version=self.api_version)
self.common_policy_check(user_id, self.authorized_users,
self.unauthorized_users,
self.unauthorized_exceptions,
rule_name, self.controller.show,
req, id=self.project_id)
@ddt.data(*base.all_users)
def test_update_policy(self, user_id):
rule_name = policy.UPDATE_POLICY
req = fake_api.HTTPRequest.blank(self.api_path,
version=self.api_version)
req.method = 'PUT'
body = {
"quota_set": {
"groups": 11,
"volumes": 5,
"backups": 4
}
}
self.common_policy_check(user_id, self.authorized_admins,
self.unauthorized_admins,
self.unauthorized_exceptions,
rule_name, self.controller.update,
req, id=self.project_id, body=body)
@ddt.data(*base.all_users)
def test_delete_policy(self, user_id):
rule_name = policy.DELETE_POLICY
req = fake_api.HTTPRequest.blank(self.api_path,
version=self.api_version)
req.method = 'DELETE'
self.common_policy_check(user_id, self.authorized_admins,
self.unauthorized_admins,
self.unauthorized_exceptions,
rule_name, self.controller.delete,
req, id=self.project_id)
class QuotasPolicySecureRbacTest(QuotasPolicyTest):
authorized_users = [
'legacy_admin',
'system_admin',
'project_admin',
'project_member',
'project_reader',
]
unauthorized_users = [
'legacy_owner',
'system_member',
'system_foo',
'project_foo',
'other_project_member',
'other_project_reader',
]
# NOTE(Xena): The authorized_admins and unauthorized_admins are the same
# as the QuotasPolicyTest's. This is because in Xena the "admin only"
# rules are the legacy RULE_ADMIN_API. This will change in Yoga, when
# RULE_ADMIN_API will be deprecated in favor of the SYSTEM_ADMIN rule that
# is scope based.
def setUp(self, *args, **kwargs):
# Test secure RBAC by disabling deprecated policy rules (scope
# is still not enabled).
super().setUp(enforce_scope=False, enforce_new_defaults=True,
*args, **kwargs)
| 33.797297
| 78
| 0.595162
| 548
| 5,002
| 5.213504
| 0.315693
| 0.019601
| 0.015401
| 0.024151
| 0.433322
| 0.40042
| 0.325516
| 0.236262
| 0.155408
| 0.155408
| 0
| 0.003834
| 0.322071
| 5,002
| 147
| 79
| 34.027211
| 0.838691
| 0.217713
| 0
| 0.542857
| 0
| 0
| 0.137275
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.057143
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aafe6ad15866307b4277788796934306b2fe5812
| 2,207
|
py
|
Python
|
build/lib/like_spider/excel.py
|
wuyingjie1002/like_spider
|
379354a362a693d45513aee4a8d871e79d7f8de4
|
[
"MIT"
] | 3
|
2019-02-23T08:19:41.000Z
|
2021-01-07T08:05:29.000Z
|
build/lib/like_spider/excel.py
|
wuyingjie1002/like_spider
|
379354a362a693d45513aee4a8d871e79d7f8de4
|
[
"MIT"
] | null | null | null |
build/lib/like_spider/excel.py
|
wuyingjie1002/like_spider
|
379354a362a693d45513aee4a8d871e79d7f8de4
|
[
"MIT"
] | 1
|
2019-02-23T08:19:43.000Z
|
2019-02-23T08:19:43.000Z
|
import time, os
from openpyxl import load_workbook
from openpyxl import Workbook
from .config import *
class Excel():
"""This is a class that saves data to an excel file."""
def loadFile(self, fileName):
"""load excel file"""
self.wb = load_workbook(fileName)
        self.sheets = self.wb.sheetnames
def loadSheet(self, sheet):
"""load a sheet"""
self.table = self.wb[sheet]
self.rows = self.table.max_row
self.cols = self.table.max_column
def getValue(self, row, col):
"""get a value"""
return self.table.cell(row, col).value
def saveFile(self, data, fileName):
"""save data to an excel file."""
if fileName == "":
print('file error')
return False
totalRow = len(data)
if totalRow > 0:
wb = Workbook()
ws = wb.active
for row in range(1, (totalRow + 1)):
totalCol = len(data[(row - 1)])
if totalCol > 0:
for col in range(1, (totalCol + 1)):
cell = ws.cell(row = row, column = col)
cell.value = data[(row - 1)][(col - 1)]
else:
print('col data error')
break
if totalCol > 0:
wb.save(fileName)
else:
print('row data error')
def appendFile(self, data, fileName, sheet = ''):
"""append data to an excel file."""
if fileName == "":
print('file error')
return False
if os.path.exists(fileName):
self.loadFile(fileName)
if sheet == '':
sheet = self.sheets[0]
self.loadSheet(sheet)
if self.rows > 0 and self.cols > 0:
fileData = []
for row in range(1, self.rows + 1):
rowData = []
for col in range(1, self.cols + 1):
rowData.append(self.getValue(row, col))
fileData.append(rowData)
fileData.extend(data)
data = fileData
self.saveFile(data, fileName)
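# Minimal usage sketch (hypothetical file name and data; shown commented out
# because the relative .config import above requires running from the package):
#
#   xl = Excel()
#   xl.saveFile([['name', 'likes'], ['alice', 3]], 'report.xlsx')   # fresh workbook
#   xl.appendFile([['bob', 5]], 'report.xlsx')                      # re-load and append below existing rows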
| 33.439394
| 63
| 0.487993
| 248
| 2,207
| 4.318548
| 0.266129
| 0.033613
| 0.029879
| 0.036415
| 0.165266
| 0.097106
| 0.097106
| 0.097106
| 0.097106
| 0.097106
| 0
| 0.01283
| 0.399638
| 2,207
| 66
| 64
| 33.439394
| 0.795472
| 0.067059
| 0
| 0.185185
| 0
| 0
| 0.023657
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092593
| false
| 0
| 0.074074
| 0
| 0.240741
| 0.074074
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c90218f051fff4ea8d9d74263bd6864628d0ed69
| 2,172
|
py
|
Python
|
code/parsing/parsing_args.py
|
mdheller/SPARQA
|
3678798491abeb350d9500182291b9a73da75bed
|
[
"MIT"
] | 1
|
2020-06-20T12:27:11.000Z
|
2020-06-20T12:27:11.000Z
|
code/parsing/parsing_args.py
|
mdheller/SPARQA
|
3678798491abeb350d9500182291b9a73da75bed
|
[
"MIT"
] | null | null | null |
code/parsing/parsing_args.py
|
mdheller/SPARQA
|
3678798491abeb350d9500182291b9a73da75bed
|
[
"MIT"
] | null | null | null |
from common.bert_args import BertArgs
from sutime import SUTime
from parsing.nltk_nlp_utils import NLTK_NLP
from common import globals_args
from common import hand_files
parser_mode = globals_args.parser_mode
wh_words_set = {"what", "which", "whom", "who", "when", "where", "why", "how", "how many", "how large", "how big"}
bert_args = BertArgs(globals_args.root, globals_args.q_mode)
nltk_nlp = NLTK_NLP(globals_args.argument_parser.ip_port)
sutime = SUTime(jars=globals_args.argument_parser.sutime_jar_files, mark_time_ranges=True)
unimportantwords = hand_files.read_set(globals_args.argument_parser.unimportantwords)
unimportantphrases = hand_files.read_list(globals_args.argument_parser.unimportantphrases)
stopwords_dict = hand_files.read_set(globals_args.argument_parser.stopwords_dir)
ordinal_lines_dict = hand_files.read_ordinal_file(globals_args.argument_parser.ordinal_fengli) #2 {'second', '2ndis_equal_wh_word'}
count_phrases = ['Count', 'How many', 'how many', 'the number of', 'the count of', 'the amount of', 'total number of', 'count']
count_ner_tags = ['count']
dayu_phrases = ['more', 'more than', 'greater', 'higher', 'longer than', 'taller than']  # 'over',
dayu_dengyu_phrases = ['at least', 'not less than', 'or more']
# dengyu_phrases = ['equal', 'same']
xiaoyu_phrases = ['earlier', 'less than', 'smaller', 'less', 'no higher than', 'fewer', 'fewer than']
xiaoyu_dengyu_phrases = ['at most', 'maximum', 'or less', 'no larger than']
comparative_ner_tags = ['>', '>=', '<', '<=']
argmin_phrases = ['smallest', 'least', 'weakest', 'minimum', 'minimal', 'youngest',
                  'closest', 'shortest', 'thinnest', 'tiniest', 'hollowest',
                  'narrowest', 'shallowest', 'simplest', 'latest', 'last', 'poorest', 'littlest']
argmax_phrases = ['largest', 'brightest', 'heaviest', 'most',
                  'maximum', 'maximal', 'ultimate', 'totally', 'hugest',
                  'longest', 'biggest', 'fattest', 'fastest',
                  'greatest', 'quickest', 'tallest', 'oldest',
                  'eldest', 'farthest', 'furthest', 'richest', 'best']
arg_ner_tags = ['argmax', 'argmin']
| 65.818182
| 133
| 0.683702
| 265
| 2,172
| 5.350943
| 0.490566
| 0.077574
| 0.080395
| 0.105783
| 0.057828
| 0.057828
| 0.057828
| 0.057828
| 0
| 0
| 0
| 0.001084
| 0.150552
| 2,172
| 33
| 134
| 65.818182
| 0.76748
| 0.035451
| 0
| 0
| 0
| 0
| 0.301795
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.233333
| 0
| 0.233333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c90237976b3a9200b3841c1dddf956cf22c21271
| 1,378
|
py
|
Python
|
torchtext/datasets/amazonreviewfull.py
|
parmeet/text
|
1fb2aedb48b5ecc5e81741e7c8504486b91655c6
|
[
"BSD-3-Clause"
] | 3,172
|
2017-01-18T19:47:03.000Z
|
2022-03-27T17:06:03.000Z
|
torchtext/datasets/amazonreviewfull.py
|
parmeet/text
|
1fb2aedb48b5ecc5e81741e7c8504486b91655c6
|
[
"BSD-3-Clause"
] | 1,228
|
2017-01-18T20:09:16.000Z
|
2022-03-31T04:42:35.000Z
|
torchtext/datasets/amazonreviewfull.py
|
parmeet/text
|
1fb2aedb48b5ecc5e81741e7c8504486b91655c6
|
[
"BSD-3-Clause"
] | 850
|
2017-01-19T03:19:54.000Z
|
2022-03-29T15:29:52.000Z
|
from torchtext.data.datasets_utils import (
_RawTextIterableDataset,
_wrap_split_argument,
_add_docstring_header,
_download_extract_validate,
_create_dataset_directory,
_create_data_from_csv,
)
import os
import logging
URL = 'https://drive.google.com/uc?export=download&id=0Bz8a_Dbh9QhbZVhsUnRWRDhETzA'
MD5 = '57d28bd5d930e772930baddf36641c7c'
NUM_LINES = {
'train': 3000000,
'test': 650000,
}
_PATH = 'amazon_review_full_csv.tar.gz'
_EXTRACTED_FILES = {
'train': f'{os.sep}'.join(['amazon_review_full_csv', 'train.csv']),
'test': f'{os.sep}'.join(['amazon_review_full_csv', 'test.csv']),
}
_EXTRACTED_FILES_MD5 = {
'train': "31b268b09fd794e0ca5a1f59a0358677",
'test': "0f1e78ab60f625f2a30eab6810ef987c"
}
DATASET_NAME = "AmazonReviewFull"
@_add_docstring_header(num_lines=NUM_LINES, num_classes=5)
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(('train', 'test'))
def AmazonReviewFull(root, split):
path = _download_extract_validate(root, URL, MD5, os.path.join(root, _PATH), os.path.join(root, _EXTRACTED_FILES[split]),
_EXTRACTED_FILES_MD5[split], hash_type="md5")
logging.info('Creating {} data'.format(split))
return _RawTextIterableDataset(DATASET_NAME, NUM_LINES[split],
_create_data_from_csv(path))
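# Usage sketch (shown commented out to avoid triggering the dataset download;
# it relies on the split/docstring decorators above resolving the 'root' and
# 'split' defaults, and each yielded item is assumed to be a (label, text) pair):
#
#   train_iter = AmazonReviewFull(split='train')
#   label, text = next(iter(train_iter))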
| 30.622222
| 125
| 0.716255
| 157
| 1,378
| 5.859873
| 0.414013
| 0.034783
| 0.052174
| 0.061957
| 0.063043
| 0.063043
| 0.063043
| 0.063043
| 0
| 0
| 0
| 0.071737
| 0.160377
| 1,378
| 44
| 126
| 31.318182
| 0.723423
| 0
| 0
| 0
| 0
| 0
| 0.25254
| 0.122642
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028571
| false
| 0
| 0.085714
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c90243bd480bf830b8eea8819352fe119d1a48da
| 2,962
|
py
|
Python
|
code/examples/VsevolodTymofyeyev/example.py
|
TrackerSB/MasterThesis
|
2792203d28d6c7b62f54545344ee6772d2ec5b64
|
[
"MIT"
] | null | null | null |
code/examples/VsevolodTymofyeyev/example.py
|
TrackerSB/MasterThesis
|
2792203d28d6c7b62f54545344ee6772d2ec5b64
|
[
"MIT"
] | null | null | null |
code/examples/VsevolodTymofyeyev/example.py
|
TrackerSB/MasterThesis
|
2792203d28d6c7b62f54545344ee6772d2ec5b64
|
[
"MIT"
] | null | null | null |
import os
from threading import Thread
from typing import List
from aiExchangeMessages_pb2 import SimulationID
def _handle_vehicle(sid: SimulationID, vid: str, requests: List[str]) -> None:
vid_obj = VehicleID()
vid_obj.vid = vid
i = 0
while i < 10:
i += 1
print(sid.sid + ": Test status: " + service.get_status(sid))
print(vid + ": Wait")
sim_state = service.wait_for_simulator_request(sid, vid_obj) # wait()
if sim_state is SimStateResponse.SimState.RUNNING:
print(vid + ": Request data")
request = DataRequest()
request.request_ids.extend(requests)
data = service.request_data(sid, vid_obj, request) # request()
print(data)
print(vid + ": Wait for control")
control = Control()
while not is_pressed("space"): # Wait for the user to trigger manual drive
pass
print(vid + ": Control")
if is_pressed("s"):
control.simCommand.command = Control.SimCommand.Command.SUCCEED
elif is_pressed("f"):
control.simCommand.command = Control.SimCommand.Command.FAIL
elif is_pressed("c"):
control.simCommand.command = Control.SimCommand.Command.CANCEL
else:
accelerate = 0
steer = 0
brake = 0
if is_pressed("up"):
accelerate = 1
if is_pressed("down"):
brake = 1
if is_pressed("right"):
steer = steer + 1
if is_pressed("left"):
steer = steer - 1
control.avCommand.accelerate = accelerate
control.avCommand.steer = steer
control.avCommand.brake = brake
service.control(sid, vid_obj, control) # control()
else:
print(sid.sid + ": The simulation is not running anymore (State: "
+ SimStateResponse.SimState.Name(sim_state) + ").")
print(sid.sid + ": Final result: " + service.get_result(sid))
break
control = Control()
control.simCommand.command = Control.SimCommand.Command.FAIL
service.control(sid, vid_obj, control)
if __name__ == "__main__":
from AIExchangeService import get_service
from aiExchangeMessages_pb2 import SimStateResponse, Control, SimulationID, VehicleID, DataRequest
from keyboard import is_pressed
service = get_service()
# Send tests
sids = service.run_tests("test", "test", "envs/criteriaA.dbc.xml", "envs/environmentA.dbe.xml")
# Interact with a simulation
if not sids:
exit(1)
sid = SimulationID()
sid.sid = sids.sids[0]
ego_requests = ["egoSpeed"]
ego_vehicle = Thread(target=_handle_vehicle, args=(sid, "ego", ego_requests))
ego_vehicle.start()
ego_vehicle.join()
| 36.121951
| 102
| 0.581702
| 319
| 2,962
| 5.253919
| 0.322884
| 0.048329
| 0.114558
| 0.073986
| 0.155131
| 0.155131
| 0.062053
| 0
| 0
| 0
| 0
| 0.007448
| 0.320054
| 2,962
| 81
| 103
| 36.567901
| 0.824727
| 0.035787
| 0
| 0.117647
| 0
| 0
| 0.078975
| 0.016497
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014706
| false
| 0.014706
| 0.102941
| 0
| 0.117647
| 0.117647
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c906663e816567788a872d79ad4e2f03fb4244fb
| 12,019
|
py
|
Python
|
python/loom_viewer/loom_cli.py
|
arao11/pattern_viz
|
3123f19a127c9775fadcca25f83aebfc8dc3b9f9
|
[
"BSD-2-Clause"
] | 34
|
2017-10-18T06:09:16.000Z
|
2022-03-21T18:53:16.000Z
|
python/loom_viewer/loom_cli.py
|
arao11/pattern_viz
|
3123f19a127c9775fadcca25f83aebfc8dc3b9f9
|
[
"BSD-2-Clause"
] | 52
|
2017-10-19T13:35:39.000Z
|
2021-06-03T08:54:55.000Z
|
python/loom_viewer/loom_cli.py
|
arao11/pattern_viz
|
3123f19a127c9775fadcca25f83aebfc8dc3b9f9
|
[
"BSD-2-Clause"
] | 6
|
2018-05-28T06:16:26.000Z
|
2020-08-17T11:49:34.000Z
|
#!/usr/bin/env python
# Copyright (c) 2016 Sten Linnarsson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import *
from mypy_extensions import NoReturn
import sys
import os
import argparse
import logging
import warnings
import loompy
from ._version import __version__
from .loom_expand import LoomExpand
from .loom_datasets import def_dataset_dir, LoomDatasets
from .loom_server import start_server
class VerboseArgParser(argparse.ArgumentParser):
def error(self, message: str) -> NoReturn:
self.print_help()
sys.stderr.write("\nerror: %s\n" % message)
sys.exit(2)
def tile_command(
datasets: LoomDatasets,
filenames: List[str],
projects: List[str],
all_files: bool,
truncate: bool) -> None:
# do not expand tiles more than once for any given filename
matches = set() # type: Set[Tuple[str, str, str]]
filenamesNone = filenames is None
projectsNone = projects is None
logging.warn("""
%s
%s
""" % (filenamesNone, projectsNone))
if all_files:
matches = datasets.list.all_files()
else:
if filenames is not None:
for filename in filenames:
matches |= datasets.list.matching_filenames(filename)
if projects is not None:
for project in projects:
matches |= datasets.list.files_in_project(project)
if not matches:
logging.warn("""
Must explicitly state what to tile! See also:
loom tile --help
To generate tiles for every loom file in the default dataset folder, type:
loom tile --all
To use a different dataset path, use `--dataset-path DATASET_PATH`. Note that
this must be put before the tile command:
loom --dataset-path DATASET_PATH tile [input for tile command]
To generate tiles for any loom file in the default dataset folder that matches
the names of FILE1, FILE2, etc, type:
loom tile FILE1 FILE2
To replace old tiles with new ones, add the -t or --truncate flag
loom tile FILE -t
To generate tiles only for one specific file, even if there are multiple files
with the same name, use the absolute path:
loom tile /path/to/FILE1 FILE2
To tile all files in one or more project folders, type:
loom tile --project PROJECT1 PROJECT2
Combining file and project paths is possible:
loom /path/to/FILE1 FILE2 --project PROJECT
Putting it all together: the following points to a non-default dataset path,
and generates tiles for one specific FILE, as well as all files in PROJECT,
while discarding any previously generated tiles:
loom --dataset-path DATASET_PATH tile /path/to/FILE --project PROJECT -t
""")
else:
for project, filename, file_path in matches:
logging.info("Tiling {file_path}")
datasets.tile(project, file_path, truncate)
def expand_command(
datasets: LoomDatasets,
filenames: List[str],
projects: List[str],
all_files: bool,
clear: bool,
metadata: bool,
attributes: bool,
rows: bool,
cols: bool,
truncate: bool) -> None:
if not (clear or metadata or attributes or rows or cols):
logging.warn("""
`loom expand` pre-generates cache for the loom-viewer, for faster serving.
This is a slow process, so that the command requires that you explicitly state
which cache to generate ("expand"), and for which loom file(s).
See also:
loom expand --help
Currently, the following separate types of cache can be expanded with these flags:
-m, --metadata general metadata
-a, --attributes row and column attributes
-r, --rows rows (genes)
-c, --cols columns (cells, currently not used)
In the following examples, we will expand metadata, attributes and all rows
all at once via -mar.
To expand all loom files matching the name FILE1, FILE2, etc in the default
loom datasets folder, type:
loom expand FILE1 FILE2 -mar
To expand a specific file, even if there are multiple files
with the same name, use the absolute path:
loom tile /path/to/FILE1 FILE2
To use a different dataset path, use `--dataset-path DATASET_PATH`. Note that
this must be put before the tile command:
loom --dataset-path DATASET_PATH expand FILE -mar
To apply expansion to all loom files, use --all or -A:
loom expand -marA
To apply expansion to all loom files in one or more project folders, type:
loom expand --project PROJECT1 PROJECT2 -mar
By default, previously expanded metadata is left alone. To force replacing this
expanded data, use --truncate or -t:
loom expand FILE -marT
To remove ALL previously generated cache (except tiles), use --clear or -C
loom expand FILE -C
Putting it all together: the following points to a non-default dataset path,
finds one specific FILE, as well as all files in PROJECT. For these files,
any existing expanded metadata is first deleted, then new general metadata and
attributes are expanded (but not rows)
while discarding any previously generated tiles:
loom --dataset-path DATASET_PATH expand /path/to/FILE --project PROJECT -maC
""")
return
matches = set() # type: Set[Tuple[str, str, str]]
if all_files:
matches = datasets.list.all_files()
else:
for filename in filenames:
matches |= datasets.list.matching_filenames(filename)
for project in projects:
matches |= datasets.list.files_in_project(project)
for project, filename, file_path in matches:
try:
expand = LoomExpand(project, filename, file_path)
if not expand.closed:
if clear:
expand.clear_metadata()
expand.clear_attributes()
expand.clear_rows()
expand.clear_columns()
if metadata:
expand.metadata(truncate)
if attributes:
expand.attributes(truncate)
if rows:
expand.rows(truncate)
if cols:
expand.columns(truncate)
expand.close()
except Exception as e:
expand.close()
raise e
def parse_args(def_dir: str) -> Any:
parser = VerboseArgParser(description="Loom command-line tool.")
parser.add_argument(
"--debug",
action="store_true",
help="Show verbose debug output (False by default)"
)
parser.add_argument(
"--dataset-path",
help="Path to datasets directory (default: %s)" % def_dir,
nargs='?',
const=def_dir,
default=def_dir
)
subparsers = parser.add_subparsers(title="subcommands", dest="command")
# loom version
version_parser = subparsers.add_parser("version", help="Print version")
# loom server
server_parser = subparsers.add_parser(
"server",
help="Launch loom server (default command)"
)
server_parser.add_argument(
"--show-browser",
help="Automatically launch browser (False by default)",
action="store_true"
)
server_parser.add_argument(
"-p",
"--port",
help="Port",
type=int,
nargs='?',
const=8003,
default=8003
)
# loom tile
tile_parser = subparsers.add_parser("tile", help="Precompute heatmap tiles")
tile_parser.add_argument(
"file",
help="""Loom file(s) to expand.
Expands all files matching the provided file names.
To avoid this, use an absolute path to specify a single file.
""",
nargs='*',
)
tile_parser.add_argument(
"--project",
help="Project(s) for which to expand all files.",
nargs='*',
)
tile_parser.add_argument(
"-A",
"--all",
help="Expand all loom files.",
action="store_true"
)
tile_parser.add_argument(
"-t",
"--truncate",
help="Remove previously expanded tiles if present (false by default)",
action="store_true"
)
# loom expand
expand_help = "Expands data to compressed json files. Processes all matching loom filenames in dataset_path, unless absolute path is passed"
expand_parser = subparsers.add_parser(
"expand",
help=expand_help
)
expand_parser.add_argument(
"file",
help="""Loom file(s) to expand.
Expands all files matching the provided file names.
To avoid this, use an absolute path to specify a single file.
When combined with --clear it clears all expanded files instead.
""",
nargs='*',
)
expand_parser.add_argument(
"--project",
help="Project(s) for which to expand all files (or clear expansion with --clear).",
nargs='*',
)
expand_parser.add_argument(
"-A",
"--all",
help="Expand all loom files (or clear expansion with --clear).",
action="store_true"
)
expand_parser.add_argument(
"-C",
"--clear",
help="Remove previously expanded files.",
action="store_true"
)
expand_parser.add_argument(
"-t",
"--truncate",
help="Replace previously expanded files if present (false by default). Only does something in combination with expansion (-m, -a, -r or -c).",
action="store_true"
)
expand_parser.add_argument(
"-m",
"--metadata",
help="Expand metadata (false by default)",
action="store_true"
)
expand_parser.add_argument(
"-a",
"--attributes",
help="Expand attributes (false by default)",
action="store_true"
)
expand_parser.add_argument(
"-r",
"--rows",
help="Expand rows (false by default)",
action="store_true"
)
expand_parser.add_argument(
"-c",
"--cols",
help="Expand columns (false by default)",
action="store_true"
)
return parser.parse_args()
def main() -> None:
def_dir = def_dataset_dir()
# Create a fake args object with default settings
# to handle the special case of no arguments.
if len(sys.argv) == 1:
args = argparse.Namespace()
setattr(args, "debug", False)
setattr(args, "dataset_path", def_dir)
# handled below
# setattr(args, "port", 8003)
# setattr(args, "command", "server")
# setattr(args, "show_browser", True)
else:
args = parse_args(def_dir)
if args.debug:
logging.basicConfig(
level=logging.DEBUG, format="%(asctime)s - %(levelname)s - %(module)s, %(lineno)d - %(message)s")
else:
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(message)s")
# If only --debug or --dataset-path is passed,
# we still want to default to the server command
if 'command' not in args:
setattr(args, "command", "server")
if 'port' not in args:
setattr(args, "port", 8003)
if 'show_browser' not in args:
setattr(args, "show_browser", True)
if args.debug:
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s - %(module)s, %(lineno)d: %(message)s')
else:
logging.basicConfig(level=logging.INFO, format='%(asctime)s: %(message)s')
if args.command == "version":
print("loom v%s" % __version__)
sys.exit(0)
else:
if args.command == "tile":
logging.warn("test")
datasets = LoomDatasets(args.dataset_path)
tile_command(datasets, args.file, args.project, args.all, args.truncate)
elif args.command == "expand":
datasets = LoomDatasets(args.dataset_path)
expand_command(datasets, args.file, args.project, args.all, args.clear, args.metadata, args.attributes, args.rows, args.cols, args.truncate)
else: # args.command == "server":
start_server(args.dataset_path, args.show_browser, args.port, args.debug)
if __name__ == "__main__":
main()
| 26.473568
| 144
| 0.719611
| 1,708
| 12,019
| 4.986534
| 0.215457
| 0.029705
| 0.033932
| 0.024304
| 0.398145
| 0.358225
| 0.334742
| 0.303393
| 0.291417
| 0.262299
| 0
| 0.004147
| 0.177469
| 12,019
| 453
| 145
| 26.532009
| 0.857374
| 0.150179
| 0
| 0.36246
| 0
| 0.009709
| 0.499115
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016181
| false
| 0.003236
| 0.038835
| 0
| 0.064725
| 0.006472
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c907566de3410b8c828deb59e531487549202dc6
| 1,260
|
py
|
Python
|
test_function.py
|
will-huynh/process_controller
|
e193c80976ef1d35fb9e661425bf609a86a313c8
|
[
"MIT"
] | 1
|
2021-12-25T04:08:53.000Z
|
2021-12-25T04:08:53.000Z
|
test_function.py
|
will-huynh/process_controller
|
e193c80976ef1d35fb9e661425bf609a86a313c8
|
[
"MIT"
] | null | null | null |
test_function.py
|
will-huynh/process_controller
|
e193c80976ef1d35fb9e661425bf609a86a313c8
|
[
"MIT"
] | null | null | null |
import logging
import tcp_log_socket
logging_socket = tcp_log_socket.local_logging_socket(__name__)
logger = logging_socket.logger
#Test method simulating a method with required arguments; division is used to test exception handling
def test_args(div1, div2):
logger.info("Simulating a method with arguments and exceptions.")
quotient = div1 / div2
logger.info("Quotient is: {}".format(quotient))
return quotient
#Test method simulating a method with no required arguments
def test_no_args():
result = True
logger.info("Simulating methods without arguments.")
logger.info("Expected result: {}.".format(result))
return result
#Test method simulating a method with keyworded and optional arguments
def test_keyword(def_num=10, **kwargs):
logger.info("Simulating methods with optional and keyworded arguments.")
allowed_key = "key"
value = False
list_keys = list(kwargs.keys())
logger.info("Default argument is {}.".format(def_num))
for kw in list_keys:
if kw == allowed_key:
logger.info("Keyword found.")
value = kwargs.pop(kw)
logger.info("Keyword and value are {0} : {1}.".format(kw, value))
return (def_num, value)
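# Usage sketch exercising the three test methods (hypothetical inputs):
if __name__ == '__main__':
    test_args(10, 2)           # logs the quotient 5.0
    test_no_args()             # logs and returns True
    test_keyword(5, key=True)  # returns (5, True) once the 'key' keyword is found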
| 37.058824
| 102
| 0.692857
| 164
| 1,260
| 5.182927
| 0.371951
| 0.094118
| 0.070588
| 0.074118
| 0.072941
| 0.072941
| 0
| 0
| 0
| 0
| 0
| 0.008048
| 0.211111
| 1,260
| 33
| 103
| 38.181818
| 0.847082
| 0.18254
| 0
| 0
| 0
| 0
| 0.252515
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115385
| false
| 0
| 0.076923
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c908908fcda77dbed54b6f285d7d03c69d799dc0
| 3,154
|
py
|
Python
|
users/views.py
|
elvinaqa/Amazon-Review-Analyzer-Summarizer-Python-NLP-ML-
|
6c70e84ffbcb8c8fd65a7fe0847e1f0eb779f759
|
[
"Unlicense"
] | 1
|
2020-09-10T11:26:05.000Z
|
2020-09-10T11:26:05.000Z
|
users/views.py
|
elvinaqa/Amazon-Review-Analyzer-Summarizer-Python-NLP-ML-
|
6c70e84ffbcb8c8fd65a7fe0847e1f0eb779f759
|
[
"Unlicense"
] | null | null | null |
users/views.py
|
elvinaqa/Amazon-Review-Analyzer-Summarizer-Python-NLP-ML-
|
6c70e84ffbcb8c8fd65a7fe0847e1f0eb779f759
|
[
"Unlicense"
] | null | null | null |
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm
#####################################################################
from django.http import HttpResponse
from django.contrib.auth import login, authenticate
from .forms import UserRegisterForm
from django.contrib.sites.shortcuts import get_current_site
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.template.loader import render_to_string
from .tokens import account_activation_token
from django.contrib.auth.models import User
from django.core.mail import EmailMessage
def register(request):
if request.method == 'POST':
form = UserRegisterForm(request.POST)
if form.is_valid():
user = form.save(commit=False)
user.is_active = False
user.save()
current_site = get_current_site(request)
mail_subject = 'Activate your account.'
message = render_to_string('acc_active_email.html',{
'user':user,
'domain': current_site.domain,
'uid':urlsafe_base64_encode(force_bytes(user.pk)),
'token':account_activation_token.make_token(user),
})
to_email = form.cleaned_data.get('email')
email = EmailMessage(
mail_subject, message, to=[to_email]
)
email.send()
return render(request, 'users/activation_info.html')
else:
form = UserRegisterForm()
return render(request, 'users/register.html', {'form': form})
@login_required
def profile(request):
if request.method == 'POST':
u_form = UserUpdateForm(request.POST, instance=request.user)
p_form = ProfileUpdateForm(request.POST,
request.FILES,
instance=request.user.profile)
if u_form.is_valid() and p_form.is_valid():
u_form.save()
p_form.save()
            messages.success(request, 'Your account has been updated!')
return redirect('profile')
else:
u_form = UserUpdateForm(instance=request.user)
p_form = ProfileUpdateForm(instance=request.user.profile)
context = {
'u_form': u_form,
'p_form': p_form,
}
return render(request, 'users/profile.html', context)
def activate(request, uidb64, token):
try:
uid = urlsafe_base64_decode(uidb64).decode()
user = User.objects.get(pk=uid)
except(TypeError, ValueError, OverflowError, User.DoesNotExist):
user = None
if user is not None and account_activation_token.check_token(user, token):
user.is_active = True
user.save()
login(request, user)
# return redirect('home')
        return render(request, 'analyzer/home.html', {'message1': 'Successful'})
    else:
        return render(request, 'users/email_confirm_complete.html', {'message1': 'Failed'})
| 38.463415
| 88
| 0.642676
| 354
| 3,154
| 5.562147
| 0.30226
| 0.055866
| 0.043169
| 0.048756
| 0.068055
| 0.041646
| 0
| 0
| 0
| 0
| 0
| 0.005848
| 0.240964
| 3,154
| 81
| 89
| 38.938272
| 0.816625
| 0.007292
| 0
| 0.098592
| 0
| 0
| 0.089216
| 0.026144
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042254
| false
| 0
| 0.197183
| 0
| 0.323944
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9098d28bd2a0a51fc33c4cd5fecc41dc7fc38ec
| 2,196
|
py
|
Python
|
stats/monitor.py
|
pawankaushal/crossbar-examples
|
b6e0cc321bad020045c4fafec091f78abd938618
|
[
"Apache-2.0"
] | 97
|
2016-12-14T16:48:49.000Z
|
2021-09-12T17:48:10.000Z
|
stats/monitor.py
|
pawankaushal/crossbar-examples
|
b6e0cc321bad020045c4fafec091f78abd938618
|
[
"Apache-2.0"
] | 38
|
2016-12-13T09:42:38.000Z
|
2020-07-05T11:58:07.000Z
|
stats/monitor.py
|
pawankaushal/crossbar-examples
|
b6e0cc321bad020045c4fafec091f78abd938618
|
[
"Apache-2.0"
] | 118
|
2016-12-12T21:36:40.000Z
|
2021-11-17T11:49:33.000Z
|
import argparse
from pprint import pformat
import txaio
txaio.use_twisted()
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
class ClientSession(ApplicationSession):
async def onJoin(self, details):
print('MONITOR session joined: {}'.format(details))
xbr_config = self.config.extra['xbr']
# {'market-url': '', 'market-realm': '', 'delegate-key': '../.xbr.key'}
print(xbr_config)
def on_session_join(session_details):
self.log.info('>>>>>> MONITOR : session joined\n{session_details}\n',
session_details=pformat(session_details))
await self.subscribe(on_session_join, 'wamp.session.on_join')
def on_session_stats(session_details, stats):
self.log.info('>>>>>> MONITOR : session stats\n{session_details}\n{stats}\n',
session_details=pformat(session_details), stats=pformat(stats))
await self.subscribe(on_session_stats, 'wamp.session.on_stats')
def on_session_leave(session_id):
self.log.info('>>>>>> MONITOR : session {session_id} left',
session_id=session_id)
await self.subscribe(on_session_leave, 'wamp.session.on_leave')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d',
'--debug',
action='store_true',
help='Enable debug output.')
parser.add_argument('--url',
dest='url',
type=str,
default="ws://localhost:8080/ws",
help='The router URL (default: "ws://localhost:8080/ws").')
parser.add_argument('--realm',
dest='realm',
type=str,
default="realm1",
help='The realm to join (default: "realm1").')
args = parser.parse_args()
if args.debug:
txaio.start_logging(level='debug')
else:
txaio.start_logging(level='info')
runner = ApplicationRunner(url=args.url, realm=args.realm)
runner.run(ClientSession, auto_reconnect=True)
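# Example invocation (assuming a Crossbar router listening on the default
# URL used above):
#   python stats/monitor.py --url ws://localhost:8080/ws --realm realm1 --debug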
| 31.826087
| 89
| 0.583789
| 234
| 2,196
| 5.282051
| 0.337607
| 0.090615
| 0.048544
| 0.043689
| 0.223301
| 0.058252
| 0
| 0
| 0
| 0
| 0
| 0.006382
| 0.28643
| 2,196
| 68
| 90
| 32.294118
| 0.782387
| 0.031421
| 0
| 0.044444
| 0
| 0
| 0.206118
| 0.071529
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.088889
| 0
| 0.177778
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c90c7861eaff4add66e4d61ef78a76a073959d73
| 29,349
|
py
|
Python
|
spirou/sandbox/fits2ramp.py
|
clairem789/apero-utils
|
68ed0136a36b6badeaf15eb20d673052ad79a949
|
[
"MIT"
] | 2
|
2020-10-08T17:03:45.000Z
|
2021-03-09T17:49:44.000Z
|
spirou/sandbox/fits2ramp.py
|
clairem789/apero-utils
|
68ed0136a36b6badeaf15eb20d673052ad79a949
|
[
"MIT"
] | 17
|
2020-09-24T17:35:38.000Z
|
2020-12-11T16:10:13.000Z
|
spirou/sandbox/fits2ramp.py
|
clairem789/apero-utils
|
68ed0136a36b6badeaf15eb20d673052ad79a949
|
[
"MIT"
] | 5
|
2020-04-10T06:41:00.000Z
|
2020-12-16T21:09:14.000Z
|
#!/usr/bin/env python2.7
# Version date : Aug 21, 2018
#
# --> very minor correction compared to previous version. As keywords may change in files through time, when we delete
# a keyword, we first check if the keyword is present rather than "blindly" deleting it
# --> also corrected integer vs float divisions in refpixcorr. This ensures python3 compatibility
#
# Version date : May 29, 2018
#
# --> The first frame is used as a "bias" for all subsequent readouts
# Subsequent frames are corrected for reference pixels
# This significantly improves the quality of the error measurement
# --> The top/bottom reference pixels are always corrected in odd/even manner, not as a constant offset for odd/even columns
# --> We now perform the non-linearity measurement
# --> All the "print" statements have been made consistent with python3
# --> Add the "selfbias" keyword. This option uses the 1st readout as a bias estimate. This allows ref pixel correction per frame
#
# Version date : Mar 23, 2018
#
# --> corrects an error in the ref pixels
# --> Nothing changed to the input syntax compared to previous versions
#
# - accepts both H2RG and H4RG data. The size of the images is determined
# from the calibration files given in input, avoiding hardcoding the size
# of the input images. I removed all references to dim1 and dim2 (x and y size of
# images) as we will always have square images. This is now simply imdim. Imdim can
# only be equal to 2048 or 4096. If not, then something is really wrong and the code exits
# with a message.
#
# - uses pixels on the side of the array and not only top/bottom ones
# filters 1/f noise with side pixels. Important for the H4RG data
#
# - ramp algorithm significantly faster as we took some variable handling out of the big loop. Does not
# change the output values in the end. sx and sx2 are now determined only at the end of the
# loop on image by using the timestamp vector combined with the n variable. Saves ~0.5s per readout
#
# - medians now handle nans properly; this avoids problems in rare cases when a nan appears in the
# ref pixel region. nanmedian exists in python3 but not python2, so I defined the function
# here. When we switch to p3, we can simply delete this function and we won't
# need to modify the code itself. We'll just need : from numpy import nanmedian
#
# - if the bias frame is set entirely to zero (mostly for debugging purpose), then we avoid
# subtracting zeros to the entire image and save ~0.1s per image.
#
# - ref pixel filtering is defined as a function. This was done in two places in the
# code.
#
# - the reference pixel function is much faster thanks to some more clever handling
# of variables.
#
# - the flux in the "mask" region used now uses np.nanmean instead of mean. This avoids
# having a NaN flux measurement in the posemeter. It also avoids problems when writing
# the posemeter values in the header as one cannot have a NaN as a keyword value.
#
# - we now have an ascii output per iteration that tells you how long each frame took to
# process and how long is left before the end of the big loop. On our machine, the
# average for an H2RG image with the "-noerror" keyword (faster) is slightly less than
# 1 s per image.
#
#
# Now includes the following options :
#
# -n=XXX -> Will only perform the ramp fitting on the first XXX readouts of the array
# This can be used to simulate a shorter sequence. This could be useful to get the
# dark that exactly matches the integration time of a given science sequence. Say you
# have a dark of 100 frames but a science sequence of 20 frames, you may want to only use
# the first 20 frames of the dark to get exactly the same statistical properties as in your
# science sequence.
# -cube -> set this to get an output cube with all readouts. Use only if you want to examine the readouts.
# -linearize -> corrects for non-linearity. Leave this keyword out to speed things up. We don't have the linearity coefficients in hand anyway
# -noerror -> do not compute the error on slope. This speeds up the code as we need to read the images only once.
# -noref -> Skip all reference pixel corrections entirely
# -selfbias -> subtract the 1st readout from all subsequent readouts to allow ref pixel correction per frame
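#
# Example invocation (hypothetical file names, mirroring the usage text
# printed further below):
#   python fits2ramp.py outname 20170322140210/H2RG_R01_M01_N*.fits -cube -noerror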
# -*- coding: utf-8 -*-
from scipy import stats
import numpy as np
from array import *
import glob
import os
# import pyfits --> rendered obsolete by the use of the more recent astropy.io.fits
import time
import sys
import scipy.ndimage.filters
from astropy.io import fits as pyfits
from scipy.stats import pearsonr
def nanmedian(data):
# this function returns the median of finite values within
# a vector. This is for python2 only and we will replace by
# the python3 version np.nanmedian that does exactly the same
# thing. When switching to python3, we will simply add :
#
# from numpy import nanmedian
#
# and it should be completely transparent for the rest of the code.
#
data2=np.asarray(data)
g=np.isfinite(data2)
if np.max(g)==False:
return(np.nan)
return(np.median(data2[g]))
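# A minimal sanity check (assuming numpy imported as above):
# nanmedian([1.0, np.nan, 3.0]) returns 2.0, whereas np.median of the
# same vector would return nan.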
def refpixcorr(im,oddeven=False):
# function that corrects with reference pixels on the sides of the H2RG and H4RG.
#
# On the periphery of the arrays, there are 4 pixels that are not light-sensitive
# and that track drifts in the amplifiers. These are reference pixels and they can
# reduce the effective readout noise by a factor of at least 2 if properly used.
#
# The top and bottom pixels of each output (one of 32 vertical "ribbons") see the
# start and end of each readout. To filter noise on a readout timescale, we measure
# the median of the top and bottom reference pixels. We then define a "slope"
# (matrix y_frac) that interpolates the gradient through the light-sensitive
# pixels.
#
# For some arrays (e.g., the H2RG used for the AT4), the odd and even pixels within
# each amplifier differ in behaviour. We therefore measure and correct this "slope"
# independently for odd and even pixels. This is done by setting oddeven=True in the
# function call. The default is oddeven=False
#
# The side (x=0-3 and x=N-4:N-1) of the HxRG arrays see the "faster" 1/f noise that
# affects all amplifiers. We therefore need to subtract the mean of the side reference
# pixels to remove (most of) the 1/f noise. As the reference pixels are themselves
# noisy, we apply a median filter to these pixels before subtracting.
# The size of this running median filter is set with the "medfilterwidth"
# variable.
#
imdim=(np.shape(im))[0]
# x position of the side reference pixels
ref_sides = [0, 1, 2, 3,imdim - 4, imdim - 3, imdim - 2, imdim - 1]
# filtering with ref pixels on either side of image
medfilterwidth = 15 # value used for JWST H2RGs. Could be modified
ref=np.zeros(imdim) # contains the median-filter, mean value of the vertical ref pixels
for xpix in ref_sides:
ref+=scipy.ndimage.filters.median_filter(im[:,xpix], medfilterwidth)/np.size(ref_sides)
# pad the ref pixel value into a imdim x imdim square and subtract from image
im-=np.repeat(ref,imdim).reshape(imdim,imdim) # correct an error, used to be "tile" instead of "repeat", which pads in the wrong direction
# we filter independently the odd and even pixels in the bottom and top reference regions
odd_bottom=np.zeros([imdim,imdim//32],dtype=float) # contains a range from 0 to 1 on odd pixels, 1 at bottom, 0 at top
even_bottom=np.zeros([imdim,imdim//32],dtype=float)
odd_top=np.zeros([imdim,imdim//32],dtype=float)
even_top=np.zeros([imdim,imdim//32],dtype=float)
g_odd_bottom=np.zeros([imdim,imdim//32],dtype=bool) # boolean masks selecting the 4 bottom/top reference rows on odd/even columns
g_even_bottom=np.zeros([imdim,imdim//32],dtype=bool)
g_odd_top=np.zeros([imdim,imdim//32],dtype=bool)
g_even_top=np.zeros([imdim,imdim//32],dtype=bool)
frac=np.asarray(range(imdim))/(imdim-1.0)
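# frac runs linearly from 0 on the bottom row to 1 on the top row; the
# *_bottom and *_top ramps built below blend the bottom and top reference
# medians across each output, separately for odd and even columns.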
for j in range(imdim//64):
odd_bottom[:,j*2+1]=1-frac
even_bottom[:,j*2]=1-frac
odd_top[:,j*2+1]=frac
even_top[:,j*2]=frac
g_odd_bottom[0:4,j*2+1]=True # True on the 4 bottom reference rows of odd columns (and likewise below)
g_even_bottom[0:4,j*2]=True
g_odd_top[imdim-4:imdim,j*2+1]=True
g_even_top[imdim-4:imdim,j*2]=True
for j in range(32): # looping through the 32 outputs
# subtract median value of ref unilluminated pixels
ribbon = im[:,j*imdim//32:(j+1)*imdim//32]
y_even_bottom = nanmedian( ribbon[g_even_bottom])
y_odd_bottom = nanmedian( ribbon[g_odd_bottom])
y_even_top = nanmedian( ribbon[g_even_top])
y_odd_top = nanmedian( ribbon[g_odd_top])
im[:,j*imdim//32:(j+1)*imdim//32]-=( y_even_bottom*even_bottom+y_odd_bottom*odd_bottom+y_odd_top*odd_top+y_even_top*even_top)
return(im)
def patch_shift(im,bias):
# this bit of code checks the Pearson correlation between a row of the image and the bias at column offsets of -1, 0 and +1 to detect, and if needed correct, a one-pixel shift between the two
index=np.asarray(range(4,60,2))
cut1 = 0.2 # max CC for shifts that are invalid
cut2 = 0.9 # min CC for shifts that is valid
ccs = np.zeros(3)
print(np.shape(im))
i=0
for off in range(-1,2):
ccs[i]= (pearsonr(im[0,index],bias[0,off+index]))[0]
i+=1
message = 'Ambiguous Pearson correlation with bias... suspicious data!'
if (ccs[2] >= cut2) and (ccs[1]<=cut1) and (ccs[0]<=cut1):
message='We have a pixel shift problem... we correct it!'
xpix2=np.asarray(range(2048))
xpix=np.asarray(range(2048))
x64=np.asarray(range(64))
for i in range(32):
xpix[i*64:i*64+64]=(i*64)+((x64+(2*(i % 2)-1) ) % 64)
im[:,xpix2]=im[:,xpix]
if (ccs[1] >= cut2) and (ccs[2]<=cut1) and (ccs[0]<=cut1):
message = 'all good, there is no mischievous pixel shift in your data!'
print(message)
return(im)
# will be set to True if selfbias=True. If we use a file for bias (later update?) then this will also
# change the dobias to True
dobias = False
arg=np.asarray(sys.argv)
arg=arg[1:] # first argument is simply the name of the program and needs to be removed
write_cube = sum(arg=='-cube') ==1. # if set, then we will write cube, if not, then we skip this step that may be long
skip_error = sum(arg=='-noerror') ==1. # if set, we skip slope error
skip_ref = sum(arg=='-noref') ==1. # if set, we skip reference pixel corrections
linearize = sum(arg=='-linearize') ==1. # if set, we correct for non-linearity
selfbias = sum(arg=='-selfbias') ==1. # if set, we correct ref pixels on a frame-to-frame basis
nmax_set=False
for argn in arg:
if (argn)[0:3] == '-n=':
nmax_set=True
dim3=int( (argn)[3:] )
# here we remove arguments with a "-"
keep=np.zeros(len(arg))
for i in range(len(arg)):
keep[i] = (arg[i])[0] != '-'
arg=arg[keep ==1] # keep only params not beginning with a "-"
if len(arg)>=1:
odometer = arg[0] # first argument after program and flags is the output name
fic = arg[1:]
if len(fic)>=1:
h = pyfits.getheader(fic[0])
h2=h
mef_flag=0 # file is a MEF flag
cubefits_flag=0 # file is a CUBE flag
if len(fic) ==1:
naxis =h['naxis']
if naxis ==0:
mef_flag=1 # we have a flag to know that the input file is a MEF and that extensions need to be read from there
if naxis==3:
cubefits_flag=1 # this is a cube
exists = np.zeros(len(fic),dtype=bool)
for i in range(len(fic)):
exists[i] = os.path.isfile(fic[i])
if np.sum(exists ==0) !=0:
print('some files given as inputs do not exist')
print('missing file(s) --')
print('')
missing=fic[exists !=1]
for i in range(len(missing)):
print(missing[i])
print('')
print('... you may also have given some erroneous input, double check your inputs dude!')
sys.exit()
if len(sys.argv) <=2:
print('***** !!! warning, something went wrong !!! *****')
print('')
print(' ----- you can provide a list of files as an input -----')
print('')
print('syntax : python fits2ramp.py outname directory/file*.fits -cube -noerror -linearize')
print('')
print('')
print(' the argument after the "outname" must be the files to combine')
print(' with the ramp-fitting algorithm. ex: 20170322140210/H2RG_R01_M01_N08*.fits ')
print(' should also accept *.fits.gz files')
print(' you need at least two files in the wildcard. You can also explicitly')
print(' name the files you combine.')
print(' The syntax would be :')
print(' python fits2ramp.py outname file1.fits file2.fits ... fileN.fits')
print('')
print(' ----- you can also provide a single file that has a MEF format -----')
print('')
print('syntax : python fits2ramp.py outname mef_file*.fits -cube -noerror -linearize')
print('')
print(' if you provide an outname and a single fits file, then we know it is a MEF')
print('')
print(' if you provide a -n=XXXX then only the first XXXX readouts within the MEF')
print('')
print(' will be used for slope fitting')
print(' ---- some more options ----' )
print('')
print(' -cube saves all slices in a cube. This is slower and takes disk space')
print(' -noerror does not compute the slope error. This is faster.' )
print(' -linearize corrects for non-linearity. This is slower but more accurate.')
print('')
print(' If all goes well, the program outputs 2 files: ')
print(' outname.fits ')
print(' ... ext=1, ramp frame' )
print(' ... ext=2, ramp intercept')
print(' ... ext=3, ramp error' )
print(' ... ext=4, ramp # valid frames')
print(' ... everywhere, NaN values trace saturated pixels')
print(' outnamer.fits.gz')
print(' ... cube with as many slices as there are files in the wildcard above')
print(' ... outnamer.fits.gz contains the same info as the files' )
print(' ... this is only done if we pass the "-cube" argument')
print('')
sys.exit()
#################################################################
#################################################################
# We need the size of the image. Should be 2048 or 4096 (H2RG/H4RG)
imdim=(np.shape(pyfits.getdata(fic[0])))[1]
if (imdim!=2048) and (imdim!=4096):
print('')
print('')
print(' something is really wrong with the size of the input image')
print(' the image '+fic[0]+' has a width of :',imdim,' pixel(s)')
print(' and we should only have values of 2048 or 4096 pixels')
print('')
print('')
sys.exit()
# reading the relevant calibrations
#mask = getdata(calibdir+'/mask.fits') # 0/1 mask defining the area of the science array used as pose-meter
mask=np.zeros([imdim,imdim],dtype=float) # dummy ~~~>>> will need to be changed for the H4RG
# this is the region used for the posemeter
# For SPIRou, we will have a binary mask selecting the H-band orders (science and not ref channel)
mask[1912:1938,572:777]=1
mask=np.where(mask ==1)
# non-linearity cube with 4 slices. The linearized flux will be derived from the measured flux with the
# following relation :
# F_lin = a0 + a1*(F_mea - bias) + a2*(F_mea - bias)**2 + a3*(F_mea - bias)**3
# where aN is the Nth slice of the linearity cube
# ... bias is the super-bias
# ... F_lin is the linearised flux
# ... F_mea is the measured flux
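# e.g. with hypothetical coefficients a0=0, a1=1, a2=1e-6, a3=0 and
# (F_mea - bias) = 1000 ADU, F_lin = 1000 + 1e-6*1000**2 = 1001 ADU
# (illustration only, not measured values)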
#linearity = getdata(calibdir+'/non_lin.fits') # we will use files with non-linearity correction here
# This is an operation that may be done if we do not have a bias in hand and want to
# correct non-linearity. Lets consider this under development and set it to False for now
#
linearity_saturation = pyfits.getdata('nonlin.fits')
# Slice 1 - 2nd order term of non-linearity correction
# Slice 2 - 3rd order term of non-linearity correction
linearity = linearity_saturation[0:2,:,:]
# Slice 3 - dynamical range for <20% non-linearity
saturation = linearity_saturation[2,:,:]
if mef_flag==0 and cubefits_flag==0:
if nmax_set == False:
dim3 = len(fic)
else:
if len(fic) < dim3:
print('You requested a ramp of ',dim3,' readouts... ')
print(' ... but you have only ',len(fic),' files')
sys.exit()
if mef_flag==1:
hdulist = pyfits.open(fic[0],memmap=False) ## We will use memmap when CFHT gets rid of BZERO/BSCALE/BLANK header keywords
dims=np.shape(hdulist[1])
if nmax_set == False:
dim3= len(hdulist)-1
else:
if (len(hdulist)-1) < dim3:
print('You requested a ramp of ',dim3,' readouts... ')
print(' ... but you have only ',len(hdulist)-1,' slices in your MEF')
sys.exit()
if cubefits_flag==1:
if nmax_set == False:
dim3 = h['naxis3']
else:
if (h['naxis3']) < dim3:
print('You requested a ramp of ',dim3,' readouts... ')
print(' ... but you have only ',h['naxis3'],' slices in your cube')
sys.exit()
# delete all keywords from the reference file
del_keywords=['DATLEVEL', 'ASICGAIN', 'NOMGAIN', 'AMPRESET', 'KTCREMOV', 'SRCCUR',\
'AMPINPUT', 'V4V3V2V1', 'PDDECTOR', 'CLKOFF', 'NADCS', 'INTTIME',\
'TSTATION', 'SEQNUM_N', 'SEQNUM_M', 'CLOCKING', 'NEXTRAP','NEXTRAL', 'SEQNNAME']
for key in del_keywords:
if key in h: # as keywords may change from version to version, we check if the keyword we want to delete is present
del h[key]
del h['bias*']
timestamp=np.zeros(dim3,dtype=float)
# loop to check image size and populate header with time stamps
for i in range(dim3):
if mef_flag==0 and cubefits_flag==0: # we have N separate files, info is in the ith file header
h_tmp = pyfits.getheader(fic[i])
if 'frmtime' not in h_tmp:
h_tmp['frmtime'] = 5.24288, 'assumed frame time (s)'
if 'inttime' not in h_tmp:
h_tmp['inttime'] = 5.24288*(i+1), 'assumed integration time (s)'
timestamp[i]=h_tmp['inttime']
if cubefits_flag==1: # we have a cube, calculate from FRMTIME
timestamp[i]= (i+1)*h['frmtime'] # sets zero time at the time of reset
if mef_flag==1: # we read the ith extension
h_tmp = hdulist[i+1].header
timestamp[i]=h_tmp['inttime']
if mef_flag==0 and cubefits_flag==0:
order = np.argsort(timestamp) # who knows, the files may not be in the right order! Let's sort them according to their timestamps
fic=fic[order]
timestamp=timestamp[order]
for i in range(dim3):
tag0 = str(i+1)
if len(tag0) < 4:
tag = '0'*(4-len(tag0))+tag0
tag = 'INTT'+tag
h[tag] = (timestamp[i],'Timestamp, '+tag0+'/'+str(dim3))
if mef_flag==1:
write_cube=False
if write_cube:
cube=np.zeros([dim3,imdim,imdim],dtype=float)
print('loading all files in cube')
for i in range(dim3):
print(i+1,'/',len(fic),fic[i])
im=pyfits.getdata(fic[i])
cube[i,:,:] = im
print('writing the cube file --> '+odometer+'r.fits ')
t1 = time.time()
hcube=h2
hcube['NAXIS'] = 3
hcube['NAXIS3'] = dim3
pyfits.writeto(odometer+'r.fits', cube,header=hcube)
# This operation is somewhat long and could lead to back-log of files on a slow machine
# ... for the code development, we time it. This may be removed at a later point.
print('Duration of file writing : '+str(float(time.time()-t1))+' s')
# zipping the .fits file. Normally this could be done within pyfits.writeto, but it's much, much slower
os.system('gzip -f '+odometer+'r.fits &')
print('done writing the cube file --> '+odometer+'r.fits')
print(' compressing file in background ... ')
del cube # removing cube from memory to make things lighter... unclear if necessary
else:
print('we do not write the cube file for this ramp')
# placeholders for some arithmetic for the linear fit
#sx = 0#np.zeros([dim2,dim1])
#sx2 = 0#np.zeros([dim2,dim1])
sy = np.zeros([imdim,imdim],dtype=float)
n = np.zeros([imdim,imdim],dtype=np.int16)
sxy = np.zeros([imdim,imdim],dtype=float)
fmask = np.zeros(dim3,dtype=float)
# mask for pixels that are valid
goodmask = np.full((imdim,imdim),True,dtype=bool)
# when a pixel goes above saturation, it remains invalid for the rest of the ramp
if skip_error == False:
savname=['']*dim3
print(mef_flag,cubefits_flag,linearize)
t_start=time.time()
for i in range(dim3):
t0=time.time()
print(i+1,'/',dim3,' ~~~> Computing slope')
if mef_flag==0 and cubefits_flag==0: # this is a set with N files
im = pyfits.getdata(fic[i])
if mef_flag==1:
im=hdulist[i+1].data # reading the Nth extension
if cubefits_flag==1:
if i ==0:
bigcube=pyfits.getdata(fic[0]) # that's dangerous as it may overfill memory
im=bigcube[i,:,:]
im = np.array(im,dtype='float')
if selfbias and (i ==0):
bias = np.array(im)
print('setting 1st extension as a bias file')
dobias=True
goodmask = (im <= saturation)*goodmask
if dobias:
if selfbias:
print('bias subtraction with 1st readout')
else:
print('bias subtraction with provided bias file')
im-=bias
if linearize:
print('applying non-lin correction')
# first we linearize the data by applying the non-linearity coefficients and bias correction
for j in range(2):
im += linearity[j,:,:]*(im)**(j+2)
if selfbias and (skip_ref == False):
print('as we applied self-bias, we correct ref pixels')
im=refpixcorr(im)
n+= goodmask
fmask[i]=np.nanmean( im[mask])
# m*=goodmask # starting now, only the product of the two is needed. saves one multiplication
# Actually, best not fill what used to be saturated elements in the array with
# 0, which is what this did. Then, if the errslope calculation wants to check
# im <= saturation as it used to do, it will come up with the wrong answer.
# Since the first check for im <= saturation (about 20 lines above) does so
# before linearity correction and this check would be after, they could also
# come up with different answers though, unless the linearity function is
# is guaranteed to apply a correction that keeps saturation values at the same
# ADU. Since we already have n[], when the errslope calculation happens, it
# uses that, now with a simple "goodmask = (n > i)" for each i on that pass.
sy[goodmask]+= im[goodmask]#*goodmask
sxy[goodmask]+=(im[goodmask]*timestamp[i])
# here we save the non-linearity corrected images as python npz files
# we could just dump everything into a big cube to be used in the slope
# error determination. We opt to write these files to disk to avoid overfilling
# the memory. This should be safer for very large number of reads.
#
# We cannot simply re-read the fits files as the "im" variable saved in the npz has been corrected for
# non-linearity, which is NOT the case for the .fits.gz. We save the NPZ only if the data is linearized
#
# We also corrected for the bias regions of the detector, so a temporary file is necessary if we want to properly compute slope error
# and cannot afford to keep everything in memory. Keeping everything in memory may be fine for small datasets, but we want
# to avoid having a code that crashes for long sequences or on machines with less memory!
if skip_error == False:
savname[i]='.tmp'+str(i)+'.npz'
np.savez(savname[i],im=im) # this file is temporary and will be deleted after computing the slope error
dt=(time.time()-t_start)/(i+1.0)
print('dt[last image] ','{:5.2f}'.format(time.time()-t0),'s; dt[mean/image] ','{:5.2f}'.format(dt),'s; estimated time left '+'{:3.0f}'.format(np.floor((dim3-i)*dt/60))+'m'+'{:2.0f}'.format(np.floor((dim3-i)*dt % 60))+'s')
# we now have these variables outside the loop. We keep n that contains the
# number of valid reads, and directly interpolate the vector with the cumulative
# sum of timestamp and timestamp**2. Previously, we added these values to the sx and sx2
# matrices for each frame. This operation is much, much faster and equivalent.
sx=np.where(n>0,(np.cumsum(timestamp))[n-1],0)
sx2=np.where(n>0,(np.cumsum(timestamp**2))[n-1],0)
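# Equivalence check: because a saturated pixel stays invalid, a pixel with
# n valid readouts accumulated timestamp[0]+...+timestamp[n-1], which is
# exactly (np.cumsum(timestamp))[n-1]; the same reasoning gives sx2.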
if mef_flag==1:
hdulist.close()
fmask-=fmask[0]
for i in range(dim3):
tag0 = str(i+1)
if len(tag0) < 4:
tag = '0'*(4-len(tag0))+tag0
tag = 'POSE'+tag
h[tag] = (fmask[i],'Posemeter, '+tag0+'/'+str(len(fic)))
a = np.zeros([imdim,imdim],dtype=float)+np.nan # slope, NaN if not enough valid readouts
b = np.zeros([imdim,imdim],dtype=float)+np.nan # intercept
valid=n>1 # only valid where there is more than one good readout
b[valid] = (sx*sxy-sx2*sy)[valid]/(sx**2-n*sx2)[valid] # algebra of the linear fit
a[valid] = (sy-n*b)[valid]/sx[valid]
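# Closed-form least squares per pixel: with S = n*sx2 - sx**2, the usual
# formulas are b = (sx2*sy - sx*sxy)/S and a = (n*sxy - sx*sy)/S; the two
# expressions above are the same formulas, rearranged.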
# For the sake of consistency, we fix the slope, error and intercept to NaN for
# pixels that have 0 or 1 valid (i.e., not saturated) values and for which
# one cannot determine a valid slope
errslope = np.zeros([imdim,imdim],dtype=float)+np.nan
goodmask = np.full((imdim,imdim),True,dtype=bool)
if skip_error == False:
varx2 = np.zeros([imdim,imdim],dtype=float)
vary2 = np.zeros([imdim,imdim],dtype=float)
xp = np.zeros([imdim,imdim],dtype=float)
valid = (n>2)
xp[valid]=sx[valid]/n[valid] # used in the determination of error below
print('we now compute the standard error on the slope')
for i in range(dim3):
# we read the npz as this file has been linearized (if the -linearize keyword has been set)
# and we subtracted the reference regions on the array
data=np.load(savname[i])
os.system('rm '+savname[i])
im=data['im']
goodmask = (n > i)
yp = b+a*timestamp[i]
print(i+1,'/',dim3,' ~~~> Computing slope error')
varx2+= ((timestamp[i]-xp)**2)*goodmask # we multiply by goodmask so that only
vary2+= ((im-yp)**2)*goodmask
valid*=(varx2!=0) # avoid dividing by zero
errslope[valid] = np.sqrt(vary2[valid]/(n[valid]-2))/np.sqrt(varx2[valid])
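# This is the textbook standard error of a fitted slope:
# SE = sqrt( sum((y - y_fit)**2) / (n-2) ) / sqrt( sum((x - x_mean)**2) )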
# the temporary npz files were already deleted right after being read, above
else:
print(' We do not calculate the error on slope.')
print(' This is faster and intended for debugging but ')
print(' ultimately we will want to compute slope error ')
print(' for all files')
h['satur1']=(nanmedian(saturation),'median saturation limit in ADU')
h['satur2']=(nanmedian(saturation)/max(timestamp),'median saturation limit in ADU/s')
dfmask = fmask[1:]-fmask[0:-1] # flux received between readouts
dtimestamp = timestamp[1:]+0.5*(timestamp[-1]-timestamp[0])/(len(timestamp)-1) # mid-time of Nth readout
### we estimate the RON by checking the slope error in pixels receiving little flux
### as the orders cover ~50% of the science array, we take the median slope error of
### pixels that are below the median slope. We assume that these pixels have an RMS that is
### dominated by readout noise (TO BE CONFIRMED).
### we also clip pixels that are above 3x the median RMS
pseudodark = 0.0 # (a < np.median(a))*(errslope < 3*np.median(errslope))
ron_estimate = 0.0 #np.median(errslope[pseudodark])*(max(timestamp)-min(timestamp)) # converted into ADU instead of ADU/s
#### Standard FITS Keywords BITPIX = 16 / 16bit
h['BSCALE']=(1.0 , 'Scale factor')
#### FITS keyword related to the detector
h['RON_EST']=(ron_estimate , '[ADU] read noise estimate')
h['NSUBEXPS']=(len(fic) , 'Total number of sub-exposures of 5.5s ')
#h['TMID']= (np.sum(dtimestamp*dfmask)/np.sum(dfmask) , '[s] Flux-weighted mid-exposure time ' )
#h['CMEAN']= ( np.mean(dfmask)/(timestamp[1]-timestamp[0]), '[ADU/s] Average count posemeter' )
if skip_ref == False:
a=refpixcorr(a,oddeven=True)
a=np.float32(a)
if dobias:
# we subtracted the bias from all frames, we need to add it to the intercept
b+=bias
b=np.float32(b)
errslope=np.float32(errslope)
hdu1 = pyfits.PrimaryHDU()
hdu1.header = h
hdu1.header['NEXTEND'] = 4
hdu2 = pyfits.ImageHDU(a)
hdu2.header['UNITS'] = ('ADU/S','Slope of fit, flux vs time')
hdu2.header['EXTNAME'] = ('slope','Slope of fit, flux vs time')
hdu3 = pyfits.ImageHDU(b)
hdu3.header['UNITS'] = ('ADU','Intercept of the pixel/time fit.')
hdu3.header['EXTNAME'] = ('intercept','Intercept of the pixel/time fit.')
hdu4 = pyfits.ImageHDU(errslope)
hdu4.header['UNITS'] = ('ADU/S','Formal error on slope fit')
hdu4.header['EXTNAME'] = ('errslope','Formal error on slope fit')
hdu5 = pyfits.ImageHDU(n)
hdu5.header['UNITS'] = ('Nimages','N readouts below saturation')
hdu5.header['EXTNAME'] = ('count','N readouts below saturation')
new_hdul = pyfits.HDUList([hdu1, hdu2, hdu3, hdu4, hdu5])
# just to avoid an error message with writeto
if os.path.isfile(odometer+'.fits'):
print('file : '+odometer+'.fits exists, we are overwriting it')
os.system('rm '+odometer+'.fits')
new_hdul.writeto(odometer +'.fits', overwrite=True)
print('Elapsed time for entire fits2ramp : '+str(float(time.time()-t_start))+' s')
| 40.20411
| 225
| 0.665474
| 4,689
| 29,349
| 4.137983
| 0.182981
| 0.007731
| 0.011751
| 0.015771
| 0.149925
| 0.102561
| 0.072772
| 0.058857
| 0.027625
| 0.027625
| 0
| 0.024119
| 0.213125
| 29,349
| 729
| 226
| 40.259259
| 0.816056
| 0.461958
| 0
| 0.185864
| 0
| 0
| 0.257759
| 0.002402
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007853
| false
| 0.002618
| 0.026178
| 0
| 0.034031
| 0.225131
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c912b5b1a08a02d640553311c19b5c840ef97729
| 4,651
|
py
|
Python
|
web_app/api_service.py
|
shayan-taheri/sql_python_deep_learning
|
ceb2c41bcb1fed193080f64ba4da018d76166222
|
[
"MIT"
] | 23
|
2017-11-29T17:33:30.000Z
|
2021-10-15T14:51:12.000Z
|
web_app/api_service.py
|
shayan-taheri/sql_python_deep_learning
|
ceb2c41bcb1fed193080f64ba4da018d76166222
|
[
"MIT"
] | 1
|
2017-10-12T11:23:08.000Z
|
2017-10-12T11:23:08.000Z
|
web_app/api_service.py
|
isabella232/sql_python_deep_learning
|
ceb2c41bcb1fed193080f64ba4da018d76166222
|
[
"MIT"
] | 16
|
2017-12-21T08:55:09.000Z
|
2021-03-21T20:17:40.000Z
|
from api import app, BAD_PARAM, STATUS_OK, BAD_REQUEST
from flask import request, jsonify, abort, make_response, render_template, json
import sys
from lung_cancer.connection_settings import get_connection_string, TABLE_SCAN_IMAGES, TABLE_GIF, TABLE_MODEL, TABLE_FEATURES, LIGHTGBM_MODEL_NAME, DATABASE_NAME, NUMBER_PATIENTS
from lung_cancer.lung_cancer_utils import get_patients_id, get_patient_id_from_index, select_entry_where_column_equals_value, get_features, get_lightgbm_model, prediction
import pyodbc
import cherrypy
from paste.translogger import TransLogger
def run_server():
# Enable WSGI access logging via Paste
app_logged = TransLogger(app)
# Mount the WSGI callable object (app) on the root directory
cherrypy.tree.graft(app_logged, '/')
# Set the configuration of the web server
cherrypy.config.update({
'engine.autoreload_on': True,
'log.screen': True,
'log.error_file': "cherrypy.log",
'server.socket_port': 5000,
'server.socket_host': '0.0.0.0',
'server.thread_pool': 50, # 10 is default
})
# Start the CherryPy WSGI web server
cherrypy.engine.start()
cherrypy.engine.block()
# Connection
connection_string = get_connection_string()
conn = pyodbc.connect(connection_string)
cur = conn.cursor()
# Model
model = get_lightgbm_model(TABLE_MODEL, cur, LIGHTGBM_MODEL_NAME)
# Functions
@app.route("/")
def index():
cherrypy.log("CHERRYPY LOG: /")
return render_template('index.html')
@app.route('/gif/<patient_index>')
def patient_gif(patient_index):
patient_index = int(patient_index)
if patient_index > NUMBER_PATIENTS:
abort(BAD_REQUEST)
cherrypy.log("CHERRYPY LOG: /gif/<patient_index>")
gif_url = manage_gif(patient_index)
return make_response(jsonify({'status': STATUS_OK, 'gif_url': gif_url}), STATUS_OK)
@app.route('/predict/<patient_index>')
def predict_patient(patient_index):
patient_index = int(patient_index)
if patient_index > NUMBER_PATIENTS:
abort(BAD_REQUEST)
cherrypy.log("CHERRYPY LOG: /predict/<patient_index>")
prob = manage_prediction(patient_index)
return make_response(jsonify({'status': STATUS_OK, 'prob': prob}), STATUS_OK)
@app.route('/patient_info', methods=['POST'])
def patient_info():
cherrypy.log("CHERRYPY LOG: /patient_info")
patient_index = manage_request_patient_index(request.form['patient_index'])
gif_url = manage_gif(patient_index)
return render_template('patient.html', patient_index=patient_index, gif_url=gif_url)
@app.route('/patient_prob', methods=['POST'])
def patient_prob():
cherrypy.log("CHERRYPY LOG: /patient_prob")
patient_index = manage_request_patient_index(request.form['patient_index'])
prob = manage_prediction_store_procedure(patient_index)
gif_url = manage_gif(patient_index)
return render_template('patient.html', patient_index=patient_index, prob=round(prob,2), gif_url=gif_url)
def is_integer(s):
try:
int(s)
return True
except ValueError:
return False
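# e.g. is_integer("42") -> True, is_integer("4.2") -> False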
def manage_request_patient_index(patient_request):
patient1 = "Anthony Embleton".lower()
patient2 = "Ana Fernandez".lower()
if patient_request.lower() in patient1:
patient_index = 1
elif patient_request.lower() in patient2:
patient_index = 175
else:
if is_integer(patient_request):
patient_index = int(patient_request)
if patient_index > NUMBER_PATIENTS:
patient_index = NUMBER_PATIENTS - 1
else:
patient_index = 7
return patient_index
def manage_gif(patient_index):
patient_id = get_patient_id_from_index(TABLE_SCAN_IMAGES, cur, patient_index)
print(patient_id)
resp = select_entry_where_column_equals_value(TABLE_GIF, cur, 'patient_id', patient_id)
gif_url = resp[1]
print("gif_url: ",gif_url)
return gif_url
def manage_prediction(patient_index):
patient_id = get_patient_id_from_index(TABLE_SCAN_IMAGES, cur, patient_index)
feats = get_features(TABLE_FEATURES, cur, patient_id)
probability_cancer = prediction(model, feats)
prob = float(probability_cancer)*100
return prob
def manage_prediction_store_procedure(patient_index):
query = "DECLARE @PredictionResultSP FLOAT;"
query += "EXECUTE " + DATABASE_NAME + ".dbo.PredictLungCancer @PatientIndex = ?, @ModelName = " + \
LIGHTGBM_MODEL_NAME + ", @PredictionResult = @PredictionResultSP;"
cur.execute(query, patient_index)
prob = cur.fetchone()[0]
return prob
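# Note: LIGHTGBM_MODEL_NAME is concatenated straight into the T-SQL text
# above; if the model name ever came from user input it would be safer to
# bind it as a parameter, as is already done for @PatientIndex.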
if __name__ == "__main__":
run_server()
conn.close()
| 33.221429
| 176
| 0.723285
| 598
| 4,651
| 5.301003
| 0.255853
| 0.155205
| 0.033123
| 0.0347
| 0.334385
| 0.295899
| 0.24511
| 0.24511
| 0.24511
| 0.204416
| 0
| 0.007299
| 0.175231
| 4,651
| 140
| 177
| 33.221429
| 0.819082
| 0.045367
| 0
| 0.176471
| 0
| 0
| 0.138989
| 0.015794
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107843
| false
| 0
| 0.078431
| 0
| 0.294118
| 0.019608
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9144a2b1a0cbf40a3d765da71a5f9435588a292
| 335
|
py
|
Python
|
10-blood/scripts/bloodMeasure.py
|
antl-mipt-ru/get
|
c914bd16131639e1af4452ae7351f2554ef83ce9
|
[
"MIT"
] | null | null | null |
10-blood/scripts/bloodMeasure.py
|
antl-mipt-ru/get
|
c914bd16131639e1af4452ae7351f2554ef83ce9
|
[
"MIT"
] | null | null | null |
10-blood/scripts/bloodMeasure.py
|
antl-mipt-ru/get
|
c914bd16131639e1af4452ae7351f2554ef83ce9
|
[
"MIT"
] | 1
|
2021-10-11T16:24:32.000Z
|
2021-10-11T16:24:32.000Z
|
import bloodFunctions as blood
import time
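# Sample the ADC for 60 seconds, then release the SPI bus and persist the
# samples together with the start/finish timestamps; the finally block
# guarantees the completion message prints even if sampling fails.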
try:
samples = []
blood.initSpiAdc()
start = time.time()
while (time.time() - start) < 60:
samples.append(blood.getAdc())
finish = time.time()
blood.deinitSpiAdc()
blood.save(samples, start, finish)
finally:
print("Blood measure script finished")
| 17.631579
| 42
| 0.641791
| 38
| 335
| 5.657895
| 0.552632
| 0.111628
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007782
| 0.232836
| 335
| 19
| 42
| 17.631579
| 0.828794
| 0
| 0
| 0
| 0
| 0
| 0.08631
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.153846
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|