| column | dtype |
| --- | --- |
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | int64 |
| qsc_code_cate_encoded_data | null |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
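
A quick way to sanity-check rows against this schema is to load the export and compare dtypes; a minimal sketch, assuming the table came from a Parquet shard (the file name is a placeholder):

```python
# Minimal inspection sketch; "shard.parquet" is a placeholder path.
import pandas as pd

df = pd.read_parquet("shard.parquet")
print(df.dtypes)  # should line up with the column/dtype table above
print(df[["hexsha", "size", "ext", "lang", "max_stars_count"]].head())
```
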
hexsha: 66d8ba6f365049a80533d4986a5c2cf0bb77bfb0 | size: 2,561 | ext: py | lang: Python
max_stars_repo_path: config/jupyter/jupyterhub_config.py | max_stars_repo_name: mhwasil/jupyterhub-on-gcloud | max_stars_repo_head_hexsha: 9cfe935772d7599fa36c5b998cebb87c17e24277
max_stars_repo_licenses: ["MIT"] | max_stars_count: 3 | max_stars_repo_stars_event_min_datetime: 2018-10-06T20:35:08.000Z | max_stars_repo_stars_event_max_datetime: 2019-03-02T08:04:52.000Z
max_issues_repo_path: config/jupyter/jupyterhub_config.py | max_issues_repo_name: mhwasil/jupyterhub-on-gcloud | max_issues_repo_head_hexsha: 9cfe935772d7599fa36c5b998cebb87c17e24277
max_issues_repo_licenses: ["MIT"] | max_issues_count: 4 | max_issues_repo_issues_event_min_datetime: 2019-05-15T11:36:43.000Z | max_issues_repo_issues_event_max_datetime: 2019-07-23T09:34:45.000Z
max_forks_repo_path: config/jupyter/jupyterhub_config.py | max_forks_repo_name: mhwasil/jupyterhub-on-gcloud | max_forks_repo_head_hexsha: 9cfe935772d7599fa36c5b998cebb87c17e24277
max_forks_repo_licenses: ["MIT"] | max_forks_count: 2 | max_forks_repo_forks_event_min_datetime: 2020-01-09T21:03:44.000Z | max_forks_repo_forks_event_max_datetime: 2020-11-22T16:47:00.000Z
content:
c = get_config()
c.JupyterHub.ip = u'127.0.0.1'
c.JupyterHub.port = 8000
c.JupyterHub.cookie_secret_file = u'/srv/jupyterhub/jupyterhub_cookie_secret'
c.JupyterHub.db_url = u'/srv/jupyterhub/jupyterhub.sqlite'
#c.JupyterHub.proxy_auth_token = u'/srv/jupyterhub/proxy_auth_token'
c.ConfigurableHTTPProxy.auth_token = u'/srv/jupyterhub/proxy_auth_token'
c.JupyterHub.spawner_class = 'systemdspawner.SystemdSpawner'
c.SystemdSpawner.user_workingdir = '/home/{USERNAME}'
#c.JupyterHub.config_file = '/home/admin/jupyterhub_config.py'
# Limit memory and cpu usage for each user
c.SystemdSpawner.mem_limit = '0.5G'
c.SystemdSpawner.cpu_limit = 0.5
# Create a private /tmp to isolate each user's temporary files
c.SystemdSpawner.isolate_tmp = True
# Disable or enable user sudo
c.SystemdSpawner.disable_user_sudo = False
# Readonly
c.SystemdSpawner.readonly_paths = None
# Readwrite path
#c.SystemdSpawner.readwrite_paths = None
# use jupyterlab
c.Spawner.cmd = ['jupyter-labhub']
c.Spawner.default_url = '/tree'
# set default_shell
c.SystemdSpawner.default_shell = '/bin/bash'
c.Authenticator.admin_users = {'admin', 'mrc-grader'}
c.Authenticator.whitelist = {'admin', 'mhm_wasil', 'instructor1',
'instructor2', 'student1', 'student2', 'student3',
'mrc-grader', 'wtus-grader'}
c.LocalAuthenticator.group_whitelist = {'mrc-group'}
#c.LocalAuthenticator.group_whitelist = {'mrc-group', 'wtus-group'}
# instructor1 and instructor2 have access to a shared server:
c.JupyterHub.load_groups = {
'mrc-group': [
'instructor1',
'instructor2'
]
#,
#'wtus-student-group': [
# 'instructor2'
#]
}
service_names = ['shared-mrc-notebook', 'shared-wtus-notebook']
service_ports = [9998, 9999]
group_names = ['mrc-group']
#group_names = ['mrc-student-group', 'wtus-student-group']
# start the notebook server as a service
c.JupyterHub.services = [
{
'name': service_names[0],
'url': 'http://127.0.0.1:{}'.format(service_ports[0]),
'command': [
'jupyterhub-singleuser',
'--group={}'.format(group_names[0]),
'--debug',
],
'user': 'mrc-grader',
'cwd': '/home/mrc-grader'
}
#,
#{
# 'name': service_names[1],
# 'url': 'http://127.0.0.1:{}'.format(service_ports[1]),
# 'command': [
# 'jupyterhub-singleuser',
# '--group={}'.format(group_names[1]),
# '--debug',
# ],
# 'user': 'wtus-grader',
# 'cwd': '/home/wtus-grader'
#}
]
| 31.617284
| 78
| 0.643108
| 306
| 2,561
| 5.245098
| 0.362745
| 0.061682
| 0.034891
| 0.011215
| 0.196885
| 0.196885
| 0.145794
| 0.085981
| 0.085981
| 0
| 0
| 0.023245
| 0.193674
| 2,561
| 80
| 79
| 32.0125
| 0.753995
| 0.335025
| 0
| 0
| 0
| 0
| 0.286312
| 0.092648
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
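
The jupyterhub_config.py in the row above calls get_config(), which JupyterHub injects when it loads the file, so it cannot be executed standalone; a minimal sanity check, assuming the file sits at the repository path shown, is to byte-compile it:

```python
# Syntax-only check: get_config() is resolved by JupyterHub at load time,
# so byte-compiling is as far as a standalone check can go.
import py_compile

py_compile.compile("config/jupyter/jupyterhub_config.py", doraise=True)
```
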
hexsha: 66d95353965e38496015e85b754a89803b392d87 | size: 11,908 | ext: py | lang: Python
max_stars_repo_path: legacy/Environment.py | max_stars_repo_name: LaoKpa/reinforcement_trader | max_stars_repo_head_hexsha: 1465731269e6d58900a28a040346bf45ffb5cf97
max_stars_repo_licenses: ["MIT"] | max_stars_count: 7 | max_stars_repo_stars_event_min_datetime: 2020-09-28T23:36:40.000Z | max_stars_repo_stars_event_max_datetime: 2022-02-22T02:00:32.000Z
max_issues_repo_path: legacy/Environment.py | max_issues_repo_name: LaoKpa/reinforcement_trader | max_issues_repo_head_hexsha: 1465731269e6d58900a28a040346bf45ffb5cf97
max_issues_repo_licenses: ["MIT"] | max_issues_count: 4 | max_issues_repo_issues_event_min_datetime: 2020-11-13T18:48:52.000Z | max_issues_repo_issues_event_max_datetime: 2022-02-10T01:29:47.000Z
max_forks_repo_path: legacy/Environment.py | max_forks_repo_name: lzcaisg/reinforcement_trader | max_forks_repo_head_hexsha: 1465731269e6d58900a28a040346bf45ffb5cf97
max_forks_repo_licenses: ["MIT"] | max_forks_count: 3 | max_forks_repo_forks_event_min_datetime: 2020-11-23T17:31:59.000Z | max_forks_repo_forks_event_max_datetime: 2021-04-08T10:55:03.000Z
content:
import datetime
import warnings
import pandas as pd
import numpy as np
from MongoDBUtils import *
from scipy.optimize import fsolve
import pymongo
TRADING_FEE = 0.008
EARLIEST_DATE = datetime.datetime(2014, 10, 17)
LATEST_DATE = datetime.datetime(2019, 10, 17)
# In any case, we shouldn't know today's or future values;
# ONLY PROVIDE CALCULATED RESULTS
# Handled by both Environment and Actors
class Environment():
def __init__(self):
self.client = pymongo.MongoClient("mongodb+srv://lzcai:raspberry@freecluster-q4nkd.gcp.mongodb.net/test?retryWrites=true&w=majority")
self.db = self.client["testing"]
def getOneRecord(self, todayDate, date, col_name="S&P 500"):
'''
:param todayDate:
:param date:
:param col_name:
:return: e.g.
{
'_id': ObjectId('5de7325e05597fc4f7b09fad'),
'Date': datetime.datetime(2019, 9, 10, 0, 0),
'Price': 2979.39, 'Open': 2971.01,
'High': 2979.39,
'Low': 2957.01,
'Vol': 0,
'Change': 0.0003
}
'''
if date >= todayDate:
return
collection = self.db[col_name]
query = {"Date": date}
result = collection.find_one(query)
return result
def getAllRecord(self, todayDate, col_name="S&P 500"):
pass
def getRecordFromDateList(self, todayDate, dateList, col_name="S&P 500"):
collection = self.db[col_name]
resultList = []
for date in dateList:
if date >= todayDate:
continue
query = {"Date": date}
result = collection.find_one(query)
if result:
resultList.append(result)
return resultList
def getRecordFromStartLength(self, todayDate, startDate, length, col_name="S&P 500"): # Return Sorted List of Dict
collection = self.db[col_name]
resultList = []
for i in range(length):
newDate = startDate + datetime.timedelta(days=i)
if newDate >= todayDate:
break
query = {"Date": newDate}
result = collection.find_one(query)
if result:
resultList.append(result)
return resultList
def getRecordFromStartLengthByETFList(self, todayDate, startDate, length, etfList):
'''
:param startDate:
:param length:
:param etfList: ["S&P 500", "DAX"]
:return: A Dict
{
"S&P 500": [{one record}, {another record}],
"DAX":[{...}, {...}],
...}
'''
if not isinstance(etfList, list):
warnings.warn("Environment/getRecordFromStartLengthByETFList() Warning: etfList is not List")
return None
resultDict = {}
for etf in etfList:
if etf == "CASH":
continue
else:
etfRecordList = []
collection = self.db[etf]
for i in range(length):
newDate = startDate + datetime.timedelta(days=i)
if newDate >= todayDate:
break
query = {"Date": newDate}
result = collection.find_one(query)
if result:
etfRecordList.append(result)
resultDict[etf] = etfRecordList
return resultDict
def getRecordFromEndLengthByETFList(self, todayDate, endDate, length, etfList):
'''
        :param endDate:
:param length:
:param etfList: ["S&P 500", "DAX"]
:return: A Dict
{
"S&P 500": [{one record}, {another record}],
"DAX":[{...}, {...}],
...}
'''
if not isinstance(etfList, list):
warnings.warn("Environment/getRecordFromStartLengthByETFList() Warning: etfList is not List")
return None
resultDict = {}
for etf in etfList:
if etf == "CASH":
continue
else:
etfRecordList = []
collection = self.db[etf]
for i in range(length):
newDate = endDate - datetime.timedelta(days=i)
if newDate >= todayDate:
continue
query = {"Date": newDate}
result = collection.find_one(query)
if result:
etfRecordList.append(result)
resultDict[etf] = etfRecordList
return resultDict
def getPriceByETFList(self, todayDate, date, etfList): # Get PRICE only! Not the full record
'''
:param date:
:param etfList:
:return: A df like this:
Value
Name
Hang Seng 30
S&P 500 40
STI NaN
Shanghai 50
'''
if not isinstance(etfList, list):
warnings.warn("Environment/getRecordFromETFList() Warning: etfList is not List")
return None
resultDF = pd.DataFrame(etfList, columns=["Name"]).set_index('Name', drop=True)
resultDF['Value'] = np.nan
for etf in etfList:
if etf == "CASH":
resultDF['Value'][etf] = 1
else:
                collection = self.db[etf]
if date >= todayDate:
continue
query = {"Date": date}
result = collection.find_one(query)
if result:
resultDF['Value'][etf] = result['Price']
return resultDF
def reallocateAndGetAbsoluteReward(self, oldPortfolio, newPortfolio):
'''
oldPortfolio: {
"portfolioDict": {"S&P 500": 0.3, "Hang Seng":0.5} -> 0.2 Cash
"date":
"value":
}
newPortfolio: {
"portfolioDict":
"date":
}
:returns: {
oldCurrentValue: xxx,
newCurrentValue: xxx,
deltaValue: xxx,
portfolio_df: portfolio_df
}
'''
# 1. Check whether the input is legit
if (
("portfolioDict" not in oldPortfolio) or
("date" not in oldPortfolio) or
("value" not in oldPortfolio)
):
warnings.warn("Environment/calculateAbsoluteReward() Warning: Input of oldPortfolio is NOT LEGIT")
return 0
if (
("portfolioDict" not in newPortfolio) or
("date" not in newPortfolio)
):
warnings.warn("Environment/calculateAbsoluteReward() Warning: Input of newPortfolio NOT LEGIT")
return 0
# 2. Check whether the portfolioDict is a dictionary
if not isinstance(oldPortfolio['portfolioDict'], dict):
warnings.warn(
"Environment/calculateAbsoluteReward() Warning: oldPortfolio['portfolioDict'] is not a dictionary")
return 0
if not isinstance(newPortfolio['portfolioDict'], dict):
warnings.warn(
"Environment/calculateAbsoluteReward() Warning: newPortfolio['portfolioDict'] is not a dictionary")
return 0
'''
portfolio_df:[
oldRatio, newRatio, oldPastValue, oldStockHeld, oldCurrentValue, oldCurrentRatio,
deltaRatio, deltaStockHeld, newCurrentValue
]
'''
# 3. Clean the ratio: >1: Normalize; <1: Cash Out
oldRatio_df = pd.DataFrame.from_dict(oldPortfolio['portfolioDict'], orient='index', columns=['ratio'])
newRatio_df = pd.DataFrame.from_dict(newPortfolio['portfolioDict'], orient='index', columns=['ratio'])
oldRatio_df = oldRatio_df.append(pd.DataFrame(index=['CASH'], data={'ratio': np.nan}))
newRatio_df = newRatio_df.append(pd.DataFrame(index=['CASH'], data={'ratio': np.nan}))
if oldRatio_df['ratio'].sum() > 1:
warnings.warn(
"Environment/calculateAbsoluteReward() Warning: oldRatio_df['ratio'].sum() > 1, Auto-Normalized")
oldRatio_df = oldRatio_df / oldRatio_df['ratio'].sum()
elif oldRatio_df['ratio'].sum() < 1:
oldRatio_df['ratio']['CASH'] = 1 - oldRatio_df['ratio'].sum()
if newRatio_df['ratio'].sum() > 1:
warnings.warn(
"Environment/calculateAbsoluteReward() Warning: newRatio_df['ratio'].values().sum() > 1, Auto-Normalized")
newRatio_df = newRatio_df / newRatio_df['ratio'].sum()
elif newRatio_df['ratio'].sum() < 1:
newRatio_df['ratio']['CASH'] = 1 - newRatio_df['ratio'].sum()
portfolio_df = pd.merge(oldRatio_df, newRatio_df, left_index=True, right_index=True, how='outer')
portfolio_df.columns = ['oldRatio', 'newRatio']
portfolio_df = portfolio_df.fillna(0)
# 4. Calculate the current value of the stocks: [oldPastValue, oldStockHeld, oldCurrentValue, oldCurrentRatio]
portfolio_df['oldPastValue'] = portfolio_df.apply(lambda row: row.oldRatio * oldPortfolio['value'], axis=1)
etfList = list(portfolio_df.index)
        # Pass the real-world date as todayDate to disable the today check
        # (the original call omitted todayDate), and take the Value column as a Series
        portfolio_df['oldPrice'] = self.getPriceByETFList(datetime.datetime.now(), oldPortfolio['date'], etfList)['Value']
        portfolio_df['newPrice'] = self.getPriceByETFList(datetime.datetime.now(), newPortfolio['date'], etfList)['Value']
portfolio_df['oldStockHeld'] = portfolio_df['oldPastValue'].div(portfolio_df['oldPrice'].values)
portfolio_df['oldCurrentValue'] = portfolio_df['oldStockHeld'].mul(portfolio_df['newPrice'].values)
portfolio_df['oldCurrentRatio'] = portfolio_df['oldCurrentValue'] / portfolio_df['oldCurrentValue'].sum()
# 5. Calculate the deltas [deltaRatio, deltaStockHeld, newStockHeld]
portfolio_df['deltaRatio'] = portfolio_df['newRatio'].sub(portfolio_df['oldCurrentRatio'], fill_value=0)
def equation(n):
left = np.multiply(portfolio_df['oldStockHeld'] + n, portfolio_df['newPrice'])
right = portfolio_df['newRatio'] * (
np.dot(portfolio_df['newPrice'], portfolio_df['oldStockHeld']) - TRADING_FEE * np.dot(
portfolio_df['newPrice'], np.absolute(n)))
return left - right
a0 = np.zeros(portfolio_df['oldStockHeld'].shape)
n = fsolve(equation, a0)
portfolio_df['deltaStockHeld'] = n
portfolio_df['newStockHeld'] = portfolio_df['oldStockHeld'] + portfolio_df['deltaStockHeld']
portfolio_df['newCurrentValue'] = portfolio_df['newStockHeld'].mul(portfolio_df['newPrice'])
        # 6. Return results
oldPastValueSum = portfolio_df['oldPastValue'].sum()
newCurrentValueSum = portfolio_df['newCurrentValue'].sum()
return {
"oldPastValue": oldPastValueSum,
"newCurrentValue": newCurrentValueSum,
"deltaValue": newCurrentValueSum - oldPastValueSum,
"portfolio_df": portfolio_df
}
def getFuturePercentile(self, todayDate, delta, col_name="S&P 500"): # Delta includes todayDate!
        # 1. Get all future results and calculate the percentile using getRecordFromStartLength
# Disable the today_check by passing real-world date
resultList = self.getRecordFromStartLength(datetime.datetime.now(), todayDate, delta, col_name=col_name)
# 2. Transform the resultList into dataframe
df = pd.DataFrame(resultList)
todayRank = df['Price'].rank(method = 'average')[0] # The smaller the value, the smaller the rank
        todayPercentile = (todayRank-1) / (df.shape[0]-1) # -1 maps it onto [0, 1], since ranks start at 1
# The greater the percentile, the worse the performance in the future
return todayPercentile
| 36.527607
| 141
| 0.570037
| 1,161
| 11,908
| 5.761413
| 0.226529
| 0.069069
| 0.008222
| 0.020631
| 0.380625
| 0.311855
| 0.311855
| 0.237405
| 0.22395
| 0.204814
| 0
| 0.01911
| 0.318861
| 11,908
| 325
| 142
| 36.64
| 0.805573
| 0.169718
| 0
| 0.477778
| 0
| 0.005556
| 0.175005
| 0.061722
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061111
| false
| 0.005556
| 0.038889
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
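
A hypothetical usage sketch for the Environment class in the row above; it assumes a reachable MongoDB deployment whose "testing" database holds an "S&P 500" collection keyed by "Date" (the dates below are arbitrary examples):

```python
# Usage sketch only: requires the MongoDB instance hard-coded in __init__.
import datetime

env = Environment()
today = datetime.datetime(2019, 9, 11)
record = env.getOneRecord(today, datetime.datetime(2019, 9, 10), col_name="S&P 500")
print(record)  # None unless the queried date is strictly before todayDate and present in the DB
```
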
hexsha: 66d9e2205d4a01f644f0a6147e2760e0d6b2de38 | size: 579 | ext: py | lang: Python
max_stars_repo_path: examples/Titanic/titanic.py | max_stars_repo_name: mlflow/mlflow-torchserve | max_stars_repo_head_hexsha: 91663b630ef12313da3ad821767faf3fc409345b
max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 40 | max_stars_repo_stars_event_min_datetime: 2020-11-13T02:08:10.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-27T07:41:57.000Z
max_issues_repo_path: examples/Titanic/titanic.py | max_issues_repo_name: Ideas2IT/mlflow-torchserve | max_issues_repo_head_hexsha: d6300fb73f16d74ee2c7718c249faf485c4f3b62
max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 23 | max_issues_repo_issues_event_min_datetime: 2020-11-16T11:28:01.000Z | max_issues_repo_issues_event_max_datetime: 2021-09-23T11:28:24.000Z
max_forks_repo_path: examples/Titanic/titanic.py | max_forks_repo_name: Ideas2IT/mlflow-torchserve | max_forks_repo_head_hexsha: d6300fb73f16d74ee2c7718c249faf485c4f3b62
max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 15 | max_forks_repo_forks_event_min_datetime: 2020-11-13T10:25:25.000Z | max_forks_repo_forks_event_max_datetime: 2022-02-01T10:13:20.000Z
content:
import torch.nn as nn
class TitanicSimpleNNModel(nn.Module):
def __init__(self):
super().__init__()
self.linear1 = nn.Linear(12, 12)
self.sigmoid1 = nn.Sigmoid()
self.linear2 = nn.Linear(12, 8)
self.sigmoid2 = nn.Sigmoid()
self.linear3 = nn.Linear(8, 2)
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
lin1_out = self.linear1(x)
sigmoid_out1 = self.sigmoid1(lin1_out)
sigmoid_out2 = self.sigmoid2(self.linear2(sigmoid_out1))
return self.softmax(self.linear3(sigmoid_out2))
| 30.473684
| 64
| 0.62867
| 76
| 579
| 4.605263
| 0.407895
| 0.068571
| 0.057143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059633
| 0.246978
| 579
| 18
| 65
| 32.166667
| 0.743119
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.066667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
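
A minimal smoke test for the TitanicSimpleNNModel class in the row above: a batch of 4 samples with the 12 input features the first linear layer expects.

```python
# Smoke-test sketch; requires only PyTorch and the class defined above.
import torch

model = TitanicSimpleNNModel()
x = torch.randn(4, 12)
probs = model(x)
print(probs.shape)         # torch.Size([4, 2])
print(probs.sum(dim=1))    # each row sums to 1 because of the softmax
```
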
hexsha: 66dcca39ba0172f5d72111b99f2df6a26ed3cb02 | size: 6,431 | ext: py | lang: Python
max_stars_repo_path: src/Datasets.py | max_stars_repo_name: fauxneticien/bnf_cnn_qbe-std | max_stars_repo_head_hexsha: ab7dcb9c9d3d8969f1f17aaa87b7337d3ccfcc30
max_stars_repo_licenses: ["MIT"] | max_stars_count: 4 | max_stars_repo_stars_event_min_datetime: 2021-03-26T17:18:59.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-21T18:28:56.000Z
max_issues_repo_path: src/Datasets.py | max_issues_repo_name: fauxneticien/bnf_cnn_qbe-std | max_issues_repo_head_hexsha: ab7dcb9c9d3d8969f1f17aaa87b7337d3ccfcc30
max_issues_repo_licenses: ["MIT"] | max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2021-11-02T17:29:46.000Z | max_issues_repo_issues_event_max_datetime: 2021-11-02T17:29:46.000Z
max_forks_repo_path: src/Datasets.py | max_forks_repo_name: fauxneticien/bnf_cnn_qbe-std | max_forks_repo_head_hexsha: ab7dcb9c9d3d8969f1f17aaa87b7337d3ccfcc30
max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2020-11-11T05:04:55.000Z | max_forks_repo_forks_event_max_datetime: 2020-11-11T05:04:55.000Z
content:
import os
import torch
import numpy as np
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from scipy.spatial.distance import cdist
import logging
class STD_Dataset(Dataset):
"""Spoken Term Detection dataset."""
def __init__(self, root_dir, labels_csv, query_dir, audio_dir, apply_vad = False, max_height = 100, max_width = 800):
"""
Args:
root_dir (string): Absolute path to dataset directory with content below
labels_csv (string): Relative path to the csv file with query and test pairs, and labels
(1 = query in test; 0 = query not in test).
query_dir (string): Relative path to directory with all the audio queries.
audio_dir (string): Relative path to directory with all the test audio.
"""
if isinstance(labels_csv, dict):
# Supplying separate csv files for positive and negative labels
pos_frame = pd.read_csv(os.path.join(root_dir, labels_csv['positive_labels']))
neg_frame = pd.read_csv(os.path.join(root_dir, labels_csv['negative_labels']))
# Randomly down-sample neg examples to same number of positive examples
pos_frame = pos_frame.sample(frac = labels_csv['pos_sample_size'], replace = True)
neg_frame = neg_frame.sample(n = pos_frame.shape[0])
self.qtl_frame = pd.concat([pos_frame, neg_frame], axis = 0).sample(frac = 1)
else:
# If a single CSV file, then just read that in
self.qtl_frame = pd.read_csv(os.path.join(root_dir, labels_csv))
self.query_dir = os.path.join(root_dir, query_dir)
self.audio_dir = os.path.join(root_dir, audio_dir)
self.apply_vad = apply_vad
self.max_height = max_height
self.max_width = max_width
if apply_vad is True:
            # If using voice activity detection, we expect the same directory structure
# and file names as feature files for .npy files containing voice activity
# detection (VAD) labels (0 = no speech activity, 1 = speech activity)
# in a 'vad_labels' directory
self.vad_query_dir = os.path.join(root_dir, 'vad_labels', query_dir)
self.vad_audio_dir = os.path.join(root_dir, 'vad_labels', audio_dir)
# Get filenames in audio and query directories
q_files = os.listdir(self.vad_query_dir)
a_files = os.listdir(self.vad_audio_dir)
# Get length of non-zero values in files
q_vlens = np.array([ len(np.flatnonzero(np.load(os.path.join(self.vad_query_dir, f)))) for f in q_files ])
a_vlens = np.array([ len(np.flatnonzero(np.load(os.path.join(self.vad_audio_dir, f)))) for f in a_files ])
# Get files (without .npy extensions) for which there are no non-zero values
zero_qs = [ os.path.splitext(x)[0] for x in np.take(q_files, np.where(q_vlens == 0)).flatten() ]
zero_as = [ os.path.splitext(x)[0] for x in np.take(a_files, np.where(a_vlens == 0)).flatten() ]
if(len(zero_qs) > 0):
logging.info(" Following queries removed from dataset (insufficient frames after VAD): %s" % (", ".join(zero_qs)))
if(len(zero_as) > 0):
logging.info(" Following references removed from dataset (insufficient frames after VAD): %s" % (", ".join(zero_as)))
            # Discard irrelevant files from the labels
self.qtl_frame = self.qtl_frame[~self.qtl_frame['query'].isin(zero_qs)]
self.qtl_frame = self.qtl_frame[~self.qtl_frame['reference'].isin(zero_as)]
def __len__(self):
return len(self.qtl_frame)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
query_name = self.qtl_frame.iloc[idx, 0]
test_name = self.qtl_frame.iloc[idx, 1]
qt_label = self.qtl_frame.iloc[idx, 2]
# Get features where query = M x f, test = N x f, where M, N number of frames and f number of features
query_feats = np.load(os.path.join(self.query_dir, query_name + ".npy"), allow_pickle=True)
test_feats = np.load(os.path.join(self.audio_dir, test_name + ".npy"), allow_pickle=True)
if self.apply_vad is True:
query_vads = np.load(os.path.join(self.vad_query_dir, query_name + ".npy"), allow_pickle=True)
test_vads = np.load(os.path.join(self.vad_audio_dir, test_name + ".npy"), allow_pickle=True)
# Keep only frames (rows, axis = 0) where voice activity detection by rVAD has returned non-zero (i.e. 1)
query_feats = np.take(query_feats, np.flatnonzero(query_vads), axis = 0)
test_feats = np.take(test_feats, np.flatnonzero(test_vads), axis = 0)
# Create standardised Euclidean distance matrix of dimensions M x N
qt_dists = cdist(query_feats, test_feats, 'seuclidean', V = None)
# Range normalise matrix to [-1, 1]
qt_dists = -1 + 2 * ((qt_dists - qt_dists.min())/(qt_dists.max() - qt_dists.min()))
# Get indices to downsample or pad M x N matrix to max_height x max_width (default 100 x 800)
def get_keep_indices(dim_size, dim_max):
if dim_size <= dim_max:
# no need to downsample if M or N smaller than max_height/max_width
return np.arange(0, dim_size)
else:
# if bigger, return evenly spaced indices for correct height/width
return np.round(np.linspace(0, dim_size - 1, dim_max)).astype(int)
ind_rows = get_keep_indices(qt_dists.shape[0], self.max_height)
ind_cols = get_keep_indices(qt_dists.shape[1], self.max_width)
qt_dists = np.take(qt_dists, ind_rows, axis = 0)
qt_dists = np.take(qt_dists, ind_cols, axis = 1)
        # Create a max_height x max_width matrix pre-filled with the min distance, then fill relevant cells with dist values
temp_dists = np.full((self.max_height, self.max_width), qt_dists.min(), dtype='float32')
temp_dists[:qt_dists.shape[0], :qt_dists.shape[1]] = qt_dists
        # Reshape to (1 x H x W) to feed into a ConvNet with 1 input channel
dists = torch.Tensor(temp_dists).view(1, self.max_height, self.max_width)
label = torch.Tensor([qt_label])
sample = {'query': query_name, 'reference': test_name, 'dists': dists, 'labels': label}
return sample
| 51.448
| 133
| 0.640647
| 958
| 6,431
| 4.102296
| 0.232777
| 0.028499
| 0.033079
| 0.024936
| 0.31374
| 0.284478
| 0.246819
| 0.207125
| 0.175064
| 0.09771
| 0
| 0.011684
| 0.254704
| 6,431
| 124
| 134
| 51.862903
| 0.808262
| 0.263412
| 0
| 0.028986
| 0
| 0
| 0.063239
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057971
| false
| 0
| 0.101449
| 0.014493
| 0.231884
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
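
A hypothetical wiring of the STD_Dataset above into a DataLoader; all paths are placeholders and must point at real feature/label files laid out as the docstring describes.

```python
# DataLoader sketch; root_dir/labels_csv/query_dir/audio_dir are placeholders.
from torch.utils.data import DataLoader

dataset = STD_Dataset(
    root_dir="data/std",
    labels_csv="labels.csv",
    query_dir="queries",
    audio_dir="references",
)
loader = DataLoader(dataset, batch_size=8, shuffle=True)
batch = next(iter(loader))
print(batch["dists"].shape)   # torch.Size([8, 1, 100, 800]) with the defaults
```
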
hexsha: 66de338a8afcfc34368f70df12c0187b512a7430 | size: 3,209 | ext: py | lang: Python
max_stars_repo_path: dmz/store.py | max_stars_repo_name: yuvipanda/edit-stats | max_stars_repo_head_hexsha: fb096715f18df999b4af4fb116e6c4130f24c2ec
max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: dmz/store.py | max_issues_repo_name: yuvipanda/edit-stats | max_issues_repo_head_hexsha: fb096715f18df999b4af4fb116e6c4130f24c2ec
max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: dmz/store.py | max_forks_repo_name: yuvipanda/edit-stats | max_forks_repo_head_hexsha: fb096715f18df999b4af4fb116e6c4130f24c2ec
max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
"""Implements a db backed storage area for intermediate results"""
import sqlite3
class Store(object):
"""
Represents an sqlite3 backed storage area that's vaguely key value
modeled for intermediate storage about metadata / data for metrics
about multiple wikis that have some underlying country related basis
"""
_initial_sql_ = [
'CREATE TABLE IF NOT EXISTS meta (key, value);',
'CREATE UNIQUE INDEX IF NOT EXISTS meta_key ON meta(key);',
'CREATE TABLE IF NOT EXISTS wiki_meta (wiki, key, value);',
'CREATE UNIQUE INDEX IF NOT EXISTS wiki_meta_key ON wiki_meta(wiki, key);',
'CREATE TABLE IF NOT EXISTS country_info (wiki, country, key, value);',
'CREATE UNIQUE INDEX IF NOT EXISTS country_info_key ON country_info(wiki, country, key);'
]
def __init__(self, path):
"""Initialize a store at the given path.
Creates the tables required if they do not exist"""
self.db = sqlite3.connect(path)
for sql in Store._initial_sql_:
self.db.execute(sql)
def set_meta(self, key, value):
"""Set generic metadata key value, global to the store"""
self.db.execute("INSERT OR REPLACE INTO meta VALUES (?, ?)", (key, value))
self.db.commit()
def get_meta(self, key):
"""Get generic metadata key value, global to the store"""
try:
cur = self.db.cursor()
cur.execute("SELECT value from meta WHERE key = ?", (key, ))
            row = cur.fetchone()
            return row[0]
finally:
cur.close()
def set_wiki_meta(self, wiki, key, value):
"""Set wiki specific meta key value"""
self.db.execute("INSERT OR REPLACE INTO wiki_meta VALUES (?, ?, ?)", (wiki, key, value))
self.db.commit()
    def get_wiki_meta(self, wiki, key):
"""Get wiki specific meta key value"""
try:
cur = self.db.cursor()
cur.execute("SELECT value from wiki_meta WHERE wiki = ? AND key = ?", (wiki, key, ))
            row = cur.fetchone()
            return row[0]
finally:
cur.close()
def set_country_info(self, wiki, country, key, value):
"""Set a country and wiki specific key and value"""
self.db.execute("INSERT OR REPLACE INTO country_info VALUES (?, ?, ?, ?)", (wiki, country, key, value))
self.db.commit()
def set_country_info_bulk(self, wiki, key, country_dict):
"""Bulk insert a dictionary of country specific key and value.
The dictionary should be of form {'country': 'value'}
"""
        insert_data = [(wiki, k, key, v) for (k, v) in country_dict.items()]
self.db.executemany("INSERT OR REPLACE INTO country_info VALUES (?, ?, ?, ?)", insert_data)
self.db.commit()
def get_country_info(self, wiki, country, key):
"""Get a country and wiki specific value for a given key"""
try:
cur = self.db.cursor()
cur.execute("SELECT value from country_info WHERE wiki = ? AND country = ?AND key = ?",
(wiki, country, key, ))
            row = cur.fetchone()
            return row[0]
finally:
cur.close()
| 38.662651
| 111
| 0.600499
| 422
| 3,209
| 4.469194
| 0.234597
| 0.059385
| 0.034995
| 0.040297
| 0.530223
| 0.416755
| 0.347296
| 0.274655
| 0.136797
| 0.116119
| 0
| 0.00262
| 0.286382
| 3,209
| 82
| 112
| 39.134146
| 0.820961
| 0.229355
| 0
| 0.423077
| 0
| 0
| 0.313577
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.019231
| 0
| 0.269231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
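
A usage sketch for the Store class above (an in-memory database for illustration; the keys and values are arbitrary examples):

```python
# Exercise the key/value helpers end to end.
store = Store(":memory:")
store.set_meta("last_run", "2014-01-01")
print(store.get_meta("last_run"))                       # -> 2014-01-01
store.set_country_info_bulk("enwiki", "edits", {"US": 120, "DE": 45})
print(store.get_country_info("enwiki", "US", "edits"))  # -> 120
```
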
hexsha: 66e356546289b5293424a7a6ad3ffb4afce031ec | size: 7,074 | ext: py | lang: Python
max_stars_repo_path: main.py | max_stars_repo_name: usdot-its-jpo-data-portal/metadata-query-function | max_stars_repo_head_hexsha: 589e5df691fab82e264ce74196dd797b9eb17f5e
max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: main.py | max_issues_repo_name: usdot-its-jpo-data-portal/metadata-query-function | max_issues_repo_head_hexsha: 589e5df691fab82e264ce74196dd797b9eb17f5e
max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: main.py | max_forks_repo_name: usdot-its-jpo-data-portal/metadata-query-function | max_forks_repo_head_hexsha: 589e5df691fab82e264ce74196dd797b9eb17f5e
max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2021-12-14T18:00:20.000Z | max_forks_repo_forks_event_max_datetime: 2021-12-14T18:00:20.000Z
content:
import boto3
import dateutil
import glob
import json
import logging
import os
import queue
import time
from queries import MetadataQueries
USE_LOCAL_DATA = True # whether to load data from S3 (false) or locally (true)
LOCAL_DATA_REPOSITORY = "s3data/usdot-its-cvpilot-public-data" # path to local directory containing s3 data
### Query to run
METADATA_QUERY = 'query13_listOfLogFilesBefore'
### Data source configuration settings
PREFIX_STRINGS = ["wydot/BSM/2018/12", "wydot/BSM/2019/01", "wydot/BSM/2019/02", "wydot/BSM/2019/03", "wydot/BSM/2019/04", "wydot/TIM/2018/12", "wydot/TIM/2019/01", "wydot/TIM/2019/02", "wydot/TIM/2019/03", "wydot/TIM/2019/04"]
S3_BUCKET = "usdot-its-cvpilot-public-data"
def lambda_handler(event, context):
if USE_LOCAL_DATA:
print("NOTE: Using local data in directory '%s'" % LOCAL_DATA_REPOSITORY)
# Create a list of analyzable S3 files
s3_client = boto3.client('s3')
s3_file_list = []
for prefix in PREFIX_STRINGS:
matched_file_list = list_s3_files_matching_prefix(s3_client, prefix)
print("Queried for S3 files matching prefix string '%s'. Found %d matching files." % (prefix, len(matched_file_list)))
print("Matching files: [%s]" % ", ".join(matched_file_list))
s3_file_list.extend(matched_file_list)
metadataQueries = MetadataQueries()
perform_query(s3_client, s3_file_list, metadataQueries, METADATA_QUERY)
return
def perform_query(s3_client, s3_file_list, query_object, query_function):
total_records = 0
total_records_in_timeframe = 0
total_records_not_in_timeframe = 0
file_num = 1
query_start_time = time.time()
invalid_s3_files = []
for filename in s3_file_list:
file_process_start_time = time.time()
print("============================================================================")
print("Analyzing file (%d/%d) '%s'" % (file_num, len(s3_file_list), filename))
print("Query being performed: %s" % str(METADATA_QUERY))
file_num += 1
record_list = extract_records_from_file(s3_client, filename)
records_in_timeframe = 0
records_not_in_timeframe = 0
for record in record_list:
total_records += 1
if getattr(query_object, query_function)(record):
records_in_timeframe += 1
if METADATA_QUERY == 'query11_invalidS3FileCount' and filename not in invalid_s3_files:
invalid_s3_files.append(filename)
else:
records_not_in_timeframe += 1
print("Records satisfying query constraints found in this file: \t%d" % records_in_timeframe)
print("Total records found satisfying query constraints so far: \t\t%d" % total_records_in_timeframe)
print("Records NOT found satisfying query constraints: \t\t\t\t%d" % records_not_in_timeframe)
print("Total records NOT found satisfying query constraints so far: \t\t\t%d" % total_records_not_in_timeframe)
time_now = time.time()
print("Time taken to process this file: \t\t\t%.3f" % (time_now - file_process_start_time))
time_elapsed = (time_now - query_start_time)
avg_time_per_file = time_elapsed/file_num
avg_time_per_record = time_elapsed/total_records
est_time_remaining = avg_time_per_file * (len(s3_file_list) - file_num)
print("Time elapsed so far: \t\t\t\t\t%.3f" % time_elapsed)
print("Average time per file: \t\t\t\t\t%.3f" % avg_time_per_file)
print("Average time per record: \t\t\t\t%.6f" % avg_time_per_record)
print("Estimated time remaining: \t\t\t\t%.3f" % est_time_remaining)
total_records_in_timeframe += records_in_timeframe
total_records_not_in_timeframe += records_not_in_timeframe
print("============================================================================")
print("Querying complete.")
### Query-specific output
if hasattr(query_object, 'earliest_generated_at'):
print("Earliest record_generated_at: %s" % query_object.earliest_generated_at)
if hasattr(query_object, 'latest_generated_at'):
print("Latest record_generated_at: %s" % query_object.latest_generated_at)
if METADATA_QUERY == 'query11_invalidS3FileCount':
print("Invalid s3 file count: %d" % len(invalid_s3_files))
invalid_s3_file_out = open('invalid_s3_file_list.txt', 'w')
invalid_s3_file_out.write("%s" % "\n".join(invalid_s3_files))
print("Invalid S3 files written to 'invalid_s3_file_list.txt'")
if METADATA_QUERY == 'query13_listOfLogFilesBefore':
print("Invalid log file count: %d" % len(query_object.log_file_list))
invalid_log_file_list_out = open('invalid_log_file_list.txt', 'w')
invalid_log_file_list_out.write("%s" % "\n".join(query_object.log_file_list.keys()))
print("Invalid S3 files written to 'invalid_log_file_list.txt'")
print("Total number of records found satisfying query constraints: %d (Total number of records not found satisfying query constraints: %d" % (total_records_in_timeframe, total_records_not_in_timeframe))
### Returns a list of records from a given file
def extract_records_from_file(s3_client, filename):
if USE_LOCAL_DATA:
with open(filename, 'r') as f:
return f.readlines()
else:
s3_file = s3_client.get_object(
Bucket=S3_BUCKET,
Key=filename,
)
return list(s3_file['Body'].iter_lines()) ### iter_lines() is significantly faster than read().splitlines()
### Returns filenames from an S3 list files (list_objects) query
def list_s3_files_matching_prefix(s3_client, prefix_string):
if USE_LOCAL_DATA:
try:
files_and_directories = glob.glob(LOCAL_DATA_REPOSITORY+"/"+prefix_string+"/**/*", recursive=True)
files_only = []
for filepath in files_and_directories:
if os.path.isfile(filepath):
files_only.append(filepath)
return files_only
except FileNotFoundError as e:
return []
else:
response = list_s3_objects(s3_client, prefix_string)
filenames = []
if response.get('Contents'):
[filenames.append(item['Key']) for item in response.get('Contents')]
while response.get('NextContinuationToken'):
response = list_s3_objects(s3_client, prefix_string, response.get('NextContinuationToken'))
if response.get('Contents'):
[filenames.append(item['Key']) for item in response.get('Contents')]
return filenames
def list_s3_objects(s3_client, prefix_string, continuation_token=None):
if continuation_token:
return s3_client.list_objects_v2(
Bucket=S3_BUCKET,
Prefix=prefix_string,
ContinuationToken=continuation_token,
)
else:
return s3_client.list_objects_v2(
Bucket=S3_BUCKET,
Prefix=prefix_string,
)
if __name__ == "__main__":
lambda_handler(None, None)
| 46.235294
| 227
| 0.669918
| 931
| 7,074
| 4.789474
| 0.201933
| 0.009868
| 0.009419
| 0.037677
| 0.393137
| 0.230994
| 0.184571
| 0.130971
| 0.058309
| 0.058309
| 0
| 0.024844
| 0.209075
| 7,074
| 152
| 228
| 46.539474
| 0.772118
| 0.052446
| 0
| 0.155039
| 0
| 0.007752
| 0.253555
| 0.073342
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03876
| false
| 0
| 0.069767
| 0
| 0.170543
| 0.186047
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 66e36f3c188b5158455460f11322fdc4021ffe06 | size: 1,070 | ext: py | lang: Python
max_stars_repo_path: example_config/SecretConfig.py | max_stars_repo_name: axiegamingph-dev/discordaxieqrbot | max_stars_repo_head_hexsha: fac9b3f325b98d21ece12445ec798c125d06f788
max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: example_config/SecretConfig.py | max_issues_repo_name: axiegamingph-dev/discordaxieqrbot | max_issues_repo_head_hexsha: fac9b3f325b98d21ece12445ec798c125d06f788
max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: example_config/SecretConfig.py | max_forks_repo_name: axiegamingph-dev/discordaxieqrbot | max_forks_repo_head_hexsha: fac9b3f325b98d21ece12445ec798c125d06f788
max_forks_repo_licenses: ["MIT"] | max_forks_count: 2 | max_forks_repo_forks_event_min_datetime: 2022-01-13T18:45:26.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-03T11:50:43.000Z
content:
Managers = ['Shim', 'Mike', 'Ryan', 'Kevin', 'Wessa', 'ser0wl']
# Google spreadsheet ID
ISKO_SPREADSHEET_ID = ''
# the list of names with discord ID
ISKO_DiscordAccount = 'DiscordAccount!A2:B100'
# the list of Names, ronin address, ronin private keys
# eg:
# Name | Address | Privatekey
# Isko1 | ronin:8213789127387543adfgsasdkjsd... | 0x0666c1234567890...
# Isko2 | ronin:8213789127387543adfgsasdkjsd... | 0x0666c1234567890...
# Isko3 | ronin:8213789127387543adfgsasdkjsd... | 0x0666c1234567890...
# note: Name should map to the ISKO_DiscordAccount values
ISKO_Accounts = 'Isko!A2:C100'
# list of names that can request qr code on behalf of that person.
# eg:
# Representative | IskoName
# Isko1 | Isko1
# Isko1 | Isko2
# this means Isko1 can request code for Isko1 and Isko2 using the !qrof Isko1 and !qrof Isko2.
ISKO_Representative = 'Representative!A2:B100'
# Put Your Discord Bot Token Here
DiscordBotToken_Prod = ''
DiscordBotToken_Test = ''
DiscordBotToken = DiscordBotToken_Prod
| 33.4375
| 94
| 0.699065
| 119
| 1,070
| 6.210084
| 0.537815
| 0.024357
| 0.044655
| 0.037889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138824
| 0.205607
| 1,070
| 31
| 95
| 34.516129
| 0.730588
| 0.67757
| 0
| 0
| 0
| 0
| 0.256098
| 0.134146
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 66e492eef799f5d354e84f2867ee89f9c4cd7b7a | size: 200 | ext: py | lang: Python
max_stars_repo_path: tests/button_test.py | max_stars_repo_name: almasgai/Drone | max_stars_repo_head_hexsha: 1223375976baf79d0f4362d42287d1a4039ba1e9
max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: tests/button_test.py | max_issues_repo_name: almasgai/Drone | max_issues_repo_head_hexsha: 1223375976baf79d0f4362d42287d1a4039ba1e9
max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tests/button_test.py | max_forks_repo_name: almasgai/Drone | max_forks_repo_head_hexsha: 1223375976baf79d0f4362d42287d1a4039ba1e9
max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
from gpiozero import Button
import os
from time import sleep
button = Button(2)
i = 0
while True:
if button.is_pressed:
print(i, ". I've been pressed")
i += 1
sleep(0.1)
| 15.384615
| 39
| 0.61
| 32
| 200
| 3.78125
| 0.59375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035461
| 0.295
| 200
| 12
| 40
| 16.666667
| 0.822695
| 0
| 0
| 0
| 0
| 0
| 0.095
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.3
| 0
| 0.3
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: 66e5419754e56410c068112926f27e01cdae86bb | size: 820 | ext: py | lang: Python
max_stars_repo_path: reprojection.py | max_stars_repo_name: ekrell/nir2watermap | max_stars_repo_head_hexsha: 5253f2cde142a62103eb06fb2931c9aed6431211
max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: reprojection.py | max_issues_repo_name: ekrell/nir2watermap | max_issues_repo_head_hexsha: 5253f2cde142a62103eb06fb2931c9aed6431211
max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: reprojection.py | max_forks_repo_name: ekrell/nir2watermap | max_forks_repo_head_hexsha: 5253f2cde142a62103eb06fb2931c9aed6431211
max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
import rasterio
from rasterio.plot import show, reshape_as_raster, reshape_as_image, adjust_band
from rasterio import warp
import numpy as np
def reprojectio(img, bounds, transform, src_crs, projection = "epsg:4326", resolution = 0.00001):
    # Reproject. Note: the original referenced an undefined `aRaster` object and
    # reused one transform for both source and destination; `src_crs` is now a
    # parameter, the computed destination transform is kept separately, and img
    # is assumed to be band-first (bands, rows, cols).
    dst_transform, width, height = warp.calculate_default_transform( \
        src_crs, {"init" : projection},
        img.shape[-1], img.shape[-2],
        left = bounds[0], bottom = bounds[1],
        right = bounds[2], top = bounds[3],
        resolution = resolution)
    out_array = np.ndarray((img.shape[0], height, width), dtype = img.dtype)
    warp.reproject(img, out_array, src_crs = src_crs, dst_crs = {"init" : projection},
        src_transform = transform,
        dst_transform = dst_transform, resampling = warp.Resampling.bilinear)
    return out_array
| 37.272727
| 91
| 0.680488
| 105
| 820
| 5.180952
| 0.47619
| 0.044118
| 0.033088
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032061
| 0.20122
| 820
| 21
| 92
| 39.047619
| 0.798473
| 0.010976
| 0
| 0
| 0
| 0
| 0.032178
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.25
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
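
A hypothetical call site for the reprojection function above, reading the source raster's CRS, bounds, and transform with rasterio ("input.tif" is a placeholder path):

```python
# Usage sketch; assumes a readable GeoTIFF at the placeholder path.
import rasterio

with rasterio.open("input.tif") as src:
    img = src.read()  # (bands, rows, cols)
    out = reprojectio(img, src.bounds, src.transform, src.crs)
print(out.shape)
```
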
hexsha: 66e80248874252f8ee1fc31cfa1763523a5f99eb | size: 4,034 | ext: py | lang: Python
max_stars_repo_path: opentsdb/push_thread.py | max_stars_repo_name: razvandimescu/opentsdb-py | max_stars_repo_head_hexsha: 61c15302468769121f94323493e88cb51efcea15
max_stars_repo_licenses: ["MIT"] | max_stars_count: 48 | max_stars_repo_stars_event_min_datetime: 2016-12-27T10:11:41.000Z | max_stars_repo_stars_event_max_datetime: 2021-11-15T16:05:24.000Z
max_issues_repo_path: opentsdb/push_thread.py | max_issues_repo_name: razvandimescu/opentsdb-py | max_issues_repo_head_hexsha: 61c15302468769121f94323493e88cb51efcea15
max_issues_repo_licenses: ["MIT"] | max_issues_count: 8 | max_issues_repo_issues_event_min_datetime: 2017-10-08T16:20:30.000Z | max_issues_repo_issues_event_max_datetime: 2022-02-23T08:36:52.000Z
max_forks_repo_path: opentsdb/push_thread.py | max_forks_repo_name: razvandimescu/opentsdb-py | max_forks_repo_head_hexsha: 61c15302468769121f94323493e88cb51efcea15
max_forks_repo_licenses: ["MIT"] | max_forks_count: 17 | max_forks_repo_forks_event_min_datetime: 2017-10-01T01:14:55.000Z | max_forks_repo_forks_event_max_datetime: 2021-11-15T16:05:24.000Z
content:
from logging import getLogger
from queue import Empty
import threading
import random
import time
logger = getLogger('opentsdb-py')
class PushThread(threading.Thread):
WAIT_NEXT_METRIC_TIMEOUT = 3
def __init__(self, tsdb_connect, metrics_queue, close_client,
send_metrics_limit, send_metrics_batch_limit, statuses):
super().__init__()
self.tsdb_connect = tsdb_connect
self.metrics_queue = metrics_queue
self.close_client_flag = close_client
self.send_metrics_limit = send_metrics_limit
self.send_metrics_batch_limit = send_metrics_batch_limit
self.statuses = statuses
self._retry_send_metrics = None
def run(self):
while not self._is_done():
start_time = time.time()
try:
if self._retry_send_metrics:
data = self._retry_send_metrics
self._retry_send_metrics = None
else:
data = self._next(self.WAIT_NEXT_METRIC_TIMEOUT)
self.send(data)
except StopIteration:
break
except Empty:
continue
except Exception as error:
logger.exception(error)
if self.send_metrics_limit > 0:
                self._metrics_limit_timeout(start_time)
self.tsdb_connect.disconnect()
def _is_done(self):
return self.tsdb_connect.stopped.is_set() or (self.close_client_flag.is_set() and self.metrics_queue.empty())
def _next(self, wait_timeout):
raise NotImplementedError()
def send(self, data):
raise NotImplementedError()
    # Single leading underscore so subclass overrides are actually dispatched
    # (a double underscore is name-mangled per class and defeats overriding)
    def _metrics_limit_timeout(self, start_time):
pass
def _update_statuses(self, success, failed):
self.statuses['success'] += success
self.statuses['failed'] += failed
class HTTPPushThread(PushThread):
def _next(self, wait_timeout):
total_metrics = self.metrics_queue.qsize()
iter_count = total_metrics if total_metrics <= self.send_metrics_batch_limit else self.send_metrics_batch_limit
metrics = []
if total_metrics:
for _ in range(iter_count):
metrics.append(self.metrics_queue.get_nowait())
else:
metrics.append(self.metrics_queue.get(block=True, timeout=wait_timeout))
if StopIteration in metrics and len(metrics) == 1:
raise StopIteration
elif StopIteration in metrics:
metrics.remove(StopIteration)
self.metrics_queue.put(StopIteration)
return metrics
def send(self, data):
try:
result = self.tsdb_connect.sendall(*data)
except Exception as error:
logger.exception("Push metric failed: %s", error)
self._retry_send_metrics = data
time.sleep(1)
else:
failed = result.get('failed', 0)
self._update_statuses(result.get('success', 0), failed)
if failed:
logger.warning("Push metrics are failed %d/%d" % (failed, len(data)),
extra={'errors': result.get('errors')})
class TelnetPushThread(PushThread):
def _next(self, wait_timeout):
metric = self.metrics_queue.get(block=True, timeout=wait_timeout)
if metric is StopIteration:
raise metric
return metric
    def _metrics_limit_timeout(self, start_time):
duration = time.time() - start_time
wait_time = (2.0 * random.random()) / self.send_metrics_limit
if wait_time > duration:
logger.debug("Wait for %s", wait_time - duration)
time.sleep(wait_time - duration)
def send(self, data):
try:
self.tsdb_connect.sendall(data)
except Exception as error:
logger.exception("Push metric failed: %s", error)
self._retry_send_metrics = data
time.sleep(1)
else:
self._update_statuses(1, 0)
| 32.015873
| 119
| 0.617005
| 459
| 4,034
| 5.141612
| 0.213508
| 0.074576
| 0.047458
| 0.050847
| 0.359322
| 0.236441
| 0.174576
| 0.144915
| 0.144915
| 0.144915
| 0
| 0.003892
| 0.299455
| 4,034
| 125
| 120
| 32.272
| 0.83121
| 0
| 0
| 0.285714
| 0
| 0
| 0.03297
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.122449
| false
| 0.010204
| 0.05102
| 0.010204
| 0.244898
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
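
A minimal wiring sketch for TelnetPushThread above; the connection object is a stand-in for the real tsdb_connect the opentsdb-py client builds internally, and the metric line is an arbitrary example.

```python
# Stand-in collaborators; only the attributes the push thread touches exist.
import queue
import threading

class FakeConnect:
    def __init__(self):
        self.stopped = threading.Event()
    def sendall(self, data):
        print("would send:", data)
    def disconnect(self):
        print("disconnected")

metrics_queue = queue.Queue()
statuses = {'success': 0, 'failed': 0}
thread = TelnetPushThread(FakeConnect(), metrics_queue, threading.Event(),
                          0, 10, statuses)
thread.start()
metrics_queue.put("put sys.cpu.user 1436333416 42 host=web01")
metrics_queue.put(StopIteration)   # sentinel that stops the push thread
thread.join()
print(statuses)                    # {'success': 1, 'failed': 0}
```
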
hexsha: 66ee56f212ce0df2c239268cabb21b8541c895a2 | size: 1,063 | ext: py | lang: Python
max_stars_repo_path: Week02/Assignment/jstoppelman_01.py | max_stars_repo_name: nkruyer/SkillsWorkshop2018 | max_stars_repo_head_hexsha: 2201255ff63eca111635789267d0600a95854c38
max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2020-04-18T03:30:46.000Z | max_stars_repo_stars_event_max_datetime: 2020-04-18T03:30:46.000Z
max_issues_repo_path: Week02/Assignment/jstoppelman_01.py | max_issues_repo_name: nkruyer/SkillsWorkshop2018 | max_issues_repo_head_hexsha: 2201255ff63eca111635789267d0600a95854c38
max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: 21 | max_issues_repo_issues_event_min_datetime: 2018-07-12T19:12:23.000Z | max_issues_repo_issues_event_max_datetime: 2018-08-10T13:52:45.000Z
max_forks_repo_path: Week02/Assignment/jstoppelman_01.py | max_forks_repo_name: nkruyer/SkillsWorkshop2018 | max_forks_repo_head_hexsha: 2201255ff63eca111635789267d0600a95854c38
max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: 60 | max_forks_repo_forks_event_min_datetime: 2018-05-08T16:59:20.000Z | max_forks_repo_forks_event_max_datetime: 2018-08-01T14:28:28.000Z
content:
#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import simps
from scipy.optimize import curve_fit
def curve3(x,a,b,c,d):
return a*x**3+b*x**2+c*x+d
def BIC(y, yhat, k, weight = 1):
err = y - yhat
sigma = np.std(np.real(err))
n = len(y)
B = n*np.log(sigma**2) + weight*k*np.log(n)
return B
x = [ 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5, 5., 5.5, 6., 6.5, 7., 7.5, 8., 8.5, 9., 9.5, 10. ]
y = [3.43, 4.94, 6.45, 9.22, 6.32, 6.11, 4.63, 8.95, 7.8, 8.35, 11.45, 14.71, 11.97, 12.46, 17.42, 17.0, 15.45, 19.15, 20.86]
x=np.asarray(x)
y=np.asarray(y)
coeff=np.polyfit(x,y,1)
t=np.poly1d(coeff)
params, covar = curve_fit(curve3,x,y)
y3=np.asarray(curve3(x,*params))
bt3=BIC(y, y3,3)
print(bt3)
bt=BIC(y,t(x),1)
print(bt)
#print("area=", simps(t3(x),x))
plt.scatter(x,y)
plt.plot(x,t(x),'-')
plt.plot(x,curve3(x,*params),'-')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Week 2 Plot')
plt.text(6,5,"area={}".format(simps(curve3(x,*params), x)))
plt.savefig("jstoppelman_01.png")
| 26.575
| 125
| 0.590781
| 235
| 1,063
| 2.659574
| 0.4
| 0.056
| 0.0624
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130045
| 0.160865
| 1,063
| 39
| 126
| 27.25641
| 0.570628
| 0.047037
| 0
| 0
| 0
| 0
| 0.039604
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.125
| 0.03125
| 0.25
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
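
The BIC helper in the script above computes a weighted Bayesian information criterion; with n data points, k free parameters, residual standard deviation sigma, and weight w, it evaluates:

```latex
\mathrm{BIC} = n \ln(\sigma^{2}) + w\,k\,\ln(n)
```

Lower values are preferred, so the two printed scores let the cubic fit (k = 3) be compared against the linear fit (k = 1).
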
hexsha: 66f14722457fd9966ac9b7749eb637bceaf702bb | size: 5,464 | ext: py | lang: Python
max_stars_repo_path: websauna/system/devop/cmdline.py | max_stars_repo_name: stevepiercy/websauna | max_stars_repo_head_hexsha: 2886b86f7920d75900c634958779d61aa73f011b
max_stars_repo_licenses: ["CNRI-Python"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: websauna/system/devop/cmdline.py | max_issues_repo_name: stevepiercy/websauna | max_issues_repo_head_hexsha: 2886b86f7920d75900c634958779d61aa73f011b
max_issues_repo_licenses: ["CNRI-Python"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: websauna/system/devop/cmdline.py | max_forks_repo_name: stevepiercy/websauna | max_forks_repo_head_hexsha: 2886b86f7920d75900c634958779d61aa73f011b
max_forks_repo_licenses: ["CNRI-Python"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
"""Helper functions to initializer Websauna framework for command line applications."""
# Standard Library
import logging
import os
import sys
import typing as t
# Pyramid
import plaster
from pyramid import router
from pyramid import scripting
from rainbow_logging_handler import RainbowLoggingHandler
# Websauna
from websauna.system import Initializer
from websauna.system.http import Request
from websauna.system.http.utils import make_routable_request
from websauna.system.model.meta import create_dbsession
def prepare_config_uri(config_uri: str) -> str:
"""Make sure a configuration uri has the prefix ws://.
:param config_uri: Configuration uri, i.e.: websauna/conf/development.ini
:return: Configuration uri with the prefix ws://.
"""
if not config_uri.startswith('ws://'):
config_uri = 'ws://{uri}'.format(uri=config_uri)
return config_uri
def get_wsgi_app(config_uri: str, defaults: dict) -> router.Router:
"""Return a Websauna WSGI application given a configuration uri.
:param config_uri: Configuration uri, i.e.: websauna/conf/development.ini.
:param defaults: Extra options to be passed to the app.
:return: A Websauna WSGI Application
"""
config_uri = prepare_config_uri(config_uri)
loader = plaster.get_loader(config_uri)
return loader.get_wsgi_app(defaults=defaults)
def initializer_from_app(app: router.Router) -> Initializer:
"""Return the initializer for the given app.
:param app: Websauna WSGI application
:return: Websauna Initializer
"""
initializer = getattr(app, 'initializer', None)
assert initializer is not None, "Configuration did not yield to Websauna application with Initializer set up"
return initializer
def setup_logging(config_uri, disable_existing_loggers=False):
"""Include-aware Python logging setup from INI config file.
"""
config_uri = prepare_config_uri(config_uri)
loader = plaster.get_loader(config_uri, protocols=['wsgi'])
loader.setup_logging(disable_existing_loggers=disable_existing_loggers)
def setup_console_logging(log_level: t.Optional[str]=None):
"""Setup console logging.
Aimed to give easy sane defaults for logging in command line applications.
Don't use logging settings from INI, but use hardcoded defaults.
"""
formatter = logging.Formatter("[%(asctime)s] [%(name)s %(funcName)s] %(message)s") # same as default
# setup `RainbowLoggingHandler`
# and quiet some logs for the test output
handler = RainbowLoggingHandler(sys.stdout)
handler.setFormatter(formatter)
logger = logging.getLogger()
logger.handlers = [handler]
env_level = os.environ.get("LOG_LEVEL", "info")
log_level = log_level or getattr(logging, env_level.upper())
logger.setLevel(log_level)
logger = logging.getLogger("requests.packages.urllib3.connectionpool")
logger.setLevel(logging.ERROR)
# SQL Alchemy transactions
logger = logging.getLogger("txn")
logger.setLevel(logging.ERROR)
def init_websauna(config_uri: str, sanity_check: bool=False, console_app: bool=False, extra_options: dict=None) -> Request:
"""Initialize Websauna WSGI application for a command line oriented script.
Example:
.. code-block:: python
import sys
from websauna.system.devop.cmdline import init_websauna
config_uri = sys.argv[1]
request = init_websauna(config_uri)
:param config_uri: Path to config INI file
:param sanity_check: Perform database sanity check on start
:param console_app: Set true to setup console-mode logging. See :func:`setup_console_logging`
:param extra_options: Passed through bootstrap() and is available as :attr:`websauna.system.Initializer.global_options`.
    :return: Faux Request object pointing to the site root, with registry and everything configured.
"""
# Paster thinks we are a string
if sanity_check:
sanity_check = "true"
else:
sanity_check = "false"
options = {
"sanity_check": sanity_check
}
if extra_options:
options.update(extra_options)
app = get_wsgi_app(config_uri, defaults=options)
initializer = initializer_from_app(app)
registry = initializer.config.registry
dbsession = create_dbsession(registry)
# Set up the request with websauna.site_url setting as the base URL
request = make_routable_request(dbsession, registry)
# This exposes the app object for the integration tests e.g test_static_asset
# TODO: Find a cleaner way to do this
request.app = app
return request
def init_websauna_script_env(config_uri: str) -> dict:
"""Initialize Websauna WSGI application for a IPython notebook.
:param config_uri: Path to config INI file
:return: Dictionary of shell variables
"""
options = {"sanity_check": False}
app = get_wsgi_app(config_uri, defaults=options)
initializer = initializer_from_app(app)
registry = initializer.config.registry
dbsession = create_dbsession(registry)
pyramid_env = scripting.prepare(registry=app.initializer.config.registry)
pyramid_env["app"] = app
pyramid_env["initializer"] = initializer
# Websauna specific
# Set up the request with websauna.site_url setting as the base URL
request = make_routable_request(dbsession, registry)
pyramid_env["request"] = request
pyramid_env["dbsession"] = dbsession
return pyramid_env
| 31.583815
| 124
| 0.731149
| 705
| 5,464
| 5.516312
| 0.29078
| 0.06017
| 0.023142
| 0.016971
| 0.244279
| 0.217537
| 0.198509
| 0.198509
| 0.181538
| 0.181538
| 0
| 0.000449
| 0.184846
| 5,464
| 172
| 125
| 31.767442
| 0.872699
| 0.380307
| 0
| 0.197183
| 0
| 0
| 0.084914
| 0.012442
| 0
| 0
| 0
| 0.005814
| 0.014085
| 1
| 0.098592
| false
| 0
| 0.169014
| 0
| 0.338028
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
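
A hypothetical shell-style bootstrap built on the helpers above (the config path is an example; real paths depend on the deployment):

```python
# Notebook/shell bootstrap sketch using init_websauna_script_env defined above.
env = init_websauna_script_env("websauna/conf/development.ini")
request = env["request"]
dbsession = env["dbsession"]
print(request.application_url)
```
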
hexsha: dd0515ae81e31b3081572aafa51d5253637ae85f | size: 2,010 | ext: py | lang: Python
max_stars_repo_path: src/apd/aggregation/actions/base.py | max_stars_repo_name: MatthewWilkes/apd.aggregation | max_stars_repo_head_hexsha: 427fa908f45332d623295f92e1ccfdaf545d6997
max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: src/apd/aggregation/actions/base.py | max_issues_repo_name: MatthewWilkes/apd.aggregation | max_issues_repo_head_hexsha: 427fa908f45332d623295f92e1ccfdaf545d6997
max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: 11 | max_issues_repo_issues_event_min_datetime: 2020-11-23T21:36:48.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-12T00:48:58.000Z
max_forks_repo_path: src/apd/aggregation/actions/base.py | max_forks_repo_name: MatthewWilkes/apd.aggregation | max_forks_repo_head_hexsha: 427fa908f45332d623295f92e1ccfdaf545d6997
max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2020-08-09T01:47:59.000Z | max_forks_repo_forks_event_max_datetime: 2020-08-09T01:47:59.000Z
content:
import typing as t
from ..typing import T_value
from ..database import DataPoint
from ..exceptions import NoDataForTrigger
class Trigger(t.Generic[T_value]):
name: str
async def start(self) -> None:
"""Coroutine to do any initial setup"""
return
async def match(self, datapoint: DataPoint) -> bool:
"""Return True if the datapoint is of interest to this
trigger.
This is an optional method, called by the default implementation
of handle(...)."""
raise NotImplementedError
async def extract(self, datapoint: DataPoint) -> T_value:
"""Return the value that this datapoint implies for this trigger,
or raise NoDataForTrigger if no value is appropriate.
Can also raise IncompatibleTriggerError if the value is not readable.
This is an optional method, called by the default implementation
of handle(...).
"""
raise NotImplementedError
async def handle(self, datapoint: DataPoint) -> t.Optional[DataPoint]:
"""Given a data point, optionally return a datapoint that
represents the value of this trigger. Will delegate to the
match(...) and extract(...) functions."""
if not await self.match(datapoint):
# This data point isn't relevant
return None
try:
value = await self.extract(datapoint)
except NoDataForTrigger:
# There was no value for this point
return None
return DataPoint(
sensor_name=self.name,
data=value,
deployment_id=datapoint.deployment_id,
collected_at=datapoint.collected_at,
)
class Action:
async def start(self) -> None:
"""Coroutine to do any initial setup"""
return
async def handle(self, datapoint: DataPoint) -> bool:
"""Apply this datapoint to the action, returning
a boolean to indicate success."""
raise NotImplementedError
| 31.904762
| 77
| 0.636816
| 235
| 2,010
| 5.412766
| 0.365957
| 0.037736
| 0.069182
| 0.02673
| 0.290881
| 0.290881
| 0.246855
| 0.246855
| 0.246855
| 0.246855
| 0
| 0
| 0.288557
| 2,010
| 62
| 78
| 32.419355
| 0.88951
| 0.031841
| 0
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.133333
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd05c5af3b4de9bb3a156483a19f52a9e8f9c454
| 1,056
|
py
|
Python
|
scripts/32_Model_Parse_SPRING/24_Collect_Test_Gold_Graphs.py
|
MeghaTiya/amrlib
|
61febbd1ed15d64e3f01126eaeea46211d42e738
|
[
"MIT"
] | null | null | null |
scripts/32_Model_Parse_SPRING/24_Collect_Test_Gold_Graphs.py
|
MeghaTiya/amrlib
|
61febbd1ed15d64e3f01126eaeea46211d42e738
|
[
"MIT"
] | null | null | null |
scripts/32_Model_Parse_SPRING/24_Collect_Test_Gold_Graphs.py
|
MeghaTiya/amrlib
|
61febbd1ed15d64e3f01126eaeea46211d42e738
|
[
"MIT"
] | 1
|
2022-02-09T16:20:42.000Z
|
2022-02-09T16:20:42.000Z
|
#!/usr/bin/python3
import setup_run_dir # Set the working directory and python sys.path to 2 levels above
import os
from glob import glob
from amrlib.graph_processing.amr_loading_raw import load_raw_amr
# Collect all the amr graphs from multiple files and create a gold test file.
# This simply concatenates files and cleans a few bad characters out. The glob pattern
# needs to be exactly the same as the one used in generate so the output graph ordering matches.
if __name__ == '__main__':
glob_pattern = 'amrlib/data/amr_annotation_3.0/data/amrs/split/test/*.txt'
out_fpath = 'amrlib/data/model_parse_spring/test-gold.txt.wiki'
# Load the data
graphs = []
print('Loading data from', glob_pattern)
for fpath in sorted(glob(glob_pattern)):
graphs.extend(load_raw_amr(fpath))
print('Loaded {:,} graphs'.format(len(graphs)))
# Save the collated data
print('Saving data to', out_fpath)
with open(out_fpath, 'w') as f:
for graph in graphs:
f.write('%s\n\n' % graph)
print()
| 37.714286
| 94
| 0.705492
| 167
| 1,056
| 4.299401
| 0.550898
| 0.061281
| 0.027855
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004739
| 0.200758
| 1,056
| 27
| 95
| 39.111111
| 0.845972
| 0.35322
| 0
| 0
| 0
| 0
| 0.251479
| 0.156805
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.235294
| 0
| 0.235294
| 0.235294
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd0b8f696341df5e31ece62f9a50dbeb45afc875
| 5,175
|
py
|
Python
|
ProxyCrawl/ProxyCrawl/rules.py
|
Time1ess/ProxyPool
|
c44e74e8045fc560e5fe905aa41135ecb3e6da98
|
[
"MIT"
] | 18
|
2017-04-25T09:39:08.000Z
|
2022-03-09T08:07:28.000Z
|
ProxyCrawl/ProxyCrawl/rules.py
|
ghosttyq/ProxyPool
|
c44e74e8045fc560e5fe905aa41135ecb3e6da98
|
[
"MIT"
] | null | null | null |
ProxyCrawl/ProxyCrawl/rules.py
|
ghosttyq/ProxyPool
|
c44e74e8045fc560e5fe905aa41135ecb3e6da98
|
[
"MIT"
] | 10
|
2017-05-29T00:53:41.000Z
|
2021-05-08T09:07:52.000Z
|
#!/usr/local/bin/python3
# coding: UTF-8
# Author: David
# Email: youchen.du@gmail.com
# Created: 2017-04-26 11:14
# Last modified: 2017-04-30 15:55
# Filename: rules.py
# Description:
import os
import redis
from scrapy.utils.conf import init_env
from ProxyCrawl.settings import PROJECT_ROOT
conn = redis.Redis(decode_responses=True)
labels = ('name', 'url_fmt', 'row_xpath', 'host_xpath', 'port_xpath',
'addr_xpath', 'mode_xpath', 'proto_xpath', 'vt_xpath', 'max_page')
class Rule:
"""
A rule tells how to crawl proxies from a site.
keys in rule_dict:
name:
url_fmt:
row_xpath: Extract one data row from response
host_xpath: Extract host from data row
port_xpath: Extract port from data row
addr_xpath:
mode_xpath:
proto_xpath:
vt_xpath: validation_time
max_page: 200
status:
Author: David
"""
def __getattr__(self, name):
return self.rule_dict.get(name)
def __str__(self):
return 'Rule:{} - {}'.format(self.name, self.rule_dict)
def __repr__(self):
return 'Rule:{} - <{}>'.format(self.name, self.url_fmt)
def __check_vals(self):
if not all([
self.name, self.url_fmt, self.row_xpath, self.host_xpath,
self.port_xpath, self.addr_xpath, self.mode_xpath,
self.proto_xpath, self.vt_xpath]):
raise ValueError('Rule arguments not set properly')
def __init__(self, rule_dict):
self.rule_dict = rule_dict
self.__check_vals()
@staticmethod
def _load_redis_rule(name=None):
"""
Load rule from redis, raise ValueError if no rule fetched.
Author: David
"""
if name is None:
keys = ['Rule:'+key for key in conn.smembers('Rules')]
rule_dicts = []
for key in keys:
res = conn.hgetall(key)
if not res:
raise ValueError('No rule fetched.')
rule_dicts.append(res)
return rule_dicts
else:
key = 'Rule:' + name
res = conn.hgetall(key)
if not res:
raise ValueError('No rule fetched.')
return res
@staticmethod
def _load_csv_rule(name=None):
data = []
with open(os.path.join(PROJECT_ROOT, 'rules.csv'), 'rb') as f:
for line in f:
data.append(tuple(line.decode('utf-8').strip('\n').split(' ')))
rule_dicts = []
for d in data:
rule_dicts.append({k: v for k, v in zip(labels, d)})
if name:
matches = [r for r in rule_dicts if r['name'] == name]
if not matches:
raise ValueError('No rule fetched.')
elif len(matches) > 1:
raise ValueError('Multiple rules fetched.')
else:
return matches[0]
return rule_dicts
@staticmethod
def _decode_rule(rule, int_keys=('max_page',)):
"""
Decode rule field, transforming str to int.
Author: David
"""
for key in int_keys:
rule[key] = int(rule[key])
return rule
@staticmethod
def _default_status(rule):
"""
Add default status for rule.
Author: David
"""
if not rule.get('status', False):
rule['status'] = 'stopped'
return rule
@classmethod
def _clean_rule(cls, rule, *args, **kwargs):
"""
Clean rule.
Author: David
"""
rule = cls._decode_rule(rule, *args, **kwargs)
rule = cls._default_status(rule)
return rule
@classmethod
def load(cls, name, src='redis'):
"""
Load rule from source and instantiate a new rule item.
Author: David
"""
load_method = getattr(cls, '_load_{}_rule'.format(src))
rule_dict = load_method(name)
rule_dict = cls._clean_rule(rule_dict)
return cls(rule_dict)
@classmethod
def loads(cls, src='redis'):
"""
Load rules from source and instantiate all rule items.
Author: David
"""
load_method = getattr(cls, '_load_{}_rule'.format(src))
rule_dicts = load_method()
rule_dicts = [cls._clean_rule(rule) for rule in rule_dicts]
insts = [cls(rule_dict) for rule_dict in rule_dicts]
return insts
@staticmethod
def _save_redis_rule(rule_dict):
key = 'Rule:' + rule_dict['name']
conn.hmset(key, rule_dict)
conn.sadd('Rules', rule_dict['name'])
@staticmethod
def _save_csv_rule(rule_dict):
raise NotImplementedError
def save(self, dst='redis'):
"""
Save rule to destination.
Author: David
"""
self.__check_vals()
save_method = getattr(self, '_save_{}_rule'.format(dst))
save_method(self.rule_dict)
if __name__ == '__main__':
# rule = Rule.load('xici')
init_env('default')
rules = Rule.loads('csv')
for r in rules:
r.save()
print(rules[0])
# rule.save_rule()
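# Usage sketch (not part of the original file): loading one rule by name.
# The rule name 'xici' mirrors the commented-out example above and is
# illustrative only.
#
# rule = Rule.load('xici', src='csv')
# print(rule.url_fmt, rule.max_page)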
| 26.813472
| 79
| 0.565217
| 639
| 5,175
| 4.358372
| 0.261346
| 0.051706
| 0.021544
| 0.022621
| 0.155117
| 0.123519
| 0.123519
| 0.100539
| 0.075404
| 0.075404
| 0
| 0.009412
| 0.322512
| 5,175
| 192
| 80
| 26.953125
| 0.78494
| 0.182222
| 0
| 0.278846
| 0
| 0
| 0.093956
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.144231
| false
| 0
| 0.038462
| 0.028846
| 0.307692
| 0.009615
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd118309b83096677693134bb6b0d70a964e1ab7
| 1,157
|
py
|
Python
|
fastquotes/fund/__init__.py
|
YangzhenZhao/fastquotes
|
1faba9f7fc7801a11359001e08cefa9cfbc41d64
|
[
"MIT"
] | 4
|
2020-11-18T11:25:00.000Z
|
2021-04-08T01:02:49.000Z
|
fastquotes/fund/__init__.py
|
YangzhenZhao/fastquotes
|
1faba9f7fc7801a11359001e08cefa9cfbc41d64
|
[
"MIT"
] | null | null | null |
fastquotes/fund/__init__.py
|
YangzhenZhao/fastquotes
|
1faba9f7fc7801a11359001e08cefa9cfbc41d64
|
[
"MIT"
] | 1
|
2020-11-18T11:25:01.000Z
|
2020-11-18T11:25:01.000Z
|
import json
import requests
def fund_intro_dict() -> dict:
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36"
}
url = "http://fund.eastmoney.com/js/fundcode_search.js"
res = requests.get(url, headers=headers)
text_data = res.text
# str.strip("var r = ") strips a character set, not the literal prefix;
# slice out the JSON list explicitly instead.
res_list = json.loads(text_data[text_data.find("[") : text_data.rfind("]") + 1])
res_dict = {}
for item in res_list:
res_dict[item[0]] = {"基金代码": item[0], "基金简称": item[2], "基金类型": item[3]}
return res_dict
def etf_list() -> list:
url = (
"http://vip.stock.finance.sina.com.cn/quotes_service/api"
"/jsonp.php/IO.XSRV2.CallbackList['da_yPT46_Ll7K6WD']:"
"/Market_Center.getHQNodeDataSimple"
)
params = {
"page": "1",
"num": "1000",
"sort": "symbol",
"asc": "0",
"node": "etf_hq_fund",
"[object HTMLDivElement]": "qvvne",
}
r = requests.get(url, params=params)
data_text = r.text
data_list = json.loads(data_text[data_text.find("([") + 1 : -2])
return [item["symbol"] for item in data_list]
| 29.666667
| 83
| 0.586863
| 158
| 1,157
| 4.158228
| 0.556962
| 0.048706
| 0.042618
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.053348
| 0.238548
| 1,157
| 38
| 84
| 30.447368
| 0.692395
| 0
| 0
| 0
| 0
| 0.030303
| 0.355229
| 0.075194
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060606
| false
| 0
| 0.060606
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd17680bbd248da6c5086919dd5e04da84e0dd2e
| 15,119
|
py
|
Python
|
udebs/interpret.py
|
recrm/Udebs
|
d7e8e248e7afaf6559f2a96ce5dd6e2698d65af7
|
[
"MIT"
] | 6
|
2017-08-20T02:48:12.000Z
|
2020-09-04T21:46:35.000Z
|
udebs/interpret.py
|
recrm/Udebs
|
d7e8e248e7afaf6559f2a96ce5dd6e2698d65af7
|
[
"MIT"
] | null | null | null |
udebs/interpret.py
|
recrm/Udebs
|
d7e8e248e7afaf6559f2a96ce5dd6e2698d65af7
|
[
"MIT"
] | 1
|
2019-08-28T00:48:43.000Z
|
2019-08-28T00:48:43.000Z
|
import copy
import json
import itertools
import os
import operator
from .errors import *
# ---------------------------------------------------
# Imports and Variables -
# ---------------------------------------------------
class Standard:
"""
Basic functionality wrappers.
Do not import any of these; they are included only as a reference for udebs config file syntax.
"""
@staticmethod
def print(*args):
"""
prints extra output to console.
.. code-block:: xml
<i>print arg1 arg2 ...</i>
"""
print(*args)
return True
@staticmethod
def logicif(cond, value, other):
"""
returns value if condition else other.
TODO: Other is executed even if value is true.
.. code-block:: xml
<i>if cond value other</i>
"""
return value if cond else other
@staticmethod
def inside(before, after, amount=1):
"""
Returns true if before in after amount times else false.
.. code-block:: xml
<i>value in obj</i>
"""
if isinstance(after, str):
return before in after
if amount == 0:
return True
count = 0
for item in after:
if item == before:
count += 1
if count >= amount:
return True
return False
@staticmethod
def notin(*args, **kwargs):
"""
Returns false if value in obj else true.
.. code-block:: xml
<i>value in obj</i>
"""
return not Standard.inside(*args, **kwargs)
@staticmethod
def equal(*args):
"""Checks for equality of args.
.. code-block:: xml
<i>== arg1 arg2 ...</i>
<i>arg1 == arg2</i>
"""
x = args[0]
for y in args:
if y != x:
return False
return True
@staticmethod
def notequal(*args):
"""Checks for inequality of args.
.. code-block:: xml
<i>!= arg1 arg2 ...</i>
<i>arg1 != arg2</i>
"""
x = args[0]
for y in args[1:]:
if x == y:
return False
return True
@staticmethod
def gt(before, after):
"""Checks if before is greater than after
.. code-block:: xml
<i>before > after</i>
"""
return before > after
@staticmethod
def lt(before, after):
"""Checks if before is less than after
.. code-block:: xml
<i>before < after</i>
"""
return before < after
@staticmethod
def gtequal(before, after):
"""Checks if before is greater than or equal to after
.. code-block:: xml
<i>before >= after</i>
"""
return before >= after
@staticmethod
def ltequal(before, after):
"""Checks if before is less than or equal to after
.. code-block:: xml
<i>before <= after</i>
"""
return before <= after
@staticmethod
def plus(*args):
"""Sums arguments
.. code-block:: xml
<i>arg1 + arg2</i>
<i>+ arg1 arg2 ...</i>
"""
return sum(args)
@staticmethod
def multiply(*args):
"""Multiplies arguments
.. code-block:: xml
<i>arg1 * arg2</i>
<i>* arg1 arg2 ...</i>
"""
i = 1
for number in args:
i *= number
return i
@staticmethod
def logicor(*args, storage=None, field=None):
"""
returns true if even one of args is true.
Note: All arguments are processed unless extra arguments are quoted.
.. code-block:: xml
<i>arg1 or arg2</i>
<i>or arg1 arg2 ...</i>
"""
env = _getEnv(storage, {"self": field})
for i in args:
if isinstance(i, UdebsStr):
i = field.getEntity(i).testRequire(env)
if i:
return True
return False
@staticmethod
def mod(before, after):
"""Returns before mod after.
.. code-block:: xml
<i>before % after</i>
"""
return before % after
@staticmethod
def setvar(storage, variable, value):
"""Stores value inside of variable.
Note: always returns true so can be used in require block.
.. code-block:: xml
<i>variable = value</i>
<i>variable -> value</i>
"""
storage[variable] = value
return True
@staticmethod
def getvar(storage, variable):
"""Retrieves a variable
.. code-block:: xml
<i>$ variable</i>
<i>$variable</i>
"""
return storage[variable]
@staticmethod
def div(before, after):
"""Returns before divided by after.
.. code-block:: xml
<i>before / after</i>
"""
return before / after
@staticmethod
def logicnot(element):
"""Switches a boolean from true to false and vice versa
.. code-block:: xml
<i>! element</i>
<i>!element</i>
"""
return not element
@staticmethod
def minus(before, element):
"""Returns before - element. (before defaults to 0 if not given)
.. code-block:: xml
<i>before - element</i>
<i>-element</i>
"""
return before - element
@staticmethod
def sub(array, i):
"""Gets the ith element of array.
.. code-block:: xml
<i>array sub i</i>
"""
return next(itertools.islice(array, int(i), None), 'empty')
@staticmethod
def length(list_):
"""Returns the length of an iterable.
.. code-block:: xml
<i>length list_</i>
"""
return len(list(list_))
@staticmethod
def quote(string):
"""Treats input as string literal and does not process commands.
.. code-block:: xml
<i>`(caster CAST target move)</i>
"""
return UdebsStr(string)
class Variables:
versions = [0, 1]
modules = {
-1: {},
}
env = {
"__builtins__": {"abs": abs, "min": min, "max": max, "len": len},
"standard": Standard,
"operator": operator,
"storage": {},
}
default = {
"f": "",
"args": [],
"kwargs": {},
"all": False,
"default": {},
"string": [],
}
@staticmethod
def keywords(version=1):
return dict(Variables.modules[version], **Variables.modules[-1])
def importFunction(f, args):
"""
Allows a user to import a single function into udebs.
**deprecated - please use udebs.utilities.register
"""
module = {
f.__name__: {
"f": f.__name__
}
}
module[f.__name__].update(args)
importModule(module, {f.__name__: f})
def importModule(dicts=None, globs=None, version=-1):
"""
Allows user to extend base variables available to the interpreter.
Should be run before the instance object is created.
**deprecated for users - please use udebs.utilities.register
"""
if globs is None:
globs = {}
if dicts is None:
dicts = {}
if version not in Variables.modules:
Variables.modules[version] = {}
Variables.modules[version].update(dicts)
Variables.env.update(globs)
def importSystemModule(name, globs=None):
"""Convenience script for import system keywords."""
if globs is None:
globs = {}
path = os.path.dirname(__file__)
for version in Variables.versions:
filename = "{}/keywords/{}-{}.json".format(path, name, str(version))
with open(filename) as fp:
importModule(json.load(fp), globs, version)
def _getEnv(local, glob=None):
"""Retrieves a copy of the base variables."""
value = copy.copy(Variables.env)
if glob:
value.update(glob)
value["storage"] = local
return value
# ---------------------------------------------------
# Interpreter Functions -
# ---------------------------------------------------
def formatS(string, version):
"""Converts a string into its python representation."""
string = str(string)
if string == "self":
return string
elif string == "false":
return "False"
elif string == "true":
return "True"
elif string == "None":
return string
elif string.isdigit():
return string
# String quoted by user.
elif string[0] == string[-1] and string[0] in {"'", '"'}:
return string
# String has already been handled by call
elif string[-1] == ")":
return string
elif string in Variables.env:
return string
# In case prefix notation used in keyword defaults.
elif string[0] in Variables.keywords(version):
return interpret(string, version)
else:
return "'" + string + "'"
def call(args, version):
"""Converts callList into functionString."""
# Find keyword
keywords = [i for i in args if i in Variables.keywords(version)]
# Too many keywords is a syntax error.
if len(keywords) > 1:
raise UdebsSyntaxError("CallList contains to many keywords '{}'".format(args))
# No keywords creates a tuple object.
elif len(keywords) == 0:
return "(" + ",".join(formatS(i, version) for i in args) + ")"
keyword = keywords[0]
# Get and fix data for this keyword.
data = copy.copy(Variables.default)
data.update(Variables.keywords(version)[keyword])
# Create dict of values
current = args.index(keyword)
nodes = copy.copy(data["default"])
for index in range(len(args)):
value = "$" if index >= current else "-$"
value += str(abs(index - current))
if args[index] != keyword:
nodes[value] = args[index]
# Force strings into quoted arguments.
for string in data["string"]:
nodes[string] = "'" + str(nodes[string]).replace("'", "\\'") + "'"
# Claim keyword arguments.
kwargs = {}
for key, value in data["kwargs"].items():
if value in nodes:
new_value = nodes[value]
del nodes[value]
else:
new_value = value
kwargs[key] = formatS(new_value, version)
arguments = []
# Insert positional arguments
for key in data["args"]:
if key in nodes:
arguments.append(formatS(nodes[key], version))
del nodes[key]
else:
arguments.append(formatS(key, version))
# Insert ... arguments.
if data["all"]:
for key in sorted(nodes.keys(), key=lambda x: int(x.replace("$", ""))):
arguments.append(formatS(nodes[key], version))
del nodes[key]
if len(nodes) > 0:
raise UdebsSyntaxError("Keyword contains unused arguments. '{}'".format(" ".join(args)))
# Insert keyword arguments.
for key in sorted(kwargs.keys()):
arguments.append(str(key) + "=" + str(kwargs[key]))
return data["f"] + "(" + ",".join(arguments) + ")"
def split_callstring(raw, version):
"""Converts callString into call_list."""
open_bracket = {'(', '{', '['}
close_bracket = {')', '}', ']'}
call_list = []
buf = ''
in_brackets = 0
in_quotes = False
dot_legal = True
for char in raw.strip():
if char in {'"', "'"}:
in_quotes = not in_quotes
elif not in_quotes:
if char in open_bracket:
in_brackets += 1
elif char in close_bracket:
in_brackets -= 1
elif not in_brackets:
if dot_legal:
if char == ".":
call_list.append(buf)
buf = ''
continue
elif char.isspace():
dot_legal = False
if call_list:
call_list = [".".join(call_list) + "." + buf]
buf = ''
if char.isspace():
if buf:
call_list.append(buf)
buf = ''
continue
buf += char
call_list.append(buf)
if in_brackets:
raise UdebsSyntaxError("Brackets are mismatched. '{}'".format(raw))
if '' in call_list:
raise UdebsSyntaxError("Empty element in call_list. '{}'".format(raw))
# Length one special cases.
if len(call_list) == 1:
value = call_list[0]
# Prefix calling.
if value not in Variables.keywords(version):
if value[0] in Variables.keywords(version):
return [value[0], value[1:]]
return call_list
def interpret(string, version=1, debug=False):
"""Recursive function that parses callString"""
try:
_list = split_callstring(string, version)
if debug:
print("Interpret:", string)
print("Split:", _list)
found = []
for entry in _list:
if entry[0] == "(" and entry[-1] == ")":
found.append(interpret(entry[1:-1], version, debug))
elif "." in entry:
found.append(interpret(entry, version, debug))
elif entry[0] in Variables.keywords(version) and entry not in Variables.keywords(version):
found.append(interpret(entry, version, debug))
else:
found.append(entry)
comp = call(found, version)
if debug:
print("call:", _list)
print("computed:", comp)
return UdebsStr(comp)
except Exception:
print(string)
raise
# ---------------------------------------------------
# Script Main Class -
# ---------------------------------------------------
# An easy way to distinguish between interpreted strings.
class UdebsStr(str):
pass
class Script:
def __init__(self, effect, version=1, debug=False):
# Raw text given to script.
self.raw = effect
self.interpret = effect
if not isinstance(effect, UdebsStr):
self.interpret = interpret(effect, version, debug)
self.code = compile(self.interpret, '<string>', "eval")
def __repr__(self):
return "<Script " + self.raw + ">"
def __str__(self):
return self.raw
def __call__(self, env):
return eval(self.code, env)
def __eq__(self, other):
if not isinstance(other, Script):
return False
return self.raw == other.raw
# ---------------------------------------------------
# Runtime -
# ---------------------------------------------------
importSystemModule("base")
importSystemModule("udebs")
| 25.031457
| 102
| 0.511674
| 1,610
| 15,119
| 4.750932
| 0.196894
| 0.045104
| 0.034514
| 0.035691
| 0.224605
| 0.16525
| 0.114786
| 0.114786
| 0.090469
| 0.077919
| 0
| 0.006142
| 0.343078
| 15,119
| 603
| 103
| 25.072968
| 0.763995
| 0.274092
| 0
| 0.220779
| 0
| 0
| 0.039885
| 0.002183
| 0
| 0
| 0
| 0.001658
| 0
| 1
| 0.116883
| false
| 0.003247
| 0.042208
| 0.012987
| 0.347403
| 0.022727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd1a79c02a429daf639fa22cee8d29423011e935
| 12,150
|
py
|
Python
|
src/predict.py
|
yzhhome/JDProductSummaryGeneration
|
4939f061ca90ad7ddd69b5a1794735f962e45bc0
|
[
"MIT"
] | 1
|
2021-09-18T07:42:36.000Z
|
2021-09-18T07:42:36.000Z
|
src/predict.py
|
yzhhome/JDProductSummaryGeneration
|
4939f061ca90ad7ddd69b5a1794735f962e45bc0
|
[
"MIT"
] | null | null | null |
src/predict.py
|
yzhhome/JDProductSummaryGeneration
|
4939f061ca90ad7ddd69b5a1794735f962e45bc0
|
[
"MIT"
] | null | null | null |
'''
@Author: dzy
@Date: 2021-09-13 11:07:48
@LastEditTime: 2021-09-26 20:25:17
@LastEditors: dzy
@Description: Helper functions or classes used for the model.
@FilePath: /JDProductSummaryGeneration/src/predict.py
'''
import random
import os
import sys
import pathlib
import torch
import jieba
import config
from model import PGN
from dataset import PairDataset
from utils import source2ids, outputids2words, Beam, timer, add2heap, replace_oovs
abs_path = pathlib.Path(__file__).parent.absolute()
class Predict():
@timer(module='initialize predictor')
def __init__(self):
# torch has no DEVICE attribute; the device is assumed to be defined in config
self.DEVICE = config.DEVICE
dataset = PairDataset(config.data_path,
max_src_len=config.max_src_len,
max_tgt_len=config.max_tgt_len,
truncate_src=config.truncate_src,
truncate_tgt=config.truncate_tgt)
self.vocab = dataset.build_vocab(embed_file=config.embed_file)
self.model = PGN(self.vocab)
# List of stop word indexes in the vocabulary
self.stop_word = list(set([self.vocab[x.strip()]
for x in open(config.stop_word_file).readlines()]))
self.model.load_model()
self.model.to(self.DEVICE)
def greedy_search(self, x, max_sum_len, len_oovs, x_padding_masks):
"""Function which returns a summary by always picking
the highest probability option conditioned on the previous word.
Args:
x (Tensor): Input sequence as the source.
max_sum_len (int): The maximum length a summary can have.
len_oovs (Tensor): Number of out-of-vocabulary tokens.
x_padding_masks (Tensor):
The padding masks for the input sequences
with shape (batch_size, seq_len).
Returns:
summary (list): The token list of the result summary.
"""
# Get the encoder outputs and hidden states
encoder_output, encoder_states = self.model.encoder(
replace_oovs(x, self.vocab))
# Initialize the decoder hidden states from the encoder hidden states.
# The encoder is a bidirectional LSTM and the decoder a unidirectional one, so the states must be reduced.
decoder_states = self.model.reduce_state(encoder_states)
# The decoder input at time step 0 is the SOS start token
x_t = torch.ones(1) * self.vocab.SOS
x_t = x_t.to(self.DEVICE, dtype=torch.int64)
# The first token of the summary is SOS
summary = [self.vocab.SOS]
# Initialize the coverage vector
coverage_vector = torch.zeros((1, x.shape[1])).to(self.DEVICE)
# Keep generating while EOS has not been produced and the summary is shorter than the maximum length
while int(x_t.item()) != (self.vocab.EOS) and \
len(summary) < max_sum_len:
context_vector, attention_weights, coverage_vector = \
self.model.attention(decoder_states,
encoder_output,
x_padding_masks,
coverage_vector)
p_vocab, decoder_states, p_gen = \
self.model.decoder(x_t.unsqueeze(1),
decoder_states,
context_vector)
final_dist = self.model.get_final_distribution(
x,
p_gen,
p_vocab,
attention_weights,
torch.max(len_oovs))
# Take the highest-probability token from the final distribution
x_t = torch.argmax(final_dist, dim=1).to(self.DEVICE)
decoder_word_idx = x_t.item()
# Append it to the generated summary
summary.append(decoder_word_idx)
# Replace OOV tokens in the input before the next generation step
x_t = replace_oovs(x_t, self.vocab)
return summary
# @timer('best k')
def best_k(self, beam, k, encoder_output, x_padding_masks, x, len_oovs):
"""Get best k tokens to extend the current sequence at the current time step.
Args:
beam (utils.Beam): The candidate beam to be extended.
k (int): Beam size.
encoder_output (Tensor): The lstm output from the encoder.
x_padding_masks (Tensor):
The padding masks for the input sequences.
x (Tensor): Source token ids.
len_oovs (Tensor): Number of oov tokens in a batch.
Returns:
best_k (list(Beam)): The list of best k candidates.
"""
# use decoder to generate vocab distribution for the next token
decoder_input_t = torch.tensor(beam.tokens[-1]).reshape(1, 1)
decoder_input_t = decoder_input_t.to(self.DEVICE)
# Get context vector from attention network.
context_vector, attention_weights, coverage_vector = \
self.model.attention(beam.decoder_states,
encoder_output,
x_padding_masks,
beam.coverage_vector)
# Replace the indexes of OOV words with the index of OOV token
# to prevent index-out-of-bound error in the decoder.
p_vocab, decoder_states, p_gen = \
self.model.decoder(replace_oovs(decoder_input_t, self.vocab),
beam.decoder_states,
context_vector)
final_dist = self.model.get_final_distribution(x,
p_gen,
p_vocab,
attention_weights,
torch.max(len_oovs))
# Calculate log probabilities.
log_probs = torch.log(final_dist.squeeze())
# Filter forbidden tokens.
if len(beam.tokens) == 1:
forbidden_ids = [
self.vocab[u"这"],
self.vocab[u"此"],
self.vocab[u"采用"],
self.vocab[u","],
self.vocab[u"。"],
]
log_probs[forbidden_ids] = -float('inf')
# EOS token penalty. Follow the definition in
# https://opennmt.net/OpenNMT/translation/beam_search/.
log_probs[self.vocab.EOS] *= \
config.gamma * x.size()[1] / len(beam.tokens)
log_probs[self.vocab.UNK] = -float('inf')
# Get top k tokens and the corresponding logprob.
topk_probs, topk_idx = torch.topk(log_probs, k)
# Extend the current hypo with top k tokens, resulting k new hypos.
best_k = [beam.extend(x,
log_probs[x],
decoder_states,
coverage_vector) for x in topk_idx.tolist()]
return best_k
def beam_search(self, x,
max_sum_len,
beam_width,
len_oovs,
x_padding_masks):
"""Using beam search to generate summary.
Args:
x (Tensor): Input sequence as the source.
max_sum_len (int): The maximum length a summary can have.
beam_width (int): Beam size.
len_oovs (Tensor): Number of out-of-vocabulary tokens.
x_padding_masks (Tensor):
The padding masks for the input sequences.
Returns:
result (list(Beam)): The list of best k candidates.
"""
# run body_sequence input through encoder
encoder_output, encoder_states = self.model.encoder(
replace_oovs(x, self.vocab))
coverage_vector = torch.zeros((1, x.shape[1])).to(self.DEVICE)
# initialize decoder states with encoder forward states
decoder_states = self.model.reduce_state(encoder_states)
# initialize the hypothesis with a class Beam instance.
init_beam = Beam([self.vocab.SOS],
[0],
decoder_states,
coverage_vector)
# get the beam size and create a list for storing current candidates
# and a list for completed hypothesis
k = beam_width
curr, completed = [init_beam], []
# use beam search for max_sum_len (maximum length) steps
for _ in range(max_sum_len):
# get k best hypothesis when adding a new token
topk = []
for beam in curr:
# When an EOS token is generated, add the hypo to the completed
# list and decrease beam size.
if beam.tokens[-1] == self.vocab.EOS:
completed.append(beam)
k -= 1
continue
for can in self.best_k(beam,
k,
encoder_output,
x_padding_masks,
x,
torch.max(len_oovs)
):
# Using topk as a heap to keep track of top k candidates.
# Using the sequence scores of the hypos to compare
# and object ids to break ties.
add2heap(topk, (can.seq_score(), id(can), can), k)
curr = [items[2] for items in topk]
# stop when there are enough completed hypothesis
if len(completed) == beam_width:
break
# When there are not enough completed hypotheses,
# take whatever we have in the current best k as the final candidates.
completed += curr
# sort the hypothesis by normalized probability and choose the best one
result = sorted(completed,
key=lambda x: x.seq_score(),
reverse=True)[0].tokens
return result
@timer(module='doing prediction')
def predict(self, text, tokenize=True, beam_search=True):
"""Generate summary.
Args:
text (str or list): Source.
tokenize (bool, optional):
Whether to tokenize the source text. Defaults to True.
beam_search (bool, optional):
Whether to use beam search or not.
Defaults to True; False falls back to greedy search.
Returns:
str: The final summary.
"""
if isinstance(text, str) and tokenize:
text = list(jieba.cut(text))
x, oov = source2ids(text, self.vocab)
x = torch.tensor(x).to(self.DEVICE)
len_oovs = torch.tensor([len(oov)]).to(self.DEVICE)
x_padding_masks = torch.ne(x, 0).byte().float()
if beam_search:
summary = self.beam_search(x.unsqueeze(0),
max_sum_len=config.max_dec_steps,
beam_width=config.beam_size,
len_oovs=len_oovs,
x_padding_masks=x_padding_masks)
else:
summary = self.greedy_search(x.unsqueeze(0),
max_sum_len=config.max_dec_steps,
len_oovs=len_oovs,
x_padding_masks=x_padding_masks)
# Convert token indexes in the summary back to words
summary = outputids2words(summary, oov, self.vocab)
# Do not show <SOS> and <EOS> in the output
return summary.replace('<SOS>', '').replace('<EOS>', '').strip()
if __name__ == "__main__":
pred = Predict()
print('vocab_size: ', len(pred.vocab))
# Pick a random sample from the test set for prediction
with open(config.test_data_path, 'r') as test:
picked = random.choice(list(test))
source, ref = picked.strip().split('<sep>')
print('source: ', source, '\n')
greedy_prediction = pred.predict(source.split(), beam_search=False)
print('greedy: ', greedy_prediction, '\n')
beam_prediction = pred.predict(source.split(), beam_search=True)
print('beam: ', beam_prediction, '\n')
print('reference: ', ref, '\n')
| 38.087774
| 85
| 0.535885
| 1,336
| 12,150
| 4.701347
| 0.241766
| 0.030091
| 0.028976
| 0.009553
| 0.263652
| 0.251234
| 0.244865
| 0.219073
| 0.184047
| 0.152205
| 0
| 0.007599
| 0.382634
| 12,150
| 319
| 86
| 38.087774
| 0.829756
| 0.2893
| 0
| 0.251534
| 0
| 0
| 0.01507
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030675
| false
| 0
| 0.06135
| 0
| 0.122699
| 0.030675
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd1ed841552b8b3a90cb7777b80332b35c886661
| 7,621
|
py
|
Python
|
PySyft_dev/FL_BC/cryptolib/wrapper_pyca.py
|
samuelxu999/FederatedLearning_dev
|
354d951c53ee20eb41bf7980210d61b7a358d341
|
[
"MIT"
] | null | null | null |
PySyft_dev/FL_BC/cryptolib/wrapper_pyca.py
|
samuelxu999/FederatedLearning_dev
|
354d951c53ee20eb41bf7980210d61b7a358d341
|
[
"MIT"
] | 2
|
2021-03-17T23:27:00.000Z
|
2021-03-17T23:27:01.000Z
|
PySyft_dev/FL_BC/cryptolib/wrapper_pyca.py
|
samuelxu999/FederatedLearning_dev
|
354d951c53ee20eb41bf7980210d61b7a358d341
|
[
"MIT"
] | 2
|
2019-04-23T22:13:18.000Z
|
2019-08-19T01:39:51.000Z
|
'''
========================
Wrapper_pyca module
========================
Created on Nov.7, 2017
@author: Xu Ronghua
@Email: rxu22@binghamton.edu
@TaskDescription: This module provide cryptography function based on pyca API.
@Reference:https://cryptography.io/en/latest/
'''
from cryptography.fernet import Fernet
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec, dsa
from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat, PrivateFormat, BestAvailableEncryption
from cryptography.hazmat.primitives import serialization
from cryptography.exceptions import InvalidSignature
class Crypto_DSA(object):
'''
Generate key pairs in json format
@in: key_size
@out: key_pairs={'private_key':x,'public_key':{'y':y,'p':p,'q':q,'g':g}}
'''
@staticmethod
def generate_key_pairs(key_size=1024):
#define key_pairs dictionary
key_pairs={}
#generate private key
private_key = dsa.generate_private_key(key_size=key_size, backend=default_backend())
private_number=private_key.private_numbers()
#add private key value - x
key_pairs['private_key']=private_number.x
#get private key from private_key
public_key = private_key.public_key()
#get public number
public_numbers=public_key.public_numbers()
y=public_numbers.y
p=public_numbers.parameter_numbers.p
q=public_numbers.parameter_numbers.q
g=public_numbers.parameter_numbers.g
#add public_key_numbers value - y, p, q, g
public_keys_numbers={'y':y, 'p':p, 'q':q, 'g':g}
key_pairs['public_key']=public_keys_numbers
return key_pairs
'''
Display key pairs data on screen
@in: key_pairs={'private_key':x,'public_key':{'y':y,'p':p,'q':q,'g':g}}
@out: print out key pairs data on screen
'''
@staticmethod
def display_key_pairs(key_pairs):
print("private key value x:%d" %(key_pairs['private_key']))
public_keys_numbers=key_pairs['public_key']
print("public key value y:%d" %(public_keys_numbers['y']))
print("public key value p:%d" %(public_keys_numbers['p']))
print("public key value q:%d" %(public_keys_numbers['q']))
print("public key value g:%d" %(public_keys_numbers['g']))
'''
Get public key object given public key numbers
@in: public_key_numbers={'public_key':{'y':y,'p':p,'q':q,'g':g}}
@out: public_key object
'''
@staticmethod
def get_public_key(public_key_numbers):
y=public_key_numbers['y']
p=public_key_numbers['p']
q=public_key_numbers['q']
g=public_key_numbers['g']
#construct public key based on public_key_numbers
parameter_numbers=dsa.DSAParameterNumbers(p,q,g)
public_number=dsa.DSAPublicNumbers(y,parameter_numbers)
public_key=public_number.public_key(default_backend())
return public_key
'''
Get private key object given private key numbers
@in: x - private key value
@in: public_key_numbers - DSAPublicNumbers object of the matching public key
@out: private_key object
'''
@staticmethod
def get_private_key(x, public_key_numbers):
#reconstruct private key
private_numbers=dsa.DSAPrivateNumbers(x, public_key_numbers)
#construct private_key based on private_numbers
private_key=private_numbers.private_key(default_backend())
return private_key
'''
Generate signature by signing data
@in: private_key object
@in: sign_data
@out: signature
'''
@staticmethod
def sign(private_key, sign_data):
signature=private_key.sign(sign_data,hashes.SHA256())
return signature
'''
Verify signature by using public_key
@in: public_key object
@in: signature
@in: sign_data
@out: True or False
'''
@staticmethod
def verify(public_key, signature, sign_data):
try:
public_key.verify(signature, sign_data, hashes.SHA256())
except InvalidSignature:
return False
except Exception:
return False
return True
'''
Generate public key bytes
@in: public_key object
@in: encoding- Encoding.PEM or Encoding.DER
@out: public_key_bytes
'''
@staticmethod
def get_public_key_bytes(public_key, encoding=Encoding.PEM):
public_key_bytes=public_key.public_bytes(encoding, PublicFormat.SubjectPublicKeyInfo)
return public_key_bytes
'''
Generate public_key object by loading public key bytes
@in: public_key_bytes
@in: encoding- Encoding.PEM or Encoding.DER
@out: public_key object
'''
@staticmethod
def load_public_key_bytes(public_key_bytes,encoding=Encoding.PEM):
if(encoding==Encoding.PEM):
public_key=serialization.load_pem_public_key(public_key_bytes, default_backend())
elif(encoding==Encoding.DER):
public_key=serialization.load_der_public_key(public_key_bytes, default_backend())
else:
public_key=''
return public_key
'''
Generate private key bytes
@in: private_key object
@in: encryp_pw- password for encryption private_key_bytes
@in: encoding- Encoding.PEM or Encoding.DER
@in: private_format- PrivateFormat.PKCS8 or PrivateFormat.TraditionalOpenSSL
@out: private_key_bytes
'''
@staticmethod
def get_private_key_bytes(private_key, encryp_pw=b'rootpasswd', encoding=Encoding.PEM, private_format=PrivateFormat.PKCS8):
private_key_bytes=private_key.private_bytes(encoding, private_format, BestAvailableEncryption(bytes(encryp_pw)))
return private_key_bytes
'''
Generate private_key object by loading public key bytes
@in: private_key_bytes
@in: encryp_pw- password for encryption private_key_bytes
@in: encoding- Encoding.PEM or Encoding.DER
@out: private_key object
'''
@staticmethod
def load_private_key_bytes(private_key_bytes, encryp_pw=b'rootpasswd', encoding=Encoding.PEM):
if(encoding==Encoding.PEM):
private_key=serialization.load_pem_private_key(private_key_bytes, encryp_pw, default_backend())
elif(encoding==Encoding.DER):
private_key=serialization.load_der_private_key(private_key_bytes, encryp_pw, default_backend())
else:
private_key=''
return private_key
'''
Save key bytes data in key_file
@in: key_bytes
@in: key_file
'''
@staticmethod
def save_key_bytes(key_bytes, key_file):
fname = open(key_file, 'w')
fname.write("%s" %(key_bytes.decode(encoding='UTF-8')))
fname.close()
'''
Load key bytes data from key_file
@in: key_file
@out: key_bytes
'''
@staticmethod
def load_key_bytes(key_file):
fname = open(key_file, 'r')
key_bytes=fname.read().encode(encoding='UTF-8')
fname.close()
return key_bytes
# Message digests (Hashing) related function
class Crypto_Hash(object):
'''
Generate hash value given input data
@in: byte_data
@out: hashed_value
'''
@staticmethod
def generate_hash(byte_data):
#new digest hash instance
digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
# apply hash function to data block
digest.update(byte_data)
# Finalize the current context and return the message digest as bytes.
hash_block=digest.finalize()
return hash_block
'''
Verify hash value of given input data
@in: hash_data
@in: byte_data
@out: True if hash_data matches the digest of byte_data
'''
@staticmethod
def verify_hash(hash_data, byte_data):
#new digest hash instance
digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
# apply hash function to data block
digest.update(byte_data)
# Finalize the current context and return the message digest as bytes.
hash_block=digest.finalize()
return hash_data==hash_block
'''
Self-test: generate a hash and verify it
'''
def test_func():
hash_value=Crypto_Hash.generate_hash(b'samuel')
print(Crypto_Hash.verify_hash(hash_value, b'samuel'))
pass
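# Round-trip sketch (not part of the original file): generate a DSA key pair,
# rebuild the key objects from the stored numbers, then sign and verify.
def demo_sign_verify():
    key_pairs = Crypto_DSA.generate_key_pairs(key_size=1024)
    public_key = Crypto_DSA.get_public_key(key_pairs['public_key'])
    # get_private_key expects the DSAPublicNumbers object, not the raw dict
    private_key = Crypto_DSA.get_private_key(key_pairs['private_key'],
                                             public_key.public_numbers())
    signature = Crypto_DSA.sign(private_key, b'sample data')
    print(Crypto_DSA.verify(public_key, signature, b'sample data'))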
if __name__ == "__main__":
test_func()
pass
| 28.543071
| 124
| 0.746228
| 1,081
| 7,621
| 5.009251
| 0.152636
| 0.098061
| 0.035457
| 0.003693
| 0.384303
| 0.297507
| 0.253924
| 0.214958
| 0.189474
| 0.156233
| 0
| 0.004105
| 0.136859
| 7,621
| 266
| 125
| 28.650376
| 0.819094
| 0.142895
| 0
| 0.310345
| 0
| 0
| 0.045542
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12931
| false
| 0.034483
| 0.060345
| 0
| 0.327586
| 0.051724
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd1f85e853fc4ae8cfcfa14f28add26fec35c361
| 693
|
py
|
Python
|
src/utils/formatter.py
|
RuhuiCheng/ladybug
|
fa9e1ea660dd040d3ecfde96ad6c4db67df9bcb9
|
[
"Apache-2.0"
] | 4
|
2020-03-14T10:43:29.000Z
|
2020-09-23T11:15:44.000Z
|
src/utils/formatter.py
|
RuhuiCheng/ladybug
|
fa9e1ea660dd040d3ecfde96ad6c4db67df9bcb9
|
[
"Apache-2.0"
] | null | null | null |
src/utils/formatter.py
|
RuhuiCheng/ladybug
|
fa9e1ea660dd040d3ecfde96ad6c4db67df9bcb9
|
[
"Apache-2.0"
] | null | null | null |
import logging
import json
from src.utils.ucm import app_id, env
class JsonLogFormatter(logging.Formatter):
    def format(self, record):
        # record.message and record.asctime are only set lazily by the base
        # Formatter, so populate them before use
        record.message = record.getMessage()
        record.asctime = self.formatTime(record)
        if record.exc_text is None:
            msg = record.message
        else:
            msg = record.exc_text
        data = {
            'app_id': app_id,
            'asctime': record.asctime,
            'env': env,
            'file_name': record.filename,
            'func_name': record.funcName,
            'level': record.levelname,
            'line_number': record.lineno,
            'message': msg
        }
        return json.dumps(data)
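# Wiring sketch (not part of the original file): attach the formatter to a
# handler so every record is emitted as a single JSON document. Runnable only
# inside the project, since the module imports src.utils.ucm.
#
# handler = logging.StreamHandler()
# handler.setFormatter(JsonLogFormatter())
# logging.getLogger(__name__).addHandler(handler)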
| 26.653846
| 47
| 0.506494
| 70
| 693
| 4.871429
| 0.571429
| 0.043988
| 0.076246
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.340548
| 693
| 25
| 48
| 27.72
| 0.746171
| 0
| 0
| 0
| 0
| 0
| 0.082251
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.136364
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd2300aac8a3080e89edc939e28aa0516c80f6a3
| 4,909
|
py
|
Python
|
wotpy/wot/dictionaries/thing.py
|
JKRhb/wot-py
|
3eaa780189b686c82b7dbdea404fd8077bd3c9f9
|
[
"MIT"
] | null | null | null |
wotpy/wot/dictionaries/thing.py
|
JKRhb/wot-py
|
3eaa780189b686c82b7dbdea404fd8077bd3c9f9
|
[
"MIT"
] | null | null | null |
wotpy/wot/dictionaries/thing.py
|
JKRhb/wot-py
|
3eaa780189b686c82b7dbdea404fd8077bd3c9f9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Wrapper class for dictionaries to represent Things.
"""
import six
from wotpy.wot.dictionaries.base import WotBaseDict
from wotpy.wot.dictionaries.interaction import PropertyFragmentDict, ActionFragmentDict, EventFragmentDict
from wotpy.wot.dictionaries.link import LinkDict
from wotpy.wot.dictionaries.security import SecuritySchemeDict
from wotpy.utils.utils import to_camel
from wotpy.wot.dictionaries.version import VersioningDict
from wotpy.wot.enums import SecuritySchemeType
class ThingFragment(WotBaseDict):
"""ThingFragment is a wrapper around a dictionary that contains properties
representing semantic metadata and interactions (Properties, Actions and Events).
It is used for initializing an internal representation of a Thing Description,
and it is also used in ThingFilter."""
class Meta:
fields = {
"id",
"version",
"name",
"description",
"support",
"created",
"lastModified",
"base",
"properties",
"actions",
"events",
"links",
"security"
}
required = {
"id"
}
fields_readonly = [
"id"
]
fields_str = [
"name",
"description",
"support",
"created",
"lastModified",
"base"
]
fields_dict = [
"properties",
"actions",
"events"
]
fields_list = [
"links",
"security"
]
fields_instance = [
"version"
]
assert set(fields_readonly + fields_str + fields_dict + fields_list + fields_instance) == fields
def __setattr__(self, name, value):
"""Checks to see if the attribute that is being set is a
Thing fragment property and updates the internal dict."""
name_camel = to_camel(name)
if name_camel not in self.Meta.fields:
return super(ThingFragment, self).__setattr__(name, value)
if name_camel in self.Meta.fields_readonly:
raise AttributeError("Can't set attribute {}".format(name))
if name_camel in self.Meta.fields_str:
self._init[name_camel] = value
return
if name_camel in self.Meta.fields_dict:
self._init[name_camel] = {key: val.to_dict() for key, val in six.iteritems(value)}
return
if name_camel in self.Meta.fields_list:
self._init[name_camel] = [item.to_dict() for item in value]
return
if name_camel in self.Meta.fields_instance:
self._init[name_camel] = value.to_dict()
return
@property
def name(self):
"""The name of the Thing.
This property returns the ID if the name is undefined."""
return self._init.get("name", self.id)
@property
def security(self):
"""Set of security configurations, provided as an array,
that must all be satisfied for access to resources at or
below the current level, if not overridden at a lower level.
A default nosec security scheme will be provided if none are defined."""
if "security" not in self._init:
return [SecuritySchemeDict.build({"scheme": SecuritySchemeType.NOSEC})]
return [SecuritySchemeDict.build(item) for item in self._init.get("security")]
@property
def properties(self):
"""The properties optional attribute represents a dict with keys
that correspond to Property names and values of type PropertyFragment."""
return {
key: PropertyFragmentDict(val)
for key, val in six.iteritems(self._init.get("properties", {}))
}
@property
def actions(self):
"""The actions optional attribute represents a dict with keys
that correspond to Action names and values of type ActionFragment."""
return {
key: ActionFragmentDict(val)
for key, val in six.iteritems(self._init.get("actions", {}))
}
@property
def events(self):
"""The events optional attribute represents a dictionary with keys
that correspond to Event names and values of type EventFragment."""
return {
key: EventFragmentDict(val)
for key, val in six.iteritems(self._init.get("events", {}))
}
@property
def links(self):
"""The links optional attribute represents an array of Link objects."""
return [LinkDict(item) for item in self._init.get("links", [])]
@property
def version(self):
"""Provides version information."""
return VersioningDict(self._init.get("version")) if self._init.get("version") else None
| 29.932927
| 106
| 0.606233
| 546
| 4,909
| 5.355311
| 0.274725
| 0.035568
| 0.030096
| 0.032832
| 0.230848
| 0.188782
| 0.150137
| 0.115253
| 0.115253
| 0.076265
| 0
| 0.000293
| 0.303728
| 4,909
| 163
| 107
| 30.116564
| 0.855178
| 0.259931
| 0
| 0.333333
| 0
| 0
| 0.077648
| 0
| 0
| 0
| 0
| 0
| 0.009804
| 1
| 0.078431
| false
| 0
| 0.078431
| 0
| 0.303922
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd25b254cf6453ad21e303d8fb8dc65ace25ddf6
| 1,131
|
py
|
Python
|
src/models/losses/corr_loss.py
|
yewzijian/RegTR
|
64e5b3f0ccc1e1a11b514eb22734959d32e0cec6
|
[
"MIT"
] | 25
|
2022-03-28T06:26:16.000Z
|
2022-03-30T14:21:24.000Z
|
src/models/losses/corr_loss.py
|
yewzijian/RegTR
|
64e5b3f0ccc1e1a11b514eb22734959d32e0cec6
|
[
"MIT"
] | null | null | null |
src/models/losses/corr_loss.py
|
yewzijian/RegTR
|
64e5b3f0ccc1e1a11b514eb22734959d32e0cec6
|
[
"MIT"
] | 2
|
2022-03-29T09:37:50.000Z
|
2022-03-30T06:26:35.000Z
|
import torch
import torch.nn as nn
from utils.se3_torch import se3_transform_list
_EPS = 1e-6
class CorrCriterion(nn.Module):
"""Correspondence Loss.
"""
def __init__(self, metric='mae'):
super().__init__()
assert metric in ['mse', 'mae']
self.metric = metric
def forward(self, kp_before, kp_warped_pred, pose_gt, overlap_weights=None):
losses = {}
B = pose_gt.shape[0]
kp_warped_gt = se3_transform_list(pose_gt, kp_before)
corr_err = torch.cat(kp_warped_pred, dim=0) - torch.cat(kp_warped_gt, dim=0)
if self.metric == 'mae':
corr_err = torch.sum(torch.abs(corr_err), dim=-1)
elif self.metric == 'mse':
corr_err = torch.sum(torch.square(corr_err), dim=-1)
else:
raise NotImplementedError
if overlap_weights is not None:
overlap_weights = torch.cat(overlap_weights)
mean_err = torch.sum(overlap_weights * corr_err) / torch.clamp_min(torch.sum(overlap_weights), _EPS)
else:
# corr_err is 1-D after the reduction above, so average over all points
mean_err = torch.mean(corr_err)
return mean_err
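# Usage sketch (not part of the original file): one batch element with five
# keypoints and an identity ground-truth pose, so the predicted points equal
# the warped ground truth and the loss is zero. Shapes assume the
# se3_transform_list convention of (B, 3, 4) poses and lists of (N, 3) points.
#
# pose_gt = torch.eye(3, 4).unsqueeze(0)  # (1, 3, 4) identity transform
# kp = [torch.rand(5, 3)]                 # list of (N, 3) keypoint tensors
# criterion = CorrCriterion(metric='mae')
# loss = criterion(kp, kp, pose_gt)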
| 27.585366
| 112
| 0.625111
| 155
| 1,131
| 4.270968
| 0.380645
| 0.074018
| 0.072508
| 0.049849
| 0.060423
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013221
| 0.264368
| 1,131
| 40
| 113
| 28.275
| 0.782452
| 0.017683
| 0
| 0.076923
| 0
| 0
| 0.013649
| 0
| 0
| 0
| 0
| 0
| 0.038462
| 1
| 0.076923
| false
| 0
| 0.115385
| 0
| 0.269231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd26b6dd687da7d2ec0ed40d629b6615e9538af8
| 501
|
py
|
Python
|
application/services/balance_service.py
|
singnet/token-balances-service
|
5e32b11bbad46e9df2820132026ab993935f8049
|
[
"MIT"
] | null | null | null |
application/services/balance_service.py
|
singnet/token-balances-service
|
5e32b11bbad46e9df2820132026ab993935f8049
|
[
"MIT"
] | 1
|
2021-04-07T14:40:02.000Z
|
2021-04-07T14:40:02.000Z
|
application/services/balance_service.py
|
singnet/token-balances-service
|
5e32b11bbad46e9df2820132026ab993935f8049
|
[
"MIT"
] | 3
|
2021-04-07T14:12:00.000Z
|
2021-04-27T07:18:34.000Z
|
from infrastructure.repository.token_snapshot_repo import TokenSnapshotRepo
from http import HTTPStatus
def get_snapshot_by_address(address):
balance = TokenSnapshotRepo().get_token_balance(address)
if balance is None:
data = None
statusCode = HTTPStatus.BAD_REQUEST.value
message = "Address not found in snapshot"
else:
data = balance
statusCode = HTTPStatus.OK.value
message = HTTPStatus.OK.phrase
return statusCode, message, data
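# Usage sketch (not part of the original file): the address is illustrative.
#
# status_code, message, data = get_snapshot_by_address('0xdeadbeef')
# print(status_code, message, data)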
| 27.833333
| 75
| 0.718563
| 56
| 501
| 6.285714
| 0.553571
| 0.113636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.223553
| 501
| 17
| 76
| 29.470588
| 0.904884
| 0
| 0
| 0
| 0
| 0
| 0.057884
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.153846
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd272efee44a376502bf4522d14dd1625b93c91b
| 5,015
|
py
|
Python
|
vaccine_allocation/TN_proj.py
|
COVID-IWG/epimargin-studies
|
7d4a78e2e6713c6a0aea2cd2440529153e9a635d
|
[
"MIT"
] | null | null | null |
vaccine_allocation/TN_proj.py
|
COVID-IWG/epimargin-studies
|
7d4a78e2e6713c6a0aea2cd2440529153e9a635d
|
[
"MIT"
] | null | null | null |
vaccine_allocation/TN_proj.py
|
COVID-IWG/epimargin-studies
|
7d4a78e2e6713c6a0aea2cd2440529153e9a635d
|
[
"MIT"
] | null | null | null |
from typing import Callable, Tuple
from epimargin.models import SIR
import pandas as pd
from epimargin.estimators import analytical_MPVS
from epimargin.etl.covid19india import data_path, get_time_series, load_all_data
import epimargin.plots as plt
from epimargin.smoothing import notched_smoothing
from epimargin.utils import cwd, weeks
from studies.vaccine_allocation.commons import *
from studies.vaccine_allocation.epi_simulations import *
from tqdm import tqdm
# model details
CI = 0.95
smoothing = 7
root = cwd()
data = root/"data"
figs = root/"figs"
data.mkdir(exist_ok=True)
figs.mkdir(exist_ok=True)
# define data versions for api files
paths = {
"v3": [data_path(i) for i in (1, 2)],
"v4": [data_path(i) for i in range(3, 26)]
}
for target in paths['v3'] + paths['v4']:
try:
download_data(data, target)
except Exception:
pass
df = load_all_data(
v3_paths = [data/filepath for filepath in paths['v3']],
v4_paths = [data/filepath for filepath in paths['v4']]
)
# cutoff = None
# cutoff = "April 7, 2021"
cutoff = "April 14, 2021"
if cutoff:
df = df[df.date_announced <= cutoff]
data_recency = str(df["date_announced"].max()).split()[0]
run_date = str(pd.Timestamp.now()).split()[0]
ts = get_time_series(
df[df.detected_state == "Tamil Nadu"],
["detected_state", "detected_district"]
)\
.drop(columns = ["date", "time", "delta", "logdelta"])\
.rename(columns = {
"Deceased": "dD",
"Hospitalized": "dT",
"Recovered": "dR"
}).droplevel(0)\
.drop(labels = ["Other State", "Railway Quarantine", "Airport Quarantine"])
district_estimates = []
simulation_initial_conditions = pd.read_csv(data/f"all_india_coalesced_initial_conditions{simulation_start.strftime('%b%d')}.csv")\
.drop(columns = ["Unnamed: 0"])\
.set_index(["state", "district"])\
.loc["Tamil Nadu"]
def setup(district) -> Tuple[Callable[[str], SIR], pd.DataFrame]:
demographics = simulation_initial_conditions.loc[district]
dR_conf = ts.loc[district].dR
dR_conf = dR_conf.reindex(pd.date_range(dR_conf.index.min(), dR_conf.index.max()), fill_value = 0)
dR_conf_smooth = pd.Series(smooth(dR_conf), index = dR_conf.index).clip(0).astype(int)
R_conf_smooth = dR_conf_smooth.cumsum().astype(int)
R0 = R_conf_smooth[data_recency]
dD_conf = ts.loc[district].dD
dD_conf = dD_conf.reindex(pd.date_range(dD_conf.index.min(), dD_conf.index.max()), fill_value = 0)
dD_conf_smooth = pd.Series(smooth(dD_conf), index = dD_conf.index).clip(0).astype(int)
D_conf_smooth = dD_conf_smooth.cumsum().astype(int)
D0 = D_conf_smooth[data_recency]
dT_conf = ts.loc[district].dT
dT_conf = dT_conf.reindex(pd.date_range(dT_conf.index.min(), dT_conf.index.max()), fill_value = 0)
(
dates,
Rt_pred, Rt_CI_upper, Rt_CI_lower,
T_pred, T_CI_upper, T_CI_lower,
total_cases, new_cases_ts,
*_
) = analytical_MPVS(ts.loc[district].dT, CI = CI, smoothing = notched_smoothing(window = smoothing), totals = False)
Rt_estimates = pd.DataFrame(data = {
"dates" : dates,
"Rt_pred" : Rt_pred,
"Rt_CI_upper" : Rt_CI_upper,
"Rt_CI_lower" : Rt_CI_lower,
"T_pred" : T_pred,
"T_CI_upper" : T_CI_upper,
"T_CI_lower" : T_CI_lower,
"total_cases" : total_cases[2:],
"new_cases_ts": new_cases_ts,
})
dT_conf_smooth = pd.Series(smooth(dT_conf), index = dT_conf.index).clip(0).astype(int)
T_conf_smooth = dT_conf_smooth.cumsum().astype(int)
T0 = T_conf_smooth[data_recency]
dT0 = dT_conf_smooth[data_recency]
S0 = max(0, demographics.N_tot - T0)
I0 = max(0, T0 - R0 - D0)
return (
lambda seed = 0: SIR(
name = district,
mortality = demographics[[f"N_{i}" for i in range(7)]] @ np.array(list(TN_IFRs.values()))/demographics.N_tot,
population = demographics.N_tot,
random_seed = seed,
infectious_period = 10,
S0 = S0,
I0 = I0,
R0 = R0,
D0 = D0,
dT0 = dT0,
Rt0 = Rt_estimates.set_index("dates").loc[data_recency].Rt_pred * demographics.N_tot/S0),
Rt_estimates
)
district_estimates = []
for district in tqdm(simulation_initial_conditions.index.get_level_values(0).unique()):
simulation, Rt_estimates = setup(district)
district_estimates.append(Rt_estimates.assign(district = district))
Rt_estimates.to_csv(data/f"TN_Rt_data_{district}_{data_recency}_run{run_date}.csv")
projections = pd.DataFrame(
np.array(
[simulation(_).run(6 * weeks).dT for _ in range(1000)]
)).astype(int).T\
.set_index(pd.date_range(start = data_recency, freq = "D", periods = 6*weeks + 1))
print(district, projections.mean(axis = 1))
projections.to_csv(data/f"TN_projections/projections_{district}_data{data_recency}_run{run_date}.csv")
| 34.586207
| 131
| 0.656032
| 708
| 5,015
| 4.389831
| 0.269774
| 0.041828
| 0.016731
| 0.027027
| 0.220721
| 0.129987
| 0.045045
| 0
| 0
| 0
| 0
| 0.019652
| 0.208574
| 5,015
| 144
| 132
| 34.826389
| 0.763416
| 0.017348
| 0
| 0.017094
| 0
| 0
| 0.106439
| 0.041641
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008547
| false
| 0.008547
| 0.094017
| 0
| 0.111111
| 0.008547
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd28575d99501b8ab89e76a54053a882db38d79c
| 1,514
|
py
|
Python
|
backend/db/test/id_allocator_test.py
|
xuantan/viewfinder
|
992209086d01be0ef6506f325cf89b84d374f969
|
[
"Apache-2.0"
] | 645
|
2015-01-03T02:03:59.000Z
|
2021-12-03T08:43:16.000Z
|
backend/db/test/id_allocator_test.py
|
hoowang/viewfinder
|
9caf4e75faa8070d85f605c91d4cfb52c4674588
|
[
"Apache-2.0"
] | null | null | null |
backend/db/test/id_allocator_test.py
|
hoowang/viewfinder
|
9caf4e75faa8070d85f605c91d4cfb52c4674588
|
[
"Apache-2.0"
] | 222
|
2015-01-07T05:00:52.000Z
|
2021-12-06T09:54:26.000Z
|
# Copyright 2011 Viewfinder Inc. All Rights Reserved.
"""Tests for IdAllocator data object.
"""
__author__ = 'spencer@emailscrubbed.com (Spencer Kimball)'
import unittest
from viewfinder.backend.base import util
from viewfinder.backend.base.testing import async_test
from viewfinder.backend.db.id_allocator import IdAllocator
from base_test import DBBaseTestCase
class IdAllocatorTestCase(DBBaseTestCase):
@async_test
def testCreate(self):
alloc = IdAllocator('type', 13)
num_ids = 3000
def _OnAllocated(ids):
id_set = set(ids)
assert len(id_set) == num_ids
self.stop()
with util.ArrayBarrier(_OnAllocated) as b:
[alloc.NextId(self._client, callback=b.Callback()) for i in xrange(num_ids)]
@async_test
def testMultiple(self):
"""Tests that multiple allocations from the same sequence do
not overlap.
"""
allocs = [IdAllocator('type'), IdAllocator('type')]
num_ids = 3000
def _OnAllocated(id_lists):
assert len(id_lists) == 2
id_set1 = set(id_lists[0])
id_set2 = set(id_lists[1])
assert len(id_set1) == 3000
assert len(id_set2) == 3000
assert id_set1.isdisjoint(id_set2)
self.stop()
with util.ArrayBarrier(_OnAllocated) as b:
with util.ArrayBarrier(b.Callback()) as b1:
[allocs[0].NextId(self._client, b1.Callback()) for i in xrange(num_ids)]
with util.ArrayBarrier(b.Callback()) as b2:
[allocs[1].NextId(self._client, b2.Callback()) for i in xrange(num_ids)]
| 30.28 | 82 | 0.694188 | 205 | 1,514 | 4.960976 | 0.370732 | 0.035398 | 0.043265 | 0.041298 | 0.267453 | 0.220256 | 0.159292 | 0.082596 | 0 | 0 | 0 | 0.030303 | 0.193527 | 1,514 | 49 | 83 | 30.897959 | 0.802621 | 0.104359 | 0 | 0.235294 | 0 | 0 | 0.041199 | 0.018727 | 0 | 0 | 0 | 0 | 0.147059 | 1 | 0.117647 | false | 0 | 0.147059 | 0 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dd2928863b82fbf5dba0596d90335b5ef6bbbb9b | 2,429 | py | Python | ayame/link.py | hattya/ayame | e8bb2b0ace79cd358b1384270cb9c5e809e12b5d | ["MIT"] | 1 | 2022-03-05T03:21:13.000Z | 2022-03-05T03:21:13.000Z | ayame/link.py | hattya/ayame | e8bb2b0ace79cd358b1384270cb9c5e809e12b5d | ["MIT"] | 1 | 2021-08-25T13:41:34.000Z | 2021-08-25T13:41:34.000Z | ayame/link.py | hattya/ayame | e8bb2b0ace79cd358b1384270cb9c5e809e12b5d | ["MIT"] | 1 | 2018-03-04T21:47:27.000Z | 2018-03-04T21:47:27.000Z |
#
# ayame.link
#
# Copyright (c) 2012-2021 Akinori Hattori <hattya@gmail.com>
#
# SPDX-License-Identifier: MIT
#
import urllib.parse
from . import core, markup, uri, util
from . import model as mm
from .exception import ComponentError
__all__ = ['Link', 'ActionLink', 'PageLink']
# HTML elements
_A = markup.QName(markup.XHTML_NS, 'a')
_LINK = markup.QName(markup.XHTML_NS, 'link')
_AREA = markup.QName(markup.XHTML_NS, 'area')
_SCRIPT = markup.QName(markup.XHTML_NS, 'script')
_STYLE = markup.QName(markup.XHTML_NS, 'style')
# HTML attributes
_HREF = markup.QName(markup.XHTML_NS, 'href')
_SRC = markup.QName(markup.XHTML_NS, 'src')
class Link(core.MarkupContainer):
def __init__(self, id, model=None):
if isinstance(model, str):
model = mm.Model(model)
super().__init__(id, model)
def on_render(self, element):
# modify attribute
attr = None
if element.qname in (_A, _LINK, _AREA):
attr = _HREF
elif element.qname in (_SCRIPT, _STYLE):
attr = _SRC
if attr is not None:
uri = self.new_uri(element.attrib.get(attr))
if uri is None:
if attr in element.attrib:
del element.attrib[attr]
else:
element.attrib[attr] = uri
# replace children by model object
body = self.model_object_as_string()
if body:
element[:] = (body,)
# render link
return super().on_render(element)
def new_uri(self, uri):
return uri
class ActionLink(Link):
def on_fire(self):
self.on_click()
def new_uri(self, _):
query = self.request.query.copy()
query[core.AYAME_PATH] = [self.path()]
environ = self.environ.copy()
environ['QUERY_STRING'] = urllib.parse.urlencode(query, doseq=True)
return uri.request_uri(environ, True)
def on_click(self):
pass
class PageLink(Link):
def __init__(self, id, page, values=None, anchor=''):
super().__init__(id, None)
if (not issubclass(page, core.Page)
or page is core.Page):
raise ComponentError(self, f"'{util.fqon_of(page)}' is not a subclass of Page")
self._page = page
self._values = values
self._anchor = anchor
def new_uri(self, uri):
return self.uri_for(self._page, self._values, self._anchor)
| 26.692308 | 91 | 0.611774 | 311 | 2,429 | 4.569132 | 0.315113 | 0.054187 | 0.083744 | 0.108374 | 0.149191 | 0.030964 | 0 | 0 | 0 | 0 | 0 | 0.004507 | 0.269247 | 2,429 | 90 | 92 | 26.988889 | 0.796056 | 0.079868 | 0 | 0.034483 | 0 | 0 | 0.049055 | 0.009901 | 0 | 0 | 0 | 0 | 0 | 1 | 0.137931 | false | 0.017241 | 0.068966 | 0.034483 | 0.327586 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
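Editor's note: a minimal sketch of how the Link hierarchy above is typically extended. It is not part of the dataset row; `ExternalLink` and its URL are hypothetical, and the import assumes the ayame package layout shown in the file path.

from ayame import link

class ExternalLink(link.Link):
    """Hypothetical Link subclass that pins the href to a fixed URL."""

    def new_uri(self, uri):
        # on_render() writes the returned value into the href/src attribute;
        # returning None would delete the attribute instead.
        return 'https://example.org/docs'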
dd2b8d0943d4247577bcc13dba218fa49f1ddda9 | 5,775 | py | Python | classes.py | mattjoman/deep-RL-snake | c1b48ef3cb7ac0ad068887df1f60bc83a626f9d6 | ["MIT"] | null | null | null | classes.py | mattjoman/deep-RL-snake | c1b48ef3cb7ac0ad068887df1f60bc83a626f9d6 | ["MIT"] | null | null | null | classes.py | mattjoman/deep-RL-snake | c1b48ef3cb7ac0ad068887df1f60bc83a626f9d6 | ["MIT"] | null | null | null |
import pygame
import numpy as np
import random
import torch
from torch import nn
from torch.nn import functional as F
class CNN(torch.nn.Module):
def __init__(self):
super(CNN, self).__init__()
torch.manual_seed(50)
self.layer1 = nn.Sequential(
# input: (N, 3, 10, 10)
# output: (N, 32, 8, 8)
nn.Conv2d(3, 32, (3, 3), stride=1),
nn.ReLU())
self.layer2 = nn.Sequential(
# input: (N, 32, 8, 8)
# output: (N, 64, 6, 6)
nn.Conv2d(32, 64, (3, 3), stride=1),
nn.ReLU())
self.layer3 = nn.Sequential(
# input: (N, 64, 6, 6)
# output: (N, 32, 4, 4)
nn.Conv2d(64, 32, (3, 3), stride=1),
nn.ReLU())
self.layer4 = nn.Sequential(
# input: (32*4*4)
nn.Linear(512, 128, bias=True),
nn.ReLU())
self.layer5 = nn.Sequential(
nn.Linear(128, 4, bias=True))
#self.optimiser = torch.optim.SGD(self.parameters(), lr=1)
self.optimiser = torch.optim.Adam(self.parameters(), lr=1)
def forward(self, x):
out = self.layer1(x.to(torch.float32))
out = self.layer2(out)
out = self.layer3(out)
out = out.view(out.size(0), -1) # flatten
out = self.layer4(out)
out = self.layer5(out)
#print(out)
return out
class Snake():
def __init__(self, rows=10, columns=10):
self.direction = 3
self.init_body(rows, columns)
self.apple = False
self.score = 0
self.timestep_counter = 0
def add_to_body(self):
if self.direction == 2:
new_head = [self.body[0][0] - 1, self.body[0][1]]
elif self.direction == 3:
new_head = [self.body[0][0] + 1, self.body[0][1]]
elif self.direction == 0:
new_head = [self.body[0][0], self.body[0][1] - 1]
else:
new_head = [self.body[0][0], self.body[0][1] + 1]
self.body.insert(0, new_head)
return
def remove_from_body(self):
del self.body[-1]
return
def move(self):
self.add_to_body()
self.timestep_counter += 1
if not self.apple:
self.remove_from_body()
else:
self.apple = False
return
def eat_apple(self):
self.apple = True
self.score += 1
return
def init_score(self):
self.score = 0
return
def init_timestep_counter(self):
self.timestep_counter = 0
return
def init_body(self, rows, columns):
self.body = [[np.random.randint(1, rows-1), np.random.randint(1, columns-1)]]
return
class Player(Snake):
def set_direction(self, keys):
if keys[pygame.K_LEFT]:
self.direction = 0 # left
elif keys[pygame.K_RIGHT]:
self.direction = 1 # right
elif keys[pygame.K_UP]:
self.direction = 2 # up
elif keys[pygame.K_DOWN]:
self.direction = 3 # down
return
class AI(Snake):
def __init__(self):
super().__init__()
self.epsilon = 0.1
self.gamma = 0.3
self.Q_net = CNN()
self.target_Q_net = self.Q_net  # NOTE: this aliases the online network; a true DQN target network would hold a copy of the weights
self.replay_mem = []
self.replay_mem_limit = 500
self.batch_size = 64
self.game_count = 0
def set_direction(self, state):
Q_vals = self.Q_net.forward(torch.from_numpy(state))
self.direction, _ = self.select_action(Q_vals)
return
def select_action(self, Q_vals):
""" Returns the action selected and Q vals for each action """
max_ = Q_vals.max().item()
for i in range(4):
if Q_vals[0][i].item() == max_:
greedy_direction = i
random_num = np.random.uniform(0, 1)
self.epsilon = 1 / (max(1, self.game_count) ** (1 / 2.5))  # guard against division by zero before the first game
if random_num > self.epsilon:
return greedy_direction, max_
else:
random_direction = np.random.randint(0, 4)  # upper bound exclusive; np.random.random_integers was removed from NumPy
return random_direction, Q_vals[0][random_direction].item()
def learn_from_mem(self):
if self.timestep_counter % 5 == 0:
self.target_Q_net = self.Q_net
if len(self.replay_mem) < self.batch_size:
return
for b in range(self.batch_size):
mem = self.select_mem()
reward = mem[2]
Q_0_vals = self.Q_net.forward(torch.from_numpy(mem[0]))
Q_1_vals = self.target_Q_net.forward(torch.from_numpy(mem[3]))
Q_0 = Q_0_vals[0][mem[1]] # get Q val for the action taken
Q_1 = Q_1_vals.max().detach() # get the maximum Q val for the next state
loss = F.smooth_l1_loss(Q_0, (self.gamma * Q_1) + reward)
self.Q_net.optimiser.zero_grad()
loss.backward()
for param in self.Q_net.parameters():
param.grad.data.clamp_(-1, 1) # do we need to clamp?
self.Q_net.optimiser.step()
return
def update_replay_mem(self, s0, a0, r, s1):
if len(self.replay_mem) >= self.replay_mem_limit:
del self.replay_mem[0]
self.replay_mem.append([s0, a0, r, s1])
return
def select_mem(self):
index = np.random.randint(0, len(self.replay_mem))  # np.random.random_integers was removed from NumPy
return self.replay_mem[index]
class Apple():
def __init__(self, rows, columns):
self.set_loc(rows, columns)
def set_loc(self, rows, columns):
self.loc = [random.randint(1, rows-2), random.randint(1, columns-2)]
return
if __name__ == "__main__":
ai = AI()
state = np.random.rand(1, 3, 10, 10)  # the CNN expects a batch of 3-channel 10x10 grids (see the layer shape comments above)
ai.set_direction(state)
print(ai.direction)
print(ai.body[0])
ai.move()
print(ai.body[0])
| 26.612903 | 86 | 0.546667 | 809 | 5,775 | 3.731768 | 0.201483 | 0.029149 | 0.038755 | 0.019874 | 0.179861 | 0.14475 | 0.135807 | 0.084134 | 0.04836 | 0.04836 | 0 | 0.047424 | 0.324502 | 5,775 | 216 | 87 | 26.736111 | 0.72648 | 0.06684 | 0 | 0.222222 | 0 | 0 | 0.001491 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.124183 | false | 0 | 0.039216 | 0 | 0.30719 | 0.019608 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dd34c031db159b934c285da9deacefad0961aecf | 762 | py | Python | src/server/alembic/versions/6b8cf99be000_add_user_journal_table.py | princessruthie/paws-data-pipeline | 6f7095f99b9ad31b0171b256cf18849d63445c91 | ["MIT"] | 27 | 2019-11-20T20:20:30.000Z | 2022-01-31T17:24:55.000Z | src/server/alembic/versions/6b8cf99be000_add_user_journal_table.py | mrcrnkovich/paws-data-pipeline | 7c0bd4c5f23276f541611cb564f2f5abbb6b9887 | ["MIT"] | 348 | 2019-11-26T20:34:02.000Z | 2022-02-27T20:28:20.000Z | src/server/alembic/versions/6b8cf99be000_add_user_journal_table.py | mrcrnkovich/paws-data-pipeline | 7c0bd4c5f23276f541611cb564f2f5abbb6b9887 | ["MIT"] | 20 | 2019-12-03T23:50:33.000Z | 2022-02-09T18:38:25.000Z |
"""Add user journal table
Revision ID: 6b8cf99be000
Revises: 36c4ecbfd11a
Create Date: 2020-12-21 15:08:07.784568
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import func
# revision identifiers, used by Alembic.
revision = "6b8cf99be000"
down_revision = "36c4ecbfd11a"
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
"pdp_user_journal",
sa.Column("_id", sa.Integer, primary_key=True),
sa.Column("stamp", sa.DateTime, nullable=False, server_default=func.now()),
sa.Column("username", sa.String(50), nullable=False),
sa.Column("event_type", sa.String(50)),
sa.Column("detail", sa.String(120)),
)
def downgrade():
op.drop_table('pdp_user_journal')
| 23.090909 | 83 | 0.692913 | 103 | 762 | 5 | 0.582524 | 0.07767 | 0.046602 | 0.073786 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.08121 | 0.175853 | 762 | 32 | 84 | 23.8125 | 0.738854 | 0.198163 | 0 | 0 | 0 | 0 | 0.145937 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.166667 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dd368cbbf1f2713371fc20b46be0df6fde83d872 | 1,906 | py | Python | Python/WearherTelegram/weatherbot.py | OnCode-channel/OnCode | 4aa7022932bc5aece39121233b34ebea12063717 | ["CC0-1.0"] | 3 | 2021-11-21T05:09:45.000Z | 2021-11-21T09:55:02.000Z | Python/WearherTelegram/weatherbot.py | OnCode-channel/OnCode | 4aa7022932bc5aece39121233b34ebea12063717 | ["CC0-1.0"] | null | null | null | Python/WearherTelegram/weatherbot.py | OnCode-channel/OnCode | 4aa7022932bc5aece39121233b34ebea12063717 | ["CC0-1.0"] | 1 | 2022-03-16T20:34:29.000Z | 2022-03-16T20:34:29.000Z |
import telebot
from pyowm import OWM
from pyowm.utils.config import get_default_config
bot = telebot.TeleBot("telegram API-key")
@bot.message_handler(commands=['start'])
def welcome(message):
bot.send_message(message.chat.id, 'Welcome, ' + str(message.from_user.first_name) + ',\n/start - start the bot\n/help - list bot commands\n/credits - bot author\nTo get the weather, send a city name in the chat')
@bot.message_handler(commands=['help'])
def help(message):
bot.send_message(message.chat.id, '/start - start the bot\n/help - list bot commands\n/credits - bot author\nTo get the weather, send a city name in the chat')
@bot.message_handler(content_types=['text'])
def test(message):
try:
place = message.text
config_dict = get_default_config()
config_dict['language'] = 'ru'
owm = OWM('owm api-key', config_dict)
mgr = owm.weather_manager()
observation = mgr.weather_at_place(place)
w = observation.weather
t = w.temperature("celsius")
t1 = t['temp']
t2 = t['feels_like']
t3 = t['temp_max']
t4 = t['temp_min']
wi = w.wind()['speed']
humi = w.humidity
cl = w.clouds
st = w.status
dt = w.detailed_status
ti = w.reference_time('iso')
pr = w.pressure['press']
vd = w.visibility_distance
bot.send_message(message.chat.id, "Temperature in " + str(place) + " is " + str(t1) + " °C" + "\n" +
"Maximum temperature " + str(t3) + " °C" + "\n" +
"Minimum temperature " + str(t4) + " °C" + "\n" +
"Feels like " + str(t2) + " °C" + "\n" +
"Wind speed " + str(wi) + " m/s" + "\n" +
"Pressure " + str(pr) + " mmHg" + "\n" +
"Humidity " + str(humi) + " %" + "\n" +
"Visibility " + str(vd) + " meters" + "\n" +
"Description " + str(st) + "\n\n" + str(dt))
except Exception:
bot.send_message(message.chat.id, "City not found!")
print(str(message.text), "- not found")
bot.polling(none_stop=True, interval=0)
| 32.862069 | 217 | 0.64638 | 277 | 1,906 | 4.364621 | 0.444043 | 0.02316 | 0.046319 | 0.069479 | 0.281224 | 0.281224 | 0.236559 | 0.180314 | 0.180314 | 0.180314 | 0 | 0.005732 | 0.176285 | 1,906 | 57 | 218 | 33.438596 | 0.761783 | 0 | 0 | 0 | 0 | 0.043478 | 0.304302 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065217 | false | 0 | 0.065217 | 0 | 0.130435 | 0.021739 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dd3c5ef2c1c57128342b4cbe674344dc894fe7e9 | 14,427 | py | Python | projectroles/app_settings.py | olgabot/sodar_core | 2a012c962c763fe970261839226e848d752d14d5 | ["MIT"] | null | null | null | projectroles/app_settings.py | olgabot/sodar_core | 2a012c962c763fe970261839226e848d752d14d5 | ["MIT"] | null | null | null | projectroles/app_settings.py | olgabot/sodar_core | 2a012c962c763fe970261839226e848d752d14d5 | ["MIT"] | null | null | null |
"""Project and user settings API"""
import json
from projectroles.models import AppSetting, APP_SETTING_TYPES, SODAR_CONSTANTS
from projectroles.plugins import get_app_plugin, get_active_plugins
# SODAR constants
APP_SETTING_SCOPE_PROJECT = SODAR_CONSTANTS['APP_SETTING_SCOPE_PROJECT']
APP_SETTING_SCOPE_USER = SODAR_CONSTANTS['APP_SETTING_SCOPE_USER']
APP_SETTING_SCOPE_PROJECT_USER = SODAR_CONSTANTS[
'APP_SETTING_SCOPE_PROJECT_USER'
]
# Local constants
VALID_SCOPES = [
APP_SETTING_SCOPE_PROJECT,
APP_SETTING_SCOPE_USER,
APP_SETTING_SCOPE_PROJECT_USER,
]
class AppSettingAPI:
@classmethod
def _check_project_and_user(cls, scope, project, user):
"""
Ensure one of the project and user parameters is set.
:param scope: Scope of Setting (USER, PROJECT, PROJECT_USER)
:param project: Project object
:param user: User object
:raise: ValueError if none or both objects exist
"""
if scope == APP_SETTING_SCOPE_PROJECT:
if not project:
raise ValueError('Project unset for setting with project scope')
if user:
raise ValueError('User set for setting with project scope')
elif scope == APP_SETTING_SCOPE_USER:
if project:
raise ValueError('Project set for setting with user scope')
if not user:
raise ValueError('User unset for setting with user scope')
elif scope == APP_SETTING_SCOPE_PROJECT_USER:
if not project:
raise ValueError(
'Project unset for setting with project_user scope'
)
if not user:
raise ValueError(
'User unset for setting with project_user scope'
)
@classmethod
def _check_scope(cls, scope):
"""
Ensure the validity of a scope definition.
:param scope: String
:raise: ValueError if scope is not recognized
"""
if scope not in VALID_SCOPES:
raise ValueError('Invalid scope "{}"'.format(scope))
@classmethod
def _get_json_value(cls, value):
"""
Return JSON value as dict regardless of input type
:param value: Original value (string or dict)
:raise: json.decoder.JSONDecodeError if string value is not valid JSON
:raise: ValueError if value type is not recognized or if value is not
valid JSON
:return: dict
"""
if not value:
return {}
try:
if isinstance(value, str):
return json.loads(value)
else:
json.dumps(value) # Ensure this is valid
return value
except Exception:
raise ValueError('Value is not valid JSON: {}'.format(value))
@classmethod
def _compare_value(cls, setting_obj, input_value):
"""
Compare input value to value in an AppSetting object
:param setting_obj: AppSetting object
:param input_value: Input value (string, int, bool or dict)
:return: Bool
"""
if setting_obj.type == 'JSON':
return setting_obj.value_json == cls._get_json_value(input_value)
elif setting_obj.type == 'BOOLEAN':
# TODO: Also do conversion on input value here if necessary
return bool(int(setting_obj.value)) == input_value
return setting_obj.value == str(input_value)
@classmethod
def get_default_setting(cls, app_name, setting_name, post_safe=False):
"""
Get default setting value from an app plugin.
:param app_name: App name (string, must correspond to "name" in app
plugin)
:param setting_name: Setting name (string)
:param post_safe: Whether a POST safe value should be returned (bool)
:return: Setting value (string, integer or boolean)
:raise: KeyError if nothing is found with setting_name
"""
app_plugin = get_app_plugin(app_name)
if setting_name in app_plugin.app_settings:
if (
post_safe
and app_plugin.app_settings[setting_name]['type'] == 'JSON'
):
return json.dumps(
app_plugin.app_settings[setting_name]['default']
)
return app_plugin.app_settings[setting_name]['default']
raise KeyError(
'Setting "{}" not found in app plugin "{}"'.format(
setting_name, app_name
)
)
@classmethod
def get_app_setting(
cls, app_name, setting_name, project=None, user=None, post_safe=False
):
"""
Return app setting value for a project or a user. If not set, return the
default.
:param app_name: App name (string, must correspond to "name" in app
plugin)
:param setting_name: Setting name (string)
:param project: Project object (can be None)
:param user: User object (can be None)
:param post_safe: Whether a POST safe value should be returned (bool)
:return: String or None
:raise: KeyError if nothing is found with setting_name
"""
try:
val = AppSetting.objects.get_setting_value(
app_name, setting_name, project=project, user=user
)
except AppSetting.DoesNotExist:
val = cls.get_default_setting(app_name, setting_name, post_safe)
# Handle post_safe for dict values (JSON)
if post_safe and isinstance(val, dict):
return json.dumps(val)
return val
@classmethod
def get_all_settings(cls, project=None, user=None, post_safe=False):
"""
Return all setting values. If the value is not found, return
the default.
:param project: Project object (can be None)
:param user: User object (can be None)
:param post_safe: Whether POST safe values should be returned (bool)
:return: Dict
:raise: ValueError if neither project nor user are set
"""
if not project and not user:
raise ValueError('Project and user are both unset')
ret = {}
app_plugins = get_active_plugins()
for plugin in app_plugins:
p_settings = cls.get_setting_defs(
APP_SETTING_SCOPE_PROJECT, plugin=plugin
)
for s_key in p_settings:
ret[
'settings.{}.{}'.format(plugin.name, s_key)
] = cls.get_app_setting(
plugin.name, s_key, project, user, post_safe
)
return ret
@classmethod
def get_all_defaults(cls, scope, post_safe=False):
"""
Get all default settings for a scope.
:param scope: Setting scope (PROJECT, USER or PROJECT_USER)
:param post_safe: Whether POST safe values should be returned (bool)
:return: Dict
"""
cls._check_scope(scope)
ret = {}
app_plugins = get_active_plugins()
for plugin in app_plugins:
p_settings = cls.get_setting_defs(scope, plugin=plugin)
for s_key in p_settings:
ret[
'settings.{}.{}'.format(plugin.name, s_key)
] = cls.get_default_setting(plugin.name, s_key, post_safe)
return ret
@classmethod
def set_app_setting(
cls,
app_name,
setting_name,
value,
project=None,
user=None,
validate=True,
):
"""
Set the value of an existing project or user setting. Creates the object if
not found.
:param app_name: App name (string, must correspond to "name" in app
plugin)
:param setting_name: Setting name (string)
:param value: Value to be set
:param project: Project object (can be None)
:param user: User object (can be None)
:param validate: Validate value (bool, default=True)
:return: True if changed, False if not changed
:raise: ValueError if validating and value is not accepted for setting
type
:raise: ValueError if neither project nor user are set
:raise: KeyError if setting name is not found in plugin specification
"""
if not project and not user:
raise ValueError('Project and user are both unset')
try:
setting = AppSetting.objects.get(
app_plugin__name=app_name,
name=setting_name,
project=project,
user=user,
)
if cls._compare_value(setting, value):
return False
if validate:
cls.validate_setting(setting.type, value)
if setting.type == 'JSON':
setting.value_json = cls._get_json_value(value)
else:
setting.value = value
setting.save()
return True
except AppSetting.DoesNotExist:
app_plugin = get_app_plugin(app_name)
if setting_name not in app_plugin.app_settings:
raise KeyError(
'Setting "{}" not found in app plugin "{}"'.format(
setting_name, app_name
)
)
s_def = app_plugin.app_settings[setting_name]
s_type = s_def['type']
s_mod = (
bool(s_def['user_modifiable'])
if 'user_modifiable' in s_def
else True
)
cls._check_scope(s_def['scope'])
cls._check_project_and_user(s_def['scope'], project, user)
if validate:
v = cls._get_json_value(value) if s_type == 'JSON' else value
cls.validate_setting(s_type, v)
s_vals = {
'app_plugin': app_plugin.get_model(),
'project': project,
'user': user,
'name': setting_name,
'type': s_type,
'user_modifiable': s_mod,
}
if s_type == 'JSON':
s_vals['value_json'] = cls._get_json_value(value)
else:
s_vals['value'] = value
AppSetting.objects.create(**s_vals)
return True
@classmethod
def validate_setting(cls, setting_type, setting_value):
"""
Validate setting value according to its type.
:param setting_type: Setting type
:param setting_value: Setting value
:raise: ValueError if setting_type or setting_value is invalid
"""
if setting_type not in APP_SETTING_TYPES:
raise ValueError('Invalid setting type "{}"'.format(setting_type))
elif setting_type == 'BOOLEAN':
if not isinstance(setting_value, bool):
raise ValueError(
'Please enter a valid boolean value ({})'.format(
setting_value
)
)
elif setting_type == 'INTEGER':
if (
not isinstance(setting_value, int)
and not str(setting_value).isdigit()
):
raise ValueError(
'Please enter a valid integer value ({})'.format(
setting_value
)
)
elif setting_type == 'JSON':
try:
json.dumps(setting_value)
except TypeError:
raise ValueError(
'Please enter valid JSON ({})'.format(setting_value)
)
return True
@classmethod
def get_setting_def(cls, name, plugin=None, app_name=None):
"""
Return definition for a single app setting, either based on an app name
or the plugin object.
:param name: Setting name
:param plugin: Plugin object extending ProjectAppPluginPoint
:param app_name: Name of the app plugin (string)
:return: Dict
:raise: ValueError if neither app_name nor plugin is set, or if the setting
is not found in the plugin
"""
if not plugin and not app_name:
raise ValueError('Plugin and app name both unset')
elif not plugin:
plugin = get_app_plugin(app_name)
if not plugin:
raise ValueError(
'Plugin not found with app name "{}"'.format(app_name)
)
if name not in plugin.app_settings:
raise ValueError(
'App setting not found in app "{}" with name "{}"'.format(
plugin.name, name
)
)
return plugin.app_settings[name]
@classmethod
def get_setting_defs(
cls, scope, plugin=False, app_name=False, user_modifiable=False
):
"""
Return app setting definitions of a specific scope from a plugin.
:param scope: PROJECT, USER or PROJECT_USER
:param plugin: project app plugin object extending ProjectAppPluginPoint
:param app_name: Name of the app plugin (string)
:param user_modifiable: Only return modifiable settings if True
(boolean)
:return: Dict
:raise: ValueError if scope is invalid or if neither app_name nor
plugin is set
"""
if not plugin and not app_name:
raise ValueError('Plugin and app name both unset')
if not plugin:
plugin = get_app_plugin(app_name)
if not plugin:
raise ValueError(
'Plugin not found with app name "{}"'.format(app_name)
)
cls._check_scope(scope)
return {
k: v
for k, v in plugin.app_settings.items()
if (
'scope' in v
and v['scope'] == scope
and (
not user_modifiable
or (
'user_modifiable' not in v
or v['user_modifiable'] is True
)
)
)
}
| 32.938356 | 80 | 0.561932 | 1,633 | 14,427 | 4.785671 | 0.094305 | 0.029559 | 0.024952 | 0.025336 | 0.488548 | 0.434805 | 0.376328 | 0.329367 | 0.295457 | 0.26142 | 0 | 0 | 0.366674 | 14,427 | 437 | 81 | 33.01373 | 0.855314 | 0.266583 | 0 | 0.34252 | 0 | 0 | 0.109207 | 0.007903 | 0 | 0 | 0 | 0.002288 | 0 | 1 | 0.047244 | false | 0 | 0.011811 | 0 | 0.133858 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
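Editor's note: a short usage sketch for the AppSettingAPI class above. It assumes a configured SODAR Core Django environment; `example_app`, `notify_email`, and the `project` object are hypothetical placeholders.

from projectroles.app_settings import AppSettingAPI

app_settings = AppSettingAPI()

# Read a PROJECT-scope setting, falling back to the plugin default
# if no AppSetting row exists yet.
value = app_settings.get_app_setting(
    'example_app', 'notify_email', project=project)

# Write it back; set_app_setting() returns False when the stored
# value is unchanged.
changed = app_settings.set_app_setting(
    'example_app', 'notify_email', True, project=project)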
dd3d84abfc37890e97980406a58c52b188bedbc3 | 2,835 | py | Python | util/2mass_catalog.py | spake/astrometry.net | 12c76f4a44fe90a009eeb962f2ae28b0791829b8 | ["BSD-3-Clause"] | 4 | 2018-02-13T23:11:40.000Z | 2021-09-30T16:02:22.000Z | util/2mass_catalog.py | spake/astrometry.net | 12c76f4a44fe90a009eeb962f2ae28b0791829b8 | ["BSD-3-Clause"] | null | null | null | util/2mass_catalog.py | spake/astrometry.net | 12c76f4a44fe90a009eeb962f2ae28b0791829b8 | ["BSD-3-Clause"] | 1 | 2019-02-11T06:56:30.000Z | 2019-02-11T06:56:30.000Z |
#! /usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE
from __future__ import print_function
import sys
from optparse import OptionParser
try:
import pyfits
except ImportError:
try:
from astropy.io import fits as pyfits
except ImportError:
raise ImportError("Cannot import either pyfits or astropy.io.fits")
from numpy import *
from astrometry.util.fits import *
from astrometry.util.healpix import *
from astrometry.util.starutil_numpy import *
def get_2mass_sources(ra, dec, radius=1, basefn=None):
twomass_nside = 9
if basefn is None:
twomass_pat = '2mass_hp%03i.fits'
else:
twomass_pat = basefn
hps = healpix_rangesearch(ra, dec, radius, twomass_nside)
print('2MASS healpixes in range:', hps)
allU = None
for hp in hps:
fn = twomass_pat % hp
print('2MASS filename:', fn)
U = fits_table(fn)
print(len(U), 'sources')
I = (degrees_between(ra, dec, U.ra, U.dec) < radius)
print('%i 2MASS stars within range.' % sum(I))
U = U[I]
if allU is None:
allU = U
else:
allU.append(U)
return allU
if __name__ == '__main__':
parser = OptionParser(usage='%prog [options] <ra> <dec> <output-filename>')
parser.add_option('-r', dest='radius', type='float', help='Search radius, in deg (default 1 deg)')
parser.add_option('-b', dest='basefn', help='Base filename of 2MASS FITS files (default: 2mass_hp%03i.fits)')
parser.add_option('-B', dest='band', help='Band (J, H, or K) to use for cuts')
parser.set_defaults(radius=1.0, basefn=None, band='J')
(opt, args) = parser.parse_args()
if len(args) != 3:
parser.print_help()
print()
print('Got extra arguments:', args)
sys.exit(-1)
# parse RA,Dec.
ra = float(args[0])
dec = float(args[1])
outfn = args[2]
band = opt.band.lower()
# ugh!
opts = {}
for k in ['radius', 'basefn']:
opts[k] = getattr(opt, k)
X = get_2mass_sources(ra, dec, **opts)
print('Got %i 2MASS sources.' % len(X))
#print X.about()
print('Applying cuts...')
I = logical_not(X.minor_planet)
print('not minor planet:', sum(I))
qual = X.get(band + '_quality')
# work around dumb bug where it's a single-char column rather than a byte.
nobrightness = chr(0)
I = logical_and(I, (qual != nobrightness))
print('not NO_BRIGHTNESS', sum(I))
print(len(X))
print(len(X.get(band + '_cc')))  # use the selected band instead of hard-coding J
cc = array(X.getcolumn(band + '_cc'))
ccnone = chr(0)
#print 'cc shape', cc.shape
#print cc[:10]
#print ccnone
I = logical_and(I, (cc == ccnone))
print('CC_NONE', sum(I))
X = X[I]
print('%i pass cuts' % len(X))
print('Writing to', outfn)
X.write_to(outfn)
| 28.35 | 113 | 0.610229 | 410 | 2,835 | 4.112195 | 0.382927 | 0.017794 | 0.035587 | 0.042705 | 0.04745 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013164 | 0.249735 | 2,835 | 99 | 114 | 28.636364 | 0.779502 | 0.083598 | 0 | 0.081081 | 0 | 0 | 0.190421 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013514 | false | 0.013514 | 0.162162 | 0 | 0.189189 | 0.22973 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
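Editor's note: a hedged usage sketch for get_2mass_sources() defined above. It assumes the healpix-split catalog files 2mass_hp%03i.fits are present in the working directory; the coordinates are arbitrary.

# Fetch all 2MASS sources within 2 degrees of RA=180, Dec=45.
sources = get_2mass_sources(180.0, 45.0, radius=2)
if sources is not None:
    print('%i sources within 2 deg' % len(sources))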
dd3f4f79ce1d8a927e706c3ca5d870ec9910cd7c | 682 | py | Python | models/nicknames.py | Tyson-Chicken-Nuggets/me-discord-leaderboard | d0e04c77e4f7a309cbb6315d24bd47929ba4ec54 | ["MIT"] | 4 | 2018-12-13T04:15:26.000Z | 2021-02-15T21:46:59.000Z | models/nicknames.py | Tyson-Chicken-Nuggets/me-discord-leaderboard | d0e04c77e4f7a309cbb6315d24bd47929ba4ec54 | ["MIT"] | 2 | 2019-05-17T18:47:18.000Z | 2020-09-26T01:31:39.000Z | models/nicknames.py | Tyson-Chicken-Nuggets/me-discord-leaderboard | d0e04c77e4f7a309cbb6315d24bd47929ba4ec54 | ["MIT"] | 1 | 2018-06-08T17:08:29.000Z | 2018-06-08T17:08:29.000Z |
from sqlalchemy import Column, String, Integer, ForeignKey
from sqlalchemy.orm import relationship
from models.base import Base
from models.servers import Server
from models.users import User
class Nickname(Base):
__tablename__ = 'nicknames'
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey('users.id'), nullable=False)
server_id = Column(Integer, ForeignKey('servers.id'), nullable=False)
user = relationship(User)
server = relationship(Server)
display_name = Column(String)
def __init__(self, user, server, display_name):
self.user = user
self.server = server
self.display_name = display_name
| 31 | 73 | 0.727273 | 84 | 682 | 5.72619 | 0.357143 | 0.091476 | 0.093555 | 0.10395 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.183284 | 682 | 21 | 74 | 32.47619 | 0.863555 | 0 | 0 | 0 | 0 | 0 | 0.039589 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.294118 | 0 | 0.823529 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
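Editor's note: a minimal sketch of persisting the Nickname model above. `session`, `some_user`, and `some_server` are placeholders for a configured SQLAlchemy session and previously saved User/Server rows.

# `session` is a configured SQLAlchemy session; `some_user` and
# `some_server` are existing User/Server rows (placeholders).
nick = Nickname(some_user, some_server, 'Nugget')
session.add(nick)
session.commit()

# The relationship() attributes expose the joined rows directly.
print(nick.user.id, nick.server.id, nick.display_name)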
dd409f1079701595dd303cbae441bb3663ea68de | 755 | py | Python | hgtools/managers/library.py | jaraco/hgtools | 1090d139e5dbdab864da8f1917a9e674331b6f9b | ["MIT"] | 1 | 2017-05-17T15:12:29.000Z | 2017-05-17T15:12:29.000Z | hgtools/managers/library.py | jaraco/hgtools | 1090d139e5dbdab864da8f1917a9e674331b6f9b | ["MIT"] | 12 | 2016-01-01T14:43:44.000Z | 2021-10-03T02:13:19.000Z | hgtools/managers/library.py | jaraco/hgtools | 1090d139e5dbdab864da8f1917a9e674331b6f9b | ["MIT"] | null | null | null |
import sys
from . import base
from . import cmd
from . import reentry
class MercurialInProcManager(cmd.Mercurial, base.RepoManager):
"""
A RepoManager implemented by invoking the hg command in-process.
"""
def _invoke(self, *params):
"""
Run the self.exe command in-process with the supplied params.
"""
cmd = [self.exe, '-R', self.location] + list(params)
with reentry.in_process_context(cmd) as result:
sys.modules['mercurial.dispatch'].run()
stdout = result.stdio.stdout.getvalue()
stderr = result.stdio.stderr.getvalue()
if not result.returncode == 0:
raise RuntimeError(stderr.strip() or stdout.strip())
return stdout.decode('utf-8')
| 30.2 | 69 | 0.637086 | 90 | 755 | 5.311111 | 0.566667 | 0.062762 | 0.066946 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003521 | 0.247682 | 755 | 24 | 70 | 31.458333 | 0.838028 | 0.166887 | 0 | 0 | 0 | 0 | 0.042373 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.285714 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dd41e5e1e67e9d900eb2ff0bece445448ea41207 | 1,775 | py | Python | controllers/__controller.py | VNCompany/vnforum | 770aca3a94ad1ed54628d48867c299d83215f75a | ["Unlicense"] | null | null | null | controllers/__controller.py | VNCompany/vnforum | 770aca3a94ad1ed54628d48867c299d83215f75a | ["Unlicense"] | null | null | null | controllers/__controller.py | VNCompany/vnforum | 770aca3a94ad1ed54628d48867c299d83215f75a | ["Unlicense"] | null | null | null |
from flask import redirect, url_for, render_template
from flask_login import current_user
from components.pagination import html_pagination
from db_session import create_session
class Controller:
__view__ = None
__title__ = "Page"
view_includes = {}
jquery_enabled = True
db_session = None
def __init__(self):
self.view_includes.clear()
self.view_includes["css"] = ""
self.css("main.css")
self.view_includes["js"] = ""
self.javascript("jquery.js", "main.js")
self.db_session = create_session()
@staticmethod
def static(path: str):
return url_for('static', filename=path)
def view(self, **kwargs):
if self.__view__ is None:
raise AttributeError
elif current_user.is_authenticated and current_user.is_banned():
return redirect("/logout")
else:
return render_template(str(self.__view__).replace(".", "/") + ".html",
**kwargs,
**self.view_includes,
title=self.__title__)
def css(self, *names):
if "css" not in self.view_includes.keys():
self.view_includes["css"] = ""
for name in names:
self.view_includes["css"] += f'<link type="text/css" rel="stylesheet" href="' \
f'{self.static("css/" + name)}">\n'
def javascript(self, *names):
for name in names:
self.view_includes["js"] += f'<script type="text/javascript" src="{self.static("js/" + name)}"></script>\n'
def pagination(self, max_page, pos: int, link: str):
self.view_includes["pagination_string"] = html_pagination(max_page, pos, link)
| 34.803922 | 119 | 0.580845 | 203 | 1,775 | 4.807882 | 0.35468 | 0.090164 | 0.147541 | 0.058402 | 0.061475 | 0.061475 | 0.061475 | 0 | 0 | 0 | 0 | 0 | 0.292394 | 1,775 | 50 | 120 | 35.5 | 0.77707 | 0 | 0 | 0.097561 | 0 | 0.02439 | 0.131831 | 0.025352 | 0 | 0 | 0 | 0 | 0 | 1 | 0.146341 | false | 0 | 0.097561 | 0.02439 | 0.463415 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
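Editor's note: a sketch of how the Controller base class above is meant to be subclassed. `IndexController`, its `get` handler, and the `forum.index` view name are hypothetical.

class IndexController(Controller):
    """Hypothetical controller rendering templates/forum/index.html."""
    __view__ = "forum.index"  # dots become path separators in view()
    __title__ = "Forum"

    def get(self):
        self.css("forum.css")        # append another stylesheet tag
        return self.view(topics=[])  # extra kwargs reach the template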
dd468535a193a7786f5ac49b546150a18ebcd261 | 1,172 | py | Python | setup.py | themightyoarfish/svcca | 23faa374489067c1c76cee44d92663c120603bdc | ["Apache-2.0"] | 8 | 2019-01-17T14:20:07.000Z | 2021-07-08T12:16:23.000Z | setup.py | themightyoarfish/svcca | 23faa374489067c1c76cee44d92663c120603bdc | ["Apache-2.0"] | 1 | 2019-01-30T11:44:25.000Z | 2019-02-07T15:02:02.000Z | setup.py | themightyoarfish/svcca-gpu | 23faa374489067c1c76cee44d92663c120603bdc | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
from distutils.core import setup
import setuptools
import os
root_dir = os.path.abspath(os.path.dirname(__file__))
with open(f'{root_dir}/README.md') as f:
readme = f.read()
with open(f'{root_dir}/requirements.txt') as f:
requirements = f.read().split()
packages = setuptools.find_packages('.', include='svcca.*')
setup(name='svcca',
version='0.0.1',
description='SVCCA on Numpy, Cupy, and PyTorch',
long_description=readme,
author='Rasmus Diederichsen',
author_email='rasmus@peltarion.com',
url='https://github.com/themightyoarfish/svcca-gpu',
classifiers=['Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Visualization',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: Apache License',
'Intended Audience :: Developers',
],
keywords='deep-learning pytorch cupy numpy svcca neural-networks machine-learning'.split(),
install_requires=requirements,
packages=packages,
zip_safe=False, # don't install egg, but source
)
| 33.485714 | 97 | 0.636519 | 133 | 1,172 | 5.518797 | 0.654135 | 0.02861 | 0.024523 | 0.035422 | 0.043597 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005549 | 0.231229 | 1,172 | 34 | 98 | 34.470588 | 0.809101 | 0.042662 | 0 | 0 | 0 | 0 | 0.399107 | 0.04375 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dd4835795e462053f9d98a0abafa853d67dd9bfc | 829 | py | Python | urls.py | CodeForPhilly/philly_legislative | 5774100325b5374a0510674b4a542171fff3fcd3 | ["BSD-Source-Code"] | 2 | 2017-08-29T22:27:05.000Z | 2019-04-27T20:21:31.000Z | urls.py | CodeForPhilly/philly_legislative | 5774100325b5374a0510674b4a542171fff3fcd3 | ["BSD-Source-Code"] | null | null | null | urls.py | CodeForPhilly/philly_legislative | 5774100325b5374a0510674b4a542171fff3fcd3 | ["BSD-Source-Code"] | null | null | null |
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Example:
#(r'^philly_legislative/', include('philly_legislative.foo.urls')),
# Uncomment the admin/doc line below and add 'django.contrib.admindocs'
# to INSTALLED_APPS to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
(r'^admin/(.*)', admin.site.root),
(r'^subs/$', 'phillyleg.views.index'),
(r'^subs/create/$', 'phillyleg.views.create'),
(r'^subs/unsubscribe/$', 'phillyleg.views.unsubscribe'),
#(r'^subs/(?P<subscription_id>\d+)/$', 'phillyleg.views.edit'),
(r'^subs/delete/$', 'phillyleg.views.delete')
)
| 33.16 | 76 | 0.671894 | 104 | 829 | 5.317308 | 0.451923 | 0.045208 | 0.057866 | 0.057866 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.147165 | 829 | 24 | 77 | 34.541667 | 0.782178 | 0.495778 | 0 | 0 | 0 | 0 | 0.383863 | 0.224939 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dd486d1d0f1328a725ad7af4079cf4b9fc30ab88 | 2,510 | py | Python | irf/scripts/read_corsika_headers.py | fact-project/irf | d82a3d4ae8b9ef15d9f473cdcd01a5f9c92d42a2 | ["MIT"] | null | null | null | irf/scripts/read_corsika_headers.py | fact-project/irf | d82a3d4ae8b9ef15d9f473cdcd01a5f9c92d42a2 | ["MIT"] | 8 | 2017-04-25T11:19:32.000Z | 2019-05-28T07:24:32.000Z | irf/scripts/read_corsika_headers.py | fact-project/irf | d82a3d4ae8b9ef15d9f473cdcd01a5f9c92d42a2 | ["MIT"] | null | null | null |
from corsikaio import CorsikaFile
from fact.io import to_h5py
from multiprocessing import Pool, cpu_count
from tqdm import tqdm
import os
import click
import pandas as pd
import numpy as np
from glob import glob
def get_headers(f):
with CorsikaFile(f) as cf:
run_header, event_headers, run_end = cf.read_headers()
return run_header, event_headers, run_end
event_columns = [
'run_number',
'event_number',
'particle_id',
'total_energy',
'starting_altitude',
'first_target_id',
'first_interaction_height',
'momentum_x',
'momentum_y',
'momentum_minus_z',
'zenith',
'azimuth',
]
run_header_columns = [
'run_number',
'date',
'energy_spectrum_slope',
'energy_min',
'energy_max',
]
@click.command()
@click.argument('outputfile')
@click.argument(
'inputdir',
nargs=-1,
type=click.Path(exists=True, file_okay=False, dir_okay=True),
)
def main(outputfile, inputdir):
inputfiles = []
for d in inputdir:
inputfiles.extend(glob(os.path.join(d, 'cer*')))
for f in inputfiles[:]:
if f + '.gz' in inputfiles:
inputfiles.remove(f + '.gz')
print('Processing', len(inputfiles), 'files')
with Pool(cpu_count()) as pool:
results = pool.imap_unordered(get_headers, inputfiles)
run_headers = []
run_ends = []
for run_header, event_headers, run_end in tqdm(results, total=len(inputfiles)):
run_headers.append(run_header)
run_ends.append(run_end)
df = pd.DataFrame(event_headers[event_columns])
to_h5py(df, outputfile, key='corsika_events', mode='a')
print('saving runwise information')
runs = pd.DataFrame(np.array(run_headers)[run_header_columns])
# some runs might have failed and thus no run end block
for run_end in run_ends:
if run_end is not None:
dtype = run_end.dtype
break
else:
raise IOError('All run_end blocks are None, all runs failed.')
dummy = np.array([(b'RUNE', np.nan, np.nan)], dtype=dtype)[0]
run_ends = [r if r is not None else dummy for r in run_ends]
run_ends = np.array(run_ends)
print('Number of failed runs:', np.count_nonzero(np.isnan(run_ends['n_events'])))
runs['n_events'] = run_ends['n_events']
to_h5py(runs, outputfile, key='corsika_runs', mode='a')
print('done')
if __name__ == '__main__':
main()
| 25.1 | 89 | 0.632669 | 336 | 2,510 | 4.497024 | 0.39881 | 0.035738 | 0.027796 | 0.041694 | 0.053607 | 0.053607 | 0 | 0 | 0 | 0 | 0 | 0.002655 | 0.249801 | 2,510 | 99 | 90 | 25.353535 | 0.799788 | 0.021116 | 0 | 0.026667 | 0 | 0 | 0.166599 | 0.01833 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026667 | false | 0 | 0.12 | 0 | 0.16 | 0.053333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dd491d9bbf97708bde610843ff7316857a2a3334 | 6,452 | py | Python | assignment 1/question3/q3.py | Eunoia1729/soft-computing | d7fc155378d1bb0b914a6f660095653e32d2c0b8 | ["Apache-2.0"] | 1 | 2021-11-14T15:02:35.000Z | 2021-11-14T15:02:35.000Z | assignment 1/question3/q3.py | Eunoia1729/soft-computing | d7fc155378d1bb0b914a6f660095653e32d2c0b8 | ["Apache-2.0"] | null | null | null | assignment 1/question3/q3.py | Eunoia1729/soft-computing | d7fc155378d1bb0b914a6f660095653e32d2c0b8 | ["Apache-2.0"] | null | null | null |
"""## Question 3: Scrap Hotel Data
The below code is for India and can be extended to other countries by adding an outer loop given in the last part. The below codes takes several minutes to run.
"""
import requests
import pandas as pd
from bs4 import BeautifulSoup
hotelname_list = []
city_list = []
countries_list = []
rating_list = []
prince_list = []
Amenities_list = []
HotelDescription_list = []
Review1_list = []
Review2_list = []
hotel_name = ""
city_name = ""
country_name = ""
ratingl = ""
pricel = ""
amenities = ""
descriptionl = ""
review1l = ""
review2l = ""
url = 'https://www.goibibo.com/destinations/all-states-in-india/'
data = requests.get(url)
html = data.text
soup = BeautifulSoup(html, 'html.parser')
cards = soup.find_all('div', {'class' : 'col-md-4 col-sm-4 col-xs-12 filtr-item posrel'})
state_urls = []
state_names = []
for card in cards :
for a in card.find_all('a', href=True):
if a.text.rstrip():
state_urls.append(a['href'])
state_names.append(a.text.rstrip())
length = len(state_urls)
for i in range(length):
url = state_urls[i]
country_name = 'India'
data = requests.get(url)
html = data.text
soup = BeautifulSoup(html, 'html.parser')
places_to_visit = soup.find('div', {'class' : 'place-to-visit-container'})
if(places_to_visit):
card = places_to_visit.find('div', {'class' : 'col-md-12'})
city_urls = {}
for a in card.find_all('a', href=True):
if a['href']:
parts = a['href'].split('/')  # renamed from `list` to avoid shadowing the built-in
city_urls[parts[4]] = 'https://www.goibibo.com/hotels/hotels-in-' + parts[4] + '-ct/'
for city in city_urls:
print(f'Extracting for city : {city}')
city_name = city
url = city_urls[city]
response = requests.get(url)
data = BeautifulSoup(response.text, 'html.parser')
cards_price_data = data.find_all('p', attrs={'class', 'HotelCardstyles__CurrentPrice-sc-1s80tyk-27 czKsrL'})
cards_url_data = data.find_all('div', attrs={'class', 'HotelCardstyles__HotelNameWrapperDiv-sc-1s80tyk-11 hiiHjq'})
hotel_price = {}
hotel_url = {}
for i in range(0, len(cards_price_data)):
hotel_price[cards_url_data[i].text.rstrip()] = cards_price_data[i].text.rstrip()
hotel_url[cards_url_data[i].text.rstrip()] = 'https://www.goibibo.com' + cards_url_data[i].find('a', href = True)['href']
for i in range(0, len(cards_price_data)):
url = hotel_url[cards_url_data[i].text.rstrip()]
data = requests.get(url)
html = data.text
hotel_name = cards_url_data[i].text.rstrip()
pricel = hotel_price[cards_url_data[i].text.rstrip()]
# print('Extracting for hotel : ' + cards_url_data[i].text.rstrip())
soup = BeautifulSoup(html, 'html.parser')
div = soup.find('div', { 'id': 'root' })
description = div.find('section', {'class' : 'HotelDetailsMain__HotelDetailsContainer-sc-2p7gdu-0 kpmitu'})
descriptiont = description.find('span', {'itemprop' : 'streetAddress'})
if descriptiont:
address = descriptiont.text.rstrip().replace(' View on Map', '')
descriptionl = address
rating = 'Rating not found'
ratingdata = description.find('span', {'itemprop' : 'ratingValue'}) #contains rating
if ratingdata:
rating = ratingdata.text.rstrip()
ratingl = rating
review1 = 'Review not found'
review2 = 'Review not found'
reviews = div.find_all('span', {'class' : 'UserReviewstyles__UserReviewTextStyle-sc-1y05l7z-4 dTkBBw'})
if(len(reviews) > 1):
review1 = reviews[0].text.rstrip()
if(len(reviews) > 3):
review2 = reviews[3].text.rstrip()
review1l = review1
review2l = review2
amenities_list = [] #contains all the amenities.
amenitiesdiv = div.find('div', {'class' : 'Amenitiesstyles__AmenitiesListBlock-sc-10opy4a-4 cMbIgg'})
if amenitiesdiv:
for amenity in amenitiesdiv.find_all('span', {'class':'Amenitiesstyles__AmenityItemText-sc-10opy4a-8 iwRmcg'}) :
if amenity:
amenities_list.append(amenity.text.rstrip())
else:
amenities_list.append('Amenity Not Found')
amenities = amenities_list
hotelname_list.append(hotel_name)
city_list.append(city_name)
countries_list.append(country_name)
rating_list.append(ratingl)
prince_list.append(pricel)
Amenities_list.append(amenities)
HotelDescription_list.append(descriptionl)
Review1_list.append(review1l)
Review2_list.append(review2l)
print(f'Extracted {len(cards_price_data)} hotels at {city} successfully')
records = {'Hotel_Name': hotelname_list, 'City_Name': city_list, 'country_name': countries_list,
'Rating' : rating_list, 'Price/Night' : prince_list, 'Amenities' : Amenities_list,
'Description' : HotelDescription_list, 'Review1' : Review1_list, 'Review2' : Review2_list}  # renamed from `dict` to avoid shadowing the built-in
df = pd.DataFrame(records)
df.to_csv('hotels.csv')
"""To extract for all the countries, we need to use the below code in the outer loop"""
hotelname_list = []
city_list = []
countries_list = []
rating_list = []
prince_list = []
Amenities_list = []
HotelDescription_list = []
Review1_list = []
Review2_list = []
hotel_name = ""
city_name = ""
country_name = ""
ratingl = ""
pricel = ""
amenities = ""
descriptionl = ""
review1l = ""
review2l = ""
url = 'https://www.goibibo.com/destinations/intl/all-countries/'
data = requests.get(url)
html = data.text
soup = BeautifulSoup(html, 'html.parser')
cards = soup.find_all('div', {'class' : 'col-md-4 col-sm-4 col-xs-12 filtr-item posrel'})
country_urls = []
country_names = []
for card in cards :
for a in card.find_all('a', href=True):
if a['href']:
country_urls.append(a['href'])
country_names.append(a.text.rstrip())
length = len(country_urls)
for i in range(length):
url = country_urls[i]
country_name = country_names[i]
data = requests.get(url)
html = data.text
soup = BeautifulSoup(html, 'html.parser')
places_to_visit = soup.find('div', {'class' : 'place-to-visit-container'})
if(places_to_visit):
card = places_to_visit.find('div', {'class' : 'col-md-12'})
city_urls = {}
for a in card.find_all('a', href=True):
if a['href']:
parts = a['href'].split('/')  # renamed from `list` to avoid shadowing the built-in
city_urls[parts[3]] = 'https://www.goibibo.com/hotels/intl-hotels-in-' + parts[3] + '-ct/'
print(country_name)
| 36.451977 | 160 | 0.653131 | 843 | 6,452 | 4.829181 | 0.204033 | 0.036846 | 0.023581 | 0.022353 | 0.416851 | 0.397445 | 0.386146 | 0.351756 | 0.320314 | 0.306067 | 0 | 0.014283 | 0.196993 | 6,452 | 177 | 161 | 36.451977 | 0.771473 | 0.046962 | 0 | 0.481013 | 0 | 0 | 0.205319 | 0.059134 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.018987 | 0 | 0.018987 | 0.018987 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dd4965798452f29205244dc8f8464e898af885fa | 234 | py | Python | groundstation/ROV/OCR/SScrop.py | iturov/rov2018 | ca1949806d105a2caddf2cf7a1361e2d3f6a1246 | ["MIT"] | 3 | 2018-01-26T14:00:50.000Z | 2018-08-08T06:44:21.000Z | groundstation/ROV/OCR/SScrop.py | iturov/rov2018 | ca1949806d105a2caddf2cf7a1361e2d3f6a1246 | ["MIT"] | null | null | null | groundstation/ROV/OCR/SScrop.py | iturov/rov2018 | ca1949806d105a2caddf2cf7a1361e2d3f6a1246 | ["MIT"] | 2 | 2018-08-08T06:44:23.000Z | 2020-10-24T11:36:33.000Z |
import pyscreenshot as ImageGrab
i=0
src_path ="C:\\Users\\Public\\ROV\OCR\\"
if __name__ == "__main__":
# part of the screen
im=ImageGrab.grab(bbox=(200,100,1100,600)) # X1,Y1,X2,Y2
im.save(src_path + 'init.png')
| 14.625 | 60 | 0.645299 | 38 | 234 | 3.710526 | 0.894737 | 0.099291 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.09375 | 0.179487 | 234 | 15 | 61 | 15.6 | 0.640625 | 0.128205 | 0 | 0 | 0 | 0 | 0.22335 | 0.142132 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dd4ba76a5fa9e5f97446998ac4f6a5e6ee41ec63 | 3,008 | py | Python | tests/http_client/conftest.py | sjaensch/aiobravado | d3f1eb71883b1f24c4b592917890160eb3d3cbcc | ["BSD-3-Clause"] | 19 | 2017-11-20T22:47:12.000Z | 2021-12-23T15:56:41.000Z | tests/http_client/conftest.py | sjaensch/aiobravado | d3f1eb71883b1f24c4b592917890160eb3d3cbcc | ["BSD-3-Clause"] | 10 | 2018-01-11T12:53:01.000Z | 2020-01-27T20:05:51.000Z | tests/http_client/conftest.py | sjaensch/aiobravado | d3f1eb71883b1f24c4b592917890160eb3d3cbcc | ["BSD-3-Clause"] | 4 | 2017-11-18T12:37:14.000Z | 2021-03-19T14:48:13.000Z |
# -*- coding: utf-8 -*-
import threading
import time
import bottle
import ephemeral_port_reserve
import pytest
import umsgpack
from bravado_core.content_type import APP_JSON
from bravado_core.content_type import APP_MSGPACK
from six.moves import urllib
ROUTE_1_RESPONSE = b'HEY BUDDY'
ROUTE_2_RESPONSE = b'BYE BUDDY'
API_RESPONSE = {'answer': 42}
SWAGGER_SPEC_DICT = {
'swagger': '2.0',
'info': {'version': '1.0.0', 'title': 'Integration tests'},
'definitions': {
'api_response': {
'properties': {
'answer': {
'type': 'integer'
},
},
'required': ['answer'],
'type': 'object',
'x-model': 'api_response',
'title': 'api_response',
}
},
'basePath': '/',
'paths': {
'/json': {
'get': {
'operationId': 'get_json',
'tags': ['json'],
'produces': ['application/json'],
'responses': {
'200': {
'description': 'HTTP/200',
'schema': {'$ref': '#/definitions/api_response'},
},
},
},
},
'/msgpack': {
'get': {
'produces': ['application/msgpack'],
'responses': {
'200': {
'description': 'HTTP/200',
'schema': {'$ref': '#/definitions/api_response'},
}
}
}
}
}
}
@bottle.get('/swagger.json')
def swagger_spec():
return SWAGGER_SPEC_DICT
@bottle.get('/json')
def api_json():
bottle.response.content_type = APP_JSON
return API_RESPONSE
@bottle.route('/msgpack')
def api_msgpack():
bottle.response.content_type = APP_MSGPACK
return umsgpack.packb(API_RESPONSE)
@bottle.route('/1')
def one():
return ROUTE_1_RESPONSE
@bottle.route('/2')
def two():
return ROUTE_2_RESPONSE
@bottle.post('/double')
def double():
x = bottle.request.params['number']
return str(int(x) * 2)
@bottle.get('/sleep')
def sleep_api():
sec_to_sleep = float(bottle.request.GET.get('sec', '1'))
time.sleep(sec_to_sleep)
return sec_to_sleep
def wait_unit_service_starts(url, timeout=10):
start = time.time()
while time.time() < start + timeout:
try:
urllib.request.urlopen(url, timeout=2)
except urllib.error.HTTPError:
return
except urllib.error.URLError:
time.sleep(0.1)
@pytest.yield_fixture(scope='session')
def threaded_http_server():
port = ephemeral_port_reserve.reserve()
thread = threading.Thread(
target=bottle.run, kwargs={'host': 'localhost', 'port': port},
)
thread.daemon = True
thread.start()
server_address = 'http://localhost:{port}'.format(port=port)
wait_unit_service_starts(server_address)
yield server_address
| 24.064 | 73 | 0.539894 | 307 | 3,008 | 5.104235 | 0.358306 | 0.056158 | 0.042119 | 0.028079 | 0.158264 | 0.122527 | 0.122527 | 0.077856 | 0.077856 | 0.077856 | 0 | 0.016019 | 0.31516 | 3,008 | 124 | 74 | 24.258065 | 0.74466 | 0.006981 | 0 | 0.098039 | 0 | 0 | 0.176549 | 0.01742 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088235 | false | 0 | 0.088235 | 0.029412 | 0.254902 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
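Editor's note: a sketch of a test consuming the threaded_http_server fixture above. The test module and endpoint choice are illustrative; it targets the /double route that the bottle app registers.

import urllib.parse
import urllib.request

def test_double_endpoint(threaded_http_server):
    # The fixture yields the bottle app's base URL once it is reachable.
    data = urllib.parse.urlencode({'number': '21'}).encode()
    with urllib.request.urlopen(threaded_http_server + '/double', data=data) as resp:
        assert resp.read() == b'42'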
dd4bd1dde3eae994bf4970c151cbd96f077c070c | 1,479 | py | Python | test/test_convvae.py | kejiejiang/UnsupervisedDeepLearning-Pytorch | 6ea7b7151ae62bf0130b56cc023f2be068aa87f0 | ["MIT"] | 87 | 2017-11-22T02:59:24.000Z | 2022-01-16T13:08:40.000Z | test/test_convvae.py | CauchyLagrange/UnsupervisedDeepLearning-Pytorch | 6ea7b7151ae62bf0130b56cc023f2be068aa87f0 | ["MIT"] | 3 | 2018-04-24T11:46:51.000Z | 2020-01-07T00:01:46.000Z | test/test_convvae.py | CauchyLagrange/UnsupervisedDeepLearning-Pytorch | 6ea7b7151ae62bf0130b56cc023f2be068aa87f0 | ["MIT"] | 25 | 2018-03-15T04:02:21.000Z | 2021-12-30T09:24:19.000Z |
import torch
import torch.utils.data
from torchvision import datasets, transforms
import numpy as np
from udlp.autoencoder.convVAE import ConvVAE
import argparse
parser = argparse.ArgumentParser(description='VAE MNIST Example')
parser.add_argument('--lr', type=float, default=0.0001, metavar='N',
help='learning rate for training (default: 0.0001)')
parser.add_argument('--batch-size', type=int, default=128, metavar='N',
help='input batch size for training (default: 128)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--save', type=str, default="", metavar='N',
help='path to save the trained model state (default: do not save)')
args = parser.parse_args()
train_loader = torch.utils.data.DataLoader(
datasets.SVHN('./dataset/svhn', split='train', download=True,
transform=transforms.ToTensor()),
batch_size=args.batch_size, shuffle=True, num_workers=2)
test_loader = torch.utils.data.DataLoader(
datasets.SVHN('./dataset/svhn', split='test', download=True, transform=transforms.ToTensor()),
batch_size=args.batch_size, shuffle=True, num_workers=2)
vae = ConvVAE(width=32, height=32, nChannels=3, hidden_size=500, z_dim=100, binary=True,
nFilters=64)
vae.fit(train_loader, test_loader, lr=args.lr, num_epochs=args.epochs)
if args.save!="":
torch.save(vae.state_dict(), args.save)
| 46.21875 | 98 | 0.697769 | 202 | 1,479 | 5.014851 | 0.410891 | 0.053307 | 0.067127 | 0.035538 | 0.361303 | 0.361303 | 0.361303 | 0.361303 | 0.361303 | 0.361303 | 0 | 0.028916 | 0.158215 | 1,479 | 31 | 99 | 47.709677 | 0.784739 | 0 | 0 | 0.142857 | 0 | 0 | 0.171062 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.214286 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dd57860debea07d7b1dee00c8d3f246398e5a1ff
| 573
|
py
|
Python
|
modules/yats/middleware/header.py
|
PrathameshBolade/yats
|
93bb5271255120b7131a3bc416e3386428a4d3ec
|
[
"MIT"
] | 54
|
2015-01-26T07:56:59.000Z
|
2022-03-10T18:48:05.000Z
|
modules/yats/middleware/header.py
|
PrathameshBolade/yats
|
93bb5271255120b7131a3bc416e3386428a4d3ec
|
[
"MIT"
] | 8
|
2015-03-15T18:33:39.000Z
|
2021-12-21T14:23:11.000Z
|
modules/yats/middleware/header.py
|
PrathameshBolade/yats
|
93bb5271255120b7131a3bc416e3386428a4d3ec
|
[
"MIT"
] | 23
|
2015-02-19T16:55:35.000Z
|
2022-03-11T19:49:06.000Z
|
# -*- coding: utf-8 -*-
from socket import gethostname
def ResponseInjectHeader(get_response):
def middleware(request):
setattr(request, '_dont_enforce_csrf_checks', True)
response = get_response(request)
# response['Access-Control-Allow-Origin'] = '*'
# response['Access-Control-Allow-Methods'] = 'GET, POST'
response['X-ProcessedBy'] = gethostname()
response['Cache-Control'] = 'no-cache, must-revalidate'
response['Expires'] = 'Sat, 26 Jul 1997 05:00:00 GMT'
return response
return middleware
| 30.157895 | 64 | 0.649215 | 62 | 573 | 5.903226 | 0.645161 | 0.060109 | 0.114754 | 0.142077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028953 | 0.216405 | 573 | 18 | 65 | 31.833333 | 0.786192 | 0.212914 | 0 | 0 | 0 | 0 | 0.250559 | 0.055928 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.1 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
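Editor's note: the factory above follows Django's new-style middleware protocol, so it is enabled from settings.py. The dotted path below is inferred from the file location and may differ in the actual project.

# settings.py (sketch)
MIDDLEWARE = [
    # ... Django's stock middleware ...
    'yats.middleware.header.ResponseInjectHeader',
]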
dd58051ac5d7683774d3d6e01bb0dea25252af19 | 1,334 | py | Python | handshake_client/sockets.py | naoki-maeda/handshake-client-py | 286884b358e15f84965f3c3224cfabd83e1a1406 | ["MIT"] | 3 | 2020-12-31T08:29:20.000Z | 2021-08-14T14:41:22.000Z | handshake_client/sockets.py | naoki-maeda/handshake-client-py | 286884b358e15f84965f3c3224cfabd83e1a1406 | ["MIT"] | null | null | null | handshake_client/sockets.py | naoki-maeda/handshake-client-py | 286884b358e15f84965f3c3224cfabd83e1a1406 | ["MIT"] | 1 | 2020-05-25T14:26:33.000Z | 2020-05-25T14:26:33.000Z |
import logging
import socketio
logger = logging.getLogger("handshake.socket")
sio = socketio.AsyncClient(logger=logger)
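# a single module-level client is shared by both connection helpers below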
async def get_connection(
url: str, api_key: str, watch_chain: bool = True, watch_mempool: bool = True,
) -> socketio.AsyncClient:
"""
see https://hsd-dev.org/guides/events.html
"""
assert type(url) == str
assert type(api_key) == str
assert type(watch_chain) == bool
assert type(watch_mempool) == bool
if sio.connected is False:
await sio.connect(url, transports=["websocket"])
await sio.call("auth", api_key)
if watch_chain:
await sio.call("watch chain")
if watch_mempool:
await sio.call("watch mempool")
return sio
@sio.event
async def disconnect() -> None:
logger.info("closing socket connection")
if sio.connected:
await sio.disconnect()
async def get_wallet_connection(
url: str, api_key: str, wallet_id: str = "*",
) -> socketio.AsyncClient:
"""
see https://hsd-dev.org/guides/events.html
"""
assert type(url) == str
assert type(api_key) == str
assert type(wallet_id) == str
if sio.connected is False:
await sio.connect(url, transports=["websocket"])
await sio.call("auth", api_key)
await sio.call("join", wallet_id)
return sio
| 26.68
| 81
| 0.642429
| 174
| 1,334
| 4.821839
| 0.293103
| 0.076281
| 0.071514
| 0.045292
| 0.481526
| 0.481526
| 0.421931
| 0.421931
| 0.421931
| 0.421931
| 0
| 0
| 0.230885
| 1,334
| 49
| 82
| 27.22449
| 0.817739
| 0
| 0
| 0.4
| 0
| 0
| 0.078818
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0
| false
| 0
| 0.057143
| 0
| 0.114286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd586c3a691480974c3b96292cc74640fddadda5
| 869
|
py
|
Python
|
generator01/testing/test_generator01.py
|
sku899/World_Travel_Language_Wizard
|
a9e009336e2f53c5fc0f3e40af51f34335645e5f
|
[
"MIT"
] | null | null | null |
generator01/testing/test_generator01.py
|
sku899/World_Travel_Language_Wizard
|
a9e009336e2f53c5fc0f3e40af51f34335645e5f
|
[
"MIT"
] | null | null | null |
generator01/testing/test_generator01.py
|
sku899/World_Travel_Language_Wizard
|
a9e009336e2f53c5fc0f3e40af51f34335645e5f
|
[
"MIT"
] | null | null | null |
from unittest.mock import patch
from flask import url_for, Response, request
from flask_testing import TestCase
from random import randint
from app import app
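# flask_testing.TestCase builds a test client (self.client) around the app returned by create_app()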
class TestBase(TestCase):
def create_app(self):
return app
class TestResponse(TestBase):
def test_rand_country(self):
countries = ['German', 'Spanish', 'French', 'Russian', 'Chinese', 'Portuguese', 'Hindi', 'Arabic', 'Japanese', 'Korean']
response = self.client.get(url_for("random_generator"))
# the endpoint returns a 1-based index into the countries list
self.assertTrue(1 <= int(response.data) <= len(countries))
def test_country(self):
with patch("requests.get") as g:
g.return_value.text = b"1"
response = self.client.get(url_for("random_generator"))
random_output = ['1','2','3','4','5','6','7','8','9','10']
self.assertIn(response.data.decode('utf-8'), random_output)
| 34.76
| 126
| 0.653625
| 113
| 869
| 4.920354
| 0.548673
| 0.032374
| 0.064748
| 0.07554
| 0.151079
| 0.151079
| 0.151079
| 0.151079
| 0
| 0
| 0
| 0.02
| 0.194476
| 869
| 24
| 127
| 36.208333
| 0.774286
| 0
| 0
| 0.105263
| 0
| 0
| 0.148446
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 1
| 0.157895
| false
| 0
| 0.263158
| 0.052632
| 0.578947
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd5b35b49e23eb6c89bb23b5e7b7a0d158afacb3
| 14,640
|
py
|
Python
|
assets/arguments.py
|
YuhangSong/Arena-Baselines-Depreciated
|
78c33994e67aede7565dda3f68f5cebe0d5ee6e6
|
[
"Apache-2.0"
] | null | null | null |
assets/arguments.py
|
YuhangSong/Arena-Baselines-Depreciated
|
78c33994e67aede7565dda3f68f5cebe0d5ee6e6
|
[
"Apache-2.0"
] | null | null | null |
assets/arguments.py
|
YuhangSong/Arena-Baselines-Depreciated
|
78c33994e67aede7565dda3f68f5cebe0d5ee6e6
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import torch
import assets.utils as utils
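# log_args prints every parsed argument and also logs them to TensorBoard as a markdown table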
def log_args(args, tf_summary):
args_dict = args.__dict__
from pytablewriter import MarkdownTableWriter
writer = MarkdownTableWriter()
writer.table_name = "Configurations (Args)"
writer.headers = ["Parameter", "Value"]
print('# INFO: [{} start >>>>]'.format(writer.table_name))
writer.value_matrix = []
for key in args_dict.keys():
print('# INFO: [Config/Args][{} : {}]'.format(key, args_dict[key]))
writer.value_matrix += [[str(key), str(args_dict[key])]]
print('# INFO: [>>>> {} end]'.format(writer.table_name))
args_markdown_str = writer.dumps()
tf_summary.add_text(writer.table_name, args_markdown_str)
def generate_env_related_args(args, envs):
args.obs_shape = envs.observation_space.shape
args.action_space = envs.action_space
args.num_agents = envs.unwrapped.num_agents
if args.population_number == 1:
print('# INFO: baseline: self-play')
args.learning_agent_ids = [0]
elif args.population_number > 1:
print('# INFO: baseline: population-based training')
args.learning_agent_ids = range(args.num_agents)
if args.population_number < args.num_agents:
raise Exception(
'# ERROR: population_number should be at least the same as num_agents')
else:
raise Exception('# ERROR: invalid population_number')
return args
def get_args():
parser = argparse.ArgumentParser(description='RL')
'''general args'''
parser.add_argument('--mode', type=str, default='train',
help='\
[train: standard training]\
[vis_train: visualize training, using one process and full render]\
[pth2nn: convert pytorch .pth checkpoint to .nn checkpoint that can be used in unity editor]\
[eval_population: evaluate population performance]\
[eval_human: evaluate against human player]\
[eval_round: evaluate agent against agent]\
[scaler2fig: convert scalers logged in tensorboardX to fig]\
')
parser.add_argument('--env-name',
help='[general][environment to train on]')
parser.add_argument('--obs-type', default='visual',
help='[general][observation type: visual, ram]')
parser.add_argument('--num-env-steps', type=int, default=10e6,
help='[general][number of environment steps to train (default: 10e6)]')
parser.add_argument('--store-interval', type=int, default=int(60 * 10),
help='[general][save interval (in seconds)]')
parser.add_argument('--log-dir', default='/tmp/gym/',
help='[general][directory to save agent logs (default: /tmp/gym)]')
parser.add_argument('--log-episode-every-minutes', type=float, default=20.0,
help='[general][log episode every x minutes]')
parser.add_argument('--seed', type=int, default=1,
help='[general][random seed (default: 1)]')
parser.add_argument('--cuda-deterministic', action='store_true', default=False,
help="[general][sets flags for determinism when using CUDA (potentially slow!)]")
parser.add_argument('--no-cuda', action='store_true', default=False,
help='[general][disables CUDA training]')
parser.add_argument('--num-eval-episodes', type=int, default=10,
help='[general][how many episodes to run for one evaluation]')
parser.add_argument('--arena-start-index', type=int, default=2394,
help='[general][each arena runs on a port, specify the ports to run the arena]')
parser.add_argument('--aux', type=str, default='',
help='[general][some aux information you may want to record along with this run]')
'''brain args'''
parser.add_argument('--add-timestep', action='store_true', default=False,
help='[brain][if add timestep to observations]')
parser.add_argument('--num-frame-stack', type=int, default=4,
help='[brain][num of stacked frames per observation]')
parser.add_argument('--recurrent-brain', action='store_true', default=False,
help='[brain][if use a recurrent policy]')
parser.add_argument('--normalize-obs', action='store_true', default=False,
help='[brain][if normalize observation with a running mean and variance]')
parser.add_argument('--batch-normalize', action='store_true', default=False,
help='[brain][if use batch normalize]')
parser.add_argument('--normalize-field', action='store_true', default=False,
help='[brain][C4NN][if normalize field]')
parser.add_argument('--normalize-kernal', action='store_true', default=False,
help='[brain][C4NN][if normalize kernal]')
parser.add_argument('--normalize-cross-coefficient', action='store_true', default=False,
help='[brain][C4NN][if normalize cross coefficient]')
parser.add_argument('--geographical-net', action='store_true', default=False,
help='[brain][GN][if use geographical network]')
'''trainer args'''
parser.add_argument('--trainer-id', default='a2c',
help='[trainer][trainer to use: a2c | ppo | acktr]')
parser.add_argument('--lr', type=float, default=7e-4,
help='[trainer][learning rate (default: 7e-4)]')
parser.add_argument('--eps', type=float, default=1e-5,
help='[trainer][RMSprop optimizer epsilon (default: 1e-5)]')
parser.add_argument('--alpha', type=float, default=0.99,
help='[trainer][RMSprop optimizer alpha (default: 0.99)]')
parser.add_argument('--gamma', type=float, default=0.99,
help='[trainer][discount factor for rewards (default: 0.99)]')
parser.add_argument('--use-gae', action='store_true', default=False,
help='[trainer][use generalized advantage estimation]')
parser.add_argument('--tau', type=float, default=0.95,
help='[trainer][gae parameter (default: 0.95)]')
parser.add_argument('--entropy-coef', type=float, default=0.01,
help='[trainer][entropy term coefficient (default: 0.01)]')
parser.add_argument('--value-loss-coef', type=float, default=0.5,
help='[trainer][value loss coefficient (default: 0.5)]')
parser.add_argument('--max-grad-norm', type=float, default=0.5,
help='[trainer][max norm of gradients (default: 0.5)]')
parser.add_argument('--num-processes', type=int, default=16,
help='[trainer][how many training CPU processes to use (default: 16)]')
parser.add_argument('--num-steps', type=int, default=5,
help='[trainer][number of forward steps in A2C (default: 5)]')
parser.add_argument('--ppo-epoch', type=int, default=4,
help='[trainer][number of ppo epochs (default: 4)]')
parser.add_argument('--num-mini-batch', type=int, default=32,
help='[trainer][number of batches for ppo (default: 32)]')
parser.add_argument('--clip-param', type=float, default=0.2,
help='[trainer][ppo clip parameter (default: 0.2)]')
parser.add_argument('--use-linear-lr-decay', action='store_true', default=False,
help='[trainer][use a linear schedule on the learning rate]')
parser.add_argument('--use-linear-clip-decay', action='store_true', default=False,
help='[trainer][use a linear schedule on the ppo clipping parameter]')
'''multi-agent args'''
parser.add_argument('--population-number', type=int, default=1,
help='[multi-agent][number of agents in population train]')
parser.add_argument('--reload-agents-interval', type=int, default=(60 * 1),
help='[multi-agent][interval to reload agents (in seconds)]')
parser.add_argument('--reload-playing-agents-principle', type=str, default='OpenAIFive',
help='[multi-agent][principle of choosing an agent from historical checkpoints]\
[\
recent(the most recent checkpoint),\
uniform(uniformly sample from historical checkpoint),\
OpenAIFive(0.8 probability to be recent, 0.2 probability to be uniform)\
]')
parser.add_argument('--playing-agents-deterministic', action='store_false', default=True,
help='[eval][if playing agents act deterministically]')
'''eval args'''
parser.add_argument('--population-eval-start', type=int, default=0,
help='[eval][population-eval][when do population eval, start from x checkpoint]')
parser.add_argument('--population-eval-skip-interval', type=int, default=4,
help='[eval][population-eval][when do population eval, skip every x checkpoints]')
parser.add_argument('--learning-agents-deterministic', action='store_true', default=False,
help='[eval][if learning agents act deterministically]')
parser.add_argument('--record-screen', action='store_true', default=False,
help='[eval][if record the screen]')
parser.add_argument('--human-controled-agent-ids', type=str, default='',
help='set the list of agents (specified by id) that are controlled by a human, example: 1,2,4')
args = parser.parse_args()
args.log_dir = '../results/'
if (args.mode in ['vis_train']) or ('eval' in args.mode):
print('# WARNING: set num_processes to 1 for eval purposes')
args.num_processes = 1
args.num_mini_batch = 1
def add_to_log_dir(key_, value_):
args.log_dir = '{}__{}-{}'.format(
args.log_dir,
key_,
value_,
)
'''general'''
add_to_log_dir('en', args.env_name)
add_to_log_dir('ot', args.obs_type)
'''brain'''
add_to_log_dir('nfs', args.num_frame_stack)
add_to_log_dir('rb', args.recurrent_brain)
add_to_log_dir('no', args.normalize_obs)
add_to_log_dir('bn', args.batch_normalize)
add_to_log_dir('nf', args.normalize_field)
add_to_log_dir('nk', args.normalize_kernal)
add_to_log_dir('ncc', args.normalize_cross_coefficient)
add_to_log_dir('gn', args.geographical_net)
'''trainer'''
add_to_log_dir('ti', args.trainer_id)
'''multi-agent settings'''
add_to_log_dir('pn', args.population_number)
add_to_log_dir('rpap', args.reload_playing_agents_principle)
add_to_log_dir('pad', args.playing_agents_deterministic)
'''general'''
add_to_log_dir('a', args.aux)
'''generated args'''
if args.obs_type in ['visual']:
args.use_visual = True
elif args.obs_type in ['ram']:
args.use_visual = False
else:
raise Exception('# ERROR: obs_type is not supported')
if args.mode in ['vis_train']:
args.is_envs_train_mode = False
else:
args.is_envs_train_mode = True
if 'NoFrameskip' in args.env_name:
args.game_class = 'Atari'
else:
args.game_class = 'Arena'
args.cuda = not args.no_cuda and torch.cuda.is_available()
args.device = torch.device("cuda:0" if args.cuda else "cpu")
args.num_updates = int(
args.num_env_steps) // args.num_steps // args.num_processes
args.batch_size = args.num_processes * args.num_steps
args.mini_batch_size = args.batch_size // args.num_mini_batch
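# with the defaults: 16 processes * 5 steps = 80 samples per update, split into 32 PPO mini-batches of 2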
if args.trainer_id in ['ppo']:
args.use_clipped_value_loss = True
args.human_controled_agent_ids = args.human_controled_agent_ids.split(',')
_human_controled_agent_ids = []
for agent_id in args.human_controled_agent_ids:
try:
_human_controled_agent_ids += [
int(agent_id)
]
except ValueError:
pass
args.human_controled_agent_ids = _human_controled_agent_ids
args.num_human_in_loop = len(args.human_controled_agent_ids)
args.is_human_in_loop = (args.num_human_in_loop > 0)
if args.is_human_in_loop:
input('# WARNING: human in loop, controlling agent of id: {}'.format(
args.human_controled_agent_ids
))
args.is_shuffle_agents = False
# check configurations
if args.num_processes > 1:
input('# WARNING: only process 0 is controlled by human')
if (args.game_class in ['Arena']) and (args.is_envs_train_mode in [True]):
input('# WARNING: Arena env is running in train mode (faster and smaller), could be unsuitable for human in loop')
if args.num_human_in_loop > 1:
input('# WARNING: for now, only support one human in loop')
# init for human in loop
import pygame
pygame.init()
screen = pygame.display.set_mode((200, 150))
pygame.display.set_caption('Control Window')
else:
args.is_shuffle_agents = True
'''check args'''
assert args.trainer_id in ['a2c', 'ppo', 'acktr']
if args.recurrent_brain:
assert args.trainer_id in ['a2c', 'ppo'], \
'Recurrent policy is not implemented for ACKTR'
assert args.batch_size >= args.num_mini_batch, (
"PPO requires the number of processes ({}) "
"* number of steps ({}) = {} "
"to be greater than or equal to the number of PPO mini batches ({})."
"".format(args.num_processes, args.num_steps, args.num_processes * args.num_steps, args.num_mini_batch))
if args.recurrent_brain:
assert args.num_processes >= args.num_mini_batch, (
"PPO requires the number of processes ({}) "
"to be greater than or equal to the number of "
"PPO mini batches ({}).".format(args.num_processes, args.num_mini_batch))
'''prepare torch'''
torch.set_num_threads(1)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
if args.cuda and torch.cuda.is_available() and args.cuda_deterministic:
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
return args
| 48.476821
| 126
| 0.616189
| 1,813
| 14,640
| 4.804744
| 0.199117
| 0.049592
| 0.093675
| 0.020204
| 0.291011
| 0.218804
| 0.173688
| 0.106991
| 0.07083
| 0.061417
| 0
| 0.010876
| 0.252596
| 14,640
| 301
| 127
| 48.637874
| 0.785231
| 0.002937
| 0
| 0.045267
| 0
| 0.00823
| 0.300125
| 0.02915
| 0
| 0
| 0
| 0
| 0.016461
| 1
| 0.016461
| false
| 0.004115
| 0.020576
| 0
| 0.045267
| 0.024691
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd5c3b4cdcb7e58a2c1873f564ec41c534d2da13
| 687
|
py
|
Python
|
khtube/download_ffmpeg.py
|
KodersHub/khtube
|
b1a8f96b7ff27cbb7eae615e8aee7d27260f80e8
|
[
"MIT"
] | 1
|
2021-08-09T14:01:12.000Z
|
2021-08-09T14:01:12.000Z
|
khtube/download_ffmpeg.py
|
KodersHub/khtube
|
b1a8f96b7ff27cbb7eae615e8aee7d27260f80e8
|
[
"MIT"
] | null | null | null |
khtube/download_ffmpeg.py
|
KodersHub/khtube
|
b1a8f96b7ff27cbb7eae615e8aee7d27260f80e8
|
[
"MIT"
] | null | null | null |
from google_drive_downloader import GoogleDriveDownloader as gdd
import sys
import os
import requests
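# map legacy sys.platform values (e.g. Python 2's 'linux2') to friendly names; unknown values pass through unchanged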
def get_platform():
platforms = {
'linux' : 'Linux',
'linux1' : 'Linux',
'linux2' : 'Linux',
'darwin' : 'OS X',
'win32' : 'Windows'
}
if sys.platform not in platforms:
return sys.platform
return platforms[sys.platform]
platform = get_platform()
if platform == "Linux":
print("Nothing needs to be installed")
else:
print("Installing ffmpeg")
gdd.download_file_from_google_drive(file_id='1Q5zbaXonPEUNQmclp1WMIVVodnUuJdKo',
dest_path='./ffmpeg.exe',
unzip=False)
| 25.444444
| 84
| 0.604076
| 70
| 687
| 5.785714
| 0.614286
| 0.081481
| 0.074074
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014523
| 0.298399
| 687
| 27
| 85
| 25.444444
| 0.825726
| 0
| 0
| 0
| 0
| 0
| 0.196221
| 0.047965
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.181818
| 0
| 0.318182
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd5ce8afa891dc4561f13cf8c918df7e99c18b1f
| 1,231
|
py
|
Python
|
climbing (1).py
|
VamsiKrishna1211/Hacker_rank_solutions
|
a683a36fcc2f011c120eb4d52aa08468deccc820
|
[
"Apache-2.0"
] | null | null | null |
climbing (1).py
|
VamsiKrishna1211/Hacker_rank_solutions
|
a683a36fcc2f011c120eb4d52aa08468deccc820
|
[
"Apache-2.0"
] | null | null | null |
climbing (1).py
|
VamsiKrishna1211/Hacker_rank_solutions
|
a683a36fcc2f011c120eb4d52aa08468deccc820
|
[
"Apache-2.0"
] | null | null | null |
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the climbingLeaderboard function below.
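# Dense ranking: equal scores share a rank and the next distinct score gets the next rank (no gaps).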
def climbingLeaderboard(scores, alice):
li=[]
lis=[0 for i in range(len(scores))]
lis[0]=1
for i in range(1,len(scores)):
#print(i)
if scores[i]<scores[i-1]:
lis[i]=lis[i-1]+1
else:
lis[i]=lis[i-1]
#print(lis)
num=len(scores)-1
for i in range(len(alice)):
lis.append(lis[len(lis)-1]+1)
scores.append(alice[i])
for k in range(num,-1,-1):
if scores[len(scores)-1]>=scores[k]:
lis[len(lis)-1]=lis[k]
else:
break
num=k+1
li.append(lis[len(lis)-1])
scores.pop(len(scores)-1)
lis.pop(len(lis)-1)
return li
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
scores_count = int(input())
scores = list(map(int, input().rstrip().split()))
alice_count = int(input())
alice = list(map(int, input().rstrip().split()))
result = climbingLeaderboard(scores, alice)
fptr.write('\n'.join(map(str, result)))
fptr.write('\n')
fptr.close()
| 20.864407
| 53
| 0.541024
| 174
| 1,231
| 3.764368
| 0.316092
| 0.068702
| 0.042748
| 0.050382
| 0.218321
| 0.079389
| 0
| 0
| 0
| 0
| 0
| 0.02291
| 0.29082
| 1,231
| 58
| 54
| 21.224138
| 0.727377
| 0.064988
| 0
| 0.052632
| 0
| 0
| 0.020924
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026316
| false
| 0
| 0.131579
| 0
| 0.184211
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd5d2da4c7eb58adfbaff7779a18bcc9d814e736
| 25,661
|
py
|
Python
|
game_manager/machine_learning/block_controller_train.py
|
EndoNrak/tetris
|
0ce4863348d644b401c53e6c9a50cdc6f7430ed1
|
[
"MIT"
] | 1
|
2022-01-29T15:23:15.000Z
|
2022-01-29T15:23:15.000Z
|
game_manager/machine_learning/block_controller_train.py
|
EndoNrak/tetris
|
0ce4863348d644b401c53e6c9a50cdc6f7430ed1
|
[
"MIT"
] | null | null | null |
game_manager/machine_learning/block_controller_train.py
|
EndoNrak/tetris
|
0ce4863348d644b401c53e6c9a50cdc6f7430ed1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from datetime import datetime
import pprint
import random
import copy
import torch
import torch.nn as nn
from model.deepqnet import DeepQNetwork,DeepQNetwork_v2
import omegaconf
from hydra import compose, initialize
import os
from tensorboardX import SummaryWriter
from collections import deque
from random import random, sample,randint
import numpy as np
import subprocess
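# Deep Q-learning tetris controller: reads a hydra config, trains a DQN with experience replay, and logs to tensorboardX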
class Block_Controller(object):
# init parameter
board_backboard = 0
board_data_width = 0
board_data_height = 0
ShapeNone_index = 0
CurrentShape_class = 0
NextShape_class = 0
def __init__(self,load_weight=None):
# init parameter
self.mode = None
# train
self.init_train_parameter_flag = False
# predict
self.init_predict_parameter_flag = False
def set_parameter(self,weight=None):
cfg = self.yaml_read()
os.makedirs(cfg.common.dir,exist_ok=True)
self.saved_path = cfg.common.dir + "/" + cfg.common.weight_path
os.makedirs(self.saved_path ,exist_ok=True)
subprocess.run("cp config/default.yaml %s/"%(cfg.common.dir), shell=True)
self.writer = SummaryWriter(cfg.common.dir+"/"+cfg.common.log_path)
self.log = cfg.common.dir+"/log.txt"
self.log_score = cfg.common.dir+"/score.txt"
self.log_reward = cfg.common.dir+"/reward.txt"
self.state_dim = cfg.state.dim
with open(self.log,"w") as f:
print("start...", file=f)
with open(self.log_score,"w") as f:
print(0, file=f)
with open(self.log_reward,"w") as f:
print(0, file=f)
#=====Set tetris parameter=====
self.height = cfg.tetris.board_height
self.width = cfg.tetris.board_width
self.max_tetrominoes = cfg.tetris.max_tetrominoes
#=====load Deep Q Network=====
print("model name: %s"%(cfg.model.name))
if cfg.model.name=="DQN":
self.model = DeepQNetwork(self.state_dim)
self.initial_state = torch.FloatTensor([0 for i in range(self.state_dim)])
self.get_next_func = self.get_next_states
self.reward_func = self.step
elif cfg.model.name=="DQNv2":
self.model = DeepQNetwork_v2()
self.initial_state = torch.FloatTensor([[[0 for i in range(10)] for j in range(22)]])
self.get_next_func = self.get_next_states_v2
self.reward_func = self.step_v2
self.reward_weight = cfg.train.reward_weight
self.load_weight = cfg.common.load_weight
self.double_dqn = cfg.train.double_dqn
self.target_net = cfg.train.target_net
if self.double_dqn:
self.target_net = True
if self.target_net:
print("set target network...")
self.target_model = copy.deepcopy(self.model)
self.target_copy_intarval = cfg.train.target_copy_intarval
if self.mode=="predict":
if weight is not None:
print("load ",weight)
self.model = torch.load(weight)
self.model.eval()
else:
if not os.path.exists(self.load_weight):
print("%s is not existed!!"%(self.load_weight))
exit()
#self.model.load_state_dict(torch.load(self.load_weight))
self.model = torch.load(self.load_weight)
self.model.eval()
if torch.cuda.is_available():
self.model.cuda()
#=====Set hyper parameter=====
self.batch_size = cfg.train.batch_size
self.lr = cfg.train.lr
self.replay_memory_size = cfg.train.replay_memory_size
self.replay_memory = deque(maxlen=self.replay_memory_size)
self.num_decay_epochs = cfg.train.num_decay_epochs
self.num_epochs = cfg.train.num_epoch
self.initial_epsilon = cfg.train.initial_epsilon
self.final_epsilon = cfg.train.final_epsilon
self.save_interval = cfg.train.save_interval
#=====Set loss function and optimizer=====
if cfg.train.optimizer=="Adam" or cfg.train.optimizer=="ADAM":
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)
self.scheduler = None
else:
self.momentum =cfg.train.lr_momentum
self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.lr, momentum=self.momentum)
self.lr_step_size = cfg.train.lr_step_size
self.lr_gamma = cfg.train.lr_gamma
self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=self.lr_step_size , gamma=self.lr_gamma)
self.criterion = nn.MSELoss()
#=====Initialize parameter=====
self.epoch = 0
self.score = 0
self.max_score = -99999
self.epoch_reward = 0
self.cleared_lines = 0
self.iter = 0
self.state = self.initial_state
self.tetrominoes = 0
self.gamma = cfg.train.gamma
self.reward_clipping = cfg.train.reward_clipping
self.score_list = cfg.tetris.score_list
self.reward_list = cfg.train.reward_list
self.penalty = self.reward_list[5]
if self.reward_clipping:
self.norm_num =max(max(self.reward_list),abs(self.penalty))
self.reward_list =[r/self.norm_num for r in self.reward_list]
self.penalty /= self.norm_num
self.penalty = min(cfg.train.max_penalty,self.penalty)
#=====Prioritized Experience Replay=====
self.prioritized_replay = cfg.train.prioritized_replay
if self.prioritized_replay:
from machine_learning.qlearning import PRIORITIZED_EXPERIENCE_REPLAY as PER
self.PER = PER(self.replay_memory_size,gamma=self.gamma)
# update
def update(self):
if self.mode=="train":
self.score += self.score_list[5]
self.replay_memory[-1][1] += self.penalty
self.replay_memory[-1][3] = True #store True in the done list.
self.epoch_reward += self.penalty
if len(self.replay_memory) < self.replay_memory_size / 10:
print("================pass================")
print("iter: {} ,meory: {}/{} , score: {}, clear line: {}, block: {} ".format(self.iter,
len(self.replay_memory),self.replay_memory_size / 10,self.score,self.cleared_lines
,self.tetrominoes ))
else:
print("================update================")
self.epoch += 1
if self.prioritized_replay:
batch,replay_batch_index = self.PER.sampling(self.replay_memory,self.batch_size)
else:
batch = sample(self.replay_memory, min(len(self.replay_memory),self.batch_size))
state_batch, reward_batch, next_state_batch, done_batch = zip(*batch)
state_batch = torch.stack(tuple(state for state in state_batch))
reward_batch = torch.from_numpy(np.array(reward_batch, dtype=np.float32)[:, None])
next_state_batch = torch.stack(tuple(state for state in next_state_batch))
done_batch = torch.from_numpy(np.array(done_batch)[:, None])
#max_next_state_batch = torch.stack(tuple(state for state in max_next_state_batch))
q_values = self.model(state_batch)
if self.target_net:
if self.epoch %self.target_copy_intarval==0 and self.epoch>0:
print("target_net update...")
self.target_model = torch.load(self.max_weight)
#self.target_model = copy.copy(self.model)
#self.max_score = -99999
self.target_model.eval()
#======predict Q(S_t+1 max_a Q(s_(t+1),a))======
with torch.no_grad():
next_prediction_batch = self.target_model(next_state_batch)
else:
self.model.eval()
with torch.no_grad():
next_prediction_batch = self.model(next_state_batch)
self.model.train()
y_batch = torch.cat(
tuple(reward if done[0] else reward + self.gamma * prediction for done ,reward, prediction in
zip(done_batch,reward_batch, next_prediction_batch)))[:, None]
self.optimizer.zero_grad()
if self.prioritized_replay:
loss_weights = self.PER.update_priority(replay_batch_index,reward_batch,q_values,next_prediction_batch)
#print(loss_weights *nn.functional.mse_loss(q_values, y_batch))
loss = (loss_weights *self.criterion(q_values, y_batch)).mean()
#loss = self.criterion(q_values, y_batch)
loss.backward()
else:
loss = self.criterion(q_values, y_batch)
loss.backward()
self.optimizer.step()
if self.scheduler is not None:
self.scheduler.step()
log = "Epoch: {} / {}, Score: {}, block: {}, Reward: {:.1f} Cleared lines: {}".format(
self.epoch,
self.num_epochs,
self.score,
self.tetrominoes,
self.epoch_reward,
self.cleared_lines
)
print(log)
with open(self.log,"a") as f:
print(log, file=f)
with open(self.log_score,"a") as f:
print(self.score, file=f)
with open(self.log_reward,"a") as f:
print(self.epoch_reward, file=f)
self.writer.add_scalar('Train/Score', self.score, self.epoch - 1)
self.writer.add_scalar('Train/Reward', self.epoch_reward, self.epoch - 1)
self.writer.add_scalar('Train/block', self.tetrominoes, self.epoch - 1)
self.writer.add_scalar('Train/clear lines', self.cleared_lines, self.epoch - 1)
if self.epoch > self.num_epochs:
with open(self.log,"a") as f:
print("finish..", file=f)
exit()
else:
self.epoch += 1
log = "Epoch: {} / {}, Score: {}, block: {}, Reward: {:.1f} Cleared lines: {}".format(
self.epoch,
self.num_epochs,
self.score,
self.tetrominoes,
self.epoch_reward,
self.cleared_lines
)
pass
# load parameters
def yaml_read(self):
initialize(config_path="../../config", job_name="tetris")
cfg = compose(config_name="default")
return cfg
# reset accumulated values
def reset_state(self):
if self.score > self.max_score:
torch.save(self.model, "{}/tetris_epoch_{}_score{}".format(self.saved_path,self.epoch,self.score))
self.max_score = self.score
self.max_weight = "{}/tetris_epoch_{}_score{}".format(self.saved_path,self.epoch,self.score)
self.state = self.initial_state
self.score = 0
self.cleared_lines = 0
self.epoch_reward = 0
self.tetrominoes = 0
# count the lines to be cleared
def check_cleared_rows(self,board):
board_new = np.copy(board)
lines = 0
empty_line = np.array([0 for i in range(self.width)])
for y in range(self.height - 1, -1, -1):
blockCount = np.sum(board[y])
if blockCount == self.width:
lines += 1
board_new = np.delete(board_new,y,0)
board_new = np.vstack([empty_line,board_new ])
return lines,board_new
# compute the height difference between adjacent columns
def get_bumpiness_and_height(self,board):
mask = board != 0
invert_heights = np.where(mask.any(axis=0), np.argmax(mask, axis=0), self.height)
heights = self.height - invert_heights
total_height = np.sum(heights)
currs = heights[:-1]
nexts = heights[1:]
diffs = np.abs(currs - nexts)
total_bumpiness = np.sum(diffs)
return total_bumpiness, total_height
# count the holes in each column
def get_holes(self, board):
num_holes = 0
for i in range(self.width):
col = board[:,i]
row = 0
while row < self.height and col[row] == 0:
row += 1
num_holes += len([x for x in col[row + 1:] if x == 0])
return num_holes
# aggregate board features (cleared lines, holes, bumpiness, height) into a state vector
def get_state_properties(self, board):
lines_cleared, board = self.check_cleared_rows(board)
holes = self.get_holes(board)
bumpiness, height = self.get_bumpiness_and_height(board)
return torch.FloatTensor([lines_cleared, holes, bumpiness, height])
def get_state_properties_v2(self, board):
lines_cleared, board = self.check_cleared_rows(board)
holes = self.get_holes(board)
bumpiness, height = self.get_bumpiness_and_height(board)
max_row = self.get_max_height(board)
return torch.FloatTensor([lines_cleared, holes, bumpiness, height,max_row])
def get_max_height(self, board):
sum_ = np.sum(board,axis=1)
row = 0
while row < self.height and sum_[row] ==0:
row += 1
return self.height - row
# get next states (2-D board version)
def get_next_states_v2(self,curr_backboard,piece_id,CurrentShape_class):
states = {}
if piece_id == 5: # O piece
num_rotations = 1
elif piece_id == 1 or piece_id == 6 or piece_id == 7:
num_rotations = 2
else:
num_rotations = 4
for direction0 in range(num_rotations):
x0Min, x0Max = self.getSearchXRange(CurrentShape_class, direction0)
for x0 in range(x0Min, x0Max):
# get board data, as if dropdown block
board = self.getBoard(curr_backboard, CurrentShape_class, direction0, x0)
reshape_backboard = self.get_reshape_backboard(board)
reshape_backboard = torch.from_numpy(reshape_backboard[np.newaxis,:,:]).float()
states[(x0, direction0)] = reshape_backboard
return states
# get next states (1-D feature version)
def get_next_states(self,curr_backboard,piece_id,CurrentShape_class):
states = {}
if piece_id == 5: # O piece
num_rotations = 1
elif piece_id == 1 or piece_id == 6 or piece_id == 7:
num_rotations = 2
else:
num_rotations = 4
for direction0 in range(num_rotations):
x0Min, x0Max = self.getSearchXRange(CurrentShape_class, direction0)
for x0 in range(x0Min, x0Max):
# get board data, as if dropdown block
board = self.getBoard(curr_backboard, CurrentShape_class, direction0, x0)
board = self.get_reshape_backboard(board)
states[(x0, direction0)] = self.get_state_properties(board)
return states
# reshape the board to 2-D
def get_reshape_backboard(self,board):
board = np.array(board)
reshape_board = board.reshape(self.height,self.width)
reshape_board = np.where(reshape_board>0,1,0)
return reshape_board
# compute the reward (2-D board version)
def step_v2(self, curr_backboard,action,curr_shape_class):
x0, direction0 = action
board = self.getBoard(curr_backboard, curr_shape_class, direction0, x0)
board = self.get_reshape_backboard(board)
bumpiness, height = self.get_bumpiness_and_height(board)
max_height = self.get_max_height(board)
hole_num = self.get_holes(board)
lines_cleared, board = self.check_cleared_rows(board)
reward = self.reward_list[lines_cleared]
reward -= self.reward_weight[0] * bumpiness
reward -= self.reward_weight[1] * max(0,max_height-(self.height/2))
reward -= self.reward_weight[2] * hole_num
self.epoch_reward += reward
self.score += self.score_list[lines_cleared]
self.cleared_lines += lines_cleared
self.tetrominoes += 1
return reward
# compute the reward (1-D feature version)
def step(self, curr_backboard,action,curr_shape_class):
x0, direction0 = action
board = self.getBoard(curr_backboard, curr_shape_class, direction0, x0)
board = self.get_reshape_backboard(board)
lines_cleared, board = self.check_cleared_rows(board)
reward = self.reward_list[lines_cleared]
self.epoch_reward += reward
self.score += self.score_list[lines_cleared]
self.cleared_lines += lines_cleared
self.tetrominoes += 1
return reward
def GetNextMove(self, nextMove, GameStatus,weight=None):
t1 = datetime.now()
self.mode = GameStatus["judge_info"]["mode"]
if self.init_train_parameter_flag == False:
self.init_train_parameter_flag = True
self.set_parameter(weight=weight)
self.ind =GameStatus["block_info"]["currentShape"]["index"]
curr_backboard = GameStatus["field_info"]["backboard"]
# default board definition
self.board_data_width = GameStatus["field_info"]["width"]
self.board_data_height = GameStatus["field_info"]["height"]
curr_shape_class = GameStatus["block_info"]["currentShape"]["class"]
next_shape_class= GameStatus["block_info"]["nextShape"]["class"]
# next shape info
self.ShapeNone_index = GameStatus["debug_info"]["shape_info"]["shapeNone"]["index"]
curr_piece_id =GameStatus["block_info"]["currentShape"]["index"]
next_piece_id =GameStatus["block_info"]["nextShape"]["index"]
reshape_backboard = self.get_reshape_backboard(curr_backboard)
#self.state = reshape_backboard
next_steps =self.get_next_func(curr_backboard,curr_piece_id,curr_shape_class)
if self.mode == "train":
# init parameter
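# epsilon-greedy exploration: anneal epsilon linearly from initial_epsilon to final_epsilon over num_decay_epochs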
epsilon = self.final_epsilon + (max(self.num_decay_epochs - self.epoch, 0) * (
self.initial_epsilon - self.final_epsilon) / self.num_decay_epochs)
u = random()
random_action = u <= epsilon
next_actions, next_states = zip(*next_steps.items())
next_states = torch.stack(next_states)
if torch.cuda.is_available():
next_states = next_states.cuda()
self.model.train()
with torch.no_grad():
predictions = self.model(next_states)[:, 0]
if random_action:
index = randint(0, len(next_steps) - 1)
else:
index = torch.argmax(predictions).item()
next_state = next_states[index, :]
action = next_actions[index]
reward = self.reward_func(curr_backboard,action,curr_shape_class)
done = False #game over flag
#======predict max_a Q(s_(t+1),a)======
#if use double dqn, predicted by main model
if self.double_dqn:
next_backboard = self.getBoard(curr_backboard, curr_shape_class, action[1], action[0])
next2_steps =self.get_next_func(next_backboard,next_piece_id,next_shape_class)
next2_actions, next2_states = zip(*next2_steps.items())
next2_states = torch.stack(next2_states)
if torch.cuda.is_available():
next2_states = next2_states.cuda()
self.model.train()
with torch.no_grad():
next_predictions = self.model(next2_states)[:, 0]
next_index = torch.argmax(next_predictions).item()
next2_state = next2_states[next_index, :]
#if use target net, predicted by target model
elif self.target_net:
next_backboard = self.getBoard(curr_backboard, curr_shape_class, action[1], action[0])
next2_steps =self.get_next_func(next_backboard,next_piece_id,next_shape_class)
next2_actions, next2_states = zip(*next2_steps.items())
next2_states = torch.stack(next2_states)
if torch.cuda.is_available():
next2_states = next2_states.cuda()
self.target_model.train()
with torch.no_grad():
next_predictions = self.target_model(next2_states)[:, 0]
next_index = torch.argmax(next_predictions).item()
next2_state = next2_states[next_index, :]
#if not use target net,predicted by main model
else:
next_backboard = self.getBoard(curr_backboard, curr_shape_class, action[1], action[0])
next2_steps =self.get_next_func(next_backboard,next_piece_id,next_shape_class)
next2_actions, next2_states = zip(*next2_steps.items())
next2_states = torch.stack(next2_states)
if torch.cuda.is_available():
next2_states = next2_states.cuda()
self.model.train()
with torch.no_grad():
next_predictions = self.model(next2_states)[:, 0]
epsilon = self.final_epsilon + (max(self.num_decay_epochs - self.epoch, 0) * (
self.initial_epsilon - self.final_epsilon) / self.num_decay_epochs)
u = random()
random_action = u <= epsilon
if random_action:
next_index = randint(0, len(next2_steps) - 1)
else:
next_index = torch.argmax(next_predictions).item()
next2_state = next2_states[next_index, :]
#=======================================
self.replay_memory.append([next_state, reward, next2_state,done])
if self.prioritized_replay:
self.PER.store()
#self.replay_memory.append([self.state, reward, next_state,done])
nextMove["strategy"]["direction"] = action[1]
nextMove["strategy"]["x"] = action[0]
nextMove["strategy"]["y_operation"] = 1
nextMove["strategy"]["y_moveblocknum"] = 1
self.state = next_state
elif self.mode == "predict":
self.model.eval()
next_actions, next_states = zip(*next_steps.items())
next_states = torch.stack(next_states)
predictions = self.model(next_states)[:, 0]
index = torch.argmax(predictions).item()
action = next_actions[index]
nextMove["strategy"]["direction"] = action[1]
nextMove["strategy"]["x"] = action[0]
nextMove["strategy"]["y_operation"] = 1
nextMove["strategy"]["y_moveblocknum"] = 1
return nextMove
def getSearchXRange(self, Shape_class, direction):
#
# get x range from shape direction.
#
minX, maxX, _, _ = Shape_class.getBoundingOffsets(direction) # get shape x offsets[minX,maxX] as relative value.
xMin = -1 * minX
xMax = self.board_data_width - maxX
return xMin, xMax
def getShapeCoordArray(self, Shape_class, direction, x, y):
#
# get coordinate array by given shape.
#
coordArray = Shape_class.getCoords(direction, x, y) # get array from shape direction, x, y.
return coordArray
def getBoard(self, board_backboard, Shape_class, direction, x):
#
# get new board.
#
# copy backboard data to make new board.
# if not, original backboard data will be updated later.
board = copy.deepcopy(board_backboard)
_board = self.dropDown(board, Shape_class, direction, x)
return _board
def dropDown(self, board, Shape_class, direction, x):
#
# internal function of getBoard.
# -- drop down the shape on the board.
#
dy = self.board_data_height - 1
coordArray = self.getShapeCoordArray(Shape_class, direction, x, 0)
# update dy
for _x, _y in coordArray:
_yy = 0
while _yy + _y < self.board_data_height and (_yy + _y < 0 or board[(_y + _yy) * self.board_data_width + _x] == self.ShapeNone_index):
_yy += 1
_yy -= 1
if _yy < dy:
dy = _yy
# get new board
_board = self.dropDownWithDy(board, Shape_class, direction, x, dy)
return _board
def dropDownWithDy(self, board, Shape_class, direction, x, dy):
#
# internal function of dropDown.
#
_board = board
coordArray = self.getShapeCoordArray(Shape_class, direction, x, 0)
for _x, _y in coordArray:
_board[(_y + dy) * self.board_data_width + _x] = Shape_class.shape
return _board
BLOCK_CONTROLLER_TRAIN = Block_Controller()
| 41.929739
| 145
| 0.575465
| 2,986
| 25,661
| 4.723041
| 0.113195
| 0.018436
| 0.017018
| 0.011345
| 0.476849
| 0.403248
| 0.356165
| 0.334822
| 0.303765
| 0.280933
| 0
| 0.012581
| 0.318538
| 25,661
| 612
| 146
| 41.929739
| 0.793904
| 0.063287
| 0
| 0.403471
| 0
| 0
| 0.040344
| 0.005257
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047722
| false
| 0.004338
| 0.034707
| 0
| 0.136659
| 0.036876
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd5ec06ae412be00165dc082fa38f505f00c44d7
| 2,959
|
py
|
Python
|
qa/rpc-tests/checkpoint-load.py
|
ericramos1980/energi
|
aadc44f714f9d52433ab3595a9f33a61433c60c9
|
[
"MIT"
] | 2
|
2021-12-28T21:47:07.000Z
|
2022-02-09T21:04:29.000Z
|
qa/rpc-tests/checkpoint-load.py
|
reddragon34/energi
|
4cc6c426d9d4b6b9053912de9b2197eba071201e
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/checkpoint-load.py
|
reddragon34/energi
|
4cc6c426d9d4b6b9053912de9b2197eba071201e
|
[
"MIT"
] | 1
|
2019-10-07T19:17:55.000Z
|
2019-10-07T19:17:55.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2019 The Energi Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import logging
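# A node that enables checkpoints (-checkpoints=1) with a bogus -addcheckpoint hash at height 10 should stop at block 9.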
class CheckpointLoadTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
node_args = ["-keypool=10", "-debug=stake", "-debug=net",
"-addcheckpoint=10:abcdef01234456789", "-checkpoints=0"]
self.extra_args = [node_args, node_args, node_args]
self.node_args = node_args
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args)
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 0, 2)
connect_nodes_bi(self.nodes, 1, 2)
self.is_network_split=False
def run_test(self):
self.sync_all()
logging.info("Generating initial blockchain")
self.nodes[0].generate(20)
self.sync_all()
assert_equal(self.nodes[0].getinfo()['blocks'], 20)
logging.info("Enabling checkpoints")
stop_nodes(self.nodes)
node_args = list(self.node_args)
node_args[-1] = "-checkpoints=1"
self.extra_args[0] = node_args
self.setup_network()
sync_blocks(self.nodes[1:])
assert_equal(self.nodes[0].getinfo()['blocks'], 9)
assert_equal(self.nodes[1].getinfo()['blocks'], 20)
assert_equal(self.nodes[2].getinfo()['blocks'], 20)
logging.info("Adding more blocks")
self.nodes[1].generate(3)
sync_blocks(self.nodes[1:])
assert_equal(self.nodes[0].getinfo()['blocks'], 9)
assert_equal(self.nodes[1].getinfo()['blocks'], 23)
assert_equal(self.nodes[2].getinfo()['blocks'], 23)
logging.info("Adding more block on alt chain")
stop_nodes(self.nodes)
self.extra_args[0] = self.node_args
self.nodes = start_nodes(1, self.options.tmpdir, self.extra_args)
self.nodes[0].generate(30)
stop_nodes(self.nodes)
self.setup_network()
self.sync_all()
assert_equal(self.nodes[0].getinfo()['blocks'], 39)
assert_equal(self.nodes[1].getinfo()['blocks'], 39)
assert_equal(self.nodes[2].getinfo()['blocks'], 39)
logging.info("Restart to check no issues appear")
stop_nodes(self.nodes)
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args)
self.sync_all()
assert_equal(self.nodes[0].getinfo()['blocks'], 39)
assert_equal(self.nodes[1].getinfo()['blocks'], 39)
assert_equal(self.nodes[2].getinfo()['blocks'], 39)
if __name__ == '__main__':
CheckpointLoadTest().main()
| 37.935897
| 86
| 0.639743
| 384
| 2,959
| 4.723958
| 0.270833
| 0.13892
| 0.107497
| 0.14333
| 0.502756
| 0.404079
| 0.377619
| 0.321389
| 0.321389
| 0.321389
| 0
| 0.034662
| 0.220007
| 2,959
| 77
| 87
| 38.428571
| 0.7513
| 0.06759
| 0
| 0.366667
| 0
| 0
| 0.113249
| 0.012704
| 0
| 0
| 0
| 0
| 0.216667
| 1
| 0.05
| false
| 0
| 0.05
| 0
| 0.116667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd5ffb792de44849ba525e817187b550fe21e9d9
| 648
|
py
|
Python
|
python/setup.py
|
tcolgate/gracetests
|
552c8113b0554d49cf146e6d7cfd573c8b4cbf8f
|
[
"MIT"
] | 2
|
2019-07-30T16:50:20.000Z
|
2021-11-26T22:46:29.000Z
|
python/setup.py
|
tcolgate/gracetests
|
552c8113b0554d49cf146e6d7cfd573c8b4cbf8f
|
[
"MIT"
] | null | null | null |
python/setup.py
|
tcolgate/gracetests
|
552c8113b0554d49cf146e6d7cfd573c8b4cbf8f
|
[
"MIT"
] | 1
|
2019-07-30T16:50:54.000Z
|
2019-07-30T16:50:54.000Z
|
import os
from setuptools import find_packages, setup
DIR = os.path.dirname(os.path.abspath(__file__))
setup(
name='graceful',
version='1.2.0',
description='test of graceful shutdown',
url='https://github.com/qubitdigital/graceful/python',
author='Infra',
author_email='infra@qubit.com',
license='All rights reserved.',
packages=find_packages(),
install_requires=[
'sanic==0.7.0',
'ujson==1.35',
'python-dotenv==0.8.2',
'cchardet==2.1.1',
],
zip_safe=False,
entry_points={
'console_scripts': [
'graceful=graceful.server:main',
]
}
)
| 22.344828
| 58
| 0.603395
| 78
| 648
| 4.871795
| 0.692308
| 0.063158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030303
| 0.236111
| 648
| 28
| 59
| 23.142857
| 0.737374
| 0
| 0
| 0
| 0
| 0
| 0.350309
| 0.044753
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.08
| 0
| 0.08
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd61ede10dd7a8e91db98cff1eeb2bd9cfadde8d
| 637
|
py
|
Python
|
convert_assets.py
|
michaelgold/usdzconvert
|
f4e6e552db4e27a3e088649f19f6bd61977501c1
|
[
"MIT"
] | null | null | null |
convert_assets.py
|
michaelgold/usdzconvert
|
f4e6e552db4e27a3e088649f19f6bd61977501c1
|
[
"MIT"
] | null | null | null |
convert_assets.py
|
michaelgold/usdzconvert
|
f4e6e552db4e27a3e088649f19f6bd61977501c1
|
[
"MIT"
] | null | null | null |
import glob
import os
import subprocess
import shutil
source_file_list = glob.glob("../source/assets/*.glb")
for input_file_name in source_file_list:
base_file_name = os.path.split(input_file_name)[1]
output_file_name = "../dist/assets/{}.usdz".format(os.path.splitext(base_file_name)[0])
print(output_file_name)
subprocess.call("python run_usd.py usdzconvert/usdzconvert {} {}".format(input_file_name, output_file_name), shell=True)
for glb_file in source_file_list:
print(glb_file)
destination = "../dist/assets/{}".format(os.path.split(glb_file)[1])
shutil.move(glb_file, destination)
| 35.388889
| 125
| 0.726845
| 94
| 637
| 4.638298
| 0.37234
| 0.146789
| 0.09633
| 0.073395
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005464
| 0.138148
| 637
| 17
| 126
| 37.470588
| 0.788707
| 0
| 0
| 0
| 0
| 0
| 0.174194
| 0.108065
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.285714
| 0
| 0.285714
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd64daf0644c28687a4705d4e8b356d44e031ab4
| 2,190
|
py
|
Python
|
tests/test_examples.py
|
timgates42/goless
|
3c8742fa0f94d0a365840aae404da4e8eaed9d71
|
[
"Apache-2.0"
] | 266
|
2015-01-03T04:18:48.000Z
|
2022-02-16T03:08:38.000Z
|
tests/test_examples.py
|
timgates42/goless
|
3c8742fa0f94d0a365840aae404da4e8eaed9d71
|
[
"Apache-2.0"
] | 19
|
2015-03-06T11:04:53.000Z
|
2021-06-09T15:08:57.000Z
|
tests/test_examples.py
|
timgates42/goless
|
3c8742fa0f94d0a365840aae404da4e8eaed9d71
|
[
"Apache-2.0"
] | 20
|
2015-01-03T03:45:08.000Z
|
2022-03-05T06:05:32.000Z
|
"""
Idiomatic Go examples converted to use goless.
"""
from __future__ import print_function
import time
from . import BaseTests
import goless
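# Each test mirrors a snippet from gobyexample.com, rewritten with goless channels and select.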
class Examples(BaseTests):
def test_select(self):
# https://gobyexample.com/select
c1 = goless.chan()
c2 = goless.chan()
def func1():
time.sleep(.1)
c1.send('one')
goless.go(func1)
def func2():
time.sleep(.2)
c2.send('two')
goless.go(func2)
# We don't print since we run this as a test.
callbacks = []
for i in range(2):
_, val = goless.select([goless.rcase(c1), goless.rcase(c2)])
callbacks.append(val)
self.assertEqual(callbacks, ['one', 'two'])
def test_range_over_channels(self):
# https://gobyexample.com/range-over-channels
queue = goless.chan(2)
queue.send('one')
queue.send('two')
queue.close()
elements = [elem for elem in queue]
self.assertEqual(elements, ['one', 'two'])
def test_worker_pool(self):
# https://gobyexample.com/worker-pools
jobs_done = []
# noinspection PyShadowingNames,PyShadowingBuiltins
def worker(id, jobs, results):
for j in jobs:
jobs_done.append('w %s j %s' % (id, j))
time.sleep(.01)
results.send(j * 2)
jobs = goless.chan(100)
results = goless.chan(100)
for w in range(1, 4):
goless.go(lambda w=w: worker(w, jobs, results))  # bind w now; a plain closure would capture the loop variable by reference
for j in range(1, 10):
jobs.send(j)
jobs.close()
for a in range(1, 10):
results.recv()
self.assertEqual(len(jobs_done), 9)
def test_case_switch(self):
chan = goless.chan()
cases = [goless.rcase(chan), goless.scase(chan, 1), goless.dcase()]
chosen, value = goless.select(cases)
if chosen is cases[0]:
print('Received %s' % value)
elif chosen is cases[1]:
assert value is None
print('Sent.')
else:
assert chosen is cases[2], chosen
print('Default...')
| 26.071429
| 75
| 0.541096
| 265
| 2,190
| 4.407547
| 0.354717
| 0.05137
| 0.05137
| 0.059075
| 0.02911
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024658
| 0.333333
| 2,190
| 83
| 76
| 26.385542
| 0.775342
| 0.115525
| 0
| 0
| 0
| 0
| 0.030649
| 0
| 0
| 0
| 0
| 0
| 0.087719
| 1
| 0.122807
| false
| 0
| 0.070175
| 0
| 0.210526
| 0.070175
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd67590d08d500fd8ab7568abbfffa79b1097a7f
| 3,211
|
py
|
Python
|
Utils/Messaging.py
|
philshams/FC_analysis
|
cabe2385d5061d206a21b230605bfce9e39ec7f2
|
[
"MIT"
] | null | null | null |
Utils/Messaging.py
|
philshams/FC_analysis
|
cabe2385d5061d206a21b230605bfce9e39ec7f2
|
[
"MIT"
] | null | null | null |
Utils/Messaging.py
|
philshams/FC_analysis
|
cabe2385d5061d206a21b230605bfce9e39ec7f2
|
[
"MIT"
] | null | null | null |
from slackclient import SlackClient
import requests
import os
from Config import slack_env_var_token, slack_username
"""
These functions take care of sending slack messages and emails
"""
def slack_chat_messenger(message):
# NEVER LEAVE THE TOKEN IN YOUR CODE ON GITHUB, EVERYBODY WOULD HAVE ACCESS TO THE CHANNEL!
slack_token = os.environ.get(slack_env_var_token)
slack_client = SlackClient(slack_token)
api_call = slack_client.api_call("im.list")
user_slack_id = slack_username
# You need to know the user_slack_id to send a direct message to the user
if api_call.get('ok'):
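# im.list returns the bot's open DM channels; find the one that belongs to our target user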
for im in api_call.get("ims"):
if im.get("user") == user_slack_id:
im_channel = im.get("id")
slack_client.api_call("chat.postMessage", channel=im_channel, text=message, as_user=False)
def slack_chat_attachments(filepath):
slack_chat_messenger('Trying to send you {}'.format(filepath))
slack_token = os.environ.get(slack_env_var_token)
my_file = {
'file': (filepath+'.png', open(filepath+'.png', 'rb'), 'image/png', {
'Expires': '0'
})
}
payload = {
"filename":filepath+'.png',
"token":slack_token,
"channels": ['@Fede'],
"media": my_file
}
r = requests.post("https://slack.com/api/files.upload", params=payload, files=my_file)
print(r.text)
def upload_file(filepath):
"""Upload file to channel
Note:
URLs can be constructed from:
https://api.slack.com/methods/files.upload/test
"""
slack_chat_messenger('Trying to send you {}'.format(filepath))
slack_token = os.environ.get(slack_env_var_token)
data = {}
data['token'] = slack_token
data['file'] = filepath
data['filename'] = filepath
data['channels'] = [slack_username]
data['display_as_bot'] = True
filepath = data['file']
files = {
'content': (filepath, open(filepath, 'rb'), 'image/png', {
'Expires': '0'
})
}
data['media'] = files
response = requests.post(
url='https://slack.com/api/files.upload',
data=data,
headers={'Accept': 'application/json'},
files=files)
print(response.text)
def send_email_attachments(filename, filepath):
import smtplib
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
# Create the container (outer) email message.
msg = MIMEMultipart()
msg['Subject'] = filename
# me == the sender's email address
# family = the list of all recipients' email addresses
msg['From'] = 'federicopython@gmail.com'
msg['To'] = 'federicoclaudi@gmail.com'
body = "Analysis results"
msg.attach(MIMEText(body, 'plain'))
with open(filepath+'.png', 'rb') as fp:
img = MIMEImage(fp.read())
msg.attach(img)
# Send the email via our own SMTP server.
server = smtplib.SMTP('smtp.gmail.com:587')
server.ehlo()
server.starttls()
server.login('federicopython@gmail.com', '')
server.sendmail('federicopython@gmail.com', 'federicoclaudi@gmail.com', msg.as_string())
server.quit()
| 28.669643
| 106
| 0.646528
| 411
| 3,211
| 4.917275
| 0.36253
| 0.029688
| 0.021771
| 0.031667
| 0.160317
| 0.129639
| 0.102919
| 0.102919
| 0.102919
| 0.084117
| 0
| 0.002003
| 0.222672
| 3,211
| 111
| 107
| 28.927928
| 0.807692
| 0.140143
| 0
| 0.125
| 0
| 0
| 0.181887
| 0.045096
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.111111
| 0
| 0.166667
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd67c81828221987d83cf924bc48aff8f98affa6
| 3,364
|
py
|
Python
|
fluid.py
|
fomightez/stable-fluids
|
a7bdbb0960c746022a1dfc216dbfe928ee98947b
|
[
"Unlicense"
] | 1
|
2020-04-20T12:14:59.000Z
|
2020-04-20T12:14:59.000Z
|
fluid.py
|
fomightez/stable-fluids
|
a7bdbb0960c746022a1dfc216dbfe928ee98947b
|
[
"Unlicense"
] | null | null | null |
fluid.py
|
fomightez/stable-fluids
|
a7bdbb0960c746022a1dfc216dbfe928ee98947b
|
[
"Unlicense"
] | null | null | null |
import numpy as np
import scipy.sparse as sp
from scipy.ndimage import map_coordinates
from scipy.sparse.linalg import factorized
import operators as ops
class Fluid:
def __init__(self, shape, viscosity, quantities):
self.shape = shape
# Defining these here keeps the code somewhat more readable vs. computing them every time they're needed.
self.size = np.product(shape)
self.dimensions = len(shape)
# Variable viscosity, both in time and in space, is easy to set up; but it conflicts with the use of
# SciPy's factorized function because the diffusion matrix must be recalculated every frame.
# In order to keep the simulation speedy I use fixed viscosity.
self.viscosity = viscosity
# By dynamically creating advected-diffused quantities as needed prototyping becomes much easier.
self.quantities = {}
for q in quantities:
self.quantities[q] = np.zeros(self.size)
self.velocity_field = np.zeros((self.size, self.dimensions))
# The reshaping here corresponds to a partial flattening so that self.indices
# has the same shape as self.velocity_field.
# This makes calculating the advection map as simple as a single vectorized subtraction each frame.
self.indices = np.dstack(np.indices(self.shape)).reshape(self.size, self.dimensions)
self.gradient = ops.matrices(shape, ops.differences(1, (1,) * self.dimensions), False)
# Both viscosity and pressure equations are just Poisson equations similar to the steady state heat equation.
laplacian = ops.matrices(shape, ops.differences(1, (2,) * self.dimensions), True)
self.pressure_solver = factorized(laplacian)
# Making sure I use the sparse version of the identity function here so I don't cast to a dense matrix.
self.viscosity_solver = factorized(sp.identity(self.size) - laplacian * viscosity)
def advect_diffuse(self):
# Advection is computed backwards in time as described in Jos Stam's Stable Fluids whitepaper.
advection_map = np.moveaxis(self.indices - self.velocity_field, -1, 0)
def kernel(field):
# Credit to Philip Zucker for pointing out the aptness of map_coordinates here.
# Initially I was using SciPy's griddata function.
# While both of these functions do essentially the same thing, griddata is much slower.
advected = map_coordinates(field.reshape(self.shape), advection_map, order=2).flatten()
return self.viscosity_solver(advected) if self.viscosity > 0 else advected
# Apply viscosity and advection to each axis of the velocity field and each user-defined quantity.
for d in range(self.dimensions):
self.velocity_field[..., d] = kernel(self.velocity_field[..., d])
for k, q in self.quantities.items():
self.quantities[k] = kernel(q)
def project(self):
# Pressure is calculated from divergence which is in turn calculated from the gradient of the velocity field.
divergence = sum(self.gradient[d].dot(self.velocity_field[..., d]) for d in range(self.dimensions))
pressure = self.pressure_solver(divergence)
for d in range(self.dimensions):
self.velocity_field[..., d] -= self.gradient[d].dot(pressure)
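# Hedged usage sketch (not part of the original file): stepping the solver once the
# operators module is importable. The grid size, viscosity, and quantity name are illustrative.
# fluid = Fluid((128, 128), viscosity=1e-4, quantities=['dye'])
# for _ in range(200):
#     fluid.advect_diffuse()
#     fluid.project()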
| 51.753846
| 117
| 0.69352
| 456
| 3,364
| 5.070175
| 0.407895
| 0.050606
| 0.051471
| 0.031142
| 0.108131
| 0.074827
| 0.037197
| 0.037197
| 0.037197
| 0.037197
| 0
| 0.003082
| 0.2283
| 3,364
| 64
| 118
| 52.5625
| 0.887519
| 0.412901
| 0
| 0.058824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.147059
| 0
| 0.323529
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd69e272fd1cf6715ec8277d234fe3f1835d95b2
| 879
|
py
|
Python
|
setup.py
|
ngocjr7/geneticpython
|
4b4157523ce13b3da56cef61282cb0a984cd317b
|
[
"MIT"
] | null | null | null |
setup.py
|
ngocjr7/geneticpython
|
4b4157523ce13b3da56cef61282cb0a984cd317b
|
[
"MIT"
] | null | null | null |
setup.py
|
ngocjr7/geneticpython
|
4b4157523ce13b3da56cef61282cb0a984cd317b
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
PROJECT_URLS = {
'Bug Tracker': 'https://github.com/ngocjr7/geneticpython/issues',
'Documentation': 'https://github.com/ngocjr7/geneticpython/blob/master/README.md',
'Source Code': 'https://github.com/ngocjr7/geneticpython'
}
with open('requirements.txt') as f:
install_requires = f.read().strip().split('\n')
setup(name='geneticpython',
description='A simple and friendly Python framework for genetic-based algorithms',
author='Ngoc Bui',
long_description=long_description,
long_description_content_type="text/markdown",
author_email='ngocjr7@gmail.com',
project_urls=PROJECT_URLS,
version='0.0.2',
packages=find_packages(),
install_requires=install_requires,
python_requires='>=3.6')
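# Hedged note, not part of the original file: with this setup.py in place the package is
# typically installed with `pip install .` (or `pip install -e .` for development); both
# pick up install_requires from requirements.txt as parsed above.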
| 33.807692
| 88
| 0.703072
| 108
| 879
| 5.574074
| 0.592593
| 0.099668
| 0.069767
| 0.104651
| 0.169435
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012129
| 0.155859
| 879
| 25
| 89
| 35.16
| 0.799191
| 0
| 0
| 0
| 0
| 0
| 0.386803
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.047619
| 0
| 0.047619
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd767e6d50fc90c7d830096cddd6903575b2142e
| 1,290
|
py
|
Python
|
server_common/helpers.py
|
GustavLero/EPICS-inst_servers
|
4bcdd6a80f1d9e074de3f0f7c66968d506981988
|
[
"BSD-3-Clause"
] | null | null | null |
server_common/helpers.py
|
GustavLero/EPICS-inst_servers
|
4bcdd6a80f1d9e074de3f0f7c66968d506981988
|
[
"BSD-3-Clause"
] | null | null | null |
server_common/helpers.py
|
GustavLero/EPICS-inst_servers
|
4bcdd6a80f1d9e074de3f0f7c66968d506981988
|
[
"BSD-3-Clause"
] | null | null | null |
import json
import os
import sys
from server_common.ioc_data_source import IocDataSource
from server_common.mysql_abstraction_layer import SQLAbstraction
from server_common.utilities import print_and_log, SEVERITY
def register_ioc_start(ioc_name, pv_database=None, prefix=None):
"""
A helper function to register the start of an ioc.
Args:
ioc_name: name of the ioc to start
        pv_database: dictionary of pvs in the ioc
prefix: prefix of pvs in this ioc
"""
try:
exepath = sys.argv[0]
if pv_database is None:
pv_database = {}
if prefix is None:
prefix = "none"
ioc_data_source = IocDataSource(SQLAbstraction("iocdb", "iocdb", "$iocdb"))
ioc_data_source.insert_ioc_start(ioc_name, os.getpid(), exepath, pv_database, prefix)
except Exception as e:
print_and_log("Error registering ioc start: {}: {}".format(e.__class__.__name__, e), SEVERITY.MAJOR)
def get_macro_values():
"""
Parse macro environment JSON into dict. To make this work use the icpconfigGetMacros program.
Returns: Macro Key:Value pairs as dict
"""
    macros = json.loads(os.environ.get("REFL_MACROS", "{}"))  # default to "{}" so a missing env var parses as an empty dict instead of raising
macros = {key: value for (key, value) in macros.items()}
return macros
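# Hedged usage sketch (not part of the original file); the IOC name, pv database, and
# prefix below are illustrative.
# register_ioc_start("REFL_01", pv_database={"STAT": {"type": "int"}}, prefix="IN:DEMO:")
# macros = get_macro_values()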
| 32.25
| 108
| 0.686047
| 178
| 1,290
| 4.758427
| 0.460674
| 0.059032
| 0.056671
| 0.035419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001001
| 0.225581
| 1,290
| 39
| 109
| 33.076923
| 0.846847
| 0.242636
| 0
| 0
| 0
| 0
| 0.071274
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.285714
| 0
| 0.428571
| 0.095238
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd782114838d338a027967eb958ee0dd0d6070b0
| 12,799
|
py
|
Python
|
rman_ui/rman_ui_txmanager.py
|
ian-hsieh/RenderManForBlender
|
c827f029f4cbbd1fcc71ed8d3694fc5ac58cc468
|
[
"MIT"
] | 12
|
2019-05-03T21:58:15.000Z
|
2022-02-24T07:02:21.000Z
|
rman_ui/rman_ui_txmanager.py
|
ian-hsieh/RenderManForBlender
|
c827f029f4cbbd1fcc71ed8d3694fc5ac58cc468
|
[
"MIT"
] | 4
|
2019-03-07T18:20:16.000Z
|
2020-09-24T21:53:15.000Z
|
rman_ui/rman_ui_txmanager.py
|
ian-hsieh/RenderManForBlender
|
c827f029f4cbbd1fcc71ed8d3694fc5ac58cc468
|
[
"MIT"
] | 3
|
2019-05-25T01:17:09.000Z
|
2019-09-13T14:43:12.000Z
|
import bpy
from bpy.props import StringProperty, IntProperty, CollectionProperty, EnumProperty, BoolProperty
from bpy.types import PropertyGroup, UIList, Operator, Panel
from bpy_extras.io_utils import ImportHelper
from .rman_ui_base import _RManPanelHeader
from ..txmanager3 import txparams
from ..rman_utils import texture_utils
from .. import txmanager3 as txmngr3
import os
import uuid
class TxFileItem(PropertyGroup):
"""UIList item representing a TxFile"""
name: StringProperty(
name="Name",
description="Image name",
default="")
tooltip: StringProperty(
name="tooltip",
description="Tool Tip",
default="")
nodeID: StringProperty(
name="nodeID",
description="Node ID (hidden)",
default="")
state: IntProperty(
name="state",
description="",
default=0
)
enable: BoolProperty(
name="enable",
description="Enable or disable this TxFileItem",
default=True
)
txsettings = ['texture_type',
'smode',
'tmode',
'texture_format',
'data_type',
'resize']
items = []
for item in txparams.TX_TYPES:
items.append((item, item, ''))
texture_type: EnumProperty(
name="Texture Type",
items=items,
description="Texture Type",
default=txparams.TX_TYPE_REGULAR)
items = []
for item in txparams.TX_WRAP_MODES:
items.append((item, item, ''))
smode: EnumProperty(
name="S Wrap",
items=items,
default=txparams.TX_WRAP_MODE_PERIODIC)
tmode: EnumProperty(
name="T Wrap",
items=items,
default=txparams.TX_WRAP_MODE_PERIODIC)
items = []
for item in txparams.TX_FORMATS:
items.append((item, item, ''))
texture_format: EnumProperty(
name="Format",
default=txparams.TX_FORMAT_PIXAR,
items=items,
description="Texture format")
items = []
items.append(('default', 'default', ''))
for item in txparams.TX_DATATYPES:
items.append((item, item, ''))
data_type: EnumProperty(
name="Data Type",
default=txparams.TX_DATATYPE_FLOAT,
items=items,
description="The data storage txmake uses")
items = []
for item in txparams.TX_RESIZES:
items.append((item, item, ''))
resize: EnumProperty(
name="Resize",
default=txparams.TX_RESIZE_UP_DASH,
items=items,
description="The type of resizing flag to pass to txmake")
class PRMAN_UL_Renderman_txmanager_list(UIList):
"""RenderMan TxManager UIList."""
def draw_item(self, context, layout, data, item, icon, active_data,
active_propname, index):
icons_map = {txmngr3.STATE_MISSING: 'ERROR',
txmngr3.STATE_EXISTS: 'CHECKBOX_HLT',
txmngr3.STATE_IS_TEX: 'TEXTURE',
txmngr3.STATE_IN_QUEUE: 'PLUS',
txmngr3.STATE_PROCESSING: 'TIME',
txmngr3.STATE_ERROR: 'CANCEL',
txmngr3.STATE_REPROCESS: 'TIME',
txmngr3.STATE_UNKNOWN: 'CANCEL',
txmngr3.STATE_INPUT_MISSING: 'ERROR'}
txfile = None
if item.nodeID != "":
txfile = texture_utils.get_txmanager().txmanager.get_txfile_from_id(item.nodeID)
else:
txfile = texture_utils.get_txmanager().txmanager.get_txfile_from_path(item.name)
if txfile:
custom_icon = icons_map[txfile.state]
else:
custom_icon = 'CANCEL'
if self.layout_type in {'DEFAULT', 'COMPACT'}:
layout.label(text=item.name, icon = custom_icon)
elif self.layout_type in {'GRID'}:
layout.alignment = 'CENTER'
layout.label(text="", icon = custom_icon)
class PRMAN_OT_Renderman_txmanager_parse_scene(Operator):
"""Parse scene for textures to add to to the txmanager"""
bl_idname = "rman_txmgr_list.parse_scene"
bl_label = "Parse Scene"
def execute(self, context):
rman_txmgr_list = context.scene.rman_txmgr_list
rman_txmgr_list.clear()
texture_utils.get_txmanager().txmanager.reset()
texture_utils.parse_for_textures(context.scene)
texture_utils.get_txmanager().txmake_all(blocking=False)
return{'FINISHED'}
class PRMAN_OT_Renderman_txmanager_pick_images(Operator, ImportHelper):
"""Pick images from a directory."""
bl_idname = "rman_txmgr_list.pick_images"
bl_label = "Pick Images"
filename: StringProperty(maxlen=1024)
directory: StringProperty(maxlen=1024)
files: CollectionProperty(type=bpy.types.PropertyGroup)
def execute(self, context):
rman_txmgr_list = context.scene.rman_txmgr_list
rman_txmgr_list.clear()
texture_utils.get_txmanager().txmanager.reset()
if len(self.files) > 0:
for f in self.files:
img = os.path.join(self.directory, f.name)
item = context.scene.rman_txmgr_list.add()
item.nodeID = str(uuid.uuid1())
texture_utils.get_txmanager().txmanager.add_texture(item.nodeID, img)
item.name = img
return{'FINISHED'}
class PRMAN_OT_Renderman_txmanager_clear_all_cache(Operator):
"""Clear RenderMan Texture cache"""
bl_idname = "rman_txmgr_list.clear_all_cache"
bl_label = "Clear Texture Cache"
def execute(self, context):
# needs to call InvalidateTexture
return{'FINISHED'}
class PRMAN_OT_Renderman_txmanager_reconvert_all(Operator):
"""Clear all .tex files and re-convert."""
bl_idname = "rman_txmgr_list.reconvert_all"
bl_label = "RE-Convert All"
def execute(self, context):
texture_utils.get_txmanager().txmanager.delete_texture_files()
texture_utils.get_txmanager().txmake_all(blocking=False)
return{'FINISHED'}
class PRMAN_OT_Renderman_txmanager_apply_preset(Operator):
"""Apply current settings to the selected texture."""
bl_idname = "rman_txmgr_list.apply_preset"
bl_label = "Apply preset"
def execute(self, context):
idx = context.scene.rman_txmgr_list_index
item = context.scene.rman_txmgr_list[idx]
txsettings = dict()
for attr in item.txsettings:
val = getattr(item, attr)
if attr == 'data_type' and val == 'default':
val = None
txsettings[attr] = val
if txsettings:
txfile = None
if item.nodeID != "":
txfile = texture_utils.get_txmanager().txmanager.get_txfile_from_id(item.nodeID)
else:
txfile = texture_utils.get_txmanager().txmanager.get_txfile_from_path(item.name)
txfile.params.set_params_from_dict(txsettings)
return{'FINISHED'}
class PRMAN_OT_Renderman_txmanager_add_texture(Operator):
"""Add texture."""
bl_idname = "rman_txmgr_list.add_texture"
bl_label = "add_texture"
filepath: StringProperty()
nodeID: StringProperty()
def execute(self, context):
txfile = texture_utils.get_txmanager().txmanager.get_txfile_from_path(self.filepath)
if not txfile:
return{'FINISHED'}
item = None
# check if nodeID already exists in the list
for i in context.scene.rman_txmgr_list:
if i.nodeID == self.nodeID:
item = i
break
if not item:
item = context.scene.rman_txmgr_list.add()
item.nodeID = self.nodeID
item.name = txfile.input_image
params = txfile.params
item.texture_type = params.texture_type
item.smode = params.smode
item.tmode = params.tmode
item.texture_type = params.texture_type
if params.data_type is not None:
item.data_type = params.data_type
item.resize = params.resize
item.state = txfile.state
if txfile.state == txmngr3.STATE_IS_TEX:
item.enable = False
item.tooltip = '\n' + txfile.tooltip_text()
# FIXME: should also add the nodes that this texture is referenced in
return{'FINISHED'}
class PRMAN_PT_Renderman_txmanager_list(_RManPanelHeader, Panel):
"""RenderMan Texture Manager Panel."""
bl_label = "RenderMan Texture Manager"
bl_idname = "PRMAN_PT_Renderman_txmanager_list"
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "scene"
def draw(self, context):
layout = self.layout
scene = context.scene
row = layout.row()
row.operator('rman_txmgr_list.parse_scene', text='Parse Scene')
# FIXME: not totally working. The done callbacks fail
#row.operator('rman_txmgr_list.pick_images', text='Pick Images')
row.operator('rman_txmgr_list.reconvert_all', text='Reconvert')
row.operator('rman_txmgr_list.clear_all_cache', text='Clear All Cache')
if scene.rman_txmgr_list_index >= 0 and scene.rman_txmgr_list:
row = layout.row()
row.template_list("PRMAN_UL_Renderman_txmanager_list", "The_List", scene,
"rman_txmgr_list", scene, "rman_txmgr_list_index", item_dyntip_propname="tooltip")
item = scene.rman_txmgr_list[scene.rman_txmgr_list_index]
row = layout.row()
row.label(text='Texture Settings')
row = layout.row()
row.enabled = item.enable
row.prop(item, "texture_type")
row = layout.row()
row.enabled = item.enable
row.prop(item, "smode")
row.prop(item, "tmode")
row = layout.row()
row.enabled = item.enable
row.prop(item, "texture_format")
row = layout.row()
row.enabled = item.enable
row.prop(item, "data_type")
row = layout.row()
row.enabled = item.enable
row.prop(item, "resize")
row = layout.row()
row.enabled = item.enable
row.alignment = 'RIGHT'
row.operator('rman_txmgr_list.apply_preset', text='Apply')
row = layout.row()
row.alignment='CENTER'
in_list = len(context.scene.rman_txmgr_list)
progress = 'All Converted'
qsize = texture_utils.get_txmanager().txmanager.workQueue.qsize()
if qsize != 0:
progress = 'Converting...%d left to convert' % (qsize)
row.label(text=progress)
def index_updated(self, context):
'''
When the index updates, make sure the texture settings
are in sync with the txmanager.
'''
idx = context.scene.rman_txmgr_list_index
if idx < 0:
return
item = context.scene.rman_txmgr_list[idx]
txfile = None
if item.nodeID != "":
txfile = texture_utils.get_txmanager().txmanager.get_txfile_from_id(item.nodeID)
else:
txfile = texture_utils.get_txmanager().txmanager.get_txfile_from_path(item.name)
if txfile:
params = txfile.params
item.texture_type = params.texture_type
item.smode = params.smode
item.tmode = params.tmode
item.texture_type = params.texture_type
if params.data_type is not None:
item.data_type = params.data_type
item.resize = params.resize
if txfile.state == txmngr3.STATE_IS_TEX:
item.enable = False
classes = [
TxFileItem,
PRMAN_UL_Renderman_txmanager_list,
PRMAN_OT_Renderman_txmanager_parse_scene,
PRMAN_OT_Renderman_txmanager_pick_images,
PRMAN_OT_Renderman_txmanager_clear_all_cache,
PRMAN_OT_Renderman_txmanager_reconvert_all,
PRMAN_OT_Renderman_txmanager_apply_preset,
PRMAN_OT_Renderman_txmanager_add_texture,
PRMAN_PT_Renderman_txmanager_list
]
def register():
for cls in classes:
bpy.utils.register_class(cls)
bpy.types.Scene.rman_txmgr_list = CollectionProperty(type = TxFileItem)
bpy.types.Scene.rman_txmgr_list_index = IntProperty(name = "RenderMan Texture Manager",
default = 0, update=index_updated)
def unregister():
del bpy.types.Scene.rman_txmgr_list
del bpy.types.Scene.rman_txmgr_list_index
for cls in classes:
bpy.utils.unregister_class(cls)
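# Hedged note, not part of the original file: Blender invokes register()/unregister()
# when the add-on is enabled or disabled; during development they can be called directly:
# register()
# ... use the Texture Manager panel under Properties > Scene ...
# unregister()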
| 32.734015
| 110
| 0.615595
| 1,446
| 12,799
| 5.20816
| 0.161826
| 0.041827
| 0.060417
| 0.047802
| 0.476564
| 0.384942
| 0.31377
| 0.259726
| 0.255079
| 0.220555
| 0
| 0.00319
| 0.28971
| 12,799
| 391
| 111
| 32.734015
| 0.825212
| 0.051176
| 0
| 0.357388
| 0
| 0
| 0.096969
| 0.030722
| 0
| 0
| 0
| 0.002558
| 0
| 1
| 0.037801
| false
| 0.003436
| 0.037801
| 0.003436
| 0.243986
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd788c7b5bde6a0a3088e641302680a262892fc0
| 943
|
py
|
Python
|
cousins-in-binary-tree/cousins-in-binary-tree.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | 2
|
2021-12-05T14:29:06.000Z
|
2022-01-01T05:46:13.000Z
|
cousins-in-binary-tree/cousins-in-binary-tree.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | null | null | null |
cousins-in-binary-tree/cousins-in-binary-tree.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | null | null | null |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def isCousins(self, root: Optional[TreeNode], x: int, y: int) -> bool:
        # conditions to be cousins: (1) different parents (2) same depth
        stack=[(root, 0, -1)]  # (node, depth, parent) triples; popped with pop(0), so the list acts as a FIFO queue for BFS
xlevel, ylevel = -1, -1
xparent, yparent = -1, -1
while(stack):
cur, depth, parent = stack.pop(0)
if cur.val==x:
xlevel, xparent = depth, parent
if cur.val==y:
ylevel, yparent = depth, parent
if cur.left:
stack.append((cur.left, depth+1, cur.val))
if cur.right:
stack.append((cur.right, depth+1, cur.val))
if xlevel==ylevel and xparent!=yparent:
return True
else:
return False
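# Hedged usage sketch (not part of the original file): LeetCode's harness supplies
# TreeNode and Optional; here the tree [1,2,3,null,4,null,5] is built by hand, where
# nodes 4 and 5 sit at the same depth under different parents and so are cousins.
# root = TreeNode(1, TreeNode(2, None, TreeNode(4)), TreeNode(3, None, TreeNode(5)))
# assert Solution().isCousins(root, 4, 5) is True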
| 36.269231
| 74
| 0.520679
| 118
| 943
| 4.127119
| 0.432203
| 0.041068
| 0.032854
| 0.065708
| 0.057495
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019802
| 0.35737
| 943
| 26
| 75
| 36.269231
| 0.783828
| 0.249205
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0
| 0
| 0.210526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd7d61b4fcf318d454a05f755e0919c0dd18ea88
| 2,964
|
py
|
Python
|
pycalc/MAVProxy/modules/mavproxy_gopro.py
|
joakimzhang/python-electron
|
79bc174a14c5286ca739bb7d8ce6522fdc6e9e80
|
[
"CC0-1.0"
] | null | null | null |
pycalc/MAVProxy/modules/mavproxy_gopro.py
|
joakimzhang/python-electron
|
79bc174a14c5286ca739bb7d8ce6522fdc6e9e80
|
[
"CC0-1.0"
] | 8
|
2021-01-28T19:26:22.000Z
|
2022-03-24T18:07:24.000Z
|
pycalc/MAVProxy/modules/mavproxy_gopro.py
|
joakimzhang/python-electron
|
79bc174a14c5286ca739bb7d8ce6522fdc6e9e80
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python
'''gopro control over mavlink for the solo-gimbal
To use this module connect to a Solo with a GoPro installed on the gimbal.
'''
import time, os
from MAVProxy.modules.lib import mp_module
from pymavlink import mavutil
class GoProModule(mp_module.MPModule):
def __init__(self, mpstate):
super(GoProModule, self).__init__(mpstate, "gopro", "gopro handling")
self.add_command('gopro', self.cmd_gopro, 'gopro control', [
'status',
'shutter <start|stop>',
'mode <video|camera>',
'power <on|off>'])
def cmd_gopro(self, args):
'''gopro commands'''
usage = "status, shutter <start|stop>, mode <video|camera>, power <on|off>"
mav = self.master.mav
if args[0] == "status":
self.cmd_gopro_status(args[1:])
return
if args[0] == "shutter":
name = args[1].lower()
if name == 'start':
mav.gopro_set_request_send(0, mavutil.mavlink.MAV_COMP_ID_GIMBAL,
mavutil.mavlink.GOPRO_COMMAND_SHUTTER, 1)
return
elif name == 'stop':
mav.gopro_set_request_send(0, mavutil.mavlink.MAV_COMP_ID_GIMBAL,
mavutil.mavlink.GOPRO_COMMAND_SHUTTER, 0)
return
else:
print("unrecognized")
return
if args[0] == "mode":
name = args[1].lower()
if name == 'video':
mav.gopro_set_request_send(0, mavutil.mavlink.MAV_COMP_ID_GIMBAL,
mavutil.mavlink.GOPRO_COMMAND_CAPTURE_MODE, 0)
return
elif name == 'camera':
mav.gopro_set_request_send(0, mavutil.mavlink.MAV_COMP_ID_GIMBAL,
mavutil.mavlink.GOPRO_COMMAND_CAPTURE_MODE, 1)
return
else:
print("unrecognized")
return
if args[0] == "power":
name = args[1].lower()
if name == 'on':
mav.gopro_set_request_send(0, mavutil.mavlink.MAV_COMP_ID_GIMBAL,
mavutil.mavlink.GOPRO_COMMAND_POWER, 1)
return
elif name == 'off':
mav.gopro_set_request_send(0, mavutil.mavlink.MAV_COMP_ID_GIMBAL,
mavutil.mavlink.GOPRO_COMMAND_POWER, 0)
return
else:
print("unrecognized")
return
print(usage)
def cmd_gopro_status(self, args):
'''show gopro status'''
master = self.master
if 'GOPRO_HEARTBEAT' in master.messages:
print(master.messages['GOPRO_HEARTBEAT'])
else:
print("No GOPRO_HEARTBEAT messages")
def init(mpstate):
'''initialise module'''
return GoProModule(mpstate)
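# Hedged usage note (not part of the original file): inside a MAVProxy session the
# module is loaded and driven with commands such as:
#   module load gopro
#   gopro status
#   gopro shutter start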
| 34.068966
| 83
| 0.5361
| 319
| 2,964
| 4.761755
| 0.247649
| 0.110599
| 0.04345
| 0.071099
| 0.515471
| 0.515471
| 0.45293
| 0.45293
| 0.400263
| 0.400263
| 0
| 0.010616
| 0.364372
| 2,964
| 86
| 84
| 34.465116
| 0.795648
| 0.065452
| 0
| 0.4
| 0
| 0
| 0.109534
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061538
| false
| 0
| 0.046154
| 0
| 0.292308
| 0.092308
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd7f9dbcfe5bd13ce56beb5ae807d4bb63f3c4df
| 1,609
|
py
|
Python
|
Program_python/Extractfolderimage.py
|
pection/MN-furniture
|
4c796f072662c15b2a263272ef2637e221c42cab
|
[
"MIT"
] | 1
|
2022-02-22T06:20:56.000Z
|
2022-02-22T06:20:56.000Z
|
Program_python/Extractfolderimage.py
|
pection/MN-furniture
|
4c796f072662c15b2a263272ef2637e221c42cab
|
[
"MIT"
] | null | null | null |
Program_python/Extractfolderimage.py
|
pection/MN-furniture
|
4c796f072662c15b2a263272ef2637e221c42cab
|
[
"MIT"
] | 1
|
2020-11-24T18:18:42.000Z
|
2020-11-24T18:18:42.000Z
|
import os
import sys
import numpy as np
from PIL import Image
num=1
path ="/Users/pection/Documents/mn_furniture/AddwatermarkProgram/Lastday/"
#we shall store all the file names in this list
filelist=[]
for root, dirs, files in os.walk(path):
for file in files:
if(file.endswith(".jpg")):
filelist.append(os.path.join(root,file))
print (filelist)
logo=Image.open('logo.png')
logo2=Image.open('logo2.png')
watermark = Image.open('WatermarkB5.png')
watermark2 = Image.open('WatermarkB3.png')
logoWidth = watermark.width
logoHeight = watermark.height
watermarkW=watermark.width
watermarkH=watermark.height
logo2Width = watermark2.width
logo2Height = watermark2.height
for filename in filelist:
image = Image.open(filename)
# imageWidth = image.width
# imageHeight = image.height
# if imageWidth<500 :
# img_w, img_h = image.size
# bg_w, bg_h = watermark2.size
# offset = ((bg_w - img_w) // 2, (bg_h - img_h) // 2)
# image.paste(logo2, (0, 0), logo2)
# image2=image.copy()
# image2.paste(watermark2,(int((img_w-logo2Width)/2),int((img_h-logo2Height)/2)),watermark2)
# else :
# img_w, img_h = image.size
# bg_w, bg_h = watermark.size
# offset = ((bg_w - img_w) // 2, (bg_h - img_h) // 2)
# image.paste(logo, (0, 0), logo)
# image2=image.copy()
# image2.paste(watermark,(int((img_w-logoWidth)/2),int((img_h-logoHeight)/2)),watermark)
num += 1
# image.save(filename)
image.save('/Users/pection/Documents/mn_furniture/AddwatermarkProgram/Extract/'+str(num)+'.png')
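# Hedged sketch (not part of the original file) of the centered paste that the
# commented-out block above implements, reusing variables defined earlier in this script:
# image.paste(logo, (0, 0), logo)
# image.paste(watermark,
#             (int((image.width - logoWidth) / 2), int((image.height - logoHeight) / 2)),
#             watermark)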
| 35.755556
| 100
| 0.661902
| 223
| 1,609
| 4.67713
| 0.345291
| 0.023011
| 0.040268
| 0.044104
| 0.260786
| 0.21093
| 0.113135
| 0.113135
| 0.113135
| 0.113135
| 0
| 0.028396
| 0.19018
| 1,609
| 45
| 101
| 35.755556
| 0.772064
| 0.429459
| 0
| 0
| 0
| 0
| 0.207547
| 0.146504
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.153846
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd7fb45e0f3cff64598edf9ddf119adc6b039b8e
| 1,986
|
py
|
Python
|
BrainML/__init__.py
|
bogdan124/DeepML
|
ad5e904cc9fcd3c499bbca3538525d83fde003f5
|
[
"Apache-2.0"
] | null | null | null |
BrainML/__init__.py
|
bogdan124/DeepML
|
ad5e904cc9fcd3c499bbca3538525d83fde003f5
|
[
"Apache-2.0"
] | null | null | null |
BrainML/__init__.py
|
bogdan124/DeepML
|
ad5e904cc9fcd3c499bbca3538525d83fde003f5
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
from BrainML.activation import Activator
from BrainML.layers import *
from BrainML.optimizer import Optimizer
from tensorflow.python.util import deprecation
##deprecation._PRINT_DEPRECATION_WARNINGS = False
##tf.compat.v1.disable_eager_execution()
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
class Network:
def __init__(self,layers=None, name=None):
self.model=None
self.output=None
self.layers=layers
self.compile=None
self.name=name
newLayers=[]
##if layers[0].shape!=None:
## newLayers.append(tf.keras.Input(input_shape=layers[0].shape))
for i in range(0,len(layers)):
newLayers.append(self.layers[i].layer)
##newLayers[i].value_to_feed=
self.model=tf.keras.Sequential()##newLayers, name
for i in newLayers:
self.model.add(i)
def train(self,x=None, y=None, batch_size=None, epochs=1, verbose=1, callbacks=None,
validation_split=0.0, validation_data=None, shuffle=True, class_weight=None,
sample_weight=None, initial_epoch=0, steps_per_epoch=None,
validation_steps=None, validation_batch_size=None, validation_freq=1,
max_queue_size=10, workers=1, use_multiprocessing=False,optimizer='rmsprop',
loss=None, metrics=None, loss_weights=None,weighted_metrics=None, run_eagerly=None):
        if loss is None:
            loss = "mse"
        if metrics is None or metrics[0] == "all":
            # independent check (was elif), so the default metrics apply even when loss was also unset
            metrics = ["mae", "acc"]
self.compile=self.model.compile(optimizer, loss, metrics, loss_weights,weighted_metrics, run_eagerly)##initial_epoch,steps_per_epoch
        self.output = self.model.fit(x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch=steps_per_epoch, validation_steps=validation_steps, validation_batch_size=validation_batch_size, validation_freq=validation_freq, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing)  # keywords from steps_per_epoch onward; the original positional call skipped steps_per_epoch and shifted every later argument into the wrong fit() parameter
return self.output
def Summary(self):
self.model.summary()
## if __name__ == "__main__":
## pass
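# Hedged usage sketch (not part of the original file); the layer constructors are assumed
# to come from BrainML.layers and the data shapes are illustrative:
# net = Network(layers=[Dense(32, activation="relu"), Dense(1)], name="demo")
# history = net.train(x=x_train, y=y_train, epochs=5, loss="mse")
# net.Summary()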
| 38.192308
| 268
| 0.735146
| 276
| 1,986
| 5.068841
| 0.369565
| 0.038599
| 0.017155
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008839
| 0.145519
| 1,986
| 52
| 269
| 38.192308
| 0.815557
| 0.140483
| 0
| 0
| 0
| 0
| 0.027811
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081081
| false
| 0
| 0.162162
| 0
| 0.297297
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd81524e1e000d2bbdd8e39c55a281ea1c78ab94
| 1,336
|
py
|
Python
|
config.py
|
MGorr/icons_updater
|
aa9f9177a565fbe590cf959f625f049024e01efb
|
[
"MIT"
] | 1
|
2021-06-18T06:58:15.000Z
|
2021-06-18T06:58:15.000Z
|
config.py
|
MGorr/icons_updater
|
aa9f9177a565fbe590cf959f625f049024e01efb
|
[
"MIT"
] | null | null | null |
config.py
|
MGorr/icons_updater
|
aa9f9177a565fbe590cf959f625f049024e01efb
|
[
"MIT"
] | null | null | null |
"""Configuration class for icons updating."""
import os
from configparser import ConfigParser
_DESTINATION_NAME = 'dst'
_MAGICK_NAME = 'path'
_SOURCES_NAME = 'src'
class Config:
"""Configuration class."""
def __init__(self, config_file=None, src=None, dst=None):
"""Constructor."""
parser = ConfigParser()
if config_file:
parser.read(config_file)
section = parser['settings'] if config_file else None
if config_file and _MAGICK_NAME in section:
os.environ['PATH'] += os.pathsep + \
os.path.abspath(section[_MAGICK_NAME])
if not src and config_file:
src = section[_SOURCES_NAME]
elif not src:
raise RuntimeError('Source folder should be set!')
self._src = os.path.normpath(src)
if not dst and config_file:
dst = section[_DESTINATION_NAME]
elif not dst:
raise RuntimeError('Destination folder should be set!')
self._dst = os.path.normpath(dst)
assert self._dst, 'Destination folder should be set!'
assert self._src, 'Sources folder should be set!'
def destination(self):
"""Destination folder."""
return self._dst
def sources(self):
"""Sources folder."""
return self._src
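# Hedged usage sketch (not part of the original file); the paths are illustrative.
# cfg = Config(config_file="updater.ini")            # read src/dst from the [settings] section
# cfg = Config(src="./icons", dst="./build/icons")   # or pass the folders directly
# print(cfg.sources(), cfg.destination())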
| 30.363636
| 72
| 0.610778
| 155
| 1,336
| 5.064516
| 0.296774
| 0.089172
| 0.071338
| 0.086624
| 0.103185
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.289671
| 1,336
| 43
| 73
| 31.069767
| 0.827187
| 0.081587
| 0
| 0
| 0
| 0
| 0.120733
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 1
| 0.1
| false
| 0
| 0.066667
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd837f67ec7177838bf8a526749af097805f6779
| 15,142
|
py
|
Python
|
CalsCamera/main.py
|
NoDrones/Imaging
|
555c8aeced98097379b80f448689f2bf2974c3e9
|
[
"MIT"
] | 1
|
2019-01-28T21:55:53.000Z
|
2019-01-28T21:55:53.000Z
|
CalsCamera/main.py
|
NoDrones/Imaging
|
555c8aeced98097379b80f448689f2bf2974c3e9
|
[
"MIT"
] | null | null | null |
CalsCamera/main.py
|
NoDrones/Imaging
|
555c8aeced98097379b80f448689f2bf2974c3e9
|
[
"MIT"
] | null | null | null |
#Author: Calvin Ryan
import sensor, image, time, pyb, ustruct, math, utime
def get_gain():
gain_reg_val = sensor.__read_reg(0x00)
#print("gain_reg_val: " + str(gain_reg_val))
bitwise_gain_range = (gain_reg_val & 0b11110000) >> 4 #get the highest four bits which correspond to gain range. Depends on the bits set. Can be 0 > 4 for a total of 5 ranges.
#print("bitwise_gain_range: " + str(bin(bitwise_gain_range)))
gain_range = ((bitwise_gain_range & 0b1000) >> 3) + ((bitwise_gain_range & 0b0100) >> 2) + ((bitwise_gain_range & 0b0010) >> 1) + (bitwise_gain_range & 0b0001) #get an int for the number of bits set
#print("read_gain_range: " + str(gain_range))
gain_LSBs = gain_reg_val & 0b00001111 #The 4 lsbs represent the fine tuning gain control.
#print("gain_LSBs: " + str(gain_LSBs))
gain_curve_index = 16 * gain_range + gain_LSBs # this gives you an index from 0 > 79 which is the range of points you need to describe every possible gain setting along the new gain curve
#print("gain_curve_index: " + str(gain_curve_index))
gain = 10 ** (30 * gain_curve_index / 79 / 20) #10** = 10 ^, calculate the gain along the new exponential gain curve I defined earlier on
#print("gain: " + str(gain))
return gain
def set_gain(gain_db):
# gain_correlation_equation = 20*log(gain_db) = 30*(index)/79
gain_curve_index = (79 * 20 * math.log(gain_db, 10)) / 30 #return an index from the new exponential gain curve...
#... Can be 0 > 79 which is the # of points needed to describe every gain setting along the new curve
#print("gain_curve_index: " + str(gain_curve_index))
gain_range = int(gain_curve_index/16) #find a 0 > 4 value for the gain range. This range is defined by the 4 msbs. Thus we divide and round down by the LSB of the 4 MSBs (16)
#print("gain_range: " + str(gain_range))
gain_LSBs = int(gain_curve_index - 16 * gain_range) & 0b00001111 #Find how many LSBs above the gain range the index is. This is your fine tuning gain control
#print("gain_LSBs: " + str(bin(gain_LSBs)))
bitwise_gain_range = (0b1111 << gain_range) & 0b11110000 #make the gain range bitwise
#print("bitwise_gain_range: " + str(bin(bitwise_gain_range)))
gain_reg_val = bitwise_gain_range | gain_LSBs #OR
#print("gain to set: " + str(bin(gain_reg_val)))
sensor.__write_reg(0x00, gain_reg_val)
return gain_reg_val
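# Hedged sanity check of the gain-curve math above (pure arithmetic, no sensor needed):
# 20*log10(gain) = 30*index/79, so index 0 -> 1x gain and index 79 -> 10**(30/20) ~ 31.6x,
# i.e. a 0..30 dB range; set_gain(get_gain()) round-trips to the same register value up to
# the LSB rounding in gain_LSBs.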
def set_custom_exposure(high_l_mean_thresh = 17, low_l_mean_thresh = 16):
try:
print("Starting Exposure Adjustment...")
b_gain = sensor.__read_reg(0x01)
r_gain = sensor.__read_reg(0x02)
g_gain = sensor.__read_reg(0x03)
r_gain = round(r_gain/4)
g_gain = round(g_gain/4)
b_gain = round(b_gain/4)
sensor.__write_reg(0x01, b_gain)
sensor.__write_reg(0x02, r_gain)
sensor.__write_reg(0x03, g_gain)
img = sensor.snapshot() # Take a picture and return the image.
img_stats = img.get_statistics()
l_mean = img_stats.l_mean()
count = 0
cur_gain = get_gain()
while(((l_mean > high_l_mean_thresh) | (l_mean < low_l_mean_thresh))) & (count < 256) & (cur_gain >= 0):
img = sensor.snapshot() # Take a picture and return the image.
img_stats = img.get_statistics()
l_mean = img_stats.l_mean()
if ((cur_gain < 1) | (cur_gain > 32)):
break
if l_mean > high_l_mean_thresh:
new_gain = cur_gain - .1
elif l_mean < low_l_mean_thresh:
new_gain = cur_gain + .1
else:
break #we're in the range now!
set_gain(new_gain)
cur_gain = new_gain
count += 1
if (count < 310) | (cur_gain == 0):
print("Exposure Adjustment Complete.")
return l_mean
else:
print("Exposure Adjustment Incomplete.")
return -1
except Exception as e:
print(e)
print("Error occured!")
return -2
if __name__ == "__main__":
########### SETUP STUFF
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)
clock = time.clock()
i2c_obj = pyb.I2C(2, pyb.I2C.SLAVE, addr=0x12)
i2c_obj.deinit() # Fully reset I2C device...
i2c_obj = pyb.I2C(2, pyb.I2C.SLAVE, addr=0x12)
#get in focus balance. You have two seconds.
t_start = time.ticks()
t_elapsed = 0
while(t_elapsed < 1): #ignore bc 1 ms
img = sensor.snapshot()
t_elapsed = time.ticks() - t_start
sensor.set_auto_gain(False) # must be turned off for color tracking
sensor.set_auto_whitebal(False) # must be turned off for color tracking
sensor.set_auto_exposure(False)
sensor.set_contrast(+3)
print()
pre_adjust_r_gain = sensor.__read_reg(0x02)
pre_adjust_g_gain = sensor.__read_reg(0x03)
pre_adjust_b_gain = sensor.__read_reg(0x01)
pre_adjust_overall_gain = sensor.__read_reg(0x00)
pre_adjust_exposure = (sensor.__read_reg(0x08) << 8) + sensor.__read_reg(0x10)
print("R gain: " + str(pre_adjust_r_gain))
print("G gain: " + str(pre_adjust_g_gain))
print("B gain: " + str(pre_adjust_b_gain))
print("Overall gain: " + str(pre_adjust_overall_gain))
print("exposure: " + str(pre_adjust_exposure))
print('------------------------------------')
set_l_mean = set_custom_exposure() #default thresholds
print(set_l_mean)
post_adjust_r_gain = sensor.__read_reg(0x02)
post_adjust_g_gain = sensor.__read_reg(0x03)
post_adjust_b_gain = sensor.__read_reg(0x01)
post_adjust_overall_gain = sensor.__read_reg(0x00)
post_adjust_exposure = (sensor.__read_reg(0x08) << 8) + sensor.__read_reg(0x10)
print("R gain: " + str(post_adjust_r_gain))
print("G gain: " + str(post_adjust_g_gain))
print("B gain: " + str(post_adjust_b_gain))
print("Overall gain: " + str(post_adjust_overall_gain))
print("exposure: " + str(post_adjust_exposure))
print()
img = sensor.snapshot()
# should pull img_number from a text file and read the plant_id from a qr code or beaglebone
# default mode is pyb.usb_mode('VCP+MSC')
'''
pyb.usb_mode('VCP+HID')
utime.sleep_ms(1000)
last_photo_id_path = "last_photo_id.txt"
last_photo_id_fd = open(last_photo_id_path, "w+")
img_number_str = last_photo_id_fd.read()
print(img_number_str)
img_number_str = last_photo_id_fd.write("696969")
print("Written bytes: " + str(img_number_str))
img_number_str = last_photo_id_fd.read()
print(img_number_str)
last_photo_id_fd.close()
img_number = 1
plant_id = 1
img_id = str(img_number) + "_plant_" + str(plant_id)
raw_str = "raw_" + str(img_id)
raw_write = image.ImageWriter(raw_str)
raw_write.add_frame(img)
raw_write.close()
img.compress(quality = 100)
img.save("img_" + str(img_id))
raw_read = image.ImageReader(raw_str)
img = raw_read.next_frame(copy_to_fb = True, loop = False)
raw_read.close()
'''
'''
L = Lightness where 0 is black and 100 is white
A = -127 is green and 128 is red
B = -127 is blue and 128 is yellow.
'''
img_stats = img.get_statistics()
########### FIND BAD BLOBS
unhealthy_full_l_mean = 0
unhealthy_full_a_mean = 0
unhealthy_full_b_mean = 0
unhealthy_centroid_l_mean = 0
unhealthy_centroid_a_mean = 0
unhealthy_centroid_b_mean = 0
unhealthy_blob_l_mean = 0
unhealthy_blob_a_mean = 0
unhealthy_blob_b_mean = 0
healthy_full_l_mean = 0
healthy_full_a_mean = 0
healthy_full_b_mean = 0
healthy_centroid_l_mean = 0
healthy_centroid_a_mean = 0
healthy_centroid_b_mean = 0
healthy_blob_l_mean = 0
healthy_blob_a_mean = 0
healthy_blob_b_mean = 0
blob_index = -1
stage_one_bad_thresholds = [(20, 100, -10, 127, 3, 128)]
for blob_index, stage_one_bad_blob in enumerate(img.find_blobs(stage_one_bad_thresholds, pixels_threshold=100, area_threshold=100, merge = False, margin = 15)):
rect_stats = img.get_statistics(roi = stage_one_bad_blob.rect())
print("stage_one_bad_blob: " + str(stage_one_bad_blob))
print("density: " + str(stage_one_bad_blob.density()))
print("full: " + str(rect_stats))
unhealthy_full_l_mean += rect_stats[0]
unhealthy_full_a_mean += rect_stats[8]
unhealthy_full_b_mean += rect_stats[16]
side_l = stage_one_bad_blob.density() * min(stage_one_bad_blob[2], stage_one_bad_blob[3])
partial_hist = img.get_histogram(roi = (stage_one_bad_blob.cx() - round(side_l/2), stage_one_bad_blob.cy() - round(side_l/2), round(side_l), round(side_l)))
partial_stats = partial_hist.get_statistics()
print("partial: "+ str(partial_stats))
unhealthy_centroid_l_mean += partial_stats[0]
unhealthy_centroid_a_mean += partial_stats[8]
unhealthy_centroid_b_mean += partial_stats[16]
blob_stats = img.get_statistics(roi = stage_one_bad_blob.rect(), thresholds = stage_one_bad_thresholds)
print("blob: "+ str(blob_stats))
print("\n")
unhealthy_blob_l_mean += blob_stats[0]
unhealthy_blob_a_mean += blob_stats[8]
unhealthy_blob_b_mean += blob_stats[16]
img.draw_rectangle(stage_one_bad_blob.rect(), color = (255, 255, 255)) #purple
#img.draw_rectangle((stage_one_bad_blob.cx() - round(side_l/2), stage_one_bad_blob.cy() - round(side_l/2), round(side_l), round(side_l)), color = (255, 85, 0))
if blob_index != -1:
unhealthy_full_l_mean = unhealthy_full_l_mean/(blob_index + 1)
unhealthy_full_a_mean = unhealthy_full_a_mean/(blob_index + 1)
unhealthy_full_b_mean = unhealthy_full_b_mean/(blob_index + 1)
unhealthy_centroid_l_mean = unhealthy_centroid_l_mean/(blob_index + 1)
unhealthy_centroid_a_mean = unhealthy_centroid_a_mean/(blob_index + 1)
unhealthy_centroid_b_mean = unhealthy_centroid_b_mean/(blob_index + 1)
unhealthy_blob_l_mean = unhealthy_blob_l_mean/(blob_index + 1)
unhealthy_blob_a_mean = unhealthy_blob_a_mean/(blob_index + 1)
unhealthy_blob_b_mean = unhealthy_blob_b_mean/(blob_index + 1)
print("------------------------------------------------------------------------")
########### FIND GOOD BLOBS
#stage_one_good_thresholds = [(img_stats.l_mean() - 1, 100, -127, img_stats.a_mean() - 4, img_stats.b_mean() - 8, 60)]
stage_one_good_thresholds = [(25, 100, -127, -3, -15, 3)]
for blob_index, stage_one_good_blob in enumerate(img.find_blobs(stage_one_good_thresholds, pixels_threshold=100, area_threshold=100, merge = False, margin = 15)):
rect_stats = img.get_statistics(roi = stage_one_good_blob.rect())
print("stage_one_good_blob: " + str(stage_one_good_blob))
print("density: " + str(stage_one_good_blob.density()))
print("full: "+ str(rect_stats))
healthy_full_l_mean += rect_stats[0]
healthy_full_a_mean += rect_stats[8]
healthy_full_b_mean += rect_stats[16]
side_l = stage_one_good_blob.density() * min(stage_one_good_blob[2], stage_one_good_blob[3])
partial_hist = img.get_histogram(roi = (stage_one_good_blob.cx() - round(side_l/2), stage_one_good_blob.cy() - round(side_l/2), round(side_l), round(side_l)))
partial_stats = partial_hist.get_statistics()
print("partial: "+ str(partial_stats))
healthy_centroid_l_mean += partial_stats[0]
healthy_centroid_a_mean += partial_stats[8]
healthy_centroid_b_mean += partial_stats[16]
blob_stats = img.get_statistics(roi = stage_one_good_blob.rect(), thresholds = stage_one_good_thresholds)
print("blob: "+ str(blob_stats))
print("\n")
healthy_blob_l_mean += blob_stats[0]
healthy_blob_a_mean += blob_stats[8]
healthy_blob_b_mean += blob_stats[16]
img.draw_rectangle(stage_one_good_blob.rect(), color = (0, 0, 0)) #black
#img.draw_rectangle((stage_one_good_blob.cx() - round(side_l/2), stage_one_good_blob.cy() - round(side_l/2), round(side_l), round(side_l)), color = (255, 85, 0))
########## COLOR IT ALL IN
for x in range(stage_one_good_blob[2]):
for y in range(stage_one_good_blob[3]):
pix_location = (stage_one_good_blob[0] + x, stage_one_good_blob[1] + y)
pix_vals = img.get_pixel(pix_location[0], pix_location[1])
lab_pix_vals = image.rgb_to_lab(pix_vals)
if ((lab_pix_vals[1] < (blob_stats.a_mean() + 2 * blob_stats.a_stdev())) & (lab_pix_vals[0] >= (blob_stats.l_mean() - .1 * blob_stats.l_stdev()))): #& (abs(lab_pix_vals[2] - lab_pix_vals[1]) > 10) & (lab_pix_vals[0] > (blob_stats.l_mean() - 10)):
pass
else:
pass
#img.set_pixel(pix_location[0], pix_location[1], (255, 0, 0))
if blob_index != -1:
healthy_full_l_mean = healthy_full_l_mean/(blob_index + 1)
healthy_full_a_mean = healthy_full_a_mean/(blob_index + 1)
healthy_full_b_mean = healthy_full_b_mean/(blob_index + 1)
healthy_centroid_l_mean = healthy_centroid_l_mean/(blob_index + 1)
healthy_centroid_a_mean = healthy_centroid_a_mean/(blob_index + 1)
healthy_centroid_b_mean = healthy_centroid_b_mean/(blob_index + 1)
healthy_blob_l_mean = healthy_blob_l_mean/(blob_index + 1)
healthy_blob_a_mean = healthy_blob_a_mean/(blob_index + 1)
healthy_blob_b_mean = healthy_blob_b_mean/(blob_index + 1)
print(img.compress_for_ide(quality = 100))
print("~~~~~~~~~~~~~~~ RESULTS: ~~~~~~~~~~~~~~~~")
print("good thresholds: " + str(stage_one_good_thresholds))
print("bad thresholds: " + str(stage_one_bad_thresholds))
print("unhealthy full l mean: " + str(unhealthy_full_l_mean))
print("unhealthy full a mean: " + str(unhealthy_full_a_mean))
print("unhealthy full b mean: " + str(unhealthy_full_b_mean))
#print("unhealthy centroid l mean: " + str(unhealthy_centroid_l_mean))
#print("unhealthy centroid a mean: " + str(unhealthy_centroid_a_mean))
#print("unhealthy centroid b mean: " + str(unhealthy_centroid_b_mean))
print("unhealthy blob l mean: " + str(unhealthy_blob_l_mean))
print("unhealthy blob a mean: " + str(unhealthy_blob_a_mean))
print("unhealthy blob b mean: " + str(unhealthy_blob_b_mean))
print("healthy full l mean: " + str(healthy_full_l_mean))
print("healthy full a mean: " + str(healthy_full_a_mean))
print("healthy full b mean: " + str(healthy_full_b_mean))
#print("healthy centroid l mean: " + str(healthy_centroid_l_mean))
#print("healthy centroid a mean: " + str(healthy_centroid_a_mean))
#print("healthy centroid b mean: " + str(healthy_centroid_b_mean))
print("healthy blob l mean: " + str(healthy_blob_l_mean))
print("healthy blob a mean: " + str(healthy_blob_a_mean))
print("healthy blob b mean: " + str(healthy_blob_b_mean))
| 43.636888
| 262
| 0.66616
| 2,274
| 15,142
| 4.046614
| 0.134565
| 0.030428
| 0.029993
| 0.027385
| 0.526081
| 0.43697
| 0.312649
| 0.232667
| 0.197892
| 0.197892
| 0
| 0.036568
| 0.212588
| 15,142
| 346
| 263
| 43.763006
| 0.735218
| 0.190992
| 0
| 0.148837
| 0
| 0
| 0.068106
| 0.009678
| 0
| 0
| 0.007886
| 0
| 0
| 1
| 0.013953
| false
| 0.009302
| 0.004651
| 0
| 0.04186
| 0.223256
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd88982df37b33dce441276837b7773dc3af6b26
| 1,311
|
py
|
Python
|
tests/garage/tf/spaces/test_dict_space.py
|
shadiakiki1986/garage
|
095bb5d25b32df1d44b47e99a78a9b01796941d9
|
[
"MIT"
] | 3
|
2019-08-11T22:26:55.000Z
|
2020-11-28T10:23:50.000Z
|
tests/garage/tf/spaces/test_dict_space.py
|
shadiakiki1986/garage
|
095bb5d25b32df1d44b47e99a78a9b01796941d9
|
[
"MIT"
] | null | null | null |
tests/garage/tf/spaces/test_dict_space.py
|
shadiakiki1986/garage
|
095bb5d25b32df1d44b47e99a78a9b01796941d9
|
[
"MIT"
] | 2
|
2019-08-11T22:30:14.000Z
|
2021-03-25T02:57:50.000Z
|
"""This script tests garage.tf.spaces.dict functionality."""
import unittest
from garage.misc import ext
from garage.tf.envs import TfEnv
from tests.fixtures.envs.dummy import DummyDictEnv
class TestDictSpace(unittest.TestCase):
def test_dict_space(self):
ext.set_seed(0)
# A dummy dict env
dummy_env = DummyDictEnv()
dummy_act = dummy_env.action_space
dummy_act_sample = dummy_act.sample()
# A dummy dict env wrapped by garage.tf
tf_env = TfEnv(dummy_env)
tf_act = tf_env.action_space
tf_obs = tf_env.observation_space
# flat_dim
assert tf_act.flat_dim == tf_act.flatten(dummy_act_sample).shape[-1]
# flat_dim_with_keys
assert tf_obs.flat_dim == tf_obs.flat_dim_with_keys(
iter(["achieved_goal", "desired_goal", "observation"]))
# un/flatten
assert tf_act.unflatten(
tf_act.flatten(dummy_act_sample)) == dummy_act_sample
# un/flatten_n
samples = [dummy_act.sample() for _ in range(10)]
assert tf_act.unflatten_n(tf_act.flatten_n(samples)) == samples
# un/flatten_with_keys
assert tf_act.unflatten_with_keys(
tf_act.flatten_with_keys(dummy_act_sample, iter(["action"])),
iter(["action"]))
| 31.214286
| 76
| 0.661327
| 180
| 1,311
| 4.494444
| 0.316667
| 0.055624
| 0.121137
| 0.074166
| 0.116193
| 0.116193
| 0
| 0
| 0
| 0
| 0
| 0.004032
| 0.243326
| 1,311
| 41
| 77
| 31.97561
| 0.811492
| 0.139588
| 0
| 0
| 0
| 0
| 0.043011
| 0
| 0
| 0
| 0
| 0
| 0.217391
| 1
| 0.043478
| false
| 0
| 0.173913
| 0
| 0.26087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd8913997853973a6abd55f95d60d2c6a230000b
| 3,429
|
py
|
Python
|
utils/compare_MRAE.py
|
Liuhongzhi2018/SSRGAN
|
b5be922db1600aabb6a06ee52fb1c83ee738d794
|
[
"Apache-2.0"
] | 1
|
2022-01-21T09:01:48.000Z
|
2022-01-21T09:01:48.000Z
|
utils/compare_MRAE.py
|
Liuhongzhi2018/SSRGAN
|
b5be922db1600aabb6a06ee52fb1c83ee738d794
|
[
"Apache-2.0"
] | 1
|
2021-08-18T11:33:43.000Z
|
2021-08-18T11:33:43.000Z
|
utils/compare_MRAE.py
|
Liuhongzhi2018/SSRGAN
|
b5be922db1600aabb6a06ee52fb1c83ee738d794
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import os
import cv2
import numpy as np
import hdf5storage as hdf5
from scipy.io import loadmat
from matplotlib import pyplot as plt
from SpectralUtils import savePNG, projectToRGB
from EvalMetrics import computeMRAE
BIT_8 = 256
# read path
def get_files(path):
# read a folder, return the complete path
ret = []
for root, dirs, files in os.walk(path):
for filespath in files:
if filespath[-4:] == '.mat':
ret.append(os.path.join(root, filespath))
return ret
def get_jpgs(path):
    # read a folder, return the file name (despite the function name, this collects .mat files, not jpgs)
ret = []
for root, dirs, files in os.walk(path):
for filespath in files:
if filespath[-4:] == '.mat':
ret.append(filespath)
return ret
def check_path(path):
if not os.path.exists(path):
os.makedirs(path)
def demo_track1(filePath, filtersPath):
#filePath = "F:\\NTIRE 2020\\spectral reconstruction\\code1\\en4_track1\\ARAD_HS_0451.mat"
#filtersPath = "./resources/cie_1964_w_gain.npz"
# Load HS image and filters
cube = hdf5.loadmat(filePath)['cube']
#cube = loadmat(filePath)['cube']
filters = np.load(filtersPath)['filters']
# Project image to RGB
rgbIm = np.true_divide(projectToRGB(cube, filters), BIT_8)
# Save image file
path = 'temp_clean.png'
savePNG(rgbIm, path)
# Display RGB image
img = cv2.imread(path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(img)
plt.title('Example "Clean" Output Image')
plt.show()
def single_img_mrae(generated_mat_path, groundtruth_mat_path):
#generated_mat_path = "F:\\NTIRE 2020\\spectral reconstruction\\code1\\en4_track1\\ARAD_HS_0451.mat"
#groundtruth_mat_path = "F:\\NTIRE 2020\\spectral reconstruction\\NTIRE2020_Validation_Spectral\\ARAD_HS_0451.mat"
generated_mat = hdf5.loadmat(generated_mat_path)['cube'] # shape: (482, 512, 31)
groundtruth_mat = hdf5.loadmat(groundtruth_mat_path)['cube'] # shape: (482, 512, 31)
mrae = computeMRAE(generated_mat, groundtruth_mat)
print(mrae)
return mrae
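# Hedged note (not part of the original file): computeMRAE is assumed to implement the
# usual NTIRE mean relative absolute error, roughly np.mean(np.abs(generated - groundtruth)
# / groundtruth) over all pixels and bands; see EvalMetrics for the authoritative definition.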
def folder_img_mrae(generated_folder_path, groundtruth_folder_path):
#generated_folder_path = "F:\\NTIRE 2020\\spectral reconstruction\\code1\\en4_track1"
#groundtruth_folder_path = "F:\\NTIRE 2020\\spectral reconstruction\\NTIRE2020_Validation_Spectral"
matlist = get_jpgs(generated_folder_path)
avg_mrae = 0
for i, matname in enumerate(matlist):
generated_mat_path = os.path.join(generated_folder_path, matname)
groundtruth_mat_path = os.path.join(groundtruth_folder_path, matname)
generated_mat = hdf5.loadmat(generated_mat_path)['cube'] # shape: (482, 512, 31)
groundtruth_mat = hdf5.loadmat(groundtruth_mat_path)['cube'] # shape: (482, 512, 31)
mrae = computeMRAE(generated_mat, groundtruth_mat)
avg_mrae = avg_mrae + mrae
print('The %d-th mat\'s mrae:' % (i + 1), mrae)
avg_mrae = avg_mrae / len(matlist)
print('The average mrae is:', avg_mrae)
return avg_mrae
generated_folder_path = "F:\\NTIRE 2020\\spectral reconstruction\\ensemble\\ensemble\\track1"
generated_folder_path = "F:\\NTIRE 2020\\spectral reconstruction\\ensemble\\ensemble\\track2"
groundtruth_folder_path = "F:\\NTIRE 2020\\spectral reconstruction\\NTIRE2020_Validation_Spectral"
avg_mrae = folder_img_mrae(generated_folder_path, groundtruth_folder_path)
| 36.870968
| 118
| 0.706328
| 460
| 3,429
| 5.06087
| 0.271739
| 0.051546
| 0.034364
| 0.061856
| 0.515464
| 0.500859
| 0.480241
| 0.477663
| 0.47122
| 0.37543
| 0
| 0.044516
| 0.181102
| 3,429
| 92
| 119
| 37.271739
| 0.784544
| 0.238262
| 0
| 0.258065
| 0
| 0
| 0.121481
| 0.049749
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096774
| false
| 0
| 0.145161
| 0
| 0.306452
| 0.048387
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd895eff6bdbc6e4f11421a7c77e8c3865e7d03d
| 2,435
|
py
|
Python
|
board/send_message.py
|
ben741863140/cfsystem
|
227e269f16533719251962f4d8caee8b51091d2f
|
[
"Apache-2.0"
] | 4
|
2018-02-22T01:59:07.000Z
|
2020-07-09T06:28:46.000Z
|
board/send_message.py
|
ben741863140/cfsystem
|
227e269f16533719251962f4d8caee8b51091d2f
|
[
"Apache-2.0"
] | null | null | null |
board/send_message.py
|
ben741863140/cfsystem
|
227e269f16533719251962f4d8caee8b51091d2f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
import gzip
import re
import http.cookiejar
import urllib.request
import urllib.parse
# from logreg.sender import use_sender, sender
def send_message(handle, content, captcha):
def ungzip(data):
return gzip.decompress(data)
def get_csrf(data):
cer = re.compile('data-csrf=\'(.*?)\'> </span>', re.S)
return cer.findall(data)[0]
def getOpener(head):
        # deal with cookies
cj = http.cookiejar.CookieJar()
pro = urllib.request.HTTPCookieProcessor(cj)
opener = urllib.request.build_opener(pro)
header = []
for key, value in head.items():
elem = (key, value)
header.append(elem)
opener.addheaders = header
return opener
header = {
'Connection': 'Keep-Alive',
'Accept': 'text/html, application/xhtml+xml, */*',
'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',
'Accept-Encoding': 'gzip, deflate',
'Host': 'www.codeforces.com',
'DNT': '1'
}
url = 'http://codeforces.com/enter'
opener = getOpener(header)
data = opener.open(url).read()
data = ungzip(data)
csrf_token = get_csrf(data.decode())
# print(data)
# use = str(sender(use_sender()))
post_dict = {
'csrf_token': csrf_token,
'action': 'enter',
'ftaa': 'facg0yyl14awvys2jp',
'bfaa': 'd3165a769f306b8a47053d749e2d920a',
'handleOrEmail': 'scau_support',
'password': 'Aa123456',
'_tta': '435'
}
# print(use)
# print(handle)
# print(data)
# if 'scau_support' not in str(data):
# return -1
post_data = urllib.parse.urlencode(post_dict).encode()
opener.open(url, post_data)
url = 'http://codeforces.com/usertalk?other=' + str(handle)
data = opener.open(url).read()
data = ungzip(data)
if 'scau_support' not in str(data):
return -1
csrf_token = get_csrf(data.decode())
post_dict = {
'csrf_token': csrf_token,
'action': 'sendMessage',
'content': content,
'_tta': '435'
}
post_data = urllib.parse.urlencode(post_dict).encode()
data = opener.open(url, post_data).read()
data = ungzip(data)
# print(data)
if captcha not in str(data):
return 1
return 0
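# Hedged usage sketch (not part of the original file); handle and captcha are illustrative.
# Returns 0 on success, 1 if the captcha text is missing from the reply page (send likely
# failed), and -1 if the login did not take effect.
# result = send_message("some_handle", "your captcha is: 1234", "1234")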
| 29.695122
| 93
| 0.588501
| 299
| 2,435
| 4.705686
| 0.404682
| 0.03838
| 0.036958
| 0.036247
| 0.283582
| 0.253731
| 0.203269
| 0.157783
| 0.051173
| 0.051173
| 0
| 0.034884
| 0.258316
| 2,435
| 81
| 94
| 30.061728
| 0.744186
| 0.092813
| 0
| 0.234375
| 0
| 0.03125
| 0.238507
| 0.045061
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0.015625
| 0.078125
| 0.015625
| 0.234375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd8f9e880d1c5b15888f038a47c041322592d1b0
| 2,177
|
py
|
Python
|
arfit/run_carma_pack.py
|
farr/arfit
|
7ff6def331ef98f43f623da2d9867d1ac967448b
|
[
"MIT"
] | 5
|
2015-04-29T21:46:52.000Z
|
2021-05-13T04:59:23.000Z
|
arfit/run_carma_pack.py
|
afcarl/arfit
|
7ff6def331ef98f43f623da2d9867d1ac967448b
|
[
"MIT"
] | null | null | null |
arfit/run_carma_pack.py
|
afcarl/arfit
|
7ff6def331ef98f43f623da2d9867d1ac967448b
|
[
"MIT"
] | 2
|
2015-12-03T12:08:32.000Z
|
2018-05-26T16:20:31.000Z
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import carmcmc as cm
import numpy as np
import os
import plotutils.autocorr as ac
import sys
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input', required=True, metavar='FILE', help='input file')
parser.add_argument('--output', required=True, metavar='FILE', help='chain output')
parser.add_argument('--p', default=3, type=int, metavar='P', help='AR order (default: %(default)s)')
parser.add_argument('--q', default=2, type=int, metavar='Q', help='MA order (default: %(default)s)')
parser.add_argument('--neff', default=1000, type=int, metavar='N', help='number of independent samples (default: %(default)s)')
parser.add_argument('--tmax', default=100.0, type=float, metavar='T', help='maximum temperature')
parser.add_argument('--ntemp', default=10, type=int, metavar='N', help='number of temperatures')
args = parser.parse_args()
data = np.loadtxt(args.input)
times, tind = np.unique(data[:,0], return_index=True)
data = data[tind, :]
model = cm.CarmaModel(data[:,0], data[:,1], data[:,2], p=args.p, q=args.q)
thin = 1
nsamp = 10*args.neff
out, ext = os.path.splitext(args.output)
outtemp = out + '.TEMP' + ext
while True:
sample = model.run_mcmc(nsamp, nthin=thin, nburnin=thin*nsamp/2, tmax=args.tmax, ntemperatures=args.ntemp)
np.savetxt(outtemp, np.column_stack((sample.trace, sample.get_samples('loglik'), sample.get_samples('logpost'))))
os.rename(outtemp, args.output)
taus = []
for j in range(sample.trace.shape[1]):
taus.append(ac.autocorrelation_length_estimate(sample.trace[:,j]))
taus = np.array(taus)
if np.any(np.isnan(taus)):
neff_achieved = 0
else:
neff_achieved = sample.trace.shape[0] / np.max(taus)
print('Ran for ', nsamp*thin, ' steps, achieved ', neff_achieved, ' independent samples')
sys.__stdout__.flush()
if neff_achieved >= args.neff:
break
else:
thin *= 2
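# Hedged usage note (not part of the original file): a typical invocation; the input file
# is expected to have columns time / value / error, as read into model above:
#   python run_carma_pack.py --input lightcurve.txt --output chain.txt --p 3 --q 2 --neff 1000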
| 34.015625
| 131
| 0.635278
| 291
| 2,177
| 4.628866
| 0.408935
| 0.046771
| 0.088344
| 0.046771
| 0.158872
| 0.118782
| 0.095026
| 0
| 0
| 0
| 0
| 0.013921
| 0.208085
| 2,177
| 63
| 132
| 34.555556
| 0.767401
| 0.009187
| 0
| 0.046512
| 0
| 0
| 0.13961
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.162791
| 0
| 0.162791
| 0.046512
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd9402b8557bc8fee0baeb9f728d3c332668ae1e
| 2,240
|
py
|
Python
|
test/http2_test/http2_server_health_check.py
|
miyachu/grpc
|
a06ea3c3162c10ff90a1578bf82bbbff95dc799d
|
[
"BSD-3-Clause"
] | 2
|
2021-09-10T00:20:13.000Z
|
2021-11-16T11:27:19.000Z
|
test/http2_test/http2_server_health_check.py
|
miyachu/grpc
|
a06ea3c3162c10ff90a1578bf82bbbff95dc799d
|
[
"BSD-3-Clause"
] | null | null | null |
test/http2_test/http2_server_health_check.py
|
miyachu/grpc
|
a06ea3c3162c10ff90a1578bf82bbbff95dc799d
|
[
"BSD-3-Clause"
] | 1
|
2020-11-04T04:19:45.000Z
|
2020-11-04T04:19:45.000Z
|
# Copyright 2017, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import hyper
import sys
# Utility to healthcheck the http2 server. Used when starting the server to
# verify that the server is live before tests begin.
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--server_host', type=str, default='localhost')
parser.add_argument('--server_port', type=int, default=8080)
args = parser.parse_args()
server_host = args.server_host
server_port = args.server_port
conn = hyper.HTTP20Connection('%s:%d' % (server_host, server_port))
conn.request('POST', '/grpc.testing.TestService/UnaryCall')
resp = conn.get_response()
if resp.headers.get('grpc-encoding') is None:
sys.exit(1)
else:
sys.exit(0)
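# Hypothetical invocation (host/port values are illustrative):
#   python http2_server_health_check.py --server_host localhost --server_port 8080
# Exits 0 when the response carries a grpc-encoding header, 1 otherwise.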
| 44.8
| 75
| 0.766964
| 320
| 2,240
| 5.30625
| 0.5375
| 0.023557
| 0.020024
| 0.027091
| 0.108363
| 0.080094
| 0.080094
| 0.080094
| 0.080094
| 0.080094
| 0
| 0.006926
| 0.162054
| 2,240
| 49
| 76
| 45.714286
| 0.897709
| 0.7125
| 0
| 0
| 0
| 0
| 0.163132
| 0.057096
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.176471
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd9452c189452f40fb4e6f56c43cb761ffc48203
| 3,494
|
py
|
Python
|
server/droidio/demands/test/test_views.py
|
lucasOlivio/droid.io
|
945b1452eaaa73b4d7f9d1d1a35eaa2900e97e96
|
[
"MIT"
] | null | null | null |
server/droidio/demands/test/test_views.py
|
lucasOlivio/droid.io
|
945b1452eaaa73b4d7f9d1d1a35eaa2900e97e96
|
[
"MIT"
] | null | null | null |
server/droidio/demands/test/test_views.py
|
lucasOlivio/droid.io
|
945b1452eaaa73b4d7f9d1d1a35eaa2900e97e96
|
[
"MIT"
] | null | null | null |
from django.urls import reverse
from rest_framework.test import APITestCase
from rest_framework import status
from nose.tools import eq_
from faker import Faker
import factory
from ..models import Demand
from .factories import DemandFactory
from ..serializers import DemandSerializer
from droidio.users.test.factories import UserFactory
fake = Faker()
class TestDemandListTestCase(APITestCase):
""" Tests /demands list operations.
"""
def setUp(self):
self.user = UserFactory()
self.client.force_authenticate(user=self.user)
self.url = reverse("demands-list")
self.demand_data = factory.build(dict, FACTORY_CLASS=DemandFactory)
def test_post_request_with_no_data_fails(self):
response = self.client.post(self.url, {})
eq_(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_post_request_with_valid_data_succeeds(self):
response = self.client.post(self.url, self.demand_data)
eq_(response.status_code, status.HTTP_201_CREATED)
demand = Demand.objects.get(pk=response.data.get("id"))
eq_(demand.description, self.demand_data.get("description"))
def test_get_list_returns_only_my_demands(self):
# Set testing demands
DemandFactory(user_created=self.user)
user2 = UserFactory()
DemandFactory(user_created=user2)
# Test response and results
response = self.client.get(self.url)
eq_(response.status_code, status.HTTP_200_OK)
demands = Demand.objects.filter(user_created=self.user)
serializer = DemandSerializer(demands, many=True)
eq_(response.data["count"], 1)
eq_(response.data["results"], serializer.data)
class TestDemandDetailTestCase(APITestCase):
""" Tests /demands detail operations.
"""
def setUp(self):
self.user = UserFactory()
self.client.force_authenticate(user=self.user)
self.demand = DemandFactory(user_created=self.user)
self.url = reverse("demands-detail", kwargs={"pk": self.demand.pk})
def test_get_request_returns_a_given_demand(self):
response = self.client.get(self.url)
eq_(response.status_code, status.HTTP_200_OK)
def test_patch_request_updates_a_demand(self):
new_description = fake.text()
payload = {"description": new_description}
response = self.client.patch(self.url, payload)
eq_(response.status_code, status.HTTP_200_OK)
demand = Demand.objects.get(pk=self.demand.id)
eq_(demand.description, new_description)
def test_put_request_updates_a_demand(self):
payload = factory.build(dict, FACTORY_CLASS=DemandFactory)
response = self.client.put(self.url, payload)
eq_(response.status_code, status.HTTP_200_OK)
demand = Demand.objects.get(pk=self.demand.id)
eq_(demand.description, payload["description"])
def test_set_demand_completed(self):
custom_action = reverse("demands-set-completed", kwargs={"pk": self.demand.pk})
response = self.client.post(custom_action)
eq_(response.status_code, status.HTTP_200_OK)
demand = Demand.objects.get(pk=self.demand.id)
eq_(demand.is_completed, True)
def test_delete_request_deletes_a_demand(self):
response = self.client.delete(self.url)
eq_(response.status_code, status.HTTP_204_NO_CONTENT)
demand = Demand.objects.filter(pk=self.demand.id).first()
eq_(demand, None)
| 34.94
| 87
| 0.704637
| 440
| 3,494
| 5.361364
| 0.222727
| 0.042391
| 0.061043
| 0.067825
| 0.476473
| 0.369648
| 0.302671
| 0.27766
| 0.246291
| 0.246291
| 0
| 0.009551
| 0.190899
| 3,494
| 99
| 88
| 35.292929
| 0.824903
| 0.034917
| 0
| 0.235294
| 0
| 0
| 0.029184
| 0.006254
| 0
| 0
| 0
| 0
| 0
| 1
| 0.147059
| false
| 0
| 0.147059
| 0
| 0.323529
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd94f0230de4472e8494e2e5c028fe0a163fe4d9
| 422
|
py
|
Python
|
leetcode/python/check_in_n_and_its_double_exists.py
|
subhadig/leetcode
|
9151ea49c342efa228cf82de72736c3445bbfef2
|
[
"Unlicense"
] | null | null | null |
leetcode/python/check_in_n_and_its_double_exists.py
|
subhadig/leetcode
|
9151ea49c342efa228cf82de72736c3445bbfef2
|
[
"Unlicense"
] | null | null | null |
leetcode/python/check_in_n_and_its_double_exists.py
|
subhadig/leetcode
|
9151ea49c342efa228cf82de72736c3445bbfef2
|
[
"Unlicense"
] | null | null | null |
# https://leetcode.com/explore/learn/card/fun-with-arrays/527/searching-for-items-in-an-array/3250/
# time: O(n)
# space: O(n)
from typing import List  # needed for the List[int] annotation below
class Solution:
def checkIfExist(self, arr: List[int]) -> bool:
if not arr:
return False
nums = set()
for x in arr:
if 2*x in nums or x/2 in nums:
return True
else:
nums.add(x)
return False
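# Illustrative checks (sample cases from the linked problem):
#   Solution().checkIfExist([10, 2, 5, 3])  -> True   (5 * 2 == 10)
#   Solution().checkIfExist([3, 1, 7, 11])  -> False  (no element is double another)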
| 26.375
| 99
| 0.533175
| 61
| 422
| 3.688525
| 0.688525
| 0.017778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032727
| 0.348341
| 422
| 15
| 100
| 28.133333
| 0.785455
| 0.28436
| 0
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd95ba5b789b57d2c18cb6c697a4bed1400af969
| 2,743
|
py
|
Python
|
cloud_functions/trigger-monitor-dag-function/main_test.py
|
google/feedloader
|
f6a25569bc3d7d4ee326961fd3b01e45fc3858e4
|
[
"Apache-2.0"
] | 5
|
2021-02-15T12:49:12.000Z
|
2022-01-12T06:28:41.000Z
|
cloud_functions/trigger-monitor-dag-function/main_test.py
|
google/feedloader
|
f6a25569bc3d7d4ee326961fd3b01e45fc3858e4
|
[
"Apache-2.0"
] | null | null | null |
cloud_functions/trigger-monitor-dag-function/main_test.py
|
google/feedloader
|
f6a25569bc3d7d4ee326961fd3b01e45fc3858e4
|
[
"Apache-2.0"
] | 4
|
2021-02-16T17:28:00.000Z
|
2021-06-18T15:27:52.000Z
|
# coding=utf-8
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the Trigger DAG Cloud Function."""
import os
from unittest import mock
from absl.testing import parameterized
import main
_TEST_CLIENT_ID = '12345.apps.googleusercontent.com'
_TEST_DAG_NAME = 'dag-name'
_TEST_WEBSERVER_ID = 'https://12345-tp.appspot.com'
@mock.patch.dict(
os.environ, {
'CLIENT_ID': _TEST_CLIENT_ID,
'DAG_NAME': _TEST_DAG_NAME,
'WEBSERVER_ID': _TEST_WEBSERVER_ID,
})
class TriggerMonitorDagFunctionTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.event = {
'bucket': 'feed-bucket',
'name': 'filename',
'metageneration': 'test-metageneration',
'timeCreated': '0',
'updated': '0'
}
self.context = mock.create_autospec('google.cloud.functions.Context')
self.context.event_id = '12345'
self.context.event_type = 'gcs-event'
self.context.timestamp = '2021-06-05T08:16:15.183Z'
@mock.patch.object(
main,
'make_iap_request',
side_effect=Exception('Bad request: JSON body error'))
def test_json_body_error(self, _):
trigger_event = None
with self.assertRaises(Exception) as context:
main.trigger_dag(trigger_event, self.context)
self.assertIn('Bad request: JSON body error', str(context.exception))
@mock.patch.object(
main,
'make_iap_request',
side_effect=Exception('Error in IAP response: unauthorized'))
def test_iap_response_error(self, _):
trigger_event = {'file': 'some-gcs-file'}
with self.assertRaises(Exception) as context:
main.trigger_dag(trigger_event, self.context)
self.assertIn('Error in IAP response', str(context.exception))
@mock.patch.object(main, 'make_iap_request', autospec=True)
def test_api_endpoint(self, make_iap_request_mock):
main.trigger_dag(self.event, self.context)
make_iap_request_mock.assert_called_once_with(
'https://12345-tp.appspot.com/api/experimental/dags/dag-name/dag_runs',
'12345.apps.googleusercontent.com',
method='POST',
json={
'conf': self.event,
'replace_microseconds': 'false'
},
)
| 31.528736
| 79
| 0.696318
| 360
| 2,743
| 5.138889
| 0.430556
| 0.041622
| 0.037838
| 0.030811
| 0.237297
| 0.188649
| 0.188649
| 0.188649
| 0.188649
| 0.188649
| 0
| 0.02382
| 0.188844
| 2,743
| 86
| 80
| 31.895349
| 0.80764
| 0.22202
| 0
| 0.175439
| 0
| 0.017544
| 0.263357
| 0.055792
| 0
| 0
| 0
| 0
| 0.087719
| 1
| 0.070175
| false
| 0
| 0.070175
| 0
| 0.157895
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd99b6d2cdd53e9871b02f6e724fb47ac13372e3
| 12,973
|
py
|
Python
|
programs/loadsheet/loadsheet.py
|
admin-db/OnboardingTools
|
0f9d363d461df8c01e99157386338633828f5f92
|
[
"Apache-2.0"
] | 3
|
2021-04-24T14:39:50.000Z
|
2021-07-20T17:11:19.000Z
|
programs/loadsheet/loadsheet.py
|
admin-db/OnboardingTools
|
0f9d363d461df8c01e99157386338633828f5f92
|
[
"Apache-2.0"
] | 2
|
2020-07-22T21:34:33.000Z
|
2021-01-14T19:26:12.000Z
|
programs/loadsheet/loadsheet.py
|
admin-db/OnboardingTools
|
0f9d363d461df8c01e99157386338633828f5f92
|
[
"Apache-2.0"
] | 2
|
2020-07-16T03:34:35.000Z
|
2020-07-22T21:18:12.000Z
|
#Copyright 2020 DB Engineering
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
__version__ = '0.0.3'
__author__ = 'Trevor S., Shane S., Andrew K.'
# Standard Packages
import os
import sys
import string
from typing import Optional
from typing import Union
from typing import Dict
from typing import List
from typing import Any
# Open-source Packages
import openpyxl
import pandas as pd
sys.path.append('../')
# Proprietary Packages
from rules.rules import Rules
# Module GLOBALS and CONSTRAINTS
# 01132021: bms specific
_REQ_INPUT_HEADERS_BMS = [
'objectid',
'deviceid',
'objectname'
]
# 01132021: general
_REQ_INPUT_HEADERS = _REQ_INPUT_HEADERS_BMS + ['units', 'objecttype']
_REQ_OUTPUT_HEADERS = [
'required',
'manuallymapped',
'building',
'generaltype',
'typename',
'assetname',
'fullassetpath',
'standardfieldname'
]
class Loadsheet:
"""
Loadsheet Library
Purpose: The Loadsheet Library (loadsheet.py) is a proprietary class
used to load a loadsheet Excel file into the tool
Args: data - the list of dictionaries making up the loadsheet file
Keys are column names, values are column values
Returns: Loadsheet object
Usage Example(s):
1) From records:
data = {'coln1':[1,2,3], 'coln2':['a','b','c']}
ls = Loadsheet(data)
2) From loadsheet excel file*:
ls = Loadsheet.from_loadsheet(<loadsheet_file_path>)
3) From BMS file*:
ls = Loadsheet.from_bms(<bms_file_path>)
* - By default, expects header row at top
Dependencies:
Standard
- os
- sys
Open-source
- openpyxl
- yaml
- typing
Proprietary
- rules
TODOs:
- ini_config not used but will be added in future
- all rows will have same headers, so add header check
"""
def __init__(
self,
data: List[Dict[str,Any]],
std_header_map: Dict[str,str],
#has_normalized_fields: bool= False,
):
# 01132021: moved this check to import format specific method(s)
# currently a quick fix for a much broader update refactor
# that needs to be done
# assert Loadsheet._is_valid_headers(data[0].keys(), has_normalized_fields) == True,\
# "[ERROR] loadsheet headers:\n {} \ndo not match configuration \
# headers:\n {}".format(', '.join(data[0].keys()),', '.join(
# *[_REQ_INPUT_HEADERS+_REQ_OUTPUT_HEADERS if has_normalized_fields
# else _REQ_INPUT_HEADERS]))
# # end by sypks
self._data = data
self._std_header_map = std_header_map
@classmethod
def from_loadsheet(
cls,
filepath: str,
has_normalized_fields: bool= False
):
"""
Initializes loadsheet object from existing loadsheet Excel file
args:
filepath - absolute filepath to loadsheet excel file
has_normalized_fields - flag if has normalized fields
returns:
loadsheet object
"""
# hardcode header rows as [0] for initial release
valid_file_types = {
'.xlsx':'excel',
'.csv':'bms_file'
}
file_type = os.path.splitext(filepath)[1]
        if file_type == '.xlsx':
            df = pd.read_excel(filepath, header=0)
        elif file_type == '.csv':
            df = pd.read_csv(filepath, header=0)
        else:
            raise ValueError(
                "Unsupported file type '{}'; expected one of: {}".format(
                    file_type, ', '.join(valid_file_types)))
std_header_map = Loadsheet._to_std_header_mapping(
df.columns)
df.columns = std_header_map.keys()
# 01132021: check to ensure that document has required headers
if not Loadsheet._is_valid_headers(
df.columns,
_REQ_INPUT_HEADERS,
has_normalized_fields
):
raise RuntimeError("[ERROR] Loadsheet headers:\n {} \nDoes not match "
+ "configuration headers:\n {}".format(', '.join(df.columns.tolist()),', '.join(
*[_REQ_INPUT_HEADERS+_REQ_OUTPUT_HEADERS if has_normalized_fields
else _REQ_INPUT_HEADERS])))
return cls(
df.to_dict('records'),
std_header_map
)
# end by sypks
@classmethod
def from_bms(
cls,
filepath: str
):
"""
Initializes loadsheet object from existing BMS file
args:
filepath - absolute filepath to BMS file
returns:
loadsheet object
"""
        # hardcode header as row 0 for initial release
df = pd.read_csv(filepath, header= 0)
std_header_map = Loadsheet._to_std_header_mapping(
df.columns)
df.columns = std_header_map.keys()
# 01132021: check to ensure that document has required headers
if not Loadsheet._is_valid_headers(
df.columns,
_REQ_INPUT_HEADERS_BMS
):
raise RuntimeError("[ERROR] BMS headers:\n {} \nDoes not match "
"configuration headers:\n {}".format(', '.join(df.columns.tolist()),', '.join(
_REQ_INPUT_HEADERS_BMS)))
return cls(
df.to_dict('records'),
std_header_map
)
# end by sypks
    def _rename_to_std(self, df):
        df.columns = self._std_header_map.values()
@staticmethod
def _to_std_headers(headers: List[str]) -> List[str]:
'''
Removes all punctuation characters, spaces, and converts to all
lowercase characters. Returns standardized headers to be used
internally
'''
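        # Illustrative example (hypothetical headers):
        #   ['Object ID', 'Device-Id'] -> ['objectid', 'deviceid']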
delete_dict = {sp_char: '' for sp_char in string.punctuation}
delete_dict[' '] = '' # space char not in sp_char by default
trans_table = str.maketrans(delete_dict)
return [sh.translate(trans_table).lower() for sh in headers]
@staticmethod
def _is_valid_headers(
headers: List[str],
required_input_headers: List[str],
has_normalized_fields: bool= False
) -> bool:
'''
Checks column names from loadsheet or BMS file are valid as
defined in _REQ_INPUT_HEADERS and _REQ_OUTPUT_HEADERS
'''
trans_headers = Loadsheet._to_std_headers(headers)
if has_normalized_fields:
return set(required_input_headers+_REQ_OUTPUT_HEADERS) == \
set(required_input_headers+_REQ_OUTPUT_HEADERS).intersection(
set(trans_headers))
else:
return set(required_input_headers) == \
set(required_input_headers).intersection(set(trans_headers))
@staticmethod
def _to_std_header_mapping(
orig_headers: List[str]
) -> Dict[str,str]:
'''
        Creates a dict mapping from original headers to the standardized
        headers used internally
'''
std_headers = Loadsheet._to_std_headers(orig_headers)
return {std: orig for (std,orig) in zip(std_headers,orig_headers)}
def get_std_header(
self,
header: str
) -> str:
"""
Returns standardized header used internally based on the document
header passed in
"""
return self._std_header_map[header]
def get_data_row(
self,
row: int
) -> Dict[str, Any]:
pass
def get_data_row_generator(self):
pass
def export_to_loadsheet(self, output_filepath):
"""
exports data in Loadsheet object to excel file
args:
output_filepath - location and name of excel file output
"""
df = pd.DataFrame.from_records(self._data)
df.columns = [self._std_header_map[c] for c in df.columns]
df.to_excel(output_filepath, index=False)
def validate(
self,
non_null_fields: Optional[List[str]]= None
):
""" Perform loadsheet validation. It will not validate the
contents of the loadsheet, in terms of validity of entries, but
will validate that all required fields are filled in and that
no data is missing; the representations layer will handle the
ontology checks.
Checks:
1) Required is always in {YES, NO}
2) non-null fields are filled in where required is YES
3) there are no duplicate fullAssetPath-standardFieldName pairs
Args:
non_null_fields - fields that are checked to have values in step 2
by default set to None to use the following:
'building',
'generalType',
'assetName',
'fullAssetPath',
'standardFieldName',
'deviceId',
'objectType',
'objectId',
'units'
Returns:
None, but throws errors if any issues encountered
"""
        # non_null_fields arg included for a future user-defined check to
        # be implemented. The initial commit does not implement this feature,
        # so we use the hardcoded non_null_fields below.
if non_null_fields is None:
non_null_fields = [
'building',
'generaltype',
'assetname',
'fullassetpath',
'standardfieldname',
'deviceid',
'objecttype',
'objectid',
'units'
]
        # convert self._data to pd.DataFrame (we will transition to
        # using only dataframes internally in a future update)
df = pd.DataFrame.from_records(self._data)
#required is always in [YES, NO]
assert self._ensure_required_correct(df), "Unacceptable values in required column"
#check for null field_details
null_fields = self._find_null_fields(df, non_null_fields)
assert len(null_fields) == 0, '\n'.join(
["There are rows with missing fields:"]+
[f"\t\t{uid + 2}" for uid in null_fields]
)
#check for duplicate fullAssetPath-standardFieldName combos
repeat_uid = self._get_duplicate_asset_fields(df)
assert len(repeat_uid) == 0, '\n'.join(
["There are duplicated asset-field combinations:"]+
[f"\t\t{uid}" for uid in repeat_uid]
)
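    # Minimal usage sketch (the file path is hypothetical):
    #   ls = Loadsheet.from_loadsheet('loadsheet.xlsx', has_normalized_fields=True)
    #   ls.validate()  # raises AssertionError listing any offending rows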
def validate_without_errors(
self,
non_null_fields: Optional[List[str]]= None
):
"""
Perform loadsheet validation as in validate
but prints error messages instead of throwing errors
"""
        # non_null_fields arg included for a future user-defined check to
        # be implemented. The initial commit does not implement this feature,
        # so we use the hardcoded non_null_fields below.
if non_null_fields is None:
non_null_fields = [
'building',
'generaltype',
'assetname',
'fullassetpath',
'standardfieldname',
'deviceid',
'objecttype',
'objectid',
'units'
]
        # convert self._data to pd.DataFrame (we will transition to
        # using only dataframes internally in a future update)
df = pd.DataFrame.from_records(self._data)
#required is always in [YES, NO]
if not self._ensure_required_correct(df):
print("[ERROR]\tUnacceptable values in required column")
#check for null field_details
null_fields = self._find_null_fields(df, non_null_fields)
if len(null_fields) > 0:
print(f"[ERROR]\tThere are rows with missing fields:")
for uid in null_fields:
print(f"\t\t{uid}")
#check for duplicate fullAssetPath-standardFieldName combos
repeat_uid = self._get_duplicate_asset_fields(df)
if len(repeat_uid) > 0:
print(f"[ERROR]\tThere are duplicated asset-field combinations:")
for uid in repeat_uid:
print(f"\t\t{uid}")
@staticmethod
def _ensure_required_correct(
data: pd.DataFrame
) -> bool:
'''
checks that required is in {YES, NO}
'''
return len(data[~data['required'].isin(['YES', 'NO'])]) == 0
@staticmethod
def _find_null_fields(
data: pd.DataFrame,
non_null_fields: list
) -> List[str]:
'''
Checks for null fields in any row marked required = YES
'''
needed_columns = ['required']
needed_columns.extend(non_null_fields)
relevant_df = data[needed_columns]
relevant_df = relevant_df[relevant_df['required'] == 'YES']
null_data = relevant_df[relevant_df.isnull().any(axis=1)]
return null_data.index.tolist()
@staticmethod
def _get_duplicate_asset_fields(
data: pd.DataFrame
) -> List[str]:
'''
finds and returns a list of duplicate FullAssetPath-StandardFieldName pairs
'''
data['uid'] = data['fullassetpath'] + ' ' + data['standardfieldname']
df = data[data['required'] == 'YES']
counts = df['uid'].value_counts()
df_counts = pd.DataFrame({'uid':counts.index, 'amt':counts.values})
repeat_uid = df_counts[df_counts['amt'] > 1]['uid'].tolist()
return repeat_uid
def apply_rules(
self,
rule_file: Dict
) -> None:
"""
Apply rules to the dataset. Will ignore any field where
manuallyMapped is set to YES.
args:
- rule_file: path to the rule file
returns: N/A
Note - See rules/rules.py for further information
"""
r = Rules(rule_file)
for row in self._data:
#add output headers
for output_header in _REQ_OUTPUT_HEADERS:
if output_header not in row.keys():
row[output_header] = ""
self._std_header_map[output_header] = output_header
#add manuallyMapped
            if 'manuallymapped' not in row.keys():
row['manuallymapped'] = ''
self._std_header_map['manuallymapped'] = "manuallymapped"
#skip manuallyMapped rows
if row['manuallymapped'] == 'YES':
continue
#apply rules
else:
r.ApplyRules(row)
if __name__ == '__main__':
k = Loadsheet.from_bms(r'C:\Users\ShaneSpencer\Downloads\OnboardingTool-master\OnboardingTool-master\resources\bms_exports\alc\US-MTV-1395.csv')
| 27.427061
| 145
| 0.697757
| 1,751
| 12,973
| 4.974872
| 0.215306
| 0.029847
| 0.023878
| 0.011021
| 0.377339
| 0.302262
| 0.270577
| 0.253473
| 0.253473
| 0.243256
| 0
| 0.008015
| 0.201727
| 12,973
| 473
| 145
| 27.427061
| 0.83314
| 0.475064
| 0
| 0.391489
| 0
| 0.004255
| 0.157294
| 0.018974
| 0
| 0
| 0
| 0.002114
| 0.012766
| 1
| 0.07234
| false
| 0.008511
| 0.046809
| 0
| 0.165957
| 0.021277
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dd9c212b2612a151f4e10e08866ba944cee12a2b
| 2,883
|
py
|
Python
|
openwater/zone/model.py
|
jeradM/openwater
|
740b7e76622a1ee909b970d9e5c612a840466cec
|
[
"MIT"
] | null | null | null |
openwater/zone/model.py
|
jeradM/openwater
|
740b7e76622a1ee909b970d9e5c612a840466cec
|
[
"MIT"
] | null | null | null |
openwater/zone/model.py
|
jeradM/openwater
|
740b7e76622a1ee909b970d9e5c612a840466cec
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from datetime import datetime
from typing import TYPE_CHECKING, Dict, Any, List, Optional
if TYPE_CHECKING:
from openwater.core import OpenWater
class ZoneRun:
def __init__(self, id: int, zone_id: int, start: datetime, duration: int):
self.id = id
self.zone_id = zone_id
self.start = start
self.duration = duration
def to_dict(self) -> dict:
return {
"id": self.id,
"zone_id": self.zone_id,
"start": self.start,
"duration": self.duration,
}
def to_db(self) -> dict:
return self.to_dict()
class BaseZone(ABC):
def __init__(
self,
ow: "OpenWater",
id: int,
name: str,
zone_type: str,
is_master: bool,
attrs: dict,
open_offset: int = 0,
close_offset: int = 0,
last_run: Optional[ZoneRun] = None,
):
self._ow = ow
self.id = id
self.name = name
self.zone_type = zone_type
self.is_master = is_master
self.attrs = attrs
self.open_offset = open_offset
self.close_offset = close_offset
self.last_run = last_run
self.master_zones: Optional[List[BaseZone]] = None
@classmethod
def of(cls, ow: "OpenWater", data: Dict[str, Any]):
return cls(
ow=ow,
id=data.get("id"),
name=data["name"],
zone_type=data["zone_type"],
is_master=data["is_master"],
open_offset=data["open_offset"],
close_offset=data["close_offset"],
attrs=data["attrs"],
)
def to_dict(self):
return {
"id": self.id,
"name": self.name,
"zone_type": self.zone_type,
"is_master": self.is_master,
"open_offset": self.open_offset,
"close_offset": self.close_offset,
"open": self.is_open(),
"attrs": dict(self.attrs, **self.extra_attrs),
"last_run": self.last_run,
"master_zones": self.master_zones,
}
def to_db(self):
return {
"id": self.id,
"name": self.name,
"zone_type": self.zone_type,
"is_master": self.is_master,
"open": self.is_open(),
"attrs": dict(self.attrs, **self.extra_attrs),
}
@abstractmethod
def is_open(self) -> bool:
pass
@abstractmethod
async def open(self) -> None:
pass
@abstractmethod
async def close(self) -> None:
pass
@abstractmethod
def get_zone_type(self) -> str:
pass
@property
def extra_attrs(self) -> dict:
return {}
def __eq__(self, other):
return self.id == other.id
def __hash__(self):
return hash(self.id)
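# Minimal concrete sketch of the ABC above (names are illustrative, not part of
# this project):
#   class ToggleZone(BaseZone):
#       def is_open(self) -> bool: return getattr(self, "_open", False)
#       async def open(self) -> None: self._open = True
#       async def close(self) -> None: self._open = False
#       def get_zone_type(self) -> str: return "toggle"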
| 25.289474
| 78
| 0.539369
| 340
| 2,883
| 4.347059
| 0.164706
| 0.054127
| 0.032476
| 0.028417
| 0.159675
| 0.159675
| 0.159675
| 0.159675
| 0.159675
| 0.159675
| 0
| 0.00106
| 0.34582
| 2,883
| 113
| 79
| 25.513274
| 0.782609
| 0
| 0
| 0.270833
| 0
| 0
| 0.069719
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0.041667
| 0.041667
| 0.083333
| 0.270833
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dda05eca52f0bd879e75366f591fdb92e3e9abbd
| 855
|
py
|
Python
|
tests/test_views.py
|
pennlabs/django-shortener
|
a8f362863d4d8f13916e9e924ed316384f588373
|
[
"MIT"
] | 3
|
2018-11-04T15:46:01.000Z
|
2020-01-06T13:49:46.000Z
|
tests/test_views.py
|
pennlabs/shortener
|
a8f362863d4d8f13916e9e924ed316384f588373
|
[
"MIT"
] | 1
|
2019-07-30T04:31:19.000Z
|
2019-07-30T04:31:19.000Z
|
tests/test_views.py
|
pennlabs/shortener
|
a8f362863d4d8f13916e9e924ed316384f588373
|
[
"MIT"
] | 2
|
2021-02-22T18:12:27.000Z
|
2021-09-16T18:51:47.000Z
|
import hashlib
from django.test import TestCase
from django.urls import reverse
from shortener.models import Url
class RedirectViewTestCase(TestCase):
def setUp(self):
self.redirect = "https://pennlabs.org"
self.url, _ = Url.objects.get_or_create(long_url=self.redirect)
def test_exists(self):
try:
hashed = hashlib.sha3_256(self.redirect.encode("utf-8")).hexdigest()
except AttributeError:
hashed = hashlib.sha256(self.redirect.encode("utf-8")).hexdigest()
response = self.client.get(reverse("shortener:index", args=[hashed[:5]]))
self.assertRedirects(response, self.redirect, fetch_redirect_response=False)
def test_no_exists(self):
response = self.client.get(reverse("shortener:index", args=["abcd"]))
self.assertEqual(response.status_code, 404)
| 34.2
| 84
| 0.691228
| 104
| 855
| 5.576923
| 0.490385
| 0.103448
| 0.062069
| 0.072414
| 0.265517
| 0.265517
| 0.158621
| 0.158621
| 0
| 0
| 0
| 0.018625
| 0.183626
| 855
| 24
| 85
| 35.625
| 0.812321
| 0
| 0
| 0
| 0
| 0
| 0.074854
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 1
| 0.166667
| false
| 0
| 0.222222
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dda073c654623fd4431b83697b75b0c9003f460a
| 1,758
|
py
|
Python
|
Models/Loss/__init__.py
|
bobo0810/classification
|
b27397308c5294dcc30a5aaddab4692becfc45d3
|
[
"MIT"
] | null | null | null |
Models/Loss/__init__.py
|
bobo0810/classification
|
b27397308c5294dcc30a5aaddab4692becfc45d3
|
[
"MIT"
] | null | null | null |
Models/Loss/__init__.py
|
bobo0810/classification
|
b27397308c5294dcc30a5aaddab4692becfc45d3
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from timm.loss import LabelSmoothingCrossEntropy
from pytorch_metric_learning import losses
class create_class_loss(nn.Module):
"""
    Standard classification - loss function entry point
"""
def __init__(self, name):
super(create_class_loss, self).__init__()
assert name in ["cross_entropy", "label_smooth"]
self.loss = self.init_loss(name)
def forward(self, predict, target):
return self.loss(predict, target)
def init_loss(self, name):
"""
        Standard classification
"""
loss_dict = {
"cross_entropy": nn.CrossEntropyLoss,
"label_smooth": LabelSmoothingCrossEntropy,
}
loss = loss_dict[name]()
return loss
class create_metric_loss(nn.Module):
"""
    Metric learning - loss function entry point
"""
def __init__(self, name, num_classes, embedding_size):
"""
        name: loss function name
        num_classes: number of classes
        embedding_size: embedding feature dimension
"""
super(create_metric_loss, self).__init__()
assert name in ["cosface", "arcface", "subcenter_arcface", "circleloss"]
self.loss = self.init_loss(name, num_classes, embedding_size)
def forward(self, predict, target, hard_tuples):
return self.loss(predict, target, hard_tuples)
def init_loss(self, name, num_classes, embedding_size):
loss_dict = {
"cosface": losses.CosFaceLoss,
"arcface": losses.ArcFaceLoss,
"subcenter_arcface": losses.SubCenterArcFaceLoss,
}
if name in loss_dict.keys():
loss = loss_dict[name](
num_classes=num_classes, embedding_size=embedding_size
)
elif name == "circleloss":
loss = losses.CircleLoss()
return loss
| 27.46875
| 80
| 0.609215
| 189
| 1,758
| 5.375661
| 0.285714
| 0.047244
| 0.047244
| 0.090551
| 0.347441
| 0.155512
| 0
| 0
| 0
| 0
| 0
| 0
| 0.288965
| 1,758
| 63
| 81
| 27.904762
| 0.8128
| 0.047213
| 0
| 0.105263
| 0
| 0
| 0.083439
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 1
| 0.157895
| false
| 0
| 0.105263
| 0.052632
| 0.421053
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dda2bd0af7d3de24450c99ea2968e3067f121da2
| 1,397
|
py
|
Python
|
VGG_GRU/TrainTestlist/Emotiw/getTraintest_Emotiw.py
|
XiaoYee/emotion_classification
|
6122e1b575bce5235169f155295b549a8f721ca1
|
[
"MIT"
] | 74
|
2018-06-29T06:46:33.000Z
|
2022-02-26T19:15:55.000Z
|
VGG_GRU/TrainTestlist/Emotiw/getTraintest_Emotiw.py
|
JIangjiang1108/emotion_classification
|
6122e1b575bce5235169f155295b549a8f721ca1
|
[
"MIT"
] | 6
|
2018-07-02T09:29:05.000Z
|
2020-01-30T14:21:26.000Z
|
VGG_GRU/TrainTestlist/Emotiw/getTraintest_Emotiw.py
|
JIangjiang1108/emotion_classification
|
6122e1b575bce5235169f155295b549a8f721ca1
|
[
"MIT"
] | 23
|
2018-06-29T12:52:40.000Z
|
2020-12-02T12:55:13.000Z
|
import os
import os.path as osp
import argparse
import random
parser = argparse.ArgumentParser(description='Emotiw dataset list producer')
args = parser.parse_args()
train = "/home/quxiaoye/disk/FR/Emotiw2018/data/Train_AFEW_all/Emotiw-faces"
test = "/home/quxiaoye/disk/FR/Emotiw2018/data/Val_AFEW/Emotiw-faces"
train_path = osp.join(train)
test_path = osp.join(test)
Face_category = open("./TrainTestlist/Emotiw/Emotiw_TRAIN.txt","w")
Face_category_test = open("./TrainTestlist/Emotiw/Emotiw_VAL.txt","w")
train_img_folders = os.listdir(train_path)
train_img_folders.sort()
for i in range(len(train_img_folders)):
path_folder = osp.join(train_path,train_img_folders[i])
emotion_folders = os.listdir(path_folder)
emotion_folders.sort()
for emotion_folder in emotion_folders:
path_write = osp.join(path_folder,emotion_folder)
Face_category.write(path_write+" "+train_img_folders[i]+"\n")
Face_category.close()
test_img_folders = os.listdir(test_path)
test_img_folders.sort()
for i in range(len(test_img_folders)):
path_folder = osp.join(test_path,test_img_folders[i])
emotion_folders = os.listdir(path_folder)
emotion_folders.sort()
for emotion_folder in emotion_folders:
path_write = osp.join(path_folder,emotion_folder)
Face_category_test.write(path_write+" "+test_img_folders[i]+"\n")
Face_category_test.close()
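# Each emitted list line has the form "<split_path>/<video>/<emotion_folder> <video>",
# i.e. a face-image folder path followed by its parent folder name as the label.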
| 29.723404
| 76
| 0.7602
| 207
| 1,397
| 4.835749
| 0.236715
| 0.0999
| 0.074925
| 0.035964
| 0.563437
| 0.511489
| 0.345654
| 0.345654
| 0.28971
| 0.28971
| 0
| 0.006515
| 0.120974
| 1,397
| 46
| 77
| 30.369565
| 0.808632
| 0
| 0
| 0.25
| 0
| 0
| 0.170365
| 0.144596
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06bb33b3d53b354d7a98d017485acac1da8698a5
| 1,127
|
py
|
Python
|
py/1081. Smallest Subsequence of Distinct Characters.py
|
longwangjhu/LeetCode
|
a5c33e8d67e67aedcd439953d96ac7f443e2817b
|
[
"MIT"
] | 3
|
2021-08-07T07:01:34.000Z
|
2021-08-07T07:03:02.000Z
|
py/1081. Smallest Subsequence of Distinct Characters.py
|
longwangjhu/LeetCode
|
a5c33e8d67e67aedcd439953d96ac7f443e2817b
|
[
"MIT"
] | null | null | null |
py/1081. Smallest Subsequence of Distinct Characters.py
|
longwangjhu/LeetCode
|
a5c33e8d67e67aedcd439953d96ac7f443e2817b
|
[
"MIT"
] | null | null | null |
# https://leetcode.com/problems/smallest-subsequence-of-distinct-characters/
# Return the lexicographically smallest subsequence of s that contains all the
# distinct characters of s exactly once.
# Note: This question is the same as 316: https://leetcode.com/problems/remove-
# duplicate-letters/
################################################################################
# record last postion of each char
# use stack and pop previous chars when i) new char is smaller and ii) we can add the popped char back later
class Solution:
def smallestSubsequence(self, s: str) -> str:
last_pos = {}
for idx, char in enumerate(s):
last_pos[char] = idx
stack = []
for idx, char in enumerate(s):
if char not in stack:
# pop the previous chars if the new char is smaller
# but only when we can add the popped char back: idx < last_pos[popped_char]
while stack and char < stack[-1] and idx < last_pos[stack[-1]]:
stack.pop()
stack.append(char)
return ''.join(stack)
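# Illustrative check (sample case from the linked problem):
#   Solution().smallestSubsequence("cbacdcbc") -> "acdb"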
| 38.862069
| 108
| 0.585626
| 146
| 1,127
| 4.486301
| 0.472603
| 0.042748
| 0.048855
| 0.073282
| 0.143511
| 0.143511
| 0.076336
| 0
| 0
| 0
| 0
| 0.006053
| 0.267081
| 1,127
| 28
| 109
| 40.25
| 0.786925
| 0.489796
| 0
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06be7d0ae668828822753247461cfec9b2e4f3d3
| 675
|
py
|
Python
|
kpm/commands/push.py
|
ericchiang/kpm
|
3653b1dba8359f086a6a21d3a5003e80a46083a7
|
[
"Apache-2.0"
] | 121
|
2016-08-05T17:54:27.000Z
|
2022-02-21T14:21:59.000Z
|
kpm/commands/push.py
|
ericchiang/kpm
|
3653b1dba8359f086a6a21d3a5003e80a46083a7
|
[
"Apache-2.0"
] | 82
|
2016-08-07T01:42:41.000Z
|
2017-05-05T17:35:45.000Z
|
kpm/commands/push.py
|
ericchiang/kpm
|
3653b1dba8359f086a6a21d3a5003e80a46083a7
|
[
"Apache-2.0"
] | 30
|
2016-08-15T13:12:10.000Z
|
2022-02-21T14:22:00.000Z
|
from appr.commands.push import PushCmd as ApprPushCmd
from kpm.manifest_jsonnet import ManifestJsonnet
class PushCmd(ApprPushCmd):
default_media_type = 'kpm'
def _kpm(self):
self.filter_files = True
self.manifest = ManifestJsonnet()
ns, name = self.manifest.package['name'].split("/")
if not self.namespace:
self.namespace = ns
if not self.pname:
self.pname = name
self.package_name = "%s/%s" % (self.namespace, self.pname)
if not self.version or self.version == "default":
self.version = self.manifest.package['version']
self.metadata = self.manifest.metadata()
| 32.142857
| 66
| 0.638519
| 80
| 675
| 5.3125
| 0.4125
| 0.112941
| 0.063529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.251852
| 675
| 20
| 67
| 33.75
| 0.841584
| 0
| 0
| 0
| 0
| 0
| 0.04
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.125
| 0
| 0.3125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06bfd7cd414a9434b1f295b51c26d7407c29f08d
| 383
|
py
|
Python
|
problem_29/distinct_powers.py
|
plilja/project-euler
|
646d1989cf15e903ef7e3c6e487284847d522ec9
|
[
"Apache-2.0"
] | null | null | null |
problem_29/distinct_powers.py
|
plilja/project-euler
|
646d1989cf15e903ef7e3c6e487284847d522ec9
|
[
"Apache-2.0"
] | null | null | null |
problem_29/distinct_powers.py
|
plilja/project-euler
|
646d1989cf15e903ef7e3c6e487284847d522ec9
|
[
"Apache-2.0"
] | null | null | null |
from common.matrix import Matrix
def distinct_powers(n):
m = Matrix(n + 2, n + 2)
for i in range(2, n + 1):
m[i][2] = i ** 2
for j in range(3, n + 1):
m[i][j] = m[i][j - 1] * i
distinct_values = set()
for i in range(2, n + 1):
for j in range(2, n + 1):
distinct_values |= {m[i][j]}
return len(distinct_values)
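# Illustrative check from the problem statement: for 2 <= a, b <= 5 there are
# 15 distinct terms, so distinct_powers(5) == 15.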
| 21.277778
| 40
| 0.488251
| 68
| 383
| 2.691176
| 0.308824
| 0.043716
| 0.131148
| 0.147541
| 0.20765
| 0.153005
| 0.153005
| 0
| 0
| 0
| 0
| 0.052419
| 0.35248
| 383
| 17
| 41
| 22.529412
| 0.685484
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06c21871e9ad89697d51562c488828bc64f7390f
| 1,436
|
py
|
Python
|
1-python/python/transpose.py
|
Domin-Imperial/Domin-Respository
|
2e531aabc113ed3511f349107695847b5c4e4320
|
[
"MIT"
] | null | null | null |
1-python/python/transpose.py
|
Domin-Imperial/Domin-Respository
|
2e531aabc113ed3511f349107695847b5c4e4320
|
[
"MIT"
] | null | null | null |
1-python/python/transpose.py
|
Domin-Imperial/Domin-Respository
|
2e531aabc113ed3511f349107695847b5c4e4320
|
[
"MIT"
] | 1
|
2021-05-24T20:09:38.000Z
|
2021-05-24T20:09:38.000Z
|
# exercism exercise "transpose"
def transpose(lines: str) -> str:
input_list = lines.split('\n') # or splitlines
input_height = len(input_list)
input_width = get_input_width(input_list)
output_list = []
for colnum in range(input_width):
output = ''
for rownum in range(input_height):
output += get_char(input_list, rownum, colnum)
output = output.rstrip('*').replace('*', ' ')
output_list.append(output)
return '\n'.join(output_list)
def get_char(input_list, rownum, colnum):
# row = input_list[rownum]
# if colnum >= len(row):
# return '*'
# return row[colnum]
try:
return input_list[rownum][colnum]
except IndexError:
return '*'
def get_input_width(input_list):
# max_length = 0
# for i in range(len(input_list)):
# row = input_list[i]
# if len(row) > max_length:
# max_length = len(row)
# max_length = 0
# for row in input_list:
# if len(row) > max_length:
# max_length = len(row)
# list comprehension
# lengths = [len(x) for x in input_list] # list of ints
# max_length = max(lengths)
# generator expression
# an expression that acts like a sequence, that's not built yet
max_length = max((len(x) for x in input_list), default=0)
return max_length
print(transpose("AB\nC"))
print(repr(transpose("AB\nC").split('\n')))
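# Expected output of the two calls above: "AC\nB" and "['AC', 'B']"
# (the padded '*' in the short row is stripped before joining).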
| 27.615385
| 67
| 0.612117
| 193
| 1,436
| 4.373057
| 0.305699
| 0.138626
| 0.07109
| 0.074645
| 0.239336
| 0.187204
| 0.120853
| 0.075829
| 0.075829
| 0
| 0
| 0.00283
| 0.261838
| 1,436
| 51
| 68
| 28.156863
| 0.793396
| 0.373259
| 0
| 0
| 0
| 0
| 0.022779
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136364
| false
| 0
| 0
| 0
| 0.318182
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06c2aad42518b04959fd06448a4c2d1ef11c34fe
| 4,318
|
py
|
Python
|
core/models.py
|
mcflydesigner/innorussian
|
70bec97ad349f340bd66cd8234d94f8829540397
|
[
"MIT"
] | 1
|
2021-04-12T18:54:37.000Z
|
2021-04-12T18:54:37.000Z
|
core/models.py
|
mcflydesigner/InnoRussian
|
70bec97ad349f340bd66cd8234d94f8829540397
|
[
"MIT"
] | null | null | null |
core/models.py
|
mcflydesigner/InnoRussian
|
70bec97ad349f340bd66cd8234d94f8829540397
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.utils.timezone import now
from django.core.validators import FileExtensionValidator
from django.contrib.auth import get_user_model
from django.contrib.postgres.fields import ArrayField
from django.db.models import (Func, Value, CharField, IntegerField)
from .shortcuts import upload_to
"""
Models of core app.
The architecture is done in the following way.
A user accesses the content sequentially:
Category -> Subcategory -> List of words
"""
class Category(models.Model):
"""
The model for categories
"""
name = models.CharField('Name', max_length=55, unique=True)
# Here we use FileField instead of ImageField to allow only .svg extension for images.
picture = models.FileField('Picture', upload_to=upload_to('categories/pictures/'),
validators=[FileExtensionValidator(allowed_extensions=['svg'])])
class Meta:
verbose_name_plural = 'categories'
ordering = ['id']
def __str__(self):
return self.name + '(' + str(self.id) + ')'
class SubCategory(models.Model):
"""
The model for subcategories which are connected
with the corresponding categories. One subcategory can be connected
to different categories(many to many relationship).
"""
categoryId = models.ManyToManyField(Category)
name = models.CharField('Name', max_length=55, unique=True)
# Here we use FileField instead of ImageField to allow only .svg extension for images.
picture = models.FileField('Picture', upload_to=upload_to('subcategories/pictures/'),
validators=[FileExtensionValidator(allowed_extensions=['svg'])])
class Meta:
verbose_name_plural = 'subcategories'
ordering = ['id']
def __str__(self):
return self.name + '(' + str(self.id) + ')'
class TypesOfCard(models.TextChoices):
""" Each card must have a type for the convenience of the user(sorting) """
WORD = 'W', 'Word'
DIALOGUE = 'D', 'Dialogue'
SENTENCE = 'S', 'Sentence'
class Card(models.Model):
"""
Model for the cards with the content.
The card can be connected to different categories at
the same time(many to many relationship)
"""
subCategoryId = models.ManyToManyField(SubCategory)
content = models.TextField('Content')
# The card must have exactly one type out of TypesOfCard
type = models.CharField(max_length=1, choices=TypesOfCard.choices,
default=TypesOfCard.WORD)
# notes = models.CharField('Notes', blank=True, max_length=255)
# Pronunciation for the card is optional
pronunciation = models.FileField('Pronunciation', upload_to=upload_to('cards/sounds/'),
validators=[FileExtensionValidator(allowed_extensions=['mp3'])],
null=True, blank=True)
# Translit of pronunciation is optional
translit_of_pronunciation = models.TextField('Translit of pronunciation', null=True, blank=True)
class Meta:
ordering = ['-pk']
def __str__(self):
return self.content + '(' + str(self.id) + ')'
class Favourite(models.Model):
"""
Model for user's favourite cards.
"""
card = models.ForeignKey(Card, on_delete=models.CASCADE)
owner = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
# For sorting by `default`
data_added = models.DateTimeField(default=now)
class Meta:
ordering = ['-data_added']
def __str__(self):
return 'card ' + str(self.card.id) + ' -> user ' + str(self.owner.id) + \
' (' + str(self.id) + ') '
class ArrayPosition(Func):
"""
    Class to work around one of Django's limitations.
    It is used for filtering the cards by the user's chosen sorting option.
"""
function = 'array_position'
def __init__(self, items, *expressions, **extra):
if isinstance(items[0], int):
base_field = IntegerField()
else:
base_field = CharField(max_length=max(len(i) for i in items))
first_arg = Value(list(items), output_field=ArrayField(base_field))
expressions = (first_arg,) + expressions
super().__init__(*expressions, **extra)
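# Hypothetical usage sketch (not part of this module): order a queryset by an
# explicit list of primary keys via Postgres array_position, using django.db.models.F:
#   Card.objects.annotate(pos=ArrayPosition(ids, F('id'))).order_by('pos')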
| 35.393443
| 101
| 0.652154
| 497
| 4,318
| 5.545272
| 0.32998
| 0.025399
| 0.014514
| 0.023222
| 0.260523
| 0.2373
| 0.211901
| 0.211901
| 0.211901
| 0.211901
| 0
| 0.003044
| 0.239231
| 4,318
| 121
| 102
| 35.68595
| 0.835921
| 0.215146
| 0
| 0.258065
| 0
| 0
| 0.076296
| 0.007499
| 0
| 0
| 0
| 0
| 0
| 1
| 0.080645
| false
| 0
| 0.112903
| 0.064516
| 0.693548
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06c7a3448e8983e9a265c812e501b174dd35b66d
| 5,821
|
py
|
Python
|
SegmentationAlgorithms/CBSMoT.py
|
JRose6/TrajLib
|
2a5749bf6e9517835801926d6a5e92564ef2c7f0
|
[
"Apache-2.0"
] | null | null | null |
SegmentationAlgorithms/CBSMoT.py
|
JRose6/TrajLib
|
2a5749bf6e9517835801926d6a5e92564ef2c7f0
|
[
"Apache-2.0"
] | null | null | null |
SegmentationAlgorithms/CBSMoT.py
|
JRose6/TrajLib
|
2a5749bf6e9517835801926d6a5e92564ef2c7f0
|
[
"Apache-2.0"
] | null | null | null |
import Distances as d
import pandas as pd
import numpy as np
class CBSmot:
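    # Clustering-Based Stops and Moves of Trajectories (CB-SMoT): a "stop" is a
    # maximal run of points staying within max_dist of a point for at least
    # min_time seconds; nearby stops get merged and short ones are discarded.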
nano_to_seconds = 1000000000
def count_neighbors(self, traj, position, max_dist):
neighbors = 0
yet = True
j = position + 1
while j < len(traj.index) and yet:
if d.Distances.calculate_two_point_distance(traj.iloc[position]['lat'],
traj.iloc[position]['lon'],
traj.iloc[j]['lat'],
traj.iloc[j]['lon']) < max_dist:
neighbors += 1
else:
yet = False
j += 1
return neighbors
def centroid(self, subtraj):
x = 0
y = 0
for index, row in subtraj.iterrows():
x += row['lat']
y += row['lon']
return [x/len(subtraj.index), y/len(subtraj.index)]
def clean_stops(self, stops, min_time):
stops_aux = stops.copy()
for stop in stops:
p1 = stop.index.values[0]
p2 = stop.index.values[-1]
if (p2 - p1).item() / CBSmot.nano_to_seconds < min_time:
stops_aux.remove(stop)
return stops_aux
def clean_stops_segment(self, stops, min_time, index):
stops_aux = stops.copy()
i = 0
for stop in stops:
p1 = stop.index.values[0]
p2 = stop.index.values[-1]
if (p2 - p1).item() / CBSmot.nano_to_seconds < min_time:
stops_aux.pop(i)
index.pop(i)
else:
i += 1
return index, stops_aux
def merge_stop(self, stops, max_dist, time_tolerance):
i = 0
while i < len(stops):
if (i+1) < len(stops):
s1 = stops[i]
s2 = stops[i+1]
p2 = s2.index.values[0]
p1 = s1.index.values[-1]
if (p2 - p1).item() / CBSmot.nano_to_seconds <= time_tolerance:
c1 = self.centroid(s1)
c2 = self.centroid(s2)
if d.Distances.calculate_two_point_distance(c1[0], c1[1], c2[0], c2[1]) <= max_dist:
stops.pop(i+1)
                        s1 = s1.append(s2, ignore_index=True)
stops[i] = s1
i -= 1
i += 1
return stops
def merge_stop_segment(self, stops, max_dist, time_tolerance, index):
i = 0
while i < len(stops):
if (i+1) < len(stops):
s1 = stops[i]
s2 = stops[i+1]
p2 = s2.index.values[0]
p1 = s1.index.values[-1]
if (p2 - p1).item() / CBSmot.nano_to_seconds <= time_tolerance:
c1 = self.centroid(s1)
c2 = self.centroid(s2)
if d.Distances.calculate_two_point_distance(c1[0], c1[1], c2[0], c2[1]) <= max_dist:
index_i = index[i]
index_i_1 = index[i+1]
stops.pop(i+1)
index.pop(i+1)
                        s1 = s1.append(s2, ignore_index=True)
stops[i] = s1
index[i] = [index_i[0], index_i_1[-1]]
i -= 1
i += 1
return index, stops
def find_stops(self, traj, max_dist, min_time, time_tolerance, merge_tolerance):
neighborhood = [0]*len(traj.index)
stops = []
traj.sort_index(inplace=True)
j = 0
while j < len(traj.index):
valor = self.count_neighbors(traj, j, max_dist)
neighborhood[j] = valor
j += valor
j += 1
for i in range(len(neighborhood)):
if neighborhood[i] > 0:
p1 = pd.to_datetime(traj.iloc[i].name)
p2 = pd.to_datetime(traj.iloc[i + neighborhood[i]-1].name)
diff = (p2 - p1).total_seconds()
if diff >= time_tolerance:
stops.append(traj.loc[p1:p2])
stops = self.merge_stop(stops, max_dist, merge_tolerance)
stops = self.clean_stops(stops, min_time)
return stops
def segment_stops_moves(self, traj, max_dist, min_time, time_tolerance, merge_tolerance):
neighborhood = [0]*len(traj.index)
stops = []
index = []
traj.sort_index(inplace=True)
j = 0
while j < len(traj.index):
valor = self.count_neighbors(traj, j, max_dist)
neighborhood[j] = valor
j += valor
j += 1
#print(neighborhood)
for i in range(len(neighborhood)):
if neighborhood[i] > 0:
p1 = pd.to_datetime(traj.iloc[i].name)
p2 = pd.to_datetime(traj.iloc[i + neighborhood[i]-1].name)
diff = (p2 - p1).total_seconds()
if diff >= time_tolerance:
stops.append(traj.loc[p1:p2])
index.append([p1, p2])
#print(len(index))
index, stops = self.merge_stop_segment(stops, max_dist, merge_tolerance, index)
#print(len(index))
index, stops = self.clean_stops_segment(stops, min_time, index)
#print(len(index))
return index, stops
@staticmethod
    def get_quantile(traj, area):
        if area > 1 or area < 0:
raise ValueError("Area must be >=0 and <=1")
distances = [1]
for i in range(len(traj)-1):
p1 = traj.iloc[i]
p2 = traj.iloc[i+1]
            distances.append(d.Distances.calculate_two_point_distance(p1.lat, p1.lon, p2.lat, p2.lon))
        return np.quantile(distances, area, overwrite_input=True)
| 35.932099
| 104
| 0.489263
| 711
| 5,821
| 3.869198
| 0.139241
| 0.013086
| 0.019629
| 0.034533
| 0.619775
| 0.576881
| 0.517993
| 0.504544
| 0.504544
| 0.504544
| 0
| 0.038813
| 0.398042
| 5,821
| 162
| 105
| 35.932099
| 0.74629
| 0.012025
| 0
| 0.597122
| 0
| 0
| 0.007307
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064748
| false
| 0
| 0.021583
| 0
| 0.165468
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06c9d2978cf880b3371f69c40666eeeea090512c
| 13,838
|
py
|
Python
|
Support/validate.py
|
sgarbesi/javascript-eslint.tmbundle
|
b117fe0133582676113a96fc9804795c033d0b78
|
[
"BSD-3-Clause"
] | 1
|
2015-05-01T14:24:39.000Z
|
2015-05-01T14:24:39.000Z
|
Support/validate.py
|
sgarbesi/javascript-eslint.tmbundle
|
b117fe0133582676113a96fc9804795c033d0b78
|
[
"BSD-3-Clause"
] | null | null | null |
Support/validate.py
|
sgarbesi/javascript-eslint.tmbundle
|
b117fe0133582676113a96fc9804795c033d0b78
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
"""
Validate a JavaScript file using eslint.
Author: Nate Silva
Copyright 2014 Nate Silva
License: MIT
"""
from __future__ import print_function
import sys
import os
import re
import time
import json
import subprocess
import tempfile
import hashlib
import shutil
def find_up_the_tree(dir_name, filename, max_depth=30):
"""
Search for the named file in the dir_name or any of its parent
directories, up to the root directory.
"""
while True:
if max_depth <= 0:
return None
full_path = os.path.abspath(os.path.join(dir_name, filename))
if os.path.isfile(full_path):
return full_path
(drive, path) = os.path.splitdrive(dir_name)
is_root = (path == os.sep or path == os.altsep)
if is_root:
return None
max_depth -= 1
dir_name = os.path.abspath(os.path.join(dir_name, os.pardir))
def find_eslintrc(start_dir):
"""
Locates the most relevant .eslintrc file. Of the following
locations, the first to be found will be used:
1. An .eslintrc file in the start_dir or any of its parents.
2. If the file has not been saved yet, ~/.eslintrc will be
used.
start_dir is normally set to the directory of the file being
validated.
When start_dir is not provided (which happens with files that
are not saved yet), ~/.eslintrc is the only candidate that is
considered.
If no relevant .eslintrc is found, the return value is None.
"""
if start_dir:
# locate the nearest .eslintrc
eslintrc = find_up_the_tree(start_dir, '.eslintrc')
if eslintrc:
return eslintrc
# last ditch: look for .eslintrc in the user’s home directory
home_eslintrc = os.path.expanduser('~/.eslintrc')
if os.path.isfile(home_eslintrc):
return home_eslintrc
return None
def show_error_message(message):
context = {
'message': message,
'timestamp': time.strftime('%c')
}
my_dir = os.path.abspath(os.path.dirname(__file__))
error_ejs_path = os.path.join(my_dir, 'error.ejs')
error_ejs = open(error_ejs_path, 'r').read()
template_path = os.path.join(my_dir, 'template.html')
template = open(template_path, 'r').read()
template = template.replace('{{ TM_BUNDLE_SUPPORT }}',
os.environ['TM_BUNDLE_SUPPORT'])
template = template.replace('{{ EJS_TEMPLATE }}', json.dumps(error_ejs))
template = template.replace('{{ CONTEXT }}', json.dumps(context))
print(template)
def get_marker_directory():
"""
Create the directory that will hold "marker" files that we use
to detect which files have a validation window open. Used to
implement the following feature:
Normally, when you hit Cmd-S, the validation window appears
only if there is a warning or error.
Assume you had previously validated a file, and the validation
window showing its errors is still open. Now you fix the
errors and press Cmd-S. We want that validation window to
update to show no errors.
In order to do this, we have to somehow detect if TextMate has
a validation window open for the current file. It’s not easy.
We use marker files.
This script creates a marker file before returning the HTML
document that will be shown in the validation window.
When the HTML document detects that it is being hidden (closed),
it runs a TextMate.system command to delete its marker file.
"""
baseDir = os.path.join(tempfile.gettempdir(), 'javascript-eslint-tmbundle')
if not os.path.isdir(baseDir):
os.makedirs(baseDir)
today = time.strftime('%Y-%m-%d')
markerDir = os.path.join(baseDir, today)
if not os.path.isdir(markerDir):
os.makedirs(markerDir)
# Deletion should happen automatically, but to be clean(er),
# delete any previous-day marker dirs.
children = os.listdir(baseDir)
children = [_ for _ in children if _ != today]
children = [os.path.join(baseDir, _) for _ in children]
children = [_ for _ in children if os.path.isdir(_)]
[shutil.rmtree(_, True) for _ in children]
return markerDir
def validate(quiet=False):
    # locate the .eslintrc to use
eslintrc = find_eslintrc(os.environ.get('TM_DIRECTORY', None))
# Copy stdin to a named temporary file: at this time eslint
# doesn’t support reading from stdin.
file_to_validate = tempfile.NamedTemporaryFile(suffix='.js')
if os.environ['TM_SCOPE'].startswith('source.js'):
shutil.copyfileobj(sys.stdin, file_to_validate)
else:
# If we are validating an HTML file with embedded
# JavaScript, only copy content within the
# <script>…</script> tags to the subprocess.
        start_tag = re.compile(r'(\<\s*script)[\s\>]', re.IGNORECASE)
        end_tag = re.compile(r'\<\/\s*script[\s\>]', re.IGNORECASE)
state = 'IGNORE'
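        # Three-state scanner: IGNORE skips text outside <script> tags;
        # LOOK_FOR_END_OF_OPENING_TAG waits for the closing '>'; and
        # PIPE_TO_OUTPUT copies the JavaScript verbatim. Non-JS spans are
        # replaced with spaces/newlines so eslint's reported line/column
        # numbers still line up with the original HTML.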
for line in sys.stdin:
while line:
if state == 'IGNORE':
match = start_tag.search(line)
if match:
# found a script tag
line = ' ' * match.end(1) + line[match.end(1):]
state = 'LOOK_FOR_END_OF_OPENING_TAG'
else:
file_to_validate.write('\n')
line = None
elif state == 'LOOK_FOR_END_OF_OPENING_TAG':
gt_pos = line.find('>')
if gt_pos != -1:
line = ' ' * (gt_pos + 1) + line[gt_pos + 1:]
state = 'PIPE_TO_OUTPUT'
else:
file_to_validate.write('\n')
line = None
elif state == 'PIPE_TO_OUTPUT':
match = end_tag.search(line)
if match:
# found closing </script> tag
file_to_validate.write(line[:match.start()])
line = line[match.end():]
state = 'IGNORE'
else:
file_to_validate.write(line)
line = None
file_to_validate.flush()
# build eslint args
args = [
os.environ.get('TM_JAVASCRIPT_ESLINT_ESLINT', 'eslint'),
'-f',
'compact'
]
if eslintrc:
args.append('-c')
args.append(eslintrc)
args.append(file_to_validate.name)
# Build env for our command: ESLint (and Node) are often
# installed to /usr/local/bin, which may not be on the
# bundle’s PATH in a default install of TextMate.
env = os.environ.copy()
path_parts = env['PATH'].split(':')
if '/bin' not in path_parts:
path_parts.append('/bin')
if '/usr/bin' not in path_parts:
path_parts.append('/usr/bin')
if '/usr/local/bin' not in path_parts:
path_parts.append('/usr/local/bin')
env['PATH'] = ':'.join(path_parts)
try:
eslint = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
(child_stdout, child_stderr) = eslint.communicate()
if child_stderr:
msg = [
'Hi there. This is the “JavaScript ESLint” bundle for ' +
'TextMate. I validate your code using ESLint.',
'',
'I had the following problem running <code>eslint</code>:',
'',
'<code>%s</code>' % child_stderr,
'',
'<h4>How to disable validation</h4>',
'If you mistakenly installed this validation tool and want to ' +
'disable it, you can do so in TextMate:',
'',
'<ol>' +
'<li>On the TextMate menu, choose ' +
'<i>Bundles</i> > <i>Edit Bundles…</i></li>' +
'<li>Locate “JavaScript ESLint”</li>' +
'<li>Uncheck “Enable this item”</li>' +
'<li>Close the Bundle Editor and choose “Save”</li>' +
'</ol>'
]
show_error_message('<br>'.join(msg))
sys.exit()
except OSError as e:
msg = [
'Hi there. This is the “JavaScript ESLint” bundle for ' +
'TextMate. I validate your code using ESLint.',
'',
'I had the following problem running <code>eslint</code>:',
'',
'<code>%s</code>' % e,
'',
'<h4>How to fix it</h4>',
'Make sure the <code>eslint</code> and <code>node</code> ' +
'commands are on the <code>PATH</code>.',
'',
'<ol>' +
'<li>Go to <i>TextMate</i> > <i>Preferences…</i> > ' +
'<i>Variables</i></li>' +
'<li>Ensure the <code>PATH</code> is enabled there and that ' +
'it includes the location of your <code>eslint</code> ' +
'and <code>node</code> commands.</li>'
'</ol>',
'The path currently used by TextMate bundles is:',
'',
'<div style="overflow:auto"><code>%s</code></div>' % env['PATH'],
'<h4>How to disable validation</h4>',
'If you mistakenly installed this validation tool and want to ' +
'disable it, you can do so in TextMate:',
'',
'<ol>' +
'<li>On the TextMate menu, choose ' +
'<i>Bundles</i> > <i>Edit Bundles…</i></li>' +
'<li>Locate “JavaScript ESLint”</li>' +
'<li>Uncheck “Enable this item”</li>' +
'<li>Close the Bundle Editor and choose “Save”</li>' +
'</ol>'
]
show_error_message('<br>'.join(msg))
sys.exit()
# parse the results
    rx = re.compile(r'^[^:]+: line (?P<line>\d+), col (?P<character>\d+), ' +
                    r'(?P<code>\w+) - (?P<reason>.+?)(\s\((?P<shortname>[\w\-]+)\))?$')
issues = []
for line in child_stdout.split('\n'):
line = line.strip()
if not line:
continue
m = rx.match(line)
if not m:
continue
issue = {
'line': int(m.group('line')),
'character': int(m.group('character')) + 1,
'code': m.group('code'),
'reason': m.group('reason')
}
if m.group('shortname'):
issue['shortname'] = m.group('shortname')
issues.append(issue)
# normalize line numbers
input_start_line = int(os.environ['TM_INPUT_START_LINE']) - 1
for issue in issues:
issue['line'] += input_start_line
# add URLs to the issues
if 'TM_FILEPATH' in os.environ:
url_maker = lambda x: \
'txmt://open?url=file://%s&line=%d&column=%d' % \
(os.environ['TM_FILEPATH'], x['line'], x['character'])
else:
url_maker = lambda x: \
'txmt://open?line=%d&column=%d' % (x['line'], x['character'])
for issue in issues:
issue['url'] = url_maker(issue)
# context data we will send to JavaScript
context = {
'eslintrc': eslintrc,
'issues': issues,
'timestamp': time.strftime('%c')
}
if 'TM_FILEPATH' in os.environ:
context['fileUrl'] = \
'txmt://open?url=file://%s' % os.environ['TM_FILEPATH']
context['targetFilename'] = os.path.basename(os.environ['TM_FILEPATH'])
else:
context['fileUrl'] = 'txmt://open?line=1&column=0'
context['targetFilename'] = '(current unsaved file)'
# Identify the marker file that we will use to indicate the
# TM_FILEPATH of the file currently shown in the validation
# window.
markerDir = get_marker_directory()
hash = hashlib.sha224(context['fileUrl']).hexdigest()
context['markerFile'] = os.path.join(markerDir, hash + '.marker')
context['errorCount'] = \
len([_ for _ in context['issues'] if _['code'][0] == 'E'])
context['warningCount'] = \
len([_ for _ in context['issues'] if _['code'][0] == 'W'])
if context['errorCount'] == 0 and context['warningCount'] == 0:
# There are no errors or warnings. We can bail out if all of
# the following are True:
#
# * There is no validation window currently open for
# this document.
# * quiet is True.
if not os.path.exists(context['markerFile']):
if quiet:
return
# create the marker file
markerFile = open(context['markerFile'], 'w+')
markerFile.close()
# read and prepare the template
my_dir = os.path.abspath(os.path.dirname(__file__))
content_ejs_path = os.path.join(my_dir, 'content.ejs')
content_ejs = open(content_ejs_path, 'r').read()
template_path = os.path.join(my_dir, 'template.html')
template = open(template_path, 'r').read()
template = template.replace('{{ TM_BUNDLE_SUPPORT }}',
os.environ['TM_BUNDLE_SUPPORT'])
template = template.replace('{{ EJS_TEMPLATE }}', json.dumps(content_ejs))
template = template.replace('{{ CONTEXT }}', json.dumps(context))
# print(template)
# @sgarbesi Tooltips for Textmate
if context['errorCount'] == 0 and context['warningCount'] == 0:
print('Lint Free!')
return
template = '%s Errors / %s Warnings' % (context['errorCount'], context['warningCount'])
template = '%s\r\n---' % (template)
for issue in context['issues']:
template = '%s\r\n%s: L%s: %s' % (template, issue['code'], issue['line'], issue['reason'])
print(template)
if __name__ == '__main__':
quiet = ('-q' in sys.argv or '--quiet' in sys.argv)
validate(quiet)
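A hedged, self-contained sketch (not part of the bundle) of the <script>-extraction idea used in validate() above: non-JavaScript lines become blank lines so that the line numbers eslint reports still line up with the original HTML document. The regexes here are simplified and assume the opening tag closes on the same line.

import re

def extract_script(html_lines):
    out, in_script = [], False
    for line in html_lines:
        if not in_script:
            m = re.search(r'<\s*script[^>]*>', line, re.IGNORECASE)
            if m:
                # blank out everything up to the end of the opening tag
                out.append(' ' * m.end() + line[m.end():])
                in_script = True
            else:
                out.append('')
        else:
            m = re.search(r'</\s*script', line, re.IGNORECASE)
            if m:
                # keep only the JavaScript before the closing tag
                out.append(line[:m.start()])
                in_script = False
            else:
                out.append(line)
    return out

print(extract_script(['<html>', '<script>', 'var x = 1;', '</script>']))
# ['', '        ', 'var x = 1;', '']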
| 33.833741
| 98
| 0.568001
| 1,727
| 13,838
| 4.452808
| 0.223509
| 0.019506
| 0.013004
| 0.007802
| 0.319246
| 0.283355
| 0.264889
| 0.249415
| 0.20104
| 0.182835
| 0
| 0.003626
| 0.3025
| 13,838
| 408
| 99
| 33.916667
| 0.791857
| 0.200607
| 0
| 0.363281
| 0
| 0.011719
| 0.25637
| 0.032932
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019531
| false
| 0
| 0.039063
| 0
| 0.09375
| 0.015625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06d128fc6f207aa019def30c73ff71c2d5f4ad72
| 8,745
|
py
|
Python
|
imagenet_pytorch/utils.py
|
lishuliang/Emotion-Recognition
|
a8aea1b71b2508e6157410089b20ab463fe901f5
|
[
"MIT"
] | 1
|
2019-03-16T08:11:53.000Z
|
2019-03-16T08:11:53.000Z
|
imagenet_pytorch/utils.py
|
lishuliang/Emotion-Recognition
|
a8aea1b71b2508e6157410089b20ab463fe901f5
|
[
"MIT"
] | null | null | null |
imagenet_pytorch/utils.py
|
lishuliang/Emotion-Recognition
|
a8aea1b71b2508e6157410089b20ab463fe901f5
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.nn.functional as F
from torch.nn import init
class attention(nn.Module):
def __init__(self, input_channels, map_size):
super(attention, self).__init__()
self.pool = nn.AvgPool2d(kernel_size=map_size)
self.fc1 = nn.Linear(in_features=input_channels,
out_features=input_channels // 2)
self.fc2 = nn.Linear(in_features=input_channels //
2, out_features=input_channels)
def forward(self, x):
output = self.pool(x)
output = output.view(output.size()[0], output.size()[1])
output = self.fc1(output)
output = F.relu(output)
output = self.fc2(output)
        output = torch.sigmoid(output)  # F.sigmoid is deprecated in recent PyTorch
output = output.view(output.size()[0], output.size()[1], 1, 1)
output = torch.mul(x, output)
return output
class transition(nn.Module):
def __init__(self, if_att, current_size, input_channels, keep_prob):
super(transition, self).__init__()
self.input_channels = input_channels
self.keep_prob = keep_prob
self.bn = nn.BatchNorm2d(self.input_channels)
self.conv = nn.Conv2d(self.input_channels,
self.input_channels, kernel_size=1, bias=False)
# self.dropout = nn.Dropout2d(1 - self.keep_prob)
self.pool = nn.AvgPool2d(kernel_size=2)
self.if_att = if_att
        if self.if_att:
self.attention = attention(
input_channels=self.input_channels, map_size=current_size)
def forward(self, x):
output = self.bn(x)
output = F.relu(output)
output = self.conv(output)
        if self.if_att:
output = self.attention(output)
# output = self.dropout(output)
output = self.pool(output)
return output
class global_pool(nn.Module):
def __init__(self, input_size, input_channels):
super(global_pool, self).__init__()
self.input_size = input_size
self.input_channels = input_channels
self.bn = nn.BatchNorm2d(self.input_channels)
self.pool = nn.AvgPool2d(kernel_size=self.input_size)
def forward(self, x):
output = self.bn(x)
output = F.relu(output)
output = self.pool(output)
return output
class compress(nn.Module):
def __init__(self, input_channels, keep_prob):
super(compress, self).__init__()
self.keep_prob = keep_prob
self.bn = nn.BatchNorm2d(input_channels)
self.conv = nn.Conv2d(input_channels, input_channels //
2, kernel_size=1, padding=0, bias=False)
def forward(self, x):
output = self.bn(x)
output = F.relu(output)
output = self.conv(output)
# output = F.dropout2d(output, 1 - self.keep_prob)
return output
class clique_block(nn.Module):
def __init__(self, input_channels, channels_per_layer, layer_num, loop_num, keep_prob):
super(clique_block, self).__init__()
self.input_channels = input_channels
self.channels_per_layer = channels_per_layer
self.layer_num = layer_num
self.loop_num = loop_num
self.keep_prob = keep_prob
# conv 1 x 1
self.conv_param = nn.ModuleList([nn.Conv2d(self.channels_per_layer, self.channels_per_layer, kernel_size=1, padding=0, bias=False)
for i in range((self.layer_num + 1) ** 2)])
for i in range(1, self.layer_num + 1):
self.conv_param[i] = nn.Conv2d(
self.input_channels, self.channels_per_layer, kernel_size=1, padding=0, bias=False)
for i in range(1, self.layer_num + 1):
self.conv_param[i * (self.layer_num + 2)] = None
for i in range(0, self.layer_num + 1):
self.conv_param[i * (self.layer_num + 1)] = None
self.forward_bn = nn.ModuleList([nn.BatchNorm2d(
self.input_channels + i * self.channels_per_layer) for i in range(self.layer_num)])
self.forward_bn_b = nn.ModuleList(
[nn.BatchNorm2d(self.channels_per_layer) for i in range(self.layer_num)])
self.loop_bn = nn.ModuleList([nn.BatchNorm2d(
self.channels_per_layer * (self.layer_num - 1)) for i in range(self.layer_num)])
self.loop_bn_b = nn.ModuleList(
[nn.BatchNorm2d(self.channels_per_layer) for i in range(self.layer_num)])
# conv 3 x 3
self.conv_param_bottle = nn.ModuleList([nn.Conv2d(self.channels_per_layer, self.channels_per_layer, kernel_size=3, padding=1, bias=False)
for i in range(self.layer_num)])
def forward(self, x):
# key: 1, 2, 3, 4, 5, update every loop
self.blob_dict = {}
# save every loops results
self.blob_dict_list = []
# first forward
for layer_id in range(1, self.layer_num + 1):
bottom_blob = x
# bottom_param = self.param_dict['0_' + str(layer_id)]
bottom_param = self.conv_param[layer_id].weight
for layer_id_id in range(1, layer_id):
# pdb.set_trace()
bottom_blob = torch.cat(
(bottom_blob, self.blob_dict[str(layer_id_id)]), 1)
# bottom_param = torch.cat((bottom_param, self.param_dict[str(layer_id_id) + '_' + str(layer_id)]), 1)
bottom_param = torch.cat(
(bottom_param, self.conv_param[layer_id_id * (self.layer_num + 1) + layer_id].weight), 1)
next_layer = self.forward_bn[layer_id - 1](bottom_blob)
next_layer = F.relu(next_layer)
# conv 1 x 1
next_layer = F.conv2d(
next_layer, bottom_param, stride=1, padding=0)
# conv 3 x 3
next_layer = self.forward_bn_b[layer_id - 1](next_layer)
next_layer = F.relu(next_layer)
next_layer = F.conv2d(
next_layer, self.conv_param_bottle[layer_id - 1].weight, stride=1, padding=1)
# next_layer = F.dropout2d(next_layer, 1 - self.keep_prob)
self.blob_dict[str(layer_id)] = next_layer
        self.blob_dict_list.append(dict(self.blob_dict))  # copy: the dict is mutated by the loop below
# loop
for loop_id in range(self.loop_num):
for layer_id in range(1, self.layer_num + 1):
layer_list = [l_id for l_id in range(1, self.layer_num + 1)]
layer_list.remove(layer_id)
bottom_blobs = self.blob_dict[str(layer_list[0])]
# bottom_param = self.param_dict[layer_list[0] + '_' + str(layer_id)]
bottom_param = self.conv_param[layer_list[0]
* (self.layer_num + 1) + layer_id].weight
for bottom_id in range(len(layer_list) - 1):
bottom_blobs = torch.cat(
(bottom_blobs, self.blob_dict[str(layer_list[bottom_id + 1])]), 1)
# bottom_param = torch.cat((bottom_param, self.param_dict[layer_list[bottom_id+1]+'_'+str(layer_id)]), 1)
bottom_param = torch.cat(
(bottom_param, self.conv_param[layer_list[bottom_id + 1] * (self.layer_num + 1) + layer_id].weight), 1)
bottom_blobs = self.loop_bn[layer_id - 1](bottom_blobs)
bottom_blobs = F.relu(bottom_blobs)
# conv 1 x 1
mid_blobs = F.conv2d(
bottom_blobs, bottom_param, stride=1, padding=0)
# conv 3 x 3
top_blob = self.loop_bn_b[layer_id - 1](mid_blobs)
top_blob = F.relu(top_blob)
top_blob = F.conv2d(
top_blob, self.conv_param_bottle[layer_id - 1].weight, stride=1, padding=1)
self.blob_dict[str(layer_id)] = top_blob
            self.blob_dict_list.append(dict(self.blob_dict))  # copy each loop's snapshot
assert len(self.blob_dict_list) == 1 + self.loop_num
# output
block_feature_I = self.blob_dict_list[0]['1']
for layer_id in range(2, self.layer_num + 1):
block_feature_I = torch.cat(
(block_feature_I, self.blob_dict_list[0][str(layer_id)]), 1)
block_feature_I = torch.cat((x, block_feature_I), 1)
block_feature_II = self.blob_dict_list[self.loop_num]['1']
for layer_id in range(2, self.layer_num + 1):
block_feature_II = torch.cat(
(block_feature_II, self.blob_dict_list[self.loop_num][str(layer_id)]), 1)
return block_feature_I, block_feature_II
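A hedged smoke test for the modules above (it assumes PyTorch is installed; the batch and map sizes are illustrative):

import torch

att = attention(input_channels=64, map_size=8)
x = torch.randn(2, 64, 8, 8)
print(att(x).shape)  # torch.Size([2, 64, 8, 8]) - channel-wise rescaling only

block = clique_block(input_channels=64, channels_per_layer=16,
                     layer_num=4, loop_num=1, keep_prob=1.0)
feat_i, feat_ii = block(x)
print(feat_i.shape)   # 64 + 4 * 16 = 128 channels: input plus all Stage-I layers
print(feat_ii.shape)  # 4 * 16 = 64 channels: the refined layers only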
| 43.507463
| 145
| 0.5992
| 1,199
| 8,745
| 4.089241
| 0.090909
| 0.039976
| 0.051397
| 0.03712
| 0.700184
| 0.616153
| 0.489904
| 0.448297
| 0.344687
| 0.303896
| 0
| 0.020392
| 0.293425
| 8,745
| 200
| 146
| 43.725
| 0.773102
| 0.077759
| 0
| 0.302632
| 0
| 0
| 0.000249
| 0
| 0
| 0
| 0
| 0
| 0.006579
| 1
| 0.065789
| false
| 0
| 0.052632
| 0
| 0.184211
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06d1d332e24aee96ce48f604359996ef77a12eea
| 1,349
|
py
|
Python
|
setup.py
|
jopo666/HistoPrep
|
1b74c346b38c7ca44f92269246571f5f850836af
|
[
"MIT"
] | 11
|
2021-04-21T10:37:22.000Z
|
2021-12-19T22:32:59.000Z
|
setup.py
|
jopo666/HistoPrep
|
1b74c346b38c7ca44f92269246571f5f850836af
|
[
"MIT"
] | 1
|
2021-02-24T09:15:13.000Z
|
2021-04-19T06:38:58.000Z
|
setup.py
|
jopo666/HistoPrep
|
1b74c346b38c7ca44f92269246571f5f850836af
|
[
"MIT"
] | 1
|
2021-09-16T05:00:21.000Z
|
2021-09-16T05:00:21.000Z
|
import setuptools
exec(open('histoprep/_version.py').read())
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="histoprep",
version=__version__,
author="jopo666",
scripts=['HistoPrep'],
author_email="jopo@birdlover.com",
description="Preprocessing module for large histological images.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/jopo666/HistoPrep",
packages=setuptools.find_packages(include=['histoprep','histoprep.*']),
install_requires=[
'opencv-python==4.5.1.48',
'openslide-python==1.1.2',
'pandas==1.2.1',
'Pillow==8.0.0',
'seaborn==0.11.0',
'numpy==1.19.2',
'tqdm==4.60.0',
'aicspylibczi==2.8.0',
'shapely==1.7.1',
'scikit-learn==0.24.1',
'ipywidgets==7.6.3',
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Bio-Informatics",
],
keywords='image-analysis preprocessing histology openslide',
python_requires='>=3.8',
)
| 32.119048
| 75
| 0.614529
| 154
| 1,349
| 5.285714
| 0.642857
| 0.07371
| 0.046683
| 0.07371
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047125
| 0.213491
| 1,349
| 42
| 76
| 32.119048
| 0.720075
| 0
| 0
| 0.051282
| 0
| 0
| 0.488148
| 0.065926
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.025641
| 0
| 0.025641
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06d28dfe07994e25ac5013d571490aa1301605ee
| 15,260
|
py
|
Python
|
train.py
|
Kiwi-PUJ/DataTraining
|
706642996e884b47a0aa7dfb19da33a7234a311e
|
[
"CC0-1.0"
] | 3
|
2021-06-04T00:07:54.000Z
|
2021-06-09T01:14:07.000Z
|
train.py
|
Kiwi-PUJ/DataTraining
|
706642996e884b47a0aa7dfb19da33a7234a311e
|
[
"CC0-1.0"
] | null | null | null |
train.py
|
Kiwi-PUJ/DataTraining
|
706642996e884b47a0aa7dfb19da33a7234a311e
|
[
"CC0-1.0"
] | null | null | null |
## @package Training_app
# Training code developed with Tensorflow Keras. Content: Unet, Unet++ and FCN
#
# @version 1
#
# Pontificia Universidad Javeriana
#
# Electronic Engineering
#
# Developed by:
# - Andrea Juliana Ruiz Gomez
# Mail: <andrea_ruiz@javeriana.edu.co>
# GitHub: andrearuizg
# - Pedro Eli Ruiz Zarate
# Mail: <pedro.ruiz@javeriana.edu.co>
# GitHub: PedroRuizCode
#
# With support of:
# - Francisco Carlos Calderon Bocanegra
# Mail: <calderonf@javeriana.edu.co>
# GitHub: calderonf
# - John Alberto Betancout Gonzalez
# Mail: <john@kiwibot.com>
# GitHub: JohnBetaCode
import os
from time import time
import numpy as np
import cv2
from glob import glob
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow.keras.layers import *
from tensorflow.keras.models import Model
from tensorflow.keras.metrics import Recall, Precision
from tensorflow.keras.callbacks import (EarlyStopping, ModelCheckpoint,
ReduceLROnPlateau, CSVLogger, TensorBoard)
## Load data
# Load the data
# @param path Path of the image
def load_data(path):
images_train = sorted(glob(os.path.join(path, "images/train/*")))
masks_train = sorted(glob(os.path.join(path, "masks/train/*")))
images_valid = sorted(glob(os.path.join(path, "images/valid/*")))
masks_valid = sorted(glob(os.path.join(path, "masks/valid/*")))
train_x, valid_x = images_train, images_valid
train_y, valid_y = masks_train, masks_valid
return (train_x, train_y), (valid_x, valid_y)
## Read image
# Read the images
# @param path Path of the image
def read_image(path):
path = path.decode()
x = cv2.imread(path, cv2.IMREAD_COLOR)
x = cv2.resize(x, (256, 256))
x = x / 255.0
return x
## Read mask
# Read the mask of the images
# @param path Path of the mask
def read_mask(path):
path = path.decode()
x = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
x = cv2.resize(x, (256, 256))
x = x / 1.0
x = np.expand_dims(x, axis=-1)
return x
## Parse
# Read images and masks and convert to TensorFlow dataformat
# @param x Images
# @param y Masks
def tf_parse(x, y):
def _parse(x, y):
x = read_image(x)
y = read_mask(y)
return x, y
x, y = tf.numpy_function(_parse, [x, y], [tf.float64, tf.float64])
x.set_shape([256, 256, 3])
y.set_shape([256, 256, 1])
return x, y
## Dataset
# Read images and masks and convert to TensorFlow format
# @param x Images
# @param y Masks
# @param batch Batch size
def tf_dataset(x, y, batch):
dataset = tf.data.Dataset.from_tensor_slices((x, y))
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = (
tf.data.experimental.AutoShardPolicy.OFF)
dataset = dataset.with_options(options)
dataset = dataset.map(tf_parse)
dataset = dataset.batch(batch)
dataset = dataset.repeat()
return dataset
## Down sample function
# Make the down sample of the layer
# @param x Input
# @param filters The dimensionality of the output space
# @param kernel_size Height and width of the 2D convolution window
# @param padding Padding
# @param strides Strides of the convolution along the height and width
def down_block(x, filters, kernel_size=(3, 3), padding="same", strides=1):
c = Conv2D(filters, kernel_size, padding=padding, strides=strides,
activation="relu")(x)
c = BatchNormalization()(c)
c = Conv2D(filters, kernel_size, padding=padding, strides=strides,
activation="relu")(c)
c = BatchNormalization()(c)
p = MaxPool2D((2, 2), (2, 2))(c)
return c, p
## Up sample function
# Make the up sample of the layer
# @param x Input
# @param skip The skip connection is made to avoid the loss of accuracy
# in the downsampling layers. In case the image becomes so small that
# it has no information, the weights are calculated with the skip layer.
# @param filters The dimensionality of the output space
# @param kernel_size Height and width of the 2D convolution window
# @param padding Padding
# @param strides Strides of the convolution along the height and width
def up_block(x, skip, filters, kernel_size=(3, 3), padding="same", strides=1):
us = UpSampling2D((2, 2))(x)
concat = Concatenate()([us, skip])
c = Conv2D(filters, kernel_size, padding=padding, strides=strides,
activation="relu")(concat)
c = BatchNormalization()(c)
c = Conv2D(filters, kernel_size, padding=padding, strides=strides,
activation="relu")(c)
c = BatchNormalization()(c)
return c
## Bottleneck function
# Added to reduce the number of feature maps in the network
# @param x Input
# @param filters The dimensionality of the output space
# @param kernel_size Height and width of the 2D convolution window
# @param padding Padding
# @param strides Strides of the convolution along the height and width
def bottleneck(x, filters, kernel_size=(3, 3), padding="same", strides=1):
c = Conv2D(filters, kernel_size, padding=padding, strides=strides,
activation="relu")(x)
c = BatchNormalization()(c)
c = Conv2D(filters, kernel_size, padding=padding, strides=strides,
activation="relu")(c)
c = BatchNormalization()(c)
return c
## Unet 1
# Unet implementation
# @param f Filters dimensionality
def UNet_1(f):
inputs = Input((256, 256, 3))
p0 = inputs
c1, p1 = down_block(p0, f[0]) # 256 -> 128
c2, p2 = down_block(p1, f[1]) # 128 -> 64
c3, p3 = down_block(p2, f[2]) # 64 -> 32
c4, p4 = down_block(p3, f[3]) # 32 -> 16
bn = bottleneck(p4, f[4])
u3 = up_block(bn, c4, f[3]) # 16 -> 32
u4 = up_block(u3, c3, f[2]) # 32 -> 64
u5 = up_block(u4, c2, f[1]) # 64 -> 128
u6 = up_block(u5, c1, f[0]) # 128 -> 256
# Classifying layer
outputs = Dropout(0.1)(u6)
outputs = Conv2D(1, (1, 1), padding="same", activation="sigmoid")(outputs)
model = Model(inputs, outputs)
return model
## Unet 2
# Unet implementation
# @param f Filters dimensionality
def UNet_2(f):
inputs = Input((256, 256, 3))
p0 = inputs
c1, p1 = down_block(p0, f[0]) # 256 -> 128
c2, p2 = down_block(p1, f[1]) # 128 -> 64
c3, p3 = down_block(p2, f[2]) # 64 -> 32
c4, p4 = down_block(p3, f[3]) # 32 -> 16
c5, p5 = down_block(p4, f[4]) # 16 -> 8
c6, p6 = down_block(p5, f[5]) # 8 -> 4
bn = bottleneck(p6, f[6])
u1 = up_block(bn, c6, f[5]) # 4 -> 8
u2 = up_block(u1, c5, f[4]) # 8 -> 16
u3 = up_block(u2, c4, f[3]) # 16 -> 32
u4 = up_block(u3, c3, f[2]) # 32 -> 64
u5 = up_block(u4, c2, f[1]) # 64 -> 128
u6 = up_block(u5, c1, f[0]) # 128 -> 256
# Classifying layer
outputs = Dropout(0.1)(u6)
outputs = Conv2D(1, (1, 1), padding="same", activation="sigmoid")(outputs)
model = Model(inputs, outputs)
return model
## Unet++ 1
# Unet++ implementation
# @param f Filters dimensionality
def UNetpp_1(f):
inputs = Input((256, 256, 3))
p0 = inputs
c1, p1 = down_block(p0, f[0]) # 256 -> 128
c2, p2 = down_block(p1, f[1]) # 128 -> 64
c3, p3 = down_block(p2, f[2]) # 64 -> 32
c4, p4 = down_block(p3, f[3]) # 32 -> 16
u11 = up_block(c2, c1, f[0]) # 128 -> 256
u21 = up_block(c3, c2, f[1]) # 64 -> 128
u31 = up_block(c4, c3, f[2]) # 32 -> 64
u21_1 = Concatenate()([c2, u21])
u22 = up_block(u31, u21_1, f[1]) # 128 -> 256
u11_1 = Concatenate()([c1, u11])
u12 = up_block(u21, u11_1, f[0]) # 64 -> 128
u12_1 = Concatenate()([u11_1, u12])
u13 = up_block(u22, u12_1, f[0]) # 128 -> 256
bn = bottleneck(p4, f[4])
u3 = up_block(bn, c4, f[3]) # 16 -> 32
u31_1 = Concatenate()([c3, u31])
u4 = up_block(u3, u31_1, f[2]) # 32 -> 64
u22_1 = Concatenate()([u21_1, u22])
u5 = up_block(u4, u22_1, f[1]) # 64 -> 128
u13_1 = Concatenate()([u12_1, u13])
u6 = up_block(u5, u13_1, f[0]) # 128 -> 256
# Classifying layer
outputs = Dropout(0.1)(u6)
outputs = Conv2D(1, (1, 1), padding="same", activation="sigmoid")(outputs)
model = Model(inputs, outputs)
return model
## Unet++ 2
# Unet++ implementation
# @param f Filters dimensionality
def UNetpp_2(f):
inputs = Input((256, 256, 3))
p0 = inputs
c1, p1 = down_block(p0, f[0]) # 256 -> 128
c2, p2 = down_block(p1, f[1]) # 128 -> 64
c3, p3 = down_block(p2, f[2]) # 64 -> 32
c4, p4 = down_block(p3, f[3]) # 32 -> 16
c5, p5 = down_block(p4, f[4]) # 16 -> 8
c6, p6 = down_block(p5, f[5]) # 8 -> 4
u11 = up_block(c2, c1, f[0]) # 128 -> 256
u21 = up_block(c3, c2, f[1]) # 64 -> 128
u31 = up_block(c4, c3, f[2]) # 32 -> 64
u41 = up_block(c5, c4, f[3]) # 16 -> 32
u51 = up_block(c6, c5, f[4]) # 8 -> 16
u11_1 = Concatenate()([c1, u11])
u12 = up_block(u21, u11_1, f[0]) # 128 -> 256
u21_1 = Concatenate()([c2, u21])
u22 = up_block(u31, u21_1, f[1]) # 64 -> 128
u31_1 = Concatenate()([c3, u31])
u32 = up_block(u41, u31_1, f[2]) # 32 -> 64
u41_1 = Concatenate()([c4, u41])
u42 = up_block(u51, u41_1, f[3]) # 16 -> 32
u12_1 = Concatenate()([u11_1, u12])
u13 = up_block(u22, u12_1, f[0]) # 128 -> 256
u22_1 = Concatenate()([u21_1, u22])
u23 = up_block(u32, u22_1, f[1]) # 64 -> 128
u32_1 = Concatenate()([u31_1, u32])
u33 = up_block(u42, u32_1, f[2]) # 32 -> 64
u13_1 = Concatenate()([u12_1, u13])
u14 = up_block(u23, u13_1, f[0]) # 128 -> 256
u23_1 = Concatenate()([u22_1, u23])
u24 = up_block(u33, u23_1, f[1]) # 64 -> 128
u14_1 = Concatenate()([u13_1, u14])
u15 = up_block(u24, u14_1, f[0]) # 128 -> 256
bn = bottleneck(p6, f[6])
u1 = up_block(bn, c6, f[5]) # 4 -> 8
u51_1 = Concatenate()([c5, u51])
u2 = up_block(u1, u51_1, f[4]) # 8 -> 16
u42_1 = Concatenate()([u41_1, u42])
u3 = up_block(u2, u42_1, f[3]) # 16 -> 32
u33_1 = Concatenate()([u32_1, u33])
u4 = up_block(u3, u33_1, f[2]) # 32 -> 64
u24_1 = Concatenate()([u23_1, u24])
u5 = up_block(u4, u24_1, f[1]) # 64 -> 128
u15_1 = Concatenate()([u14_1, u15])
u6 = up_block(u5, u15_1, f[0]) # 128 -> 256
# Classifying layer
outputs = Dropout(0.1)(u6)
outputs = Conv2D(1, (1, 1), padding="same", activation="sigmoid")(outputs)
model = Model(inputs, outputs)
return model
## FCN 1
# Fully Convolutional Network implementation
# @param f Filters dimensionality
def FCN_1(f):
inputs = Input((256, 256, 3))
p0 = inputs
c1, p1 = down_block(p0, f[0]) # 256 -> 128
c2, p2 = down_block(p1, f[1]) # 128 -> 64
c3, p3 = down_block(p2, f[2]) # 64 -> 32
c4, p4 = down_block(p3, f[3]) # 32 -> 16
bn = bottleneck(p4, f[4])
pr1 = Conv2D(1, (4, 4), activation='relu', padding='same', strides=1)(bn)
pr2 = Conv2D(1, (8, 8), activation='relu', padding='same', strides=1)(p3)
pr3 = Conv2D(1, (16, 16), activation='relu', padding='same', strides=1)(p2)
us1 = UpSampling2D((2, 2))(pr1)
add1 = Add()([us1, pr2])
us2 = UpSampling2D((2, 2))(add1)
add2 = Add()([us2, pr3])
us3 = UpSampling2D((4, 4))(add2)
# Classifying layer
outputs = Dropout(0.1)(us3)
outputs = Conv2D(1, (32, 32), activation='sigmoid', padding='same')(outputs)
model = Model(inputs, outputs)
return model
## FCN 2
# Fully Convolutional Network implementation
# @param f Filters dimensionality
def FCN_2(f):
inputs = Input((256, 256, 3))
p0 = inputs
c1, p1 = down_block(p0, f[0]) # 256 -> 128
c2, p2 = down_block(p1, f[1]) # 128 -> 64
c3, p3 = down_block(p2, f[2]) # 64 -> 32
c4, p4 = down_block(p3, f[3]) # 32 -> 16
c5, p5 = down_block(p4, f[4]) # 16 -> 8
c6, p6 = down_block(p5, f[5]) # 8 -> 4
bn = bottleneck(p6, f[6])
pr1 = Conv2D(1, (1, 1), activation='relu', padding='same', strides=1)(bn)
pr2 = Conv2D(1, (2, 2), activation='relu', padding='same', strides=1)(p5)
pr3 = Conv2D(1, (4, 4), activation='relu', padding='same', strides=1)(p4)
pr4 = Conv2D(1, (8, 8), activation='relu', padding='same', strides=1)(p3)
pr5 = Conv2D(1, (16, 16), activation='relu', padding='same', strides=1)(p2)
us1 = UpSampling2D((2, 2))(pr1)
add1 = Add()([us1, pr2])
us2 = UpSampling2D((2, 2))(add1)
add2 = Add()([us2, pr3])
us3 = UpSampling2D((2, 2))(add2)
add3 = Add()([us3, pr4])
us4 = UpSampling2D((2, 2))(add3)
add4 = Add()([us4, pr5])
us5 = UpSampling2D((4, 4))(add4)
# Classifying layer
outputs = Dropout(0.1)(us5)
outputs = Conv2D(1, (32, 32), activation='sigmoid', padding='same')(outputs)
model = Model(inputs, outputs)
return model
## Training
# CNN training
def training(m_name):
## Dataset
path = "media/"
(train_x, train_y), (valid_x, valid_y) = load_data(path)
## Hyperparameters
batch = 15
epochs = 190
train_dataset = tf_dataset(train_x, train_y, batch=batch)
valid_dataset = tf_dataset(valid_x, valid_y, batch=batch)
## Time
t0 = time()
## Filters
f = [16, 32, 64, 128, 256, 512, 1024]
if m_name == "unetv1":
model = UNet_1(f)
elif m_name == "unetv2":
model = UNet_2(f)
elif m_name == "unetppv1":
model = UNetpp_1(f)
elif m_name == "unetppv2":
model = UNetpp_2(f)
elif m_name == "fcnv1":
model = FCN_1(f)
else:
model = FCN_2(f)
m_sum = 'files/model_summary_%s_BN.txt' % m_name
m_log = 'logs/%s_BN/scalars/' % m_name
m_h5 = 'files/model_%s_BN.h5' % m_name
m_data = 'files/data_%s_BN.csv' % m_name
m_time = 'files/time_%s_BN.txt' % m_name
model.compile(optimizer="adam", loss="binary_crossentropy",
metrics=["acc", Precision(), Recall()])
with open(m_sum, 'w') as fh:
model.summary(print_fn=lambda x: fh.write(x + '\n'))
train_steps = len(train_x) // batch
valid_steps = len(valid_x) // batch
if len(train_x) % batch != 0:
train_steps += 1
if len(valid_x) % batch != 0:
valid_steps += 1
logdir = m_log
tensorboard_callback = TensorBoard(log_dir=logdir)
callbacks = [
ModelCheckpoint(m_h5),
ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10),
CSVLogger(m_data),
tensorboard_callback,
EarlyStopping(monitor='val_loss', patience=33,
restore_best_weights=False)
]
model.fit(train_dataset, validation_data=valid_dataset,
steps_per_epoch=train_steps,
validation_steps=valid_steps, epochs=epochs,
callbacks=callbacks)
    with open(m_time, 'w') as time_tr:
        time_tr.write(str(time() - t0))
if __name__ == "__main__":
strategy = tf.distribute.MirroredStrategy()
print('Number of devices: {}'.format(strategy.num_replicas_in_sync))
model_l = ["unetv1", "unetv2", "unetppv1", "unetppv2", "fcnv1", "fcnv2"]
for model in model_l:
with strategy.scope():
print("\n\n\n\n\n Training", model, "model\n\n\n\n\n")
training(model)
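A hedged smoke test (it assumes TensorFlow 2.x is available): build the smallest U-Net variant defined above and confirm that a 256x256 RGB input maps to a 256x256 single-channel mask.

model = UNet_1([16, 32, 64, 128, 256])
print(model.output_shape)  # expected: (None, 256, 256, 1)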
| 29.921569
| 80
| 0.602687
| 2,302
| 15,260
| 3.874023
| 0.143354
| 0.032967
| 0.022202
| 0.023436
| 0.59464
| 0.568065
| 0.53577
| 0.505382
| 0.478471
| 0.462772
| 0
| 0.102997
| 0.243512
| 15,260
| 509
| 81
| 29.980354
| 0.669525
| 0.216252
| 0
| 0.461794
| 0
| 0
| 0.043663
| 0.002463
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053156
| false
| 0
| 0.036545
| 0
| 0.139535
| 0.009967
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06d355973fd78ec8f3b614057e835f98f36682ef
| 379
|
py
|
Python
|
qt__pyqt__pyside__pyqode/qt_ini.py
|
DazEB2/SimplePyScripts
|
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
|
[
"CC-BY-4.0"
] | 117
|
2015-12-18T07:18:27.000Z
|
2022-03-28T00:25:54.000Z
|
qt__pyqt__pyside__pyqode/qt_ini.py
|
DazEB2/SimplePyScripts
|
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
|
[
"CC-BY-4.0"
] | 8
|
2018-10-03T09:38:46.000Z
|
2021-12-13T19:51:09.000Z
|
qt__pyqt__pyside__pyqode/qt_ini.py
|
DazEB2/SimplePyScripts
|
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
|
[
"CC-BY-4.0"
] | 28
|
2016-08-02T17:43:47.000Z
|
2022-03-21T08:31:12.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
try:
from PyQt4.QtCore import QSettings
except ImportError:
from PyQt5.QtCore import QSettings
if __name__ == '__main__':
config = QSettings('config.ini', QSettings.IniFormat)
counter = int(config.value('counter', 0))
config.setValue('counter', counter + 1)
config.setValue('key2', 'abc')
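A small companion sketch (assuming PyQt5, and that the script above has run at least once so config.ini exists): values read back from .ini storage come out as strings, so the counter needs the same explicit int() cast used above.

from PyQt5.QtCore import QSettings

config = QSettings('config.ini', QSettings.IniFormat)
print(int(config.value('counter', 0)))  # how many times the script above has run
print(config.value('key2', ''))         # 'abc' after the first run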
| 18.95
| 57
| 0.664908
| 45
| 379
| 5.333333
| 0.688889
| 0.1
| 0.175
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022581
| 0.182058
| 379
| 19
| 58
| 19.947368
| 0.751613
| 0.113456
| 0
| 0
| 0
| 0
| 0.140719
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06dacdda970f273fddcf69cadb01b1f2dd499e8c
| 296
|
py
|
Python
|
sim_test_model.py
|
feiyanke/simpy
|
bde9d09e47596e0bfe66dc7001f556bafd03acc5
|
[
"MIT"
] | 1
|
2019-01-28T09:13:58.000Z
|
2019-01-28T09:13:58.000Z
|
sim_test_model.py
|
feiyanke/simpy
|
bde9d09e47596e0bfe66dc7001f556bafd03acc5
|
[
"MIT"
] | null | null | null |
sim_test_model.py
|
feiyanke/simpy
|
bde9d09e47596e0bfe66dc7001f556bafd03acc5
|
[
"MIT"
] | 2
|
2019-01-28T09:13:59.000Z
|
2020-12-13T09:48:20.000Z
|
import math
import matplotlib.pyplot as plt
from simpy import model
ax1 = plt.subplot(121)
ax2 = plt.subplot(122)
model_sin = model.TimedFunctionModel(math.sin)
model_cos = model.TimedFunctionModel(math.cos)
scope = model.ScopeModel(ax1, ax2)
def run():
scope(model_sin(), model_cos())
| 17.411765
| 46
| 0.75
| 43
| 296
| 5.069767
| 0.488372
| 0.110092
| 0.119266
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.039063
| 0.135135
| 296
| 16
| 47
| 18.5
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.3
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06db8ba3ca98cc15e56e2db049c572bc5f7c97a3
| 2,760
|
py
|
Python
|
Day_10_classes_and_objects/day10_uzd1.py
|
ValRCS/Python_TietoEvry_Sep2021
|
e11dac38deb17ba695ce8ad9dab9cf78b4adb99d
|
[
"MIT"
] | null | null | null |
Day_10_classes_and_objects/day10_uzd1.py
|
ValRCS/Python_TietoEvry_Sep2021
|
e11dac38deb17ba695ce8ad9dab9cf78b4adb99d
|
[
"MIT"
] | null | null | null |
Day_10_classes_and_objects/day10_uzd1.py
|
ValRCS/Python_TietoEvry_Sep2021
|
e11dac38deb17ba695ce8ad9dab9cf78b4adb99d
|
[
"MIT"
] | null | null | null |
# class Song: # Song is name of Class, start with Capital letter
# def __init__(self, title="", author="", lyrics=tuple()): # constructor method called upon creation of object
# self.title = title
# self.author = author
# self.lyrics = lyrics
# # print(f"New Song made by Author: {self.author=} Title: {self.title=}")
#
# def sing(self): # method definition which is function associated with objects
# print(f"New Song Title: {self.title}")
# print(f"Lyrics made by Author: {self.author}")
# for line in self.lyrics:
# print(line)
# return self
#
# def yell(self):
# for line in self.lyrics:
# print(line.upper())
# return self
# Question: keep this outside the class, or is it better to make print_lines a static method inside the class? A sketch follows below.
class Song:
def __init__(self, title, author, lyrics):
self.title = title
self.author = author
self.lyrics = lyrics
if title == '':
title = 'Unknown'
if author == '':
author = 'Unknown'
print(f"\n\nNew song made:\nTitle: {title} \nAuthor: {author}")
@classmethod # this means that this method is a class method can be called without any objects
def print_lines(cls, lyrics, line_count=-1):
all_lines_count = len(lyrics)
if line_count == -1:
line_count = len(lyrics)
elif line_count <= 0:
print("no lines to print")
elif all_lines_count < line_count:
print(f"only {all_lines_count} lines can be printed:\n")
for i in lyrics[:line_count]:
print(i)
def sing(self, lines_present=-1):
x = '_' * (len(self.author + self.title) + 3)
print(x, '\nSinging:')
self.print_lines(self.lyrics, lines_present)
return self
def yell(self, lines_present=-1):
x = '_' * (len(self.author + self.title) + 3)
lines_upper = [line.upper() for line in self.lyrics]
print(x, '\nYELLING:')
self.print_lines(lines_upper, lines_present)
return self
class Rap(Song):
def break_it(self, lines_present=-1, drop="yeah"):
x = '_' * (len(self.author + self.title) + 3)
lyrics = [line.replace(' ', f' {drop.upper()} ') + ' ' + drop.upper() for line in self.lyrics]
print(x, '\nRapping:')
self.print_lines(lyrics, lines_present)
return self
ziemelmeita = Song('Ziemeļmeita', 'Jumprava', ['Gāju meklēt ziemeļmeitu', 'Garu, tālu ceļu veicu'])
ziemelmeita.sing(1).yell(10).sing().sing(-3)
zrap = Rap("Ziemeļmeita", "Jumprava", ["Gāju meklēt ziemeļmeitu", "Garu, tālu ceļu veicu"])
zrap.break_it(1, "yah").yell(1)
ziemelmeita.sing().yell().sing(1)
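One hedged answer to the question raised in the comment above: print_lines never reads instance or class state, so it could just as well be a @staticmethod. A minimal sketch:

class SongStatic(Song):
    @staticmethod
    def print_lines(lyrics, line_count=-1):
        # clamp the requested count to the available lines
        if line_count < 0 or line_count > len(lyrics):
            line_count = len(lyrics)
        for line in lyrics[:line_count]:
            print(line)

SongStatic('Ziemeļmeita', 'Jumprava', ['Gāju meklēt ziemeļmeitu']).sing()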
| 34.5
| 115
| 0.595652
| 362
| 2,760
| 4.433702
| 0.276243
| 0.050467
| 0.02243
| 0.032399
| 0.381308
| 0.301558
| 0.266667
| 0.216822
| 0.179439
| 0.05109
| 0
| 0.007937
| 0.269565
| 2,760
| 79
| 116
| 34.936709
| 0.788194
| 0.318116
| 0
| 0.136364
| 0
| 0
| 0.169181
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.113636
| false
| 0
| 0
| 0
| 0.227273
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06dc1c17dd56d7e1a1011a34a1e1d9b273c1982c
| 1,956
|
py
|
Python
|
rurina2/widgets/text.py
|
TeaCondemns/rurina
|
43725ebea5872953125271a9abb300a4e3a80a64
|
[
"MIT"
] | null | null | null |
rurina2/widgets/text.py
|
TeaCondemns/rurina
|
43725ebea5872953125271a9abb300a4e3a80a64
|
[
"MIT"
] | null | null | null |
rurina2/widgets/text.py
|
TeaCondemns/rurina
|
43725ebea5872953125271a9abb300a4e3a80a64
|
[
"MIT"
] | null | null | null |
from constants import STYLE_NORMAL, STYLE_BOLD, STYLE_ITALIC
from prefabs.text import write_autoline
from widgets.widget import WidgetByRect
from base_node import get_surface
from prefabs.surface import blit
from shape import Rect
import pygame
class Text(WidgetByRect):
def __init__(
self,
font: pygame.font.Font,
value: str = '',
text_color=pygame.Color('white'),
linespacing: int = 0,
*args,
**kwargs
):
super().__init__(*args, **kwargs)
self.font = font
self.value = value
self.text_color = text_color
self.linespacing = linespacing
self.sprite.region_enabled = True
@property
def can_be_drawn(self):
return self.visible and self.alpha > 0 and self.scale != 0 and len(self.value) > 0
@property
def style(self) -> int:
__style = STYLE_NORMAL
if self.font.get_bold():
__style |= STYLE_BOLD
if self.font.get_italic():
__style |= STYLE_ITALIC
return __style
@style.setter
def style(self, value):
self.font.set_bold(value & STYLE_BOLD)
self.font.set_italic(value & STYLE_ITALIC)
def draw(self, surface: pygame.Surface = ...) -> None:
if self.can_be_drawn:
surface = get_surface(surface)
self.sprite.draw(surface)
blit(
surface,
write_autoline(
self.value,
self.font,
Rect(0, 0, *self.rect.size),
self.text_color,
self.gravity,
pygame.Surface(self.rect.size, pygame.SRCALPHA, 32),
self.linespacing
),
self.rect.rpos,
self.ralpha,
self.rscale
)
super().draw(surface)
__all__ = [
'Text'
]
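The style property above packs bold and italic into one integer. A self-contained illustration of that bitmask pattern (the real STYLE_* values live in the project's constants module, which is not part of this excerpt; distinct power-of-two bits are assumed):

STYLE_NORMAL, STYLE_BOLD, STYLE_ITALIC = 0, 1, 2
style = STYLE_BOLD | STYLE_ITALIC        # combine flags
print(bool(style & STYLE_BOLD))          # True
print(bool(style & STYLE_ITALIC))        # True
print(bool(STYLE_BOLD & STYLE_ITALIC))   # False - the bits do not overlap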
| 25.402597
| 90
| 0.541922
| 211
| 1,956
| 4.805687
| 0.303318
| 0.055227
| 0.025641
| 0.025641
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006478
| 0.368609
| 1,956
| 76
| 91
| 25.736842
| 0.814575
| 0
| 0
| 0.032787
| 0
| 0
| 0.004601
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081967
| false
| 0
| 0.114754
| 0.016393
| 0.245902
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06dcdfc975ea640979bb4f316bbb031845b68fa5
| 5,350
|
py
|
Python
|
Chapter05/B12322_05_code upload/topic_categorization.py
|
PacktPublishing/Python-Machine-Learning-By-Example-Second-Edition
|
830ad0124dc72c3a24929ff1b67081a66894f1f9
|
[
"MIT"
] | 31
|
2019-05-25T11:28:23.000Z
|
2022-02-09T15:19:20.000Z
|
Chapter05/B12322_05_code upload/topic_categorization.py
|
PacktPublishing/Python-Machine-Learning-By-Example-Second-Edition
|
830ad0124dc72c3a24929ff1b67081a66894f1f9
|
[
"MIT"
] | null | null | null |
Chapter05/B12322_05_code upload/topic_categorization.py
|
PacktPublishing/Python-Machine-Learning-By-Example-Second-Edition
|
830ad0124dc72c3a24929ff1b67081a66894f1f9
|
[
"MIT"
] | 22
|
2019-02-27T20:11:39.000Z
|
2022-03-07T21:46:38.000Z
|
'''
Source codes for Python Machine Learning By Example 2nd Edition (Packt Publishing)
Chapter 5: Classifying Newsgroup Topic with Support Vector Machine
Author: Yuxi (Hayden) Liu
'''
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.datasets import fetch_20newsgroups
from nltk.corpus import names
from nltk.stem import WordNetLemmatizer
all_names = set(names.words())
lemmatizer = WordNetLemmatizer()
def is_letter_only(word):
return word.isalpha()
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
def clean_text(docs):
docs_cleaned = []
for doc in docs:
doc = doc.lower()
doc_cleaned = ' '.join(lemmatizer.lemmatize(word) for word in doc.split()
if is_letter_only(word) and word not in all_names and word not in stop_words)
docs_cleaned.append(doc_cleaned)
return docs_cleaned
# Binary classification
categories = ['comp.graphics', 'sci.space']
data_train = fetch_20newsgroups(subset='train', categories=categories, random_state=42)
data_test = fetch_20newsgroups(subset='test', categories=categories, random_state=42)
cleaned_train = clean_text(data_train.data)
label_train = data_train.target
cleaned_test = clean_text(data_test.data)
label_test = data_test.target
from collections import Counter
Counter(label_train)
tfidf_vectorizer = TfidfVectorizer(stop_words='english', max_features=None)
term_docs_train = tfidf_vectorizer.fit_transform(cleaned_train)
term_docs_test = tfidf_vectorizer.transform(cleaned_test)
from sklearn.svm import SVC
svm = SVC(kernel='linear', C=1.0, random_state=42)
svm.fit(term_docs_train, label_train)
accuracy = svm.score(term_docs_test, label_test)
print('The accuracy of binary classification is: {0:.1f}%'.format(accuracy*100))
# Multiclass classification
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
'rec.sport.hockey'
]
data_train = fetch_20newsgroups(subset='train', categories=categories, random_state=42)
data_test = fetch_20newsgroups(subset='test', categories=categories, random_state=42)
cleaned_train = clean_text(data_train.data)
label_train = data_train.target
cleaned_test = clean_text(data_test.data)
label_test = data_test.target
term_docs_train = tfidf_vectorizer.fit_transform(cleaned_train)
term_docs_test = tfidf_vectorizer.transform(cleaned_test)
svm = SVC(kernel='linear', C=1.0, random_state=42)
svm.fit(term_docs_train, label_train)
accuracy = svm.score(term_docs_test, label_test)
print('The accuracy of 5-class classification is: {0:.1f}%'.format(accuracy*100))
from sklearn.metrics import classification_report
prediction = svm.predict(term_docs_test)
report = classification_report(label_test, prediction)
print(report)
# Grid search
categories = None
data_train = fetch_20newsgroups(subset='train', categories=categories, random_state=42)
data_test = fetch_20newsgroups(subset='test', categories=categories, random_state=42)
cleaned_train = clean_text(data_train.data)
label_train = data_train.target
cleaned_test = clean_text(data_test.data)
label_test = data_test.target
tfidf_vectorizer = TfidfVectorizer(stop_words='english', max_features=None)
term_docs_train = tfidf_vectorizer.fit_transform(cleaned_train)
term_docs_test = tfidf_vectorizer.transform(cleaned_test)
parameters = {'C': [0.1, 1, 10, 100]}
svc_libsvm = SVC(kernel='linear')
from sklearn.model_selection import GridSearchCV
grid_search = GridSearchCV(svc_libsvm, parameters, n_jobs=-1, cv=5)
import timeit
start_time = timeit.default_timer()
grid_search.fit(term_docs_train, label_train)
print("--- %0.3fs seconds ---" % (timeit.default_timer() - start_time))
print(grid_search.best_params_)
print(grid_search.best_score_)
svc_libsvm_best = grid_search.best_estimator_
accuracy = svc_libsvm_best.score(term_docs_test, label_test)
print('The accuracy of 20-class classification is: {0:.1f}%'.format(accuracy*100))
from sklearn.svm import LinearSVC
svc_linear = LinearSVC()
grid_search = GridSearchCV(svc_linear, parameters, n_jobs=-1, cv=5)
start_time = timeit.default_timer()
grid_search.fit(term_docs_train, label_train)
print("--- %0.3fs seconds ---" % (timeit.default_timer() - start_time))
print(grid_search.best_params_)
print(grid_search.best_score_)
svc_linear_best = grid_search.best_estimator_
accuracy = svc_linear_best.score(term_docs_test, label_test)
print('The accuracy of 20-class classification is: {0:.1f}%'.format(accuracy*100))
# Pipeline
from sklearn.pipeline import Pipeline
pipeline = Pipeline([
('tfidf', TfidfVectorizer(stop_words='english')),
('svc', LinearSVC()),
])
parameters_pipeline = {
'tfidf__max_df': (0.25, 0.5, 1.0),
'tfidf__max_features': (10000, None),
'tfidf__sublinear_tf': (True, False),
'tfidf__smooth_idf': (True, False),
'svc__C': (0.3, 1, 3),
}
grid_search = GridSearchCV(pipeline, parameters_pipeline, n_jobs=-1, cv=5)
start_time = timeit.default_timer()
grid_search.fit(cleaned_train, label_train)
print("--- %0.3fs seconds ---" % (timeit.default_timer() - start_time))
print(grid_search.best_params_)
print(grid_search.best_score_)
pipeline_best = grid_search.best_estimator_
accuracy = pipeline_best.score(cleaned_test, label_test)
print('The accuracy of 20-class classification is: {0:.1f}%'.format(accuracy*100))
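A hedged follow-on sketch (joblib ships with scikit-learn's dependencies; the filename is illustrative): because the tuned pipeline bundles the vectorizer with the classifier, it can be persisted and later applied directly to raw text.

import joblib

joblib.dump(pipeline_best, 'newsgroups_svc.joblib')
clf = joblib.load('newsgroups_svc.joblib')
print(clf.predict(['the rocket launch was delayed by weather'])[0])  # predicted target index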
| 31.470588
| 108
| 0.774393
| 752
| 5,350
| 5.226064
| 0.207447
| 0.040712
| 0.032061
| 0.047328
| 0.606616
| 0.606616
| 0.590331
| 0.561832
| 0.551908
| 0.551908
| 0
| 0.021886
| 0.111776
| 5,350
| 169
| 109
| 31.656805
| 0.805135
| 0.045794
| 0
| 0.424779
| 0
| 0
| 0.111896
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017699
| false
| 0
| 0.106195
| 0.00885
| 0.141593
| 0.132743
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06e007230d32188f666bcfa817cb0d72deaa62d6
| 639
|
py
|
Python
|
cocos#275--HTMLLabel/html_label_test.py
|
los-cocos/etc_code
|
71c642a5e0f7ff8049cb5fb4ecac3f166ca20280
|
[
"MIT"
] | 2
|
2016-08-28T19:41:47.000Z
|
2018-12-14T22:01:26.000Z
|
cocos#275--HTMLLabel/html_label_test.py
|
los-cocos/etc_code
|
71c642a5e0f7ff8049cb5fb4ecac3f166ca20280
|
[
"MIT"
] | null | null | null |
cocos#275--HTMLLabel/html_label_test.py
|
los-cocos/etc_code
|
71c642a5e0f7ff8049cb5fb4ecac3f166ca20280
|
[
"MIT"
] | 2
|
2015-09-21T06:55:12.000Z
|
2020-05-29T14:34:34.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cocos
from cocos.text import HTMLLabel
from cocos.director import director
class TestLayer(cocos.layer.Layer):
def __init__(self):
super(TestLayer, self).__init__()
x, y = director.get_window_size()
self.text = HTMLLabel("""<center><font color=white size=4>
Image here --><img src="grossini.png"><-- here.</font></center>""",
(100, y//2))
self.add(self.text)
def main():
director.init()
test_layer = TestLayer()
main_scene = cocos.scene.Scene(test_layer)
director.run(main_scene)
if __name__ == '__main__':
main()
| 24.576923
| 68
| 0.640063
| 84
| 639
| 4.607143
| 0.535714
| 0.046512
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013672
| 0.198748
| 639
| 26
| 69
| 24.576923
| 0.742188
| 0.062598
| 0
| 0
| 0
| 0
| 0.175585
| 0.071906
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.166667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06e062374abeb59d36c97a31d852af3b3fb9d03c
| 4,284
|
py
|
Python
|
depronoun.py
|
rui-bettencourt/AutomaticSentenceDivision
|
4cb29897103189791c932aaea42c8d5b4ecd8bcd
|
[
"MIT"
] | null | null | null |
depronoun.py
|
rui-bettencourt/AutomaticSentenceDivision
|
4cb29897103189791c932aaea42c8d5b4ecd8bcd
|
[
"MIT"
] | null | null | null |
depronoun.py
|
rui-bettencourt/AutomaticSentenceDivision
|
4cb29897103189791c932aaea42c8d5b4ecd8bcd
|
[
"MIT"
] | null | null | null |
#
from nltk.tokenize import word_tokenize
from xml.dom import minidom
import progressbar
from time import sleep
input_file = 'data/dataset_output.txt'
num_lines = sum(1 for line in open(input_file))
read_file = open(input_file, 'r')
write_output_file = open('data/dataset_output_no_pronouns.txt', 'w')
pronouns = ['him','her']
pronouns_objects = ['it']
names = []
objects = []
special_objects = []
pronoun_error_counter_p = 0
pronoun_error_counter_o = 0
pronoun_misplacements = 0
# parse an xml file by name
mydoc_names = minidom.parse('Names.xml')
mydoc_objects = minidom.parse('Objects.xml')
names_raw = mydoc_names.getElementsByTagName('name')
for elem in names_raw:
names.append(elem.firstChild.data)
objects_raw = mydoc_objects.getElementsByTagName('object')
category_raw = mydoc_objects.getElementsByTagName('category')
for elem in objects_raw:
if ' ' not in elem.attributes['name'].value:
objects.append(elem.attributes['name'].value)
else:
complex_word = []
for word in elem.attributes['name'].value.split(' '):
complex_word.append(word)
special_objects.append(complex_word)
for elem in category_raw:
if ' ' not in elem.attributes['name'].value:
objects.append(elem.attributes['name'].value)
else:
complex_word = []
for word in elem.attributes['name'].value.split(' '):
complex_word.append(word)
special_objects.append(complex_word)
names.sort()
objects.sort()
print("The availabe names are: ")
print(names)
print("\n\n")
print("The availabe objects are: ")
print(objects)
print("\n\n")
print("The availabe special objects are: ")
print(special_objects)
print("\n\n")
bar = progressbar.ProgressBar(maxval=num_lines, \
widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
bar.start()
##### Actual code
i = 0
for line in read_file:
i += 1
used_name = None
used_object = None
words = word_tokenize(line)
if any(pronoun in words for pronoun in pronouns):
#Loop the tokenized line for the pronoun and name
for word in words:
if word in names:
used_name = word
if word in pronouns and used_name is not None:
#if a pronoun was found and previously also a name, replace that pronoun by the name
words[words.index(word)] = used_name
elif word in pronouns and used_name is None:
print("PRONOUN WITH NO NAME!")
pronoun_error_counter_p += 1
if any(pronoun in words for pronoun in pronouns_objects):
#Loop the tokenized line for the pronoun and object
for word in words:
if word in objects:
used_object = word
if word in names:
used_name = word
if word in pronouns_objects and used_object is not None:
words[words.index(word)] = "the " + used_object
elif word in pronouns_objects and used_object is None:
# print("PRONOUN WITH NO NAME!")
success = False
for special in special_objects:
correct_special = True
for item in special:
if item not in words:
correct_special = False
break
if correct_special:
to_add = ' '.join(special)
words[words.index(word)] = "the " + to_add
success = True
if not success and used_name is not None:
words[words.index(word)] = used_name
pronoun_misplacements += 1
elif not success:
pronoun_error_counter_o += 1
#Write the output into a file
write_output_file.write(' '.join(words).replace(' .','.') + '\n')
# print("Iter: " + str(i))
bar.update(i)
bar.finish()
print("Success! With " + str(pronoun_error_counter_p) + " sentences that had a pronoun but no name and " + str(pronoun_error_counter_o) + " with no object.")
print("A total of " + str(pronoun_misplacements) + " were considered as pronoun misplacements and the it was replace by a name")
| 33.46875
| 157
| 0.614613
| 548
| 4,284
| 4.655109
| 0.213504
| 0.025872
| 0.044688
| 0.054096
| 0.369267
| 0.359467
| 0.320659
| 0.249314
| 0.198354
| 0.167777
| 0
| 0.002949
| 0.287582
| 4,284
| 127
| 158
| 33.732283
| 0.832896
| 0.070495
| 0
| 0.255102
| 0
| 0
| 0.108619
| 0.014617
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.040816
| 0
| 0.040816
| 0.122449
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06e4f094f267243cc672eb7459a8a3c1167d18f8
| 2,794
|
py
|
Python
|
pyaws/utils/userinput.py
|
mwozniczak/pyaws
|
af8f6d64ff47fd2ef2eb9fef25680e4656523fa3
|
[
"MIT"
] | null | null | null |
pyaws/utils/userinput.py
|
mwozniczak/pyaws
|
af8f6d64ff47fd2ef2eb9fef25680e4656523fa3
|
[
"MIT"
] | null | null | null |
pyaws/utils/userinput.py
|
mwozniczak/pyaws
|
af8f6d64ff47fd2ef2eb9fef25680e4656523fa3
|
[
"MIT"
] | null | null | null |
"""
Python3 Module
Summary:
User Input Manipulation
"""
import re
from string import ascii_lowercase
def bool_assignment(arg, patterns=None):
"""
Summary:
        Enforces correct bool argument assignment
Arg:
:arg (*): arg which must be interpreted as either bool True or False
Returns:
bool assignment | TYPE: bool
"""
arg = str(arg) # only eval type str
try:
if patterns is None:
patterns = (
(re.compile(r'^(true|false)$', flags=re.IGNORECASE), lambda x: x.lower() == 'true'),
(re.compile(r'^(yes|no)$', flags=re.IGNORECASE), lambda x: x.lower() == 'yes'),
(re.compile(r'^(y|n)$', flags=re.IGNORECASE), lambda x: x.lower() == 'y')
)
if not arg:
return '' # default selected
else:
for pattern, func in patterns:
if pattern.match(arg):
return func(arg)
except Exception as e:
raise e
def range_bind(min_value, max_value, value):
""" binds number to a type and range """
if value not in range(min_value, max_value + 1):
value = min(value, max_value)
value = max(min_value, value)
return int(value)
def userchoice_mapping(choice):
"""
Summary:
Maps the number of an option presented to the user to the
correct letters in sequential a-z series when choice parameter
is provided as a number.
When given a letter as an input parameter (choice is a single
letter), returns the integer number corresponding to the letter
in the alphabet (a-z)
Examples:
- userchoice_mapping(3) returns 'c'
- userchoice_mapping('z') returns 26 (integer)
Args:
choice, TYPE: int or str
Returns:
ascii (lowercase), TYPE: str OR None
"""
# prepare mapping dict containing all 26 letters
map_dict = {}
letters = ascii_lowercase
for index in range(1, 27):
map_dict[index] = letters[index - 1]
# process user input
try:
if isinstance(choice, str):
if choice in letters:
for k, v in map_dict.items():
if v == choice.lower():
return k
elif int(choice) in range(1, 27):
# integer string provided
return map_dict[int(choice)]
else:
# not in letters or integer string outside range
return None
elif choice not in range(1, 27):
return None
except KeyError:
# integer outside range provided
return None
except ValueError:
# string outside range provided
return None
return map_dict[choice]
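A short usage sketch for the helpers above (expected outputs noted in the comments):

if __name__ == '__main__':
    print(bool_assignment('Yes'))   # True
    print(bool_assignment('n'))     # False
    print(range_bind(1, 10, 42))    # 10 - clamped to the upper bound
    print(userchoice_mapping(3))    # c
    print(userchoice_mapping('z'))  # 26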
| 30.043011
| 100
| 0.566929
| 345
| 2,794
| 4.53913
| 0.342029
| 0.02235
| 0.019157
| 0.044061
| 0.122605
| 0.057471
| 0.057471
| 0
| 0
| 0
| 0
| 0.00929
| 0.345025
| 2,794
| 92
| 101
| 30.369565
| 0.846448
| 0.366142
| 0
| 0.173913
| 0
| 0
| 0.023679
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065217
| false
| 0
| 0.043478
| 0
| 0.326087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06f368aa1460565e63ed4a80b862ae97a70212cf
| 1,151
|
py
|
Python
|
2.py
|
zweed4u/dailycodingproblem
|
6e40eaad347e283f86a11adeff01c6426211a0be
|
[
"MIT"
] | null | null | null |
2.py
|
zweed4u/dailycodingproblem
|
6e40eaad347e283f86a11adeff01c6426211a0be
|
[
"MIT"
] | null | null | null |
2.py
|
zweed4u/dailycodingproblem
|
6e40eaad347e283f86a11adeff01c6426211a0be
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
"""
Good morning! Here's your coding interview problem for today.
This problem was asked by Uber.
Given an array of integers, return a new array such that each element at index i of the new array is the product of all the numbers in the original array except the one at i.
For example, if our input was [1, 2, 3, 4, 5], the expected output would be [120, 60, 40, 30, 24]. If our input was [3, 2, 1], the expected output would be [2, 3, 6].
Follow-up: what if you can't use division?
"""
def func(in_array):
out_array = []
for number in in_array:
product = 1
for v in in_array:
if number == v:
continue
product *= v
out_array.append(product)
return out_array
def division_func(in_array):
    # The follow-up hints that the straightforward solution uses division
out_array = []
for number in in_array:
product = 1
for v in in_array:
product *= v
out_array.append(int(product/number))
return out_array
print(division_func([1,2,3,4,5]))
print(division_func([3, 2, 1]))
print(func([1,2,3,4,5]))
print(func([3, 2, 1]))
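A hedged sketch answering the follow-up: an O(n) solution without division, using running prefix and suffix products instead of the O(n^2) nested loops in func above.

def no_division_func(in_array):
    n = len(in_array)
    out_array = [1] * n
    prefix = 1
    for i in range(n):              # out_array[i] = product of everything left of i
        out_array[i] = prefix
        prefix *= in_array[i]
    suffix = 1
    for i in range(n - 1, -1, -1):  # fold in the product of everything right of i
        out_array[i] *= suffix
        suffix *= in_array[i]
    return out_array

print(no_division_func([1, 2, 3, 4, 5]))  # [120, 60, 40, 30, 24]
print(no_division_func([3, 2, 1]))        # [2, 3, 6]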
| 28.775
| 174
| 0.636838
| 197
| 1,151
| 3.64467
| 0.42132
| 0.058496
| 0.050139
| 0.016713
| 0.305014
| 0.169916
| 0.169916
| 0.130919
| 0.130919
| 0.130919
| 0
| 0.048235
| 0.261512
| 1,151
| 39
| 175
| 29.512821
| 0.796471
| 0.482189
| 0
| 0.545455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0
| 0
| 0.181818
| 0.181818
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
66012301064b5709a93a6fcc63e250bae442c6d6
| 1,804
|
py
|
Python
|
main.py
|
PortosKo/Python_Lesson_2
|
160c569f17d21cc1f2e48227b526a49594e90d59
|
[
"MIT"
] | null | null | null |
main.py
|
PortosKo/Python_Lesson_2
|
160c569f17d21cc1f2e48227b526a49594e90d59
|
[
"MIT"
] | null | null | null |
main.py
|
PortosKo/Python_Lesson_2
|
160c569f17d21cc1f2e48227b526a49594e90d59
|
[
"MIT"
] | null | null | null |
# Exercises on loops and the conditional operator ------
# ----------------------------------------
'''
Task 1
Using a loop, print five lines of zeros, each line numbered.
'''
print('Task 1')
for x in range(1, 6):
    print(x, 0)
'''
Task 2
The user enters 10 digits in a loop. Count how many of the entered digits are 5.
'''
print('Task 2')
# Note: this version counts the fives in a hard-coded number rather than reading input.
x = 98535254155
count = 0
while x > 0:  # loop until every digit, including the leading one, has been checked
    if x % 10 == 5:
        count += 1
    x = x // 10
print(count)
'''
Task 3
Find the sum of the numbers from 1 to 100 and print the result.
'''
print('Task 3')
total = 0  # `total` avoids shadowing the built-in sum()
for i in range(1, 101):
    total += i
print(total)
'''
Task 4
Find the product of the numbers from 1 to 10 and print the result.
'''
print('Task 4')
mult = 1
for x in range(1, 11):  # range(1, 11) covers 1 through 10 inclusive
    mult *= x
print(mult)
'''
Task 5
Print each digit of a number on its own line.
'''
print('Task 5')
integer_number = 2129
# Digits are printed from least to most significant.
while integer_number > 0:
    print(integer_number % 10)
    integer_number = integer_number // 10
'''
Task 6
Find the sum of a number's digits.
'''
print('Task 6')
num = int(input('Enter a number: '))
total = 0
while num:
    total = total + num % 10
    num = num // 10
print('Digit sum =', total)
'''
Task 7
Find the product of a number's digits.
'''
print('Task 7')
num = int(input('Enter a number: '))
mult = 1
while num:
    mult = mult * (num % 10)
    num = num // 10
print('Digit product =', mult)
'''
Task 8
Answer the question: does the number contain the digit 5?
'''
print('Task 8')
integer_number = 213413
while integer_number > 0:
    if integer_number % 10 == 5:
        print('Yes')
        break
    integer_number = integer_number // 10
else:
    print('No')
'''
Task 9
Find the largest digit in a number.
'''
'''
Task 10
Count the occurrences of the digit 5 in a number.
'''
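Tasks 9 and 10 are left unsolved in the original file; the sketches below are one possible completion using the same digit-extraction pattern as the earlier tasks (the sample numbers are arbitrary):
print('Task 9')
num = 273945
largest = 0
while num > 0:
    largest = max(largest, num % 10)  # keep the biggest digit seen so far
    num //= 10
print('Largest digit =', largest)
print('Task 10')
num = 98535254155
count = 0
while num > 0:
    if num % 10 == 5:
        count += 1
    num //= 10
print('Count of fives =', count)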
| 15.288136
| 92
| 0.636364
| 265
| 1,804
| 4.286792
| 0.362264
| 0.137324
| 0.079225
| 0.019366
| 0.28081
| 0.15669
| 0
| 0
| 0
| 0
| 0
| 0.06882
| 0.210643
| 1,804
| 118
| 93
| 15.288136
| 0.728933
| 0.10255
| 0
| 0.25
| 0
| 0
| 0.11332
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.354167
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
660235efa168bc77f41850e9696ac1ce83979716
| 5,062
|
py
|
Python
|
config_loader/loader.py
|
egabancho/config-loader
|
af45c7bef3afe4dee930754386a1763a28574d6c
|
[
"MIT"
] | null | null | null |
config_loader/loader.py
|
egabancho/config-loader
|
af45c7bef3afe4dee930754386a1763a28574d6c
|
[
"MIT"
] | null | null | null |
config_loader/loader.py
|
egabancho/config-loader
|
af45c7bef3afe4dee930754386a1763a28574d6c
|
[
"MIT"
] | null | null | null |
"""Configuration loader class."""
import ast
import logging
import os
import types
from operator import attrgetter
import pkg_resources
logger = logging.getLogger(__name__)
class Config(object):
"""Configuration loader, it's like a normal dictionary with super-powers.
It will load configuration in the following order:
1. Load configuration from ``config_loader.module`` entry points
group, following the alphabetical ascending order in case of
multiple entry points defined.
2. Load from file path, if provided via environment variable.
3. Load from keyword arguments when provided.
4. Load configuration from environment variables with the prefix
``env_prefix``.
    Once the object is created it can be updated like a normal dictionary or by
    using any of the ``from_`` methods provided.
:param env_var: Name of an environment variable pointing to a configuration
file.
:param env_prefix: Environment variable prefix, it will iterate over all
environment variables and load the ones matching the prefix.
:param entry_point_name: Name of the entry point used to add configuration
files from outside modules.
:param kwargs_config: Dictionary with ad-hoc configuration variables.
"""
def __init__(
self,
env_var='CONFIG_SETTINGS',
env_prefix='CONFIG_',
entry_point_name='config_loader.module',
**kwargs_config
):
"""Initialize new configuration loader instance."""
self._internal_config = None
self.env_var = env_var
self.env_prefix = env_prefix
self.entry_point_name = entry_point_name
self.extra_config = kwargs_config
@property
def _config(self):
"""Hide internal configuration for lazy loading."""
if self._internal_config is None:
self._internal_config = dict()
self.build()
return self._internal_config
def __getattr__(self, name):
"""Fallback to the internal dictionary if attr not found."""
return getattr(self._config, name)
def __repr__(self):
"""Get repr from the internal dictionary."""
return self._config.__repr__()
def __getitem__(self, key):
"""Allow for square bracket notation."""
return self._config.__getitem__(key)
def __setitem__(self, key, value):
"""Allow for square bracket notation."""
        return self._config.__setitem__(key, value)
def build(self):
"""Build internal configuration."""
self.from_entry_point(self.entry_point_name)
self.from_envvar(self.env_var)
self._config.update(self.extra_config)
self.from_env(self.env_prefix)
def from_entry_point(self, entry_point_name):
"""Update values from module defined by entry point.
Configurations are loaded in alphabetical ascending order.
:param entry_point_name: The name of the entry point.
"""
eps = sorted(
pkg_resources.iter_entry_points(entry_point_name),
key=attrgetter('name'),
)
for ep in eps:
self.from_object(ep.load())
def from_envvar(self, variable_name):
"""Update values from an env variable pointing to a configuration file.
:param variable_name: The name of the environment variable.
"""
filename = os.environ.get(variable_name, None)
if filename:
self.from_pyfile(filename)
else:
logger.debug('Cannot find env file')
def from_pyfile(self, filename):
"""Update the values in the config from a Python file.
:param filename: The filename of the config.
"""
if not os.path.exists(filename):
            logger.warning('File %s does not exist', filename)
return
d = types.ModuleType('config')
d.__file__ = filename
with open(filename, mode='rb') as config_file:
exec(compile(config_file.read(), filename, 'exec'), d.__dict__)
self.from_object(d)
def from_object(self, obj):
"""Update the values from the given object.
:param obj: An object to import cfg values from.
"""
for key in dir(obj):
if key.isupper():
self._config[key] = getattr(obj, key)
def from_env(self, prefix):
"""Load configuration from environment variables.
:param prefix: The prefix used to filter the environment variables.
"""
prefix_len = len(prefix)
for varname, value in os.environ.items():
if not varname.startswith(prefix):
continue
# Prepare values
varname = varname[prefix_len:]
value = value or self.get(varname)
# Evaluate value
try:
value = ast.literal_eval(value)
except (SyntaxError, ValueError):
pass
# Set value
self._config[varname] = value
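A minimal usage sketch follows, assuming the class is importable from the package as laid out above; the variable names are made up for illustration:
import os
from config_loader.loader import Config  # import path assumed from the file layout

os.environ['CONFIG_DEBUG'] = 'True'  # picked up via the default CONFIG_ prefix
os.environ['CONFIG_PORT'] = '8080'

config = Config(MY_DEFAULT='fallback')  # ad-hoc kwargs are merged in as well
print(config['DEBUG'])       # True  (ast.literal_eval parses the string)
print(config['PORT'])        # 8080
print(config['MY_DEFAULT'])  # 'fallback'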
| 32.87013
| 79
| 0.633544
| 610
| 5,062
| 5.060656
| 0.286885
| 0.042112
| 0.036281
| 0.017493
| 0.122773
| 0.07645
| 0.07645
| 0.029155
| 0
| 0
| 0
| 0.001106
| 0.285263
| 5,062
| 153
| 80
| 33.084967
| 0.852128
| 0.391545
| 0
| 0
| 0
| 0
| 0.035551
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.155844
| false
| 0.012987
| 0.077922
| 0
| 0.324675
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
66055a1c87077054947e4816c06f4763187ad5d0
| 3,125
|
py
|
Python
|
draugr/visualisation/seaborn_utilities/seaborn_enums.py
|
cnHeider/draugr
|
b95e0bb1fa5efa581bfb28ff604f296ed2e6b7d6
|
[
"Apache-2.0"
] | 3
|
2019-09-27T08:04:59.000Z
|
2020-12-02T06:14:45.000Z
|
draugr/visualisation/seaborn_utilities/seaborn_enums.py
|
cnHeider/draugr
|
b95e0bb1fa5efa581bfb28ff604f296ed2e6b7d6
|
[
"Apache-2.0"
] | 64
|
2019-09-27T08:03:42.000Z
|
2022-03-28T15:07:30.000Z
|
draugr/visualisation/seaborn_utilities/seaborn_enums.py
|
cnHeider/draugr
|
b95e0bb1fa5efa581bfb28ff604f296ed2e6b7d6
|
[
"Apache-2.0"
] | 1
|
2020-10-01T00:18:57.000Z
|
2020-10-01T00:18:57.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Christian Heider Nielsen"
__doc__ = r"""
Created on 26-01-2021
"""
from enum import Enum
from typing import Tuple
import numpy
from matplotlib import patheffects, pyplot
__all__ = ["plot_median_labels", "show_values_on_bars"]
from draugr.visualisation.matplotlib_utilities.styles.annotation import (
semi_opaque_round_tight_bbox,
)
class MatplotlibHorizontalAlignment(Enum):
Center = "center"
Right = "right"
Left = "left"
class MatplotlibVerticalAlignment(Enum):
Center = "center"
Top = "top"
Bottom = "bottom"
Baseline = "baseline"
CenterBaseline = "center_baseline"
def plot_median_labels(
ax: pyplot.Axes,
*,
has_fliers: bool = False,
# text_size: int = 10,
# text_weight: str = "normal",
stroke_width: int = 0,
precision: int = 3,
color: str = "black",
edgecolor: str = "black", # also the stroke color
ha: str = "center",
va: str = "center", # bottom
bbox: Tuple = semi_opaque_round_tight_bbox,
) -> None:
""" """
lines = ax.get_lines()
# depending on fliers, toggle between 5 and 6 lines per box
lines_per_box = 5 + int(has_fliers)
# iterate directly over all median lines, with an interval of lines_per_box
# this enables labeling of grouped data without relying on tick positions
for median_line in lines[4 : len(lines) : lines_per_box]:
# get center of median line
        # use the public Line2D accessors instead of the private _x/_y attributes
        mean_x = sum(median_line.get_xdata()) / len(median_line.get_xdata())
        mean_y = sum(median_line.get_ydata()) / len(median_line.get_ydata())
text = ax.text(
mean_x,
mean_y,
f"{round(mean_y, precision)}",
ha=ha,
va=va,
# fontweight=text_weight,
# size=text_size,
color=color,
# edgecolor=edgecolor
bbox=bbox,
) # print text to center coordinates
if stroke_width:
# create small black border around white text
# for better readability on multi-colored boxes
text.set_path_effects(
[
patheffects.Stroke(linewidth=stroke_width, foreground=edgecolor),
patheffects.Normal(),
]
)
def show_values_on_bars(axs: pyplot.Axes, h_v: str = "v", space: float = 0.4) -> None:
""" """
def _show_on_single_plot(ax):
if h_v == "v":
for p in ax.patches:
_x = p.get_x() + p.get_width() / 2
_y = p.get_y() + p.get_height()
value = int(p.get_height())
ax.text(_x, _y, value, ha="center")
elif h_v == "h":
for p in ax.patches:
_x = p.get_x() + p.get_width() + float(space)
_y = p.get_y() + p.get_height()
value = int(p.get_width())
ax.text(_x, _y, value, ha="left")
if isinstance(axs, numpy.ndarray):
for idx, ax in numpy.ndenumerate(axs):
_show_on_single_plot(ax)
else:
_show_on_single_plot(axs)
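A minimal usage sketch with synthetic data (assumes draugr and seaborn are installed; the data is purely illustrative):
import numpy
import seaborn
from matplotlib import pyplot

data = numpy.random.default_rng(0).normal(size=(100, 3))  # synthetic data
ax = seaborn.boxplot(data=data)
plot_median_labels(ax, has_fliers=True)  # default boxplots draw fliers
pyplot.show()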
| 28.935185
| 86
| 0.57632
| 392
| 3,125
| 4.346939
| 0.392857
| 0.023474
| 0.025822
| 0.028169
| 0.134977
| 0.085681
| 0.068075
| 0.068075
| 0.068075
| 0.068075
| 0
| 0.009809
| 0.31488
| 3,125
| 107
| 87
| 29.205607
| 0.786081
| 0.1712
| 0
| 0.081081
| 0
| 0
| 0.086328
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040541
| false
| 0
| 0.067568
| 0
| 0.243243
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
66085420dcc9a5728829c81982cb5b8af048ece5
| 1,061
|
py
|
Python
|
py/callback_dashboard.py
|
pnvnd/plotly
|
ede0bb0bb92484c2e3bf4e3631fa97f547e02c16
|
[
"Unlicense"
] | null | null | null |
py/callback_dashboard.py
|
pnvnd/plotly
|
ede0bb0bb92484c2e3bf4e3631fa97f547e02c16
|
[
"Unlicense"
] | null | null | null |
py/callback_dashboard.py
|
pnvnd/plotly
|
ede0bb0bb92484c2e3bf4e3631fa97f547e02c16
|
[
"Unlicense"
] | 1
|
2022-01-22T17:19:25.000Z
|
2022-01-22T17:19:25.000Z
|
from dash import Dash, dcc, html
from dash.dependencies import Input, Output
import plotly.graph_objs as go
import pandas as pd
url = 'csv/covidtesting.csv'
df = pd.read_csv(url)
app = Dash(__name__)
# list = df.columns[1:]
# filter_options = []
# for option in list:
# filter_options.append({'label': str(option), 'value': option})
app.layout = html.Div([
dcc.Graph(id='graphs'),
dcc.Dropdown(
id='option-picker',
options=[{"label": x, "value": x} for x in df.columns[1:]],
value=df.columns[1]
)
])
@app.callback(
Output('graphs', 'figure'),
[Input('option-picker', 'value')])
def update_figure(selected_option):
# fig = px.line(df, x='Reported Date', y=selected_option)
# return fig
return {
'data': [go.Scatter(x=df['Reported Date'], y=df[selected_option], mode='lines')],
'layout': go.Layout(
title='COVID Data',
xaxis={'title': 'Date'},
yaxis={'title': 'Number of Cases'}
)
}
if __name__ == '__main__':
app.run_server()
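Assuming csv/covidtesting.csv exists relative to the working directory, running this script starts the Dash development server, which serves the dashboard at http://127.0.0.1:8050 by default.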
| 24.113636
| 89
| 0.597549
| 140
| 1,061
| 4.407143
| 0.485714
| 0.04376
| 0.048622
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003667
| 0.229029
| 1,061
| 44
| 90
| 24.113636
| 0.750611
| 0.183789
| 0
| 0
| 0
| 0
| 0.17907
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.137931
| 0.034483
| 0.206897
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6608592b97fcda5eb8dc5fb8cb22369434750380
| 4,288
|
py
|
Python
|
appface.py
|
iljoong/FaceTag
|
c43fce89c92ce6de2f397580d80aa834d3e2dbb6
|
[
"MIT"
] | 2
|
2021-11-12T15:30:55.000Z
|
2021-11-14T13:53:13.000Z
|
appface.py
|
iljoong/FaceTag
|
c43fce89c92ce6de2f397580d80aa834d3e2dbb6
|
[
"MIT"
] | 1
|
2018-07-31T08:30:33.000Z
|
2018-08-01T04:44:52.000Z
|
appface.py
|
iljoong/FaceTag
|
c43fce89c92ce6de2f397580d80aa834d3e2dbb6
|
[
"MIT"
] | 1
|
2021-11-12T15:31:00.000Z
|
2021-11-12T15:31:00.000Z
|
###############################################################################################
from keras.models import Model, load_model
from PIL import Image
import numpy as np
import time
import cv2
import os
import logging
import pymongo
#import dlib
import requests
import appconfig
import json
cascade = cv2.CascadeClassifier('./face/haarcascade_frontalface_default.xml')
eyeCascade = cv2.CascadeClassifier('./face/haarcascade_eye.xml')
img_size = 200
labels = []
def loadModel():
global labels
try:
modelpath = os.environ.get('MODELPATH')
logging.debug("modelpath = %s" % modelpath)
        if modelpath:  # covers both None and the empty string
model = load_model(modelpath)
modeltags = os.environ.get('MODELTAGS', 'tag1;tag2;tag3;tag4;tag5;tag6;tag7;tag8;tag9;tag10')
logging.debug("modeltags = %s" % modeltags)
labels = modeltags.split(';')
else:
model = None
except Exception as e:
raise e
return model
def loadCollection():
# mongodb
mongouri = os.environ.get('MONGOURI', 'mongodb://localhost:27017')
mongodb = os.environ.get('MONGODB', 'facedb')
mongocoll = os.environ.get('MONGOCOLL', 'face')
logging.debug("env: {}, {}, {}".format(mongouri, mongodb, mongocoll))
try:
conn = pymongo.MongoClient(mongouri)
#conn = pymongo.MongoClient(mongoip, 27017)
db = conn.get_database(mongodb)
except Exception as e:
raise e
return db.get_collection(mongocoll)
def detectFaceCV(gray):
start_time = time.time()
faces = []
try:
rects = cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30),flags=cv2.CASCADE_SCALE_IMAGE)
for rect in rects:
(x, y, w, h) = rect
roi = gray[y:y+h, x:x+w]
eyes = eyeCascade.detectMultiScale(roi)
if len(eyes):
faces.append(rect)
except Exception as e:
print(e)
return faces, time.time() - start_time
"""
hog_face_detector = dlib.get_frontal_face_detector()
def detectFaceHog(gray):
start_time = time.time()
rects = []
try:
rects = hog_face_detector(gray, 1)
faces = [ [rect.left(), rect.top(), rect.right()-rect.left(), rect.bottom()-rect.top()] for rect in rects ]
except Exception as e:
print(e)
return faces, time.time() - start_time
cnn_face_detector = dlib.cnn_face_detection_model_v1("../face/mmod_human_face_detector.dat")
def detectFaceCNN(gray):
start_time = time.time()
rects = []
try:
rects = cnn_face_detector(gray, 1)
faces = [ [rect.rect.left(), rect.rect.top(), rect.rect.right()-rect.rect.left(), rect.rect.bottom()-rect.rect.top()] for rect in rects ]
except Exception as e:
print(e)
return faces, time.time() - start_time
"""
def classifyFace(model, frame):
global labels
    if model is None:
        return ("none", 0.0)
img = cv2.resize(frame, (img_size, img_size), interpolation = cv2.INTER_AREA)
x = np.expand_dims(img, axis=0)
x = x.astype(float)
x /= 255.
start_time = time.time()
classes = model.predict(x)
result = np.squeeze(classes)
result_indices = np.argmax(result)
logging.debug("classify time: {:.2f} sec".format(time.time() - start_time))
return labels[result_indices], result[result_indices]*100
def classifyFaceCV(model, frame):
_, roi = cv2.imencode('.png', frame)
start_time = time.time()
apiurl = 'https://southcentralus.api.cognitive.microsoft.com/customvision/v2.0/Prediction/%s/image?iterationId=%s'
headers = {"Content-Type": "application/octet-stream", "Prediction-Key": appconfig.api_key }
    r = requests.post(apiurl % (appconfig.api_id, appconfig.api_iter), headers=headers, data=roi.tobytes())  # raw encoded-image bytes; tostring() is removed in newer NumPy
if (r.status_code == 200):
# JSON parse
pred = json.loads(r.content.decode("utf-8"))
conf = float(pred['predictions'][0]['probability'])
label = pred['predictions'][0]['tagName']
logging.debug("classify time: {:.2f} sec".format(time.time() - start_time))
return label, conf*100
else:
return "none", 0.0
| 28.586667
| 145
| 0.614039
| 522
| 4,288
| 4.948276
| 0.350575
| 0.046458
| 0.023229
| 0.034843
| 0.213318
| 0.19125
| 0.171119
| 0.14789
| 0.121564
| 0.121564
| 0
| 0.019673
| 0.229478
| 4,288
| 149
| 146
| 28.778523
| 0.762107
| 0.016791
| 0
| 0.204819
| 0
| 0.012048
| 0.153633
| 0.051416
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060241
| false
| 0
| 0.13253
| 0
| 0.277108
| 0.012048
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|