| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
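
The rows below follow this schema, with the file text in `content` and the numeric quality signals alongside it. As a minimal sketch of how such a table might be queried (assuming it has been exported to Parquet; the file name `code_quality_signals.parquet` is a placeholder and not part of this dataset description), one could filter on a few of the quality-signal columns with pandas:

```python
# Minimal sketch, not the dataset's official loader.
# Assumption: the table has been exported to a Parquet file;
# "code_quality_signals.parquet" is a placeholder name.
import pandas as pd

df = pd.read_parquet("code_quality_signals.parquet")

# Keep Python files that are not dominated by duplicated 10-grams or comments.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.2)
    & (df["qsc_code_frac_chars_comments_quality_signal"] < 0.5)
)
filtered = df[mask]

# Inspect one surviving record: repo metadata plus the first lines of its source.
row = filtered.iloc[0]
print(row["hexsha"], row["max_stars_repo_name"], row["max_stars_count"])
print("\n".join(row["content"].splitlines()[:5]))
```

Only the column names above come from the schema; the file format and name are assumptions.
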
hexsha: 005275ffc230ec92bee5ce893b5981ac62fa9054 | size: 3,293 | ext: py | lang: Python
max_stars_repo: dsoper2/ucsm-ansible-1, path library/ucs_vlan_find.py, head 9eef898d016e8e2c5f07990385ec1b8885edc533, licenses ["NTP"] | max_stars_count: 54 | stars events: 2017-05-30T19:22:23.000Z to 2021-12-17T07:36:55.000Z
max_issues_repo: same repo, path, and head | licenses ["NTP"] | max_issues_count: 50 | issues events: 2017-05-10T07:37:09.000Z to 2021-07-26T18:23:30.000Z
max_forks_repo: same repo, path, and head | licenses ["NTP"] | max_forks_count: 54 | forks events: 2017-05-08T05:31:23.000Z to 2021-11-16T09:34:32.000Z
content:
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: ucs_vlan_find
short_description: Find VLANs on Cisco UCS Manager
description:
- Find VLANs on Cisco UCS Manager based on different criteria.
extends_documentation_fragment: ucs
options:
pattern:
description:
- Regex pattern to find within the name property of the fabricVlan class.
- This is required if C(vlanid) parameter is not supplied.
type: str
fabric:
description:
- "The fabric configuration of the VLAN. This can be one of the following:"
- "common - The VLAN applies to both fabrics and uses the same configuration parameters in both cases."
- "A — The VLAN only applies to fabric A."
- "B — The VLAN only applies to fabric B."
choices: [common, A, B]
default: common
type: str
vlanid:
description:
- The unique string identifier assigned to the VLAN.
- A VLAN ID can be between '1' and '3967', or between '4048' and '4093'.
- This is required if C(pattern) parameter is not supplied.
type: str
requirements:
- ucsmsdk
author:
- David Martinez (@dx0xm)
- CiscoUcs (@CiscoUcs)
version_added: '2.9'
'''
EXAMPLES = r'''
- name: Get all vlans in fabric A
ucs_vlan_find:
hostname: 172.16.143.150
username: admin
password: password
fabric: 'A'
pattern: '.'
- name: Confirm if vlan 15 is present
ucs_vlan_find:
hostname: 172.16.143.150
username: admin
password: password
vlanid: '15'
'''
RETURN = r'''
vlan_list:
description: basic details of vlans found
returned: on success
type: list
sample: [
{
"id": "0",
"name": "vlcloud1"
}
]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.remote_management.ucs import UCSModule, ucs_argument_spec
def main():
argument_spec = ucs_argument_spec
argument_spec.update(
fabric=dict(type='str', default='common', choices=['common', 'A', 'B']),
pattern=dict(type='str'),
vlanid=dict(type='str')
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=[['pattern', 'vlanid']]
)
ucs = UCSModule(module)
filtls = ['(cloud,"ethlan")']
if module.params['fabric'] != 'common':
filtls.append('(switch_id,"' + module.params['fabric'] + '")')
if module.params['vlanid']:
filtls.append('(id,"' + module.params['vlanid'] + '")')
else:
filtls.append('(name,"' + module.params['pattern'] + '")')
object_dict = ucs.login_handle.query_classid("fabricVlan", filter_str=' and '.join(filtls))
if object_dict is None:
module.fail_json(msg="Failed to query vlan objects")
vlnlist = []
for ob in object_dict:
vlnlist.append(dict(name=ob.name, id=ob.id))
module.exit_json(changed=False,
vlan_list=vlnlist)
if __name__ == '__main__':
main()
avg_line_length: 26.991803 (the values below continue in schema order, from max_line_length through qsc_codepython_frac_lines_print)
| 107
| 0.64379
| 422
| 3,293
| 4.893365
| 0.433649
| 0.020339
| 0.015981
| 0.021308
| 0.16368
| 0.147216
| 0.119128
| 0.057143
| 0.057143
| 0.057143
| 0
| 0.020166
| 0.232007
| 3,293
| 121
| 108
| 27.214876
| 0.795571
| 0.039174
| 0
| 0.181818
| 0
| 0.010101
| 0.573101
| 0.00981
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010101
| false
| 0.020202
| 0.030303
| 0
| 0.040404
| 0.010101
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
effective: 1 | hits: 0

hexsha: cc37b7d3f91a095814758d351ed6ae362f360126 | size: 5,934 | ext: py | lang: Python
max_stars_repo: camerongivler/bitcoin-utils, path arbiter.py, head 86e7ba95ef016b24c33fe819a9879b387f7f70f9, licenses ["MIT"] | max_stars_count: null | stars events: null
max_issues_repo: same repo, path, and head | licenses ["MIT"] | max_issues_count: null | issues events: null
max_forks_repo: same repo, path, and head | licenses ["MIT"] | max_forks_count: null | forks events: null
content:
#!/usr/bin/env python3
import os
import sys
import time
import traceback
from itertools import combinations
from multiprocessing.pool import ThreadPool
from exchangebase import ExchangeBase
from exchangepair import ExchangePair
from gdaxapi import Gdax
from krakenapi import Kraken
class Arbiter:
def __init__(self):
# Set up 'exchanges' dictionary to hold all of the exchanges
self.exchanges = {"kraken": Kraken(), "gdax": Gdax()}
# exchanges["gemini"] = Gemini()
self.cutoff = 0.1 # %gain on the trade
self.exchangePairs = []
for exchange in combinations(self.exchanges.values(), 2): # 2 for pairs, 3 for triplets, etc
self.exchangePairs.append(ExchangePair(self.cutoff, exchange[0], exchange[1]))
self.arbitrar = "USD"
self.lastKey = ""
for exchange in self.exchanges.values():
exchange.set_arbitrar(self.arbitrar)
if exchange.valueWallet.currency != self.arbitrar:
self.lastKey = exchange.valueWallet.currency
self.trades = []
# First trade loses money, but gets the ball rolling
self.totalGain = 1
self.pool = ThreadPool(processes=2)
def run(self):
os.system('clear')
# always print out how much money there is each wallet that has money
for exchName, exchange in self.exchanges.items():
print(exchName)
for walletName, wallet in exchange.wallets.items():
if wallet.amount > 0:
print(wallet.currency, ":", round(wallet.amount, 5))
print()
for exchange in self.exchangePairs: # 2 for pairs, 3 for triplets, etc
# Check to make sure exactly one has USD
arbitrar_exchange = 0
if exchange[0].valueWallet.currency == self.arbitrar:
arbitrar_exchange = 1
if exchange[1].valueWallet.currency == self.arbitrar:
arbitrar_exchange += 2
if arbitrar_exchange == 0 or arbitrar_exchange == 3:
continue
i = 1
try:
diffp = exchange.get_diff(self.lastKey)
last = exchange.last
goal = 0
if arbitrar_exchange == 1:
# goal = exchange.runningAverages[lastKey] + cutoff/2
goal = self.cutoff / 2
# goal = last + cutoff if last + cutoff > minimum else minimum
print("goal : >" + str("%.3f" % goal) + "%")
if arbitrar_exchange == 2:
# goal = exchange.runningAverages[lastKey] - cutoff/2
goal = -self.cutoff / 2
# goal = last - cutoff if last - cutoff < maximum else maximum
print("goal : <" + str("%.3f" % goal) + "%")
print()
if diffp >= goal and arbitrar_exchange == 1 \
or diffp <= goal and arbitrar_exchange == 2:
sell_exchange = 1 if arbitrar_exchange == 1 else 0
buy_exchange = 0 if arbitrar_exchange == 1 else 1
# buy_symbol, buy_rate, lastKey = exchange.buy(buy_exchange)
# Do the buys and sells asynchronously
async_sell = self.pool.apply_async(ExchangeBase.sell, (exchange[sell_exchange],))
async_buy = self.pool.apply_async(ExchangeBase.buy, (exchange[buy_exchange], self.lastKey))
buy_symbol, buy_rate = async_buy.get()
sell_symbol, sell_rate = async_sell.get()
exchange.last = diffp
total_value = exchange[buy_exchange].get_value() + exchange[sell_exchange].get_value()
# last = difference between exchanges on last trade
real_diff = exchange.last - last
# divide by 2 bc we only make money on money in crypto,
# then again because we only make money in 1 direction (pos or neg)
real_gain = (sell_rate / buy_rate - 1) / 2 * 100
self.totalGain *= 1 + real_gain / 100
localtime = time.asctime(time.localtime(time.time()))
self.trades.append(
"Sold " + sell_symbol + " at " + str(sell_rate) + " on " + exchange[sell_exchange].get_name()
+ "; Bought " + buy_symbol + " at " + str(buy_rate) + " on " + exchange[
buy_exchange].get_name()
+ "; diff: " + str("%.3f" % exchange.last) + "%; gain: " + str("%.3f" % real_diff) + "%"
+ "\n\tReal Gain: " + str("%.3f" % real_gain) + "%; Total (multiplier): "
+ str("%.6f" % self.totalGain) + "; time: " + localtime
+ "\n\t\tTotal Value of portfolio: " + str(total_value))
for trade in self.trades:
print(trade)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
localtime = time.asctime(time.localtime(time.time()))
self.trades.append("Unexpected " + exc_type.__name__ +
" at " + fname + ":" + str(exc_tb.tb_lineno) +
" on " + localtime + ": \"" + str(e) + "\"")
print(self.trades[-1])
print(traceback.format_exc())
time.sleep(max(2 * i, 2))
# So we don't get rate limited by exchanges
time.sleep(max(2 * i, 2))
if __name__ == "__main__":
arbiter = Arbiter()
# Infinite loop
while True:
try:
arbiter.run()
except KeyboardInterrupt:
print("Goodbye.")
break
avg_line_length: 41.496503 (the values below continue in schema order, from max_line_length through qsc_codepython_frac_lines_print)
| 117
| 0.531345
| 638
| 5,934
| 4.818182
| 0.289969
| 0.057254
| 0.027651
| 0.030254
| 0.217632
| 0.146389
| 0.106051
| 0.090436
| 0.090436
| 0.090436
| 0
| 0.015957
| 0.366363
| 5,934
| 142
| 118
| 41.788732
| 0.801596
| 0.152005
| 0
| 0.082474
| 0
| 0
| 0.047486
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020619
| false
| 0
| 0.103093
| 0
| 0.134021
| 0.103093
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
effective: 1 | hits: 0

hexsha: cc3a46291852e6e6df74967b9e59f3edcd3b7c15 | size: 2,289 | ext: py | lang: Python
max_stars_repo: allisonmorgan/Dallinger, path demos/dlgr/demos/rumour_stuff_chain/extend_hit.py, head f171e28c352854a3c6ed6b21f25362cd933b17dc, licenses ["MIT"] | max_stars_count: 1 | stars events: 2019-08-01T16:15:44.000Z to 2019-08-01T16:15:44.000Z
max_issues_repo: same repo, path, and head | licenses ["MIT"] | max_issues_count: null | issues events: null
max_forks_repo: same repo, path, and head | licenses ["MIT"] | max_forks_count: null | forks events: null
content:
import boto3
import sys
import numpy as np
import random
import time
# endpoint_url = 'https://mturk-requester-sandbox.us-east-1.amazonaws.com'
endpoint_url = 'https://mturk-requester.us-east-1.amazonaws.com'
##Remember to put the AWS credential in ~/.aws/credential like below:
#######
#[default]
#aws_access_key_id = XXXXX
#aws_secret_access_key = XXXXXX
client = boto3.client('mturk',endpoint_url = endpoint_url,region_name='us-east-1')
#Number of participants
number_participants=int(sys.argv[1])
result_hits= client.list_hits()
number_of_parallel_hits=len(result_hits['HITs'])
vector_completed_experiments = np.zeros(number_of_parallel_hits)
# Check that all the experiments have been completed
while np.mean(vector_completed_experiments) != number_participants:
result_hits= client.list_hits()
number_of_parallel_hits=len(result_hits['HITs'])
vector_completed_experiments=np.zeros(number_of_parallel_hits)
for i in range(number_of_parallel_hits):
hits_completed=int(result_hits['HITs'][i]['NumberOfAssignmentsCompleted'])
vector_completed_experiments[i]=hits_completed
if hits_completed != number_participants:
##Checking if it is necessary to extend the HIT (Available and Pending HIT should be set to zero for extending)
if int(result_hits['HITs'][i]['NumberOfAssignmentsAvailable']) == 0 and int(result_hits['HITs'][i]['NumberOfAssignmentsPending']) == 0:
#There is a little bit of lag when checking whether the HIT has been completed, waiting 30 second to avoid this issue
time.sleep(30)
result_hits= client.list_hits()
hits_completed=int(result_hits['HITs'][i]['NumberOfAssignmentsCompleted'])
if hits_completed < number_participants and hits_completed > 0:
hit = result_hits['HITs'][i]['HITId']
#The request token should always be unique for each additional assignment
request_token= 'Request_{}_{}_{}'.format(hit,random.randint(1,100000),hits_completed)
print("Extending the HIT for the following ID: {}".format(hit))
client.create_additional_assignments_for_hit(HITId = hit, NumberOfAdditionalAssignments=1, UniqueRequestToken=request_token)
#Sleep for 10 minutes..
print("Sleeping for 10 minutes...")
time.sleep(600)
print("Completed participants:",vector_completed_experiments)
avg_line_length: 48.702128 (the values below continue in schema order, from max_line_length through qsc_codepython_frac_lines_print)
| 138
| 0.769332
| 316
| 2,289
| 5.35443
| 0.370253
| 0.059102
| 0.05792
| 0.059102
| 0.339244
| 0.20922
| 0.20922
| 0.20922
| 0.20922
| 0.134752
| 0
| 0.01391
| 0.120577
| 2,289
| 46
| 139
| 49.76087
| 0.826627
| 0.261249
| 0
| 0.290323
| 0
| 0
| 0.186339
| 0.065908
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.16129
| 0
| 0.16129
| 0.096774
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
effective: 1 | hits: 0

hexsha: cc3bb136570430d6501e2f8f63227f6e1d115c10 | size: 6,018 | ext: py | lang: Python
max_stars_repo: rscalzo/sami, path tests/test_manager.py, head 7ac5632e018cdf2384f5ff067c503177684f61c8, licenses ["BSD-3-Clause"] | max_stars_count: 1 | stars events: 2021-12-07T08:30:38.000Z to 2021-12-07T08:30:38.000Z
max_issues_repo: same repo, path, and head | licenses ["BSD-3-Clause"] | max_issues_count: null | issues events: null
max_forks_repo: same repo, path, and head | licenses ["BSD-3-Clause"] | max_forks_count: 3 | forks events: 2021-02-15T19:51:59.000Z to 2021-05-04T05:48:46.000Z
content:
from __future__ import print_function
import pytest
from tempfile import mkdtemp
from glob import glob
import shutil
import fnmatch
import os
import os.path
import sami
TEST_DIR = os.path.join(os.path.split(__file__)[0], "test_data")
# Note: if the test data is changed, then these lists must be updated
# (too hard to automate!)
bias_files = ("22apr10035", "22apr10036", "22apr10037",
"22apr20035", "22apr20036", "22apr20037")
dark_files = ("22apr10001", "22apr10002", "22apr10003",
"22apr20001", "22apr20002", "22apr20003")
lflat_files = ("14apr10027", "22apr10088",
"14apr20027", "22apr20088")
tlm_files = ("22apr10074", "22apr20074")
flat_files = tlm_files
arc_files = ("22apr10075", "22apr20075")
obj_files = ("22apr10078", "22apr20078", "22apr10079", "22apr20079")
all_files = set(bias_files + dark_files + lflat_files + tlm_files + flat_files + arc_files + obj_files)
def find_files(path, pattern):
"""From:
http://stackoverflow.com/questions/2186525/use-a-glob-to-find-files-recursively-in-python
"""
matches = []
for root, dirnames, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, pattern):
matches.append(filename)
return matches
@pytest.fixture(scope='module')
def reduction_dir(request):
tmpdir = mkdtemp(prefix="sami_test")
print(tmpdir)
def fin():
shutil.rmtree(tmpdir)
pass
request.addfinalizer(fin)
return tmpdir
@pytest.mark.incremental
class TestSAMIManagerReduction:
@pytest.fixture
def sami_manager(self, reduction_dir):
mngr = sami.manager.Manager(reduction_dir + "/test/", fast=True, debug=True)
return mngr
def test_pytest_not_capturing_fds(self, pytestconfig):
# Note: pytest must be run in sys capture mode, instead of file descriptor capture mode
# otherwise calls to "aaorun" seem to fail. This next test ensures that is the case.
print("If this test fails, then you must run pytest with the option '--capture=sys'.")
assert pytestconfig.getoption("capture") == "sys"
def test_tests(self, sami_manager, raw_test_data, reduction_dir):
mngr = sami_manager
assert isinstance(mngr, sami.manager.Manager)
print(reduction_dir)
assert isinstance(reduction_dir, str)
# assert os.path.exists(reduction_dir + "/test")
def test_import_data(self, sami_manager, raw_test_data):
mngr = sami_manager # type: sami.Manager
mngr.import_dir(raw_test_data)
print(len(mngr.file_list))
print(len(all_files))
assert len(mngr.file_list) == len(all_files)
def test_reduce_bias(self, sami_manager, raw_test_data, reduction_dir):
mngr = sami_manager # type: sami.Manager
mngr.reduce_bias()
# Check that files actually generated
for base in bias_files:
assert base + "red.fits" in find_files(reduction_dir + "/test/reduced/bias", base + "*")
def test_combine_bias(self, sami_manager, raw_test_data, reduction_dir):
mngr = sami_manager # type: sami.Manager
mngr.combine_bias()
# Check that files actually generated
assert "BIAScombined.fits" in find_files(reduction_dir + "/test/reduced/bias/ccd_1", "*.fits")
assert "BIAScombined.fits" in find_files(reduction_dir + "/test/reduced/bias/ccd_2", "*.fits")
def test_reduce_dark(self, sami_manager, raw_test_data, reduction_dir):
mngr = sami_manager # type: sami.Manager
mngr.reduce_dark()
# Check that files actually generated
for base in dark_files:
assert base + "red.fits" in find_files(reduction_dir + "/test/reduced/dark", base + "*")
def test_combine_dark(self, sami_manager, raw_test_data, reduction_dir):
mngr = sami_manager # type: sami.Manager
mngr.combine_dark()
# Check that files actually generated
assert "DARKcombined1800.fits" in find_files(reduction_dir + "/test/reduced/dark/ccd_1", "*.fits")
assert "DARKcombined1800.fits" in find_files(reduction_dir + "/test/reduced/dark/ccd_2", "*.fits")
def test_reduce_lflat(self, sami_manager, raw_test_data, reduction_dir):
mngr = sami_manager # type: sami.Manager
mngr.reduce_lflat()
# Check that files actually generated
for base in lflat_files:
assert base + "red.fits" in find_files(reduction_dir + "/test/reduced/lflat", base + "*")
def test_combine_lflat(self, sami_manager, raw_test_data, reduction_dir):
mngr = sami_manager # type: sami.Manager
mngr.combine_lflat()
# Check that files actually generated
assert "LFLATcombined.fits" in find_files(reduction_dir + "/test/reduced/lflat/ccd_1", "*.fits")
assert "LFLATcombined.fits" in find_files(reduction_dir + "/test/reduced/lflat/ccd_2", "*.fits")
def test_make_tlm(self, sami_manager, raw_test_data, reduction_dir):
mngr = sami_manager
mngr.make_tlm()
# Check that files actually generated
for base in tlm_files:
assert base + "tlm.fits" in find_files(reduction_dir + "/test/", base + "*")
def test_reduce_arc(self, sami_manager, raw_test_data, reduction_dir):
mngr = sami_manager
mngr.reduce_arc()
# Check that files actually generated
for base in arc_files:
assert base + "red.fits" in find_files(reduction_dir + "/test/", base + "*")
def test_reduce_fflat(self, sami_manager, raw_test_data, reduction_dir):
mngr = sami_manager
mngr.reduce_fflat()
for base in flat_files:
assert base + "red.fits" in find_files(reduction_dir + "/test/", base + "*")
def test_reduce_object(self, sami_manager, raw_test_data, reduction_dir):
mngr = sami_manager
mngr.reduce_object()
for base in obj_files:
assert base + "red.fits" in find_files(reduction_dir + "/test/", base + "*")
avg_line_length: 38.088608 (the values below continue in schema order, from max_line_length through qsc_codepython_frac_lines_print)
| 106
| 0.674643
| 777
| 6,018
| 4.996139
| 0.214929
| 0.096342
| 0.061824
| 0.050232
| 0.529109
| 0.519062
| 0.472437
| 0.463679
| 0.412159
| 0.40881
| 0
| 0.040374
| 0.218013
| 6,018
| 157
| 107
| 38.33121
| 0.78453
| 0.137421
| 0
| 0.141509
| 0
| 0
| 0.153801
| 0.036462
| 0
| 0
| 0
| 0
| 0.160377
| 1
| 0.160377
| false
| 0.009434
| 0.103774
| 0
| 0.301887
| 0.056604
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
effective: 1 | hits: 0

hexsha: cc3c03312a697ba2de1761ab5b8ccb3cbedb8ca1 | size: 1,536 | ext: py | lang: Python
max_stars_repo: galarina1880/python_training, path test/test_add_contact.py, head f8fefdd484f4409cb6f43be1d791b50306e5bb2d, licenses ["Apache-2.0"] | max_stars_count: null | stars events: null
max_issues_repo: same repo, path, and head | licenses ["Apache-2.0"] | max_issues_count: null | issues events: null
max_forks_repo: same repo, path, and head | licenses ["Apache-2.0"] | max_forks_count: null | forks events: null
content:
# -*- coding: utf-8 -*-
from model.contact import Contact
def test_add_contact(app):
old_contacts = app.contact.get_contact_list()
contact = Contact(firstname='firstname', middlename='middlename', lastname='lastname', nickname='nick', title='title', company='company', address='address', home='home phone', mobile='mobile', work='work phone', fax='fax', email='email 1', email2='email 2', email3='email 3', homepage='homepage', bday='6', bmonth='August', byear='1980', aday='8', amonth='January', ayear='2000', address2='Address 2', phone2='phone 2', notes='notes')
app.contact.create(contact)
# print(new_contacts)
assert len(old_contacts) + 1 == app.contact.count()
new_contacts = app.contact.get_contact_list()
old_contacts.append(contact)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
# def test_add_empty_contact(app):
# old_contacts = app.contact.get_contact_list()
# contact = Contact(firstname='', middlename='', lastname='', nickname='', title='', company='', address='', home='', mobile='', work='', fax='', email='', email2='', email3='', homepage='', bday='', bmonth='-', byear='', aday='', amonth='-', ayear='', address2='', phone2='', notes='')
# app.contact.create(contact)
# new_contacts = app.contact.get_contact_list()
# assert len(old_contacts) + 1 == len(new_contacts)
# old_contacts.append(contact)
# assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
avg_line_length: 64 (the values below continue in schema order, from max_line_length through qsc_codepython_frac_lines_print)
| 438
| 0.684245
| 199
| 1,536
| 5.105528
| 0.311558
| 0.086614
| 0.070866
| 0.082677
| 0.492126
| 0.395669
| 0.395669
| 0.326772
| 0.326772
| 0.326772
| 0
| 0.019259
| 0.121094
| 1,536
| 23
| 439
| 66.782609
| 0.733333
| 0.445313
| 0
| 0
| 0
| 0
| 0.180737
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
effective: 1 | hits: 0

hexsha: cc3e5392334cf15bfc02e165a4492d59caf3fc7b | size: 1,585 | ext: py | lang: Python
max_stars_repo: MartyMacGyver/color-swatch-tool, path colorswatch-translator.py, head a1351e459a4bea4da2e3dfaf881221109595fa21, licenses ["Apache-2.0"] | max_stars_count: null | stars events: null
max_issues_repo: same repo, path, and head | licenses ["Apache-2.0"] | max_issues_count: null | issues events: null
max_forks_repo: same repo, path, and head | licenses ["Apache-2.0"] | max_forks_count: null | forks events: null
content:
#!/usr/bin/python
# Copyright (c) 2015-2017 Martin F. Falatic
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
"""
from __future__ import print_function
from collections import OrderedDict
import re
def TranslateColorTable(infile):
''' Usage: TranslateColorTable("tkcolors") '''
DofL = OrderedDict()
with open(infile) as f:
for line in f:
m = re.match(r"^(.+?)\t(.+?)\t(.+?)\t(.+?)$", line)
if m:
name = m.group(1)
red = int(m.group(2))
grn = int(m.group(3))
blu = int(m.group(4))
rgb = '{0:02X}{1:02X}{2:02X}'.format(red, grn, blu)
if rgb in DofL.keys():
DofL[rgb].append(name)
else:
DofL[rgb] = [name]
print('COLORS_DICT = OrderedDict([')
for d in DofL:
print(' (\'{0}\', {1}),'.format(d, repr(DofL[d])))
print('])')
if __name__ == "__main__":
TranslateColorTable("colors_tk.orig")
avg_line_length: 31.7 (the values below continue in schema order, from max_line_length through qsc_codepython_frac_lines_print)
| 77
| 0.565931
| 201
| 1,585
| 4.38806
| 0.557214
| 0.068027
| 0.030612
| 0.036281
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024434
| 0.302839
| 1,585
| 49
| 78
| 32.346939
| 0.773756
| 0.403785
| 0
| 0
| 0
| 0
| 0.130184
| 0.056452
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.125
| 0
| 0.166667
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
effective: 1 | hits: 0

hexsha: cc3ffb8f152bd3e99ff55c7878559254b86ef957 | size: 11,024 | ext: py | lang: Python
max_stars_repo: ebloc/ebloc-broker, path broker/test_setup/nas/run_jobs.py, head 776a8d9d4642ed1ba4726c94da68d61bd81c098b, licenses ["MIT"] | max_stars_count: 3 | stars events: 2021-12-11T19:26:57.000Z to 2021-12-30T00:17:23.000Z
max_issues_repo: same repo, path, and head | licenses ["MIT"] | max_issues_count: null | issues events: null
max_forks_repo: same repo, path, and head | licenses ["MIT"] | max_forks_count: 1 | forks events: 2021-09-18T11:38:07.000Z to 2021-09-18T11:38:07.000Z
content:
#!/usr/bin/env python3
import os.path
import random
from pathlib import Path
from random import randint
from pymongo import MongoClient
from web3.logs import DISCARD
from broker import cfg
from broker._utils import _log
from broker._utils._log import console_ruler
from broker._utils.tools import _time, _timestamp, countdown, is_process_on, log, run
from broker._utils.web3_tools import get_tx_status
from broker._utils.yaml import Yaml
from broker.libs.mongodb import BaseMongoClass
from broker.submit_base import SubmitBase
from broker.test_setup._users import users
from broker.utils import print_tb
yaml_files = ["job_nas.yaml"]
Ebb = cfg.Ebb
cfg.IS_FULL_TEST = True
_log.ll.LOG_FILENAME = Path.home() / ".ebloc-broker" / "test.log"
provider_addresses = [
"0x3e6FfC5EdE9ee6d782303B2dc5f13AFeEE277AeA",
"0x765508fc8f78a465f518ae79897d0e4b249e82dc",
"0x38cc03c7e2a7d2acce50045141633ecdcf477e9a",
"0xeab50158e8e51de21616307a99c9604c1c453a02",
]
def create_cppr_job_script():
"""Create cppr slurm job script to be submitted."""
registered_data_hashes_small = [
"b6aaf03752dc68d625fc57b451faa2bf",
"f1de03edab51f281815c3c1e5ecb88c6",
"082d2a71d86a64250f06be14c55ca27e",
"03919732a417cb1d14049844b9de0f47",
"983b9fe8a85b543dd5a4a75d031f1091",
"f71df9d36cd519d80a3302114779741d",
"c0fee5472f3c956ba759fd54f1fe843e",
"63ffd1da6122e3fe9f63b1e7fcac1ff5",
"9e8918ff9903e3314451bf2943296d31",
"eaf488aea87a13a0bea5b83a41f3d49a",
"e62593609805db0cd3a028194afb43b1",
"3b0f75445e662dc87e28d60a5b13cd43",
"ebe53bd498a9f6446cd77d9252a9847c",
"f82aa511f8631bfc9a82fe6fa30f4b52",
"761691119cedfb9836a78a08742b14cc",
"f93b9a9f63447e0e086322b8416d4a39",
]
registered_data_hashes_medium = [
"050e6cc8dd7e889bf7874689f1e1ead6",
"9d5d892a63b5758090258300a59eb389",
"779745f315060d1bc0cd44b7266fb4da",
"fe801973c5b22ef6861f2ea79dc1eb9c",
"0d6c3288ef71d89fb93734972d4eb903",
"4613abc322e8f2fdeae9a5dd10f17540",
"dd0fbccccf7a198681ab838c67b68fbf",
"45281dfec4618e5d20570812dea38760",
"fa64e96bcee96dbc480a1495bddbf53c",
"8f6faf6cfd245cae1b5feb11ae9eb3cf",
"1bfca57fe54bc46ba948023f754521d6",
]
hash_small_data = random.choice(registered_data_hashes_small)
hash_med_data = random.choice(registered_data_hashes_medium)
fn = Path.home() / "test_eblocbroker" / "run_cppr" / "run.sh"
f = open(fn, "w+")
f.write("#!/bin/bash\n")
f.write("#SBATCH -o slurm.out # STDOUT\n")
f.write("#SBATCH -e slurm.err # STDERR\n")
f.write("#SBATCH --mail-type=ALL\n\n")
f.write("export OMP_NUM_THREADS=1\n")
f.write("current_date=$(LANG=en_us_88591; date)\n")
f.write(f"DATA_HASH='{hash_small_data}'\n")
f.write("DATA1_DIR='../data_link/'$DATA_HASH'/'\n")
f.write("echo ' * '$current_date > output.log\n")
f.write("find $DATA1_DIR -name '*.max' -print0 | while read -d $'\\0' file\n")
f.write("do\n")
f.write(" echo $file >> output.log\n")
f.write(" (/usr/bin/time -v cppr -a pr $file) >> output.log 2>&1\n")
f.write("done\n")
f.write(f"DATA_HASH='{hash_med_data}'\n")
f.write("DATA2_DIR='../data_link/'$DATA_HASH'/'\n")
f.write("echo ' * '$current_date >> output.log\n")
f.write("find $DATA2_DIR -name '*.max' -print0 | while read -d $'\\0' file\n")
f.write("do\n")
f.write(" echo $file >> output.log\n")
f.write(" (/usr/bin/time -v cppr -a pr $file) >> output.log 2>&1\n")
f.write("done\n")
#
f.write("DATA_HASH='change_folder_hash'\n")
f.write("if [[ '$DATA_HASH' != 'change_folder_hash' ]]; then\n")
f.write(" DATA3_DIR='../data_link/'$DATA_HASH'/'\n")
f.write(" echo ' * '$current_date >> output.log\n")
f.write(" find $DATA3_DIR -name '*.max' -print0 | while read -d $'\\0' file\n")
f.write(" do\n")
f.write(" echo $file >> output.log\n")
f.write(" (/usr/bin/time -v cppr -a pr $file) >> output.log 2>&1\n")
f.write(" done\n")
f.write("fi\n")
f.write("echo ' [ DONE ] ' >> output.log\n")
f.close()
run(["sed", "-i", r"s/\x0//g", fn]) # remove NULL characters from the SBATCH file
return hash_small_data, hash_med_data
def create_nas_job_script(is_small=False):
"""Create NPB3.3-SER slurm job script to be submitted."""
benchmark_names = ["bt", "cg", "ep", "is", "lu", "sp", "ua"]
benchmark_name = random.choice(benchmark_names)
output_fn = "output.log"
hash_str = random.getrandbits(128)
fn = Path.home() / "test_eblocbroker" / "NPB3.3-SER_source_code" / "run.sh"
f = open(fn, "w+")
f.write("#!/bin/bash\n")
f.write("#SBATCH -o slurm.out # STDOUT\n")
f.write("#SBATCH -e slurm.err # STDERR\n")
f.write("#SBATCH --mail-type=ALL\n\n")
f.write(f"make {benchmark_name} CLASS=A > {output_fn}\n")
f.write(f"/usr/bin/time -v bin/{benchmark_name}.A.x >> {output_fn}\n")
if not is_small:
f.write(f"make {benchmark_name} CLASS=B >> {output_fn}\n")
f.write(f"/usr/bin/time -v bin/{benchmark_name}.B.x >> {output_fn}\n")
f.write(f"make {benchmark_name} CLASS=C >> {output_fn}\n")
f.write(f"/usr/bin/time -v bin/{benchmark_name}.C.x >> {output_fn}\n")
f.write(f"# {hash_str}\n")
f.close()
run(["sed", "-i", r"s/\x0//g", fn]) # remove NULL characters from the SBATCH file
return benchmark_name
def pre_submit(storage_ids, provider_address):
is_pass = True
required_confs = 0
yaml_fn = Path.home() / "ebloc-broker" / "broker" / "test_setup" / "nas" / "job_nas.yaml"
yaml_cfg = Yaml(yaml_fn)
yaml_cfg["config"]["provider_address"] = provider_address
for storage_id in storage_ids:
yaml_cfg["config"]["source_code"]["storage_id"] = storage_id
benchmark_name = create_nas_job_script(is_small=True)
submit_base = SubmitBase(yaml_cfg.path)
tx_hash = submit_base.submit(is_pass, required_confs)
if required_confs >= 1:
tx_receipt = get_tx_status(tx_hash, is_silent=True)
if tx_receipt["status"] == 1:
processed_logs = Ebb._eBlocBroker.events.LogJob().processReceipt(tx_receipt, errors=DISCARD)
try:
if processed_logs:
job_result = vars(processed_logs[0].args)
job_result["tx_hash"] = tx_hash
job_result["submitted_job_kind"] = f"nas_{benchmark_name}"
log(job_result)
except IndexError:
log(f"E: Tx({tx_hash}) is reverted")
# breakpoint() # DEBUG
def main():
console_ruler(f"NEW_TEST {Ebb.get_block_number()}")
if not is_process_on("mongod", "mongod"):
raise Exception("mongodb is not running in the background")
mc = MongoClient()
ebb_mongo = BaseMongoClass(mc, mc["ebloc_broker"]["tests"])
storage_ids = ["eudat", "gdrive", "ipfs"]
ipfs_ids = ["ipfs_gpg", "ipfs"]
# for provider_address in provider_addresses:
# pre_submit(storage_ids, provider_address)
benchmarks = ["nas", "cppr"]
test_dir = Path.home() / "ebloc-broker" / "broker" / "test_setup" / "nas"
nas_yaml_fn = test_dir / "job_nas.yaml"
cppr_yam_fn = test_dir / "job_cppr.yaml"
counter = 0
yaml_cfg = None
# storage = None
for _ in range(60):
for _ in range(2): # submitted as batch is faster
for idx, provider_address in enumerate(provider_addresses):
# yaml_cfg["config"]["data"]["data3"]["storage_id"] = random.choice(storage_ids)
storage_id = (idx + counter) % len(storage_ids)
selected_benchmark = random.choice(benchmarks)
storage = storage_ids[storage_id]
if storage == "ipfs":
storage = random.choice(ipfs_ids)
if selected_benchmark == "nas":
log(f" * Submitting job from NAS Benchmark to [green]{provider_address}", "bold blue")
yaml_cfg = Yaml(nas_yaml_fn)
benchmark_name = create_nas_job_script()
elif selected_benchmark == "cppr":
log(f" * Submitting job with cppr datasets to [green]{provider_address}", "bold blue")
yaml_cfg = Yaml(cppr_yam_fn)
hash_small_data, hash_med_data = create_cppr_job_script()
yaml_cfg["config"]["data"]["data1"]["hash"] = hash_small_data
yaml_cfg["config"]["data"]["data2"]["hash"] = hash_med_data
yaml_cfg["config"]["data"]["data3"]["storage_id"] = storage
small_datasets = Path.home() / "test_eblocbroker" / "dataset_zip" / "small"
dirs = [d for d in os.listdir(small_datasets) if os.path.isdir(os.path.join(small_datasets, d))]
dir_name = random.choice(dirs)
yaml_cfg["config"]["data"]["data3"]["path"] = str(small_datasets / dir_name)
yaml_cfg["config"]["source_code"]["storage_id"] = storage
yaml_cfg["config"]["provider_address"] = provider_address
try:
submit_base = SubmitBase(yaml_cfg.path)
submission_date = _time()
submission_timestamp = _timestamp()
requester_address = random.choice(users).lower()
yaml_cfg["config"]["requester_address"] = requester_address
log(f"requester={requester_address}", "bold")
tx_hash = submit_base.submit(is_pass=True)
log(f"tx_hash={tx_hash}", "bold")
tx_receipt = get_tx_status(tx_hash, is_silent=True)
if tx_receipt["status"] == 1:
processed_logs = Ebb._eBlocBroker.events.LogJob().processReceipt(tx_receipt, errors=DISCARD)
job_result = vars(processed_logs[0].args)
job_result["submit_date"] = submission_date
job_result["submit_timestamp"] = submission_timestamp
job_result["tx_hash"] = tx_hash
if selected_benchmark == "nas":
job_result["submitted_job_kind"] = f"{selected_benchmark}_{benchmark_name}"
elif selected_benchmark == "cppr":
job_result["submitted_job_kind"] = f"{selected_benchmark}_{hash_small_data}_{hash_med_data}"
ebb_mongo.add_item(tx_hash, job_result)
log(job_result)
countdown(seconds=5, is_silent=True)
except Exception as e:
print_tb(e)
counter += 1
sleep_time = randint(200, 400)
countdown(sleep_time)
if __name__ == "__main__":
main()
avg_line_length: 44.273092 (the values below continue in schema order, from max_line_length through qsc_codepython_frac_lines_print)
| 120
| 0.61484
| 1,314
| 11,024
| 4.916286
| 0.210046
| 0.040867
| 0.044427
| 0.009907
| 0.412848
| 0.390402
| 0.315944
| 0.270743
| 0.225697
| 0.200619
| 0
| 0.083586
| 0.251179
| 11,024
| 248
| 121
| 44.451613
| 0.69897
| 0.040004
| 0
| 0.235849
| 0
| 0.028302
| 0.349082
| 0.148078
| 0
| 0
| 0.015906
| 0
| 0
| 1
| 0.018868
| false
| 0.014151
| 0.075472
| 0
| 0.103774
| 0.023585
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
effective: 1 | hits: 0

hexsha: cc4031a6954bcf042bbc85f87eefa7a1869a0a28 | size: 8,441 | ext: py | lang: Python
max_stars_repo: ewanlee/mackrl, path src/models/basic.py, head 6dd505aa09830f16c35a022f67e255db935c807e, licenses ["Apache-2.0"] | max_stars_count: 26 | stars events: 2019-10-28T09:01:45.000Z to 2021-09-20T08:56:12.000Z
max_issues_repo: same repo, path, and head | licenses ["Apache-2.0"] | max_issues_count: 1 | issues events: 2020-07-25T06:50:05.000Z to 2020-07-25T06:50:05.000Z
max_forks_repo: same repo, path, and head | licenses ["Apache-2.0"] | max_forks_count: 6 | forks events: 2019-12-18T12:02:57.000Z to 2021-03-03T13:15:47.000Z
content:
from torch.autograd import Variable
import torch.nn.functional as F
import torch.nn as nn
import torch as th
from components.transforms import _to_batch, _from_batch, _check_inputs_validity, _tdim, _vdim
class DQN(nn.Module):
def __init__(self, input_shapes, n_actions, output_type=None, output_shapes=None, layer_args=None, args=None):
super(DQN, self).__init__()
self.args = args
self.n_actions = n_actions
assert output_type is not None, "you have to set an output_type!"
self.output_type = output_type
# Set up input regions automatically if required (if sensible)
self.input_shapes = {}
self.input_shapes.update(input_shapes)
# Set up output_shapes automatically if required
self.output_shapes = {}
self.output_shapes["fc2"] = self.n_actions # output
if output_shapes is not None:
self.output_shapes.update(output_shapes)
# Set up layer_args automatically if required
self.layer_args = {}
self.layer_args["fc1"] = {"in": self.input_shapes["main"], "out":64}
self.layer_args["fc2"] = {"in": self.layer_args["fc1"]["out"], "out": self.output_shapes["fc2"]}
if layer_args is not None:
self.layer_args.update(layer_args)
# Set up network layers
self.fc1 = nn.Linear(self.layer_args["fc1"]["in"], self.layer_args["fc1"]["out"])
self.fc2 = nn.Linear(self.layer_args["fc2"]["in"], self.layer_args["fc2"]["out"])
def init_hidden(self, batch_size, *args, **kwargs):
"""
model has no hidden state, but we will pretend otherwise for consistency
"""
vbl = Variable(th.zeros(batch_size, 1, 1))
tformat = "bs*t*v"
return vbl.cuda() if self.args.use_cuda else vbl, tformat
def forward(self, inputs, tformat, loss_fn=None, hidden_states=None, **kwargs):
_check_inputs_validity(inputs, self.input_shapes, tformat)
# Execute model branch "main"
x, params, tformat = _to_batch(inputs["main"], tformat)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = _from_batch(x, params, tformat)
losses = None
if self.output_type in ["policies"]:
log_softmax = kwargs.get("log_softmax", False)
if log_softmax:
x = F.log_softmax(x, dim=_vdim(tformat))
else:
x = F.softmax(x, dim=_vdim(tformat))
if loss_fn is not None:
losses, _ = loss_fn(x, tformat=tformat)
return x, hidden_states, losses, tformat # output, hidden state, losses
class MLPEncoder(nn.Module):
def __init__(self, input_shapes, output_shapes={}, layer_args={}, args=None):
super(MLPEncoder, self).__init__()
self.args = args
# Set up input regions automatically if required (if sensible)
self.input_shapes = {}
assert set(input_shapes.keys()) == {"main"}, \
"set of input_shapes does not coincide with model structure!"
self.input_shapes.update(input_shapes)
# Set up layer_args automatically if required
self.output_shapes = {}
self.output_shapes["fc1"] = 64 # output
self.output_shapes.update(output_shapes)
# Set up layer_args automatically if required
self.layer_args = {}
self.layer_args["fc1"] = {"in":input_shapes["main"], "out":output_shapes["main"]}
self.layer_args.update(layer_args)
#Set up network layers
self.fc1 = nn.Linear(self.input_shapes["main"], self.output_shapes["main"])
pass
def forward(self, inputs, tformat):
x, n_seq, tformat = _to_batch(inputs["main"], tformat)
x = F.relu(self.fc1(x))
return _from_batch(x, n_seq, tformat), tformat
class RNN(nn.Module):
def __init__(self, input_shapes, n_actions, output_type=None, output_shapes={}, layer_args={}, args=None, **kwargs):
super(RNN, self).__init__()
self.args = args
self.n_actions = n_actions
assert output_type is not None, "you have to set an output_type!"
# self.output_type=output_type
# Set up input regions automatically if required (if sensible)
self.input_shapes = {}
self.input_shapes.update(input_shapes)
# Set up layer_args automatically if required
self.output_shapes = {}
self.output_shapes["output"] = self.n_actions # output
if self.output_shapes is not None:
self.output_shapes.update(output_shapes)
# Set up layer_args automatically if required
self.layer_args = {}
self.layer_args["encoder"] = {"in":self.input_shapes["main"], "out":64}
self.layer_args["gru"] = {"in":self.layer_args["encoder"]["out"], "hidden":64}
self.layer_args["output"] = {"in":self.layer_args["gru"]["hidden"], "out":self.output_shapes["output"]}
self.layer_args.update(layer_args)
# Set up network layers
self.encoder = MLPEncoder(input_shapes=dict(main=self.layer_args["encoder"]["in"]),
output_shapes=dict(main=self.layer_args["encoder"]["out"]))
self.gru = nn.GRUCell(self.layer_args["gru"]["in"], self.layer_args["gru"]["hidden"])
self.output = nn.Linear(self.layer_args["output"]["in"], self.layer_args["output"]["out"])
def init_hidden(self, batch_size=1):
vbl = Variable(th.zeros(batch_size, 1, self.layer_args["gru"]["hidden"]))
tformat = "bs*t*v"
return vbl.cuda() if self.args.use_cuda else vbl, tformat
def forward(self, inputs, hidden_states, tformat, loss_fn=None, **kwargs):
"""
If data contains whole sequences, can pass loss_fn to forward pass in order to generate all losses
automatically.
Can either be operated in sequence mode, or operated step-by-step
"""
_check_inputs_validity(inputs, self.input_shapes, tformat)
_inputs = inputs["main"]
loss = None
t_dim = _tdim(tformat)
assert t_dim == 2, "t_dim along unsupported axis"
t_len = _inputs.shape[t_dim]
loss_x = []
output_x = []
h_list = [hidden_states]
for t in range(t_len):
x = _inputs[:, :, slice(t, t + 1), :].contiguous()
x, tformat = self.encoder({"main":x}, tformat)
x, params_x, tformat_x = _to_batch(x, tformat)
h, params_h, tformat_h = _to_batch(h_list[-1], tformat)
h = self.gru(x, h)
x = self.output(h)
h = _from_batch(h, params_h, tformat_h)
x = _from_batch(x, params_x, tformat_x)
h_list.append(h)
loss_x.append(x)
# we will not branch the variables if loss_fn is set - instead return only tensor values for x in that case
output_x.append(x) if loss_fn is None else output_x.append(x.clone())
if loss_fn is not None:
_x = th.cat(loss_x, dim=_tdim(tformat))
loss = loss_fn(_x, tformat=tformat)[0]
return th.cat(output_x, t_dim), \
th.cat(h_list[1:], t_dim), \
loss, \
tformat
class FCEncoder(nn.Module):
def __init__(self, input_shapes, output_shapes=None, layer_args=None, args=None):
super(FCEncoder, self).__init__()
self.args = args
# Set up input regions automatically if required (if sensible)
self.input_shapes = {}
assert set(input_shapes.keys()) == {"main"}, \
"set of input_shapes does not coincide with model structure!"
self.input_shapes.update(input_shapes)
# Set up layer_args automatically if required
self.output_shapes = {}
self.output_shapes["fc1"] = 64
if output_shapes is not None:
self.output_shapes.update(output_shapes)
# Set up layer_args automatically if required
self.layer_args = {}
self.layer_args["fc1"] = {"in":input_shapes["main"], "out":output_shapes["main"]}
if layer_args is not None:
self.layer_args.update(layer_args)
#Set up network layers
self.fc1 = nn.Linear(self.input_shapes["main"], self.output_shapes["main"])
pass
def forward(self, inputs, tformat):
x, n_seq, tformat = _to_batch(inputs["main"], tformat)
x = F.relu(self.fc1(x))
return _from_batch(x, n_seq, tformat), tformat
avg_line_length: 39.260465 (the values below continue in schema order, from max_line_length through qsc_codepython_frac_lines_print)
| 120
| 0.621016
| 1,144
| 8,441
| 4.356643
| 0.133741
| 0.083066
| 0.075642
| 0.043339
| 0.727929
| 0.66573
| 0.64065
| 0.604133
| 0.551966
| 0.512039
| 0
| 0.006382
| 0.257434
| 8,441
| 214
| 121
| 39.443925
| 0.788768
| 0.136358
| 0
| 0.460993
| 0
| 0
| 0.069813
| 0
| 0
| 0
| 0
| 0
| 0.035461
| 1
| 0.070922
| false
| 0.014184
| 0.035461
| 0
| 0.177305
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
effective: 1 | hits: 0

hexsha: cc40fcfa271ed898e5b820a6656b997eb787c958 | size: 401 | ext: py | lang: Python
max_stars_repo: epedropaulo/MyPython, path 02 - Estruturas de controle/ex041.py, head cdb3602a01aedac26047f5b11a36a2262d4cc2ea, licenses ["MIT"] | max_stars_count: null | stars events: null
max_issues_repo: same repo, path, and head | licenses ["MIT"] | max_issues_count: null | issues events: null
max_forks_repo: same repo, path, and head | licenses ["MIT"] | max_forks_count: null | forks events: null
content:
from datetime import date
nascimento = int(input('Digite o ano de nascimento: '))
idade = date.today().year - nascimento
print(f'Ele tem {idade} anos.')
if idade <= 9:
lugar = 'mirim'
elif idade <= 14:
lugar = 'infantil'
elif idade <= 19:
lugar = 'junior'
elif idade <= 25:
lugar = 'sênior'
else:
lugar = 'master'
print(f'Logo ele pertence a classe dos {lugar}.')
avg_line_length: 23.588235 (the values below continue in schema order, from max_line_length through qsc_codepython_frac_lines_print)
| 56
| 0.618454
| 55
| 401
| 4.509091
| 0.672727
| 0.108871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023026
| 0.241895
| 401
| 16
| 57
| 25.0625
| 0.792763
| 0
| 0
| 0
| 0
| 0
| 0.309091
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.066667
| 0
| 0.066667
| 0.133333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
effective: 1 | hits: 0

hexsha: cc45ba5985dd0e3782dd94c105224f3ada5ebf10 | size: 1,159 | ext: py | lang: Python
max_stars_repo: TsaiTung-Chen/wtbd, path scripts/collect_infer.py, head 60fb153eec88e83c04ad4eac3c4a94188a28eeac, licenses ["MIT"] | max_stars_count: null | stars events: null
max_issues_repo: same repo, path, and head | licenses ["MIT"] | max_issues_count: null | issues events: null
max_forks_repo: same repo, path, and head | licenses ["MIT"] | max_forks_count: null | forks events: null
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 27 18:11:58 2021
@author: TSAI, TUNG-CHEN
@update: 2021/10/05
"""
MODEL_NAME = 'PhysicalCNN'
DIRECTORY = r"../dataset/preprocessed/data/"
WALK = True
SUBSET = 'all'
from wtbd.infer import infer
from wtbd.utils import print_info
from wtbd.data_collectors import SubsetDataCollector
# =============================================================================
#
# =============================================================================
def collect_infer(modelname, directory, walk=True, subset='all'):
data_collector = SubsetDataCollector()
data = data_collector(directory, subset=subset)
print_info(data['info'])
results = infer(modelname, data)
return data, results
# =============================================================================
#
# =============================================================================
if __name__ == '__main__':
data, results = collect_infer(MODEL_NAME,
DIRECTORY,
walk=WALK,
subset=SUBSET)
avg_line_length: 26.953488 (the values below continue in schema order, from max_line_length through qsc_codepython_frac_lines_print)
| 79
| 0.449525
| 96
| 1,159
| 5.25
| 0.541667
| 0.047619
| 0.055556
| 0.06746
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023732
| 0.200173
| 1,159
| 42
| 80
| 27.595238
| 0.519957
| 0.378775
| 0
| 0
| 0
| 0
| 0.082504
| 0.041252
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.166667
| 0
| 0.277778
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
effective: 1 | hits: 0

hexsha: cc48fd52654d6c5c9dcc17149850ed8b8aa26647 | size: 2,262 | ext: py | lang: Python
max_stars_repo: flychensc/apple, path coll_plw.py, head 1b85d05e589b3c53b0ac4df29f53d4793c1a3686, licenses ["Apache-2.0"] | max_stars_count: 2 | stars events: 2018-12-18T13:00:22.000Z to 2020-01-08T07:40:52.000Z
max_issues_repo: same repo, path, and head | licenses ["Apache-2.0"] | max_issues_count: null | issues events: null
max_forks_repo: same repo, path, and head | licenses ["Apache-2.0"] | max_forks_count: 1 | forks events: 2019-04-23T00:16:37.000Z to 2019-04-23T00:16:37.000Z
content:
"""
Probability output for the 排列五 (pick-five) lottery
"""
from lottery import get_history
import pandas as pd
import dash
import dash_html_components as html
import dash_core_components as dcc
CODE = '排列五'
MIN = 1
COUNT = 200
def _calc_prob(num, datas):
"""
Compute the probability of num occurring in datas.
e.g.:
num = 1
data = [[2,3],[1,7],[3,6]]
return 1/3
"""
count = datas.count(num)
# count/len(datas)
return round(count/len(datas)*100)
def calc_loc(historys, loc):
"""
{
0: [0.16, 0.16, 0.15, ...],
1: [0.16, 0.16, 0.22, ...],
...
9: [0.16, 0.16, 0.02, ...],
}
"""
history_numbers = [history['result'][loc-1] for history in historys]
result = dict()
for num in range(0,10): #0-9
# result.setdefault(num, [])
prob_list = list()
size = len(history_numbers)
while size >= MIN:
prob_list.append(_calc_prob(num, history_numbers[:size]))
size -= 1
result[num] = prob_list
return result
def gen_xls(historys):
with pd.ExcelWriter('排列五.xlsx') as writer:
for loc in range(1,5+1):
cols1 = ["近%d期" % i for i in range(len(historys), MIN-1, -1)]
data1 = calc_loc(historys, loc)
df1 = pd.DataFrame.from_dict(data1, orient='index', columns=cols1)
df1.to_excel(writer, sheet_name=f"第{loc}位")
def gen_html(historys):
children = [
html.H1(children='排列五分析'),
html.Div(children='数学期望值趋势'),
]
for loc in range(1,5+1):
cols = ["近%d期" % i for i in range(len(historys), MIN-1, -1)]
datas = []
for k,v in calc_loc(historys, loc).items():
data={'type':'line', 'name':k}
data['x'] = cols
data['y'] = v
datas.append(data)
children.append(dcc.Graph(
id=f'{loc}-exp-val-graph',
figure={
'data':datas,
'layout':{
'title':f'第{loc}位趋势'
}
}
))
app = dash.Dash()
app.layout = html.Div(children=children)
app.run_server(debug=True)
if __name__ == '__main__':
historys = get_history(CODE, COUNT)
#gen_xls(historys)
gen_html(historys)
avg_line_length: 23.5625 (the values below continue in schema order, from max_line_length through qsc_codepython_frac_lines_print)
| 78
| 0.516799
| 299
| 2,262
| 3.792642
| 0.367893
| 0.015873
| 0.021164
| 0.047619
| 0.101411
| 0.082892
| 0.082892
| 0.054674
| 0.054674
| 0.054674
| 0
| 0.046113
| 0.328912
| 2,262
| 95
| 79
| 23.810526
| 0.700922
| 0.115385
| 0
| 0.035088
| 0
| 0
| 0.059252
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070175
| false
| 0
| 0.087719
| 0
| 0.192982
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
effective: 1 | hits: 0

hexsha: cc490adc1922a89384ac9296797453d77a47df06 | size: 1,947 | ext: py | lang: Python
max_stars_repo: obukhov-sergey/shipper, path shipper/build.py, head 574b30b2b76087d6ebb8ccf9824e175bd1c9d123, licenses ["Apache-2.0"] | max_stars_count: 1 | stars events: 2021-11-27T14:40:26.000Z to 2021-11-27T14:40:26.000Z
max_issues_repo: same repo, path, and head | licenses ["Apache-2.0"] | max_issues_count: null | issues events: null
max_forks_repo: same repo, path, and head | licenses ["Apache-2.0"] | max_forks_count: 1 | forks events: 2021-11-27T14:40:27.000Z to 2021-11-27T14:40:27.000Z
content:
"""
Copyright [2013] [Rackspace]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os.path
import tarfile
from StringIO import StringIO
def parse_build(path=None, fobj=None):
"""Parses build parameters. Returns tuple
(archive, remote)
Where archive is a tar archive and remote is remote url if set.
One of the tuple elements will be null
"""
if path:
for prefix in ('http://', 'https://', 'github.com/', 'git://'):
if path.startswith(prefix):
return None, path
if path.startswith("~"):
path = os.path.expanduser(path)
return _archive_from_folder(path), None
else:
if not fobj:
raise ValueError("Set path or fobj")
return _archive_from_file(fobj), None
def _archive_from_folder(path):
memfile = StringIO()
try:
t = tarfile.open(mode='w', fileobj=memfile)
t.add(path, arcname='.')
return memfile.getvalue()
finally:
memfile.close()
def _archive_from_file(dockerfile):
memfile = StringIO()
try:
t = tarfile.open(mode='w', fileobj=memfile)
if isinstance(dockerfile, StringIO):
dfinfo = tarfile.TarInfo('Dockerfile')
dfinfo.size = dockerfile.len
else:
dfinfo = t.gettarinfo(fileobj=dockerfile, arcname='Dockerfile')
t.addfile(dfinfo, dockerfile)
return memfile.getvalue()
finally:
memfile.close()
avg_line_length: 29.5 (the values below continue in schema order, from max_line_length through qsc_codepython_frac_lines_print)
| 75
| 0.657422
| 250
| 1,947
| 5.068
| 0.484
| 0.047356
| 0.020521
| 0.025257
| 0.140489
| 0.140489
| 0.077348
| 0.077348
| 0.077348
| 0.077348
| 0
| 0.00545
| 0.24602
| 1,947
| 65
| 76
| 29.953846
| 0.857629
| 0.366718
| 0
| 0.388889
| 0
| 0
| 0.05995
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.305556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
effective: 1 | hits: 0

hexsha: cc4ce47e67db9699366e5b5fb9b6dab99a776e52 | size: 581 | ext: py | lang: Python
max_stars_repo: aircarto/Nebulo, path platformio_script.py, head 3134def3810e5c9579350a63299a33670583a722, licenses ["MIT"] | max_stars_count: 1 | stars events: 2021-12-27T01:07:26.000Z to 2021-12-27T01:07:26.000Z
max_issues_repo: same repo, path, and head | licenses ["MIT"] | max_issues_count: null | issues events: null
max_forks_repo: same repo, path, and head | licenses ["MIT"] | max_forks_count: null | forks events: null
content:
Import("env")
import hashlib
import os
import shutil
def _file_md5_hexdigest(fname):
return hashlib.md5(open(fname, 'rb').read()).hexdigest()
def after_build(source, target, env):
if not os.path.exists("builds"):
os.mkdir("builds")
lang = env.GetProjectOption('lang')
target_name = lang.lower()
with open(f"builds/latest_{target_name}.bin.md5", "w") as md5:
print(_file_md5_hexdigest(target[0].path), file = md5)
shutil.copy(target[0].path, f"builds/latest_{target_name}.bin")
env.AddPostAction("$BUILD_DIR/firmware.bin", after_build)
avg_line_length: 26.409091 (the values below continue in schema order, from max_line_length through qsc_codepython_frac_lines_print)
| 67
| 0.695353
| 84
| 581
| 4.642857
| 0.47619
| 0.053846
| 0.082051
| 0.097436
| 0.133333
| 0.133333
| 0
| 0
| 0
| 0
| 0
| 0.016129
| 0.146299
| 581
| 21
| 68
| 27.666667
| 0.770161
| 0
| 0
| 0
| 0
| 0
| 0.19105
| 0.153184
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.266667
| 0.066667
| 0.466667
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cc4e03aa9fb9cf8d29ed71ffc7fbde9900f6b98a
| 8,184
|
py
|
Python
|
htmldiffer/utils.py
|
IYIagnus/htmldiffer
|
0778e8771905db57f57f32d84471d35cf9a0f39d
|
[
"MIT"
] | 9
|
2017-10-06T01:39:54.000Z
|
2021-08-21T12:59:27.000Z
|
htmldiffer/utils.py
|
IYIagnus/htmldiffer
|
0778e8771905db57f57f32d84471d35cf9a0f39d
|
[
"MIT"
] | 21
|
2017-09-08T04:03:12.000Z
|
2021-12-23T12:28:44.000Z
|
htmldiffer/utils.py
|
anastasia/htmldiff
|
2a4e770eca6f742216afd33a1a6e82bb25f5fea0
|
[
"MIT"
] | 10
|
2017-10-06T01:40:03.000Z
|
2021-12-23T12:30:11.000Z
|
import re
import os
from bs4 import BeautifulSoup
from . import settings
def html2list(html_string, level='word'):
"""
:param html_string: any ol' html string you've got
:param level: either 'word' or 'character'. If level='word', elements will be words.
If level='character', elements will be individual characters.
:return: list of elements, making sure not to break up open tags (even if they contain attributes)
Note that any blacklisted tag will not be broken up
Example:
html_str = "<h1>This is a simple header</h1>"
result = html2list(html_str)
result == ['<h1>', 'This ', 'is ', 'a ', 'simple ', 'header', '</h1>']
Blacklisted tag example:
BLACKLISTED_TAGS = ['head']
html_str = "<head><title>Page Title</title></head>"
result = html2list(html_str)
result == ['<head><title>Page Title</title></head>']
"""
# different modes for parsing
CHAR, TAG = 'char', 'tag'
mode = CHAR
cur = ''
out = []
# TODO: use generators
# iterate through the string, character by character
for c in html_string:
# tags must be checked first to close tags
if mode == TAG:
# add character to current element
cur += c
# if we see the end of the tag
if c == '>':
out.append(cur) # add the current element to the output
cur = '' # reset the character
mode = CHAR # set the mode back to character mode
elif mode == CHAR:
# when we are in CHAR mode and see an opening tag, we must switch
if c == '<':
# clear out string collected so far
if cur != "":
out.append(cur) # if we have already started a new element, store it
cur = c # begin our tag
mode = TAG # swap to tag mode
# if c is a special character, store 'word', store c, continue
elif is_special_character(c):
out.append(cur)
out.append(c)
cur = ''
# otherwise, simply continue building up the current element
else:
if level == 'word':
cur += c
elif level == 'character':
out.append(c)
else:
raise ValueError('level must be "word" or "character"')
# TODO: move this to its own function (`merge_blacklisted` or `merge_tags`) and return a generator instead of a list
cleaned = list()
blacklisted_tag = None
blacklisted_string = ""
for x in out:
if not blacklisted_tag:
for tag in settings.BLACKLISTED_TAGS:
if verified_blacklisted_tag(x, tag):
blacklisted_tag = tag
blacklisted_string += x
break
if not blacklisted_tag:
cleaned.append(x)
else:
if x == "</{0}>".format(blacklisted_tag):
blacklisted_string += x
cleaned.append(blacklisted_string)
blacklisted_tag = None
blacklisted_string = ""
else:
blacklisted_string += x
return cleaned
def check_html(html, encoding=None):
if isinstance(html, BeautifulSoup):
html = html.prettify()
elif os.path.isfile(html):
with open(html, "r", encoding=encoding) as file:
html = file.read()
else:
html = html
return html
def verified_blacklisted_tag(x, tag):
"""
check for '<' + blacklisted_tag + ' ' or '>'
as in: <head> or <head ...> (should not match <header if checking for <head)
"""
initial = x[0:len(tag) + 1 + 1]
blacklisted_head = "<{0}".format(tag)
return initial == (blacklisted_head + " ") or initial == (blacklisted_head + ">")
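# Illustrative usage (a sketch, not part of the original module):
#   verified_blacklisted_tag('<head profile="x">', 'head')  # True
#   verified_blacklisted_tag('<header>', 'head')            # False ('<header' is not '<head')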
def add_stylesheet(html_list):
stylesheet_tag = '<link rel="stylesheet" type="text/css" href="{}">'.format(settings.STYLESHEET)
for idx, el in enumerate(html_list):
if "</head>" in el:
# add at the very end of the head tag because it is important
head = el.split("</head>")
new_head = head[0] + stylesheet_tag + "</head>" + "".join(head[1:])
html_list[idx] = new_head
return html_list
def extract_tagname(el):
if not is_tag(el):
raise Exception("Not a tag!")
tag_parts = el[el.index('<')+1:el.index('>')].replace("/", "")
return tag_parts.split(" ")[0]
def compare_tags(tag_a, tag_b):
"""
returns markers for deleted, inserted, and combined
"""
tag_parts_a = chart_tag(tag_a)
tag_parts_b = chart_tag(tag_b)
# first test whether we have any new attributes
deleted_attributes = set(tag_parts_a.keys()) - set(tag_parts_b.keys())
inserted_attributes = set(tag_parts_b.keys()) - set(tag_parts_a.keys())
# then look at every attribute set and check whether the values are the same
changed_attributes = list()
for attribute in set(tag_parts_a.keys()) & set(tag_parts_b.keys()):
if tag_parts_a[attribute] != tag_parts_b[attribute]:
changed_attributes.append(attribute)
return {
'deleted_attributes': list(deleted_attributes),
'inserted_attributes': list(inserted_attributes),
'changed_attributes': changed_attributes,
}
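# Illustrative usage (a sketch, not part of the original module; the tags are made up):
#   compare_tags('<div id="a" class="x">', '<div id="b">')
#   # -> {'deleted_attributes': ['class'], 'inserted_attributes': [], 'changed_attributes': ['id']}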
def chart_tag(tag_string):
"""
Takes tag and returns dict that charts out tag parts
example:
tag = '<div title="somewhere">'
parts = chart_tag(tag)
print(parts)
# {'tag': 'div', 'title': 'somewhere'}
"""
tag_parts = dict()
if tag_string[0] != "<" or tag_string[-1] != ">":
raise Exception("Got malformed tag", tag_string)
t = tag_string.split(" ")
for el in t:
if el[0] == "<":
# grab the tag type
tag_parts['tag'] = el[1:]
else:
check_element = el[:-1] if el[-1] == ">" else el
check_element = check_element.replace('"', '').replace('/', '')
if len(check_element.split("=")) > 1:
attribute, values = check_element.split("=")
tag_parts[attribute] = values
else:
# if unattached elements, these are probably extra values from
# the previous attribute, so we add them
tag_parts[attribute] += ' ' + check_element
if el[-1] == ">":
return tag_parts
def get_class_decorator(name, diff_type=''):
"""returns class like `htmldiffer-tag-change`"""
if diff_type:
return "%s_%s" % (settings.HTMLDIFFER_CLASS_STRINGS[name], diff_type)
else:
return "%s" % (settings.HTMLDIFFER_CLASS_STRINGS[name])
# ===============================
# Predicate functions
# ===============================
# Note: These make assumptions about consuming valid html text. Validations should happen before these internal
# predicate functions are used -- these are not currently used for parsing.
def is_blacklisted_tag(tag):
return tag in settings.BLACKLISTED_TAGS
def is_comment(text):
return "<!--" in text
def is_ignorable(text):
return is_comment(text) or is_closing_tag(text) or text.isspace()
def is_whitelisted_tag(tag):
# takes a tag and checks against WHITELISTED
return tag in settings.WHITELISTED_TAGS
def is_open_script_tag(x):
return "<script " in x
def is_closed_script_tag(x):
return "<\script" in x
def is_tag(x):
return len(x) > 0 and x[0] == "<" and x[-1] == ">"
def is_opening_tag(x):
return x[0] == "<" and x[1] != "/"
def is_closing_tag(x):
return x[0:2] == "</"
def is_self_closing_tag(x):
return len(x) > 0 and x[0] == "<" and x[-2:] == "/>"
def is_text(x):
return ("<" not in x) and (">" not in x)
def is_div(x):
return x[0:4] == "<div" and x[-6:] == "</div>"
def is_special_character(string):
char_re = re.compile(r'[^a-zA-Z0-9]')
string = char_re.search(string)
return bool(string)
| 30.651685
| 115
| 0.568793
| 1,031
| 8,184
| 4.376334
| 0.240543
| 0.031915
| 0.014628
| 0.006649
| 0.143617
| 0.079787
| 0.052305
| 0.038121
| 0.038121
| 0.024823
| 0
| 0.007047
| 0.306452
| 8,184
| 266
| 116
| 30.766917
| 0.787879
| 0.299731
| 0
| 0.2
| 0
| 0
| 0.054666
| 0
| 0
| 0
| 0
| 0.003759
| 0
| 1
| 0.144828
| false
| 0
| 0.027586
| 0.082759
| 0.324138
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cc53da809995bfcf906eeef477759efe194c27ed
| 1,157
|
py
|
Python
|
node/trust.py
|
cflynn07/OpenBazaar
|
439511e9c81a658cdad2014b02d3fdf13abab5c4
|
[
"MIT"
] | null | null | null |
node/trust.py
|
cflynn07/OpenBazaar
|
439511e9c81a658cdad2014b02d3fdf13abab5c4
|
[
"MIT"
] | null | null | null |
node/trust.py
|
cflynn07/OpenBazaar
|
439511e9c81a658cdad2014b02d3fdf13abab5c4
|
[
"MIT"
] | null | null | null |
import obelisk
import logging
import bitcoin
from twisted.internet import reactor
_log = logging.getLogger('trust')
TESTNET = False
def burnaddr_from_guid(guid_hex):
_log.debug("burnaddr_from_guid: %s", guid_hex)
if TESTNET:
guid_hex = '6f' + guid_hex
else:
guid_hex = '00' + guid_hex
_log.debug("GUID address on bitcoin net: %s", guid_hex)
guid = guid_hex.decode('hex')
_log.debug("Decoded GUID address on bitcoin net")
# perturb GUID
# to ensure unspendability through
# near-collision resistance of SHA256
# by flipping the last non-checksum bit of the address
guid = guid[:-1] + chr(ord(guid[-1]) ^ 1)
_log.debug("Perturbated bitcoin proof-of-burn address")
return obelisk.bitcoin.EncodeBase58Check(guid)
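# Illustrative sketch (not part of the original file; the GUID below is hypothetical):
# with TESTNET = False a 40-character hex GUID gets the mainnet version byte '00',
# its last bit is flipped to make the address unspendable, and the result is
# Base58Check-encoded:
#   burnaddr_from_guid('ab' * 20)  # -> a mainnet address string (starts with '1')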
def get_global(guid, callback):
get_unspent(burnaddr_from_guid(guid), callback)
def get_unspent(addr, callback):
_log.debug('get_unspent call')
def get_history():
history = bitcoin.history(addr)
total = 0
for tx in history:
total += tx['value']
callback(total)
reactor.callFromThread(get_history)
| 21.425926
| 59
| 0.673293
| 153
| 1,157
| 4.921569
| 0.437909
| 0.074369
| 0.063745
| 0.053121
| 0.061089
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013393
| 0.225583
| 1,157
| 53
| 60
| 21.830189
| 0.827009
| 0.11841
| 0
| 0
| 0
| 0
| 0.159606
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.137931
| false
| 0
| 0.137931
| 0
| 0.310345
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cc58d71d7ae86ba704bcdd360c5f0a8dc49d8018
| 1,446
|
py
|
Python
|
yogit/yogit/logger.py
|
jmaneyrol69/yogit
|
347a6ee990e066eab36432dbae93ee05c681f329
|
[
"MIT"
] | 17
|
2019-07-15T08:01:08.000Z
|
2022-02-16T20:07:20.000Z
|
yogit/yogit/logger.py
|
jmaneyrol69/yogit
|
347a6ee990e066eab36432dbae93ee05c681f329
|
[
"MIT"
] | 5
|
2019-07-16T12:49:08.000Z
|
2020-10-20T15:27:41.000Z
|
yogit/yogit/logger.py
|
jmaneyrol69/yogit
|
347a6ee990e066eab36432dbae93ee05c681f329
|
[
"MIT"
] | 4
|
2019-07-16T12:38:46.000Z
|
2020-10-19T12:51:37.000Z
|
"""
Application logger
"""
import logging
import os
import sys
from yogit import get_name, get_version
from yogit.yogit.paths import get_log_path, SETTINGS_DIR
def get_logger(stdout=False, logger_name=get_name(), version=get_version()):
"""
Create and configure a logger using a given name.
"""
os.makedirs(SETTINGS_DIR, exist_ok=True)
application_str = logger_name
if version:
application_str += " " + version
formatter = logging.Formatter(
fmt=(
"%(asctime)s "
"[{application}:%(process)d] "
"[%(levelname)s] "
"%(message)s".format(application=application_str)
),
datefmt="%Y-%m-%dT%H:%M:%S%z",
)
file_log_handler = logging.FileHandler(get_log_path())
file_log_handler.setLevel(logging.DEBUG)
file_log_handler.setFormatter(formatter)
local_logger = logging.getLogger(logger_name)
local_logger.setLevel(logging.DEBUG)
local_logger.addHandler(file_log_handler)
if stdout:
console_log_handler = logging.StreamHandler(sys.stdout)
console_log_handler.setLevel(logging.DEBUG)
console_log_handler.setFormatter(formatter)
local_logger.addHandler(console_log_handler)
return local_logger
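# Illustrative usage (a sketch, not part of the original file; the logger name is
# hypothetical):
#   debug_logger = get_logger(stdout=True, logger_name="yogit-debug")
#   debug_logger.info("hello")  # written to the log file and echoed to stdout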
LOGGER = get_logger()
def enable_stdout():
"""
Prints logs to stdout
"""
global LOGGER # pylint: disable=global-statement
LOGGER = get_logger(stdout=True)
| 24.931034
| 76
| 0.677732
| 173
| 1,446
| 5.421965
| 0.381503
| 0.085288
| 0.059701
| 0.053305
| 0.153518
| 0.089552
| 0
| 0
| 0
| 0
| 0
| 0
| 0.215076
| 1,446
| 57
| 77
| 25.368421
| 0.826432
| 0.085754
| 0
| 0
| 0
| 0
| 0.067757
| 0.021028
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0.142857
| 0
| 0.228571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cc5982992e201503c11c0734dd8dc758ba63e2ee
| 488
|
py
|
Python
|
sane_logger/sane_logger.py
|
JesseAldridge/sane_logger
|
d8850a67f7ffe6cd3d8b25ef5a9d482a254870bc
|
[
"MIT"
] | 1
|
2021-06-06T15:37:13.000Z
|
2021-06-06T15:37:13.000Z
|
sane_logger/sane_logger.py
|
JesseAldridge/sane_logger
|
d8850a67f7ffe6cd3d8b25ef5a9d482a254870bc
|
[
"MIT"
] | null | null | null |
sane_logger/sane_logger.py
|
JesseAldridge/sane_logger
|
d8850a67f7ffe6cd3d8b25ef5a9d482a254870bc
|
[
"MIT"
] | null | null | null |
import logging, sys
def sane_logger(log_level=logging.INFO):
logger = logging.getLogger()
logger.setLevel(log_level)
sh = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(
'[%(asctime)s] %(levelname)s [%(filename)s.%(funcName)s:%(lineno)d] %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S %Z'
)
sh.setFormatter(formatter)
logger.addHandler(sh)
return logger
if __name__ == '__main__':
logger = sane_logger(logging.DEBUG)
logger.info('test log')
| 24.4
| 85
| 0.684426
| 66
| 488
| 4.878788
| 0.575758
| 0.062112
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145492
| 488
| 19
| 86
| 25.684211
| 0.772182
| 0
| 0
| 0
| 0
| 0.066667
| 0.241803
| 0.077869
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.066667
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cc5a9edbb995731c204cc21e2a54a1a17f5290cd
| 7,104
|
py
|
Python
|
core_tools/drivers/hardware/virtual_gate_matrix.py
|
opietx/core_tools
|
d5bd2d4beed74791b80ff5bdabd67774403763ef
|
[
"BSD-2-Clause"
] | null | null | null |
core_tools/drivers/hardware/virtual_gate_matrix.py
|
opietx/core_tools
|
d5bd2d4beed74791b80ff5bdabd67774403763ef
|
[
"BSD-2-Clause"
] | null | null | null |
core_tools/drivers/hardware/virtual_gate_matrix.py
|
opietx/core_tools
|
d5bd2d4beed74791b80ff5bdabd67774403763ef
|
[
"BSD-2-Clause"
] | null | null | null |
from core_tools.data.SQL.SQL_connection_mgr import SQL_database_manager
from core_tools.drivers.hardware.hardware_SQL_backend import virtual_gate_queries
import time
import numpy as np
def lamda_do_nothing(matrix):
return matrix
class virtual_gate_matrix():
def __init__(self, name, gates, v_gates, data,
forward_conv_lamda = lamda_do_nothing, backward_conv_lamda = lamda_do_nothing):
self.name = name
self.gates = gates
self.v_gates = v_gates
self._matrix = data
self.forward_conv_lamda = forward_conv_lamda
self.backward_conv_lamda = backward_conv_lamda
self.last_update = time.time()
@property
def matrix(self):
return self.forward_conv_lamda(self._matrix)
@matrix.setter
def matrix(self, matrix):
if self._matrix.shape != matrix.shape:
raise ValueError('input shape of matrix does not match the one in the virtual gate matrix')
self._matrix[:,:] = self.backward_conv_lamda(matrix)
self.save()
@property
def inv(self):
l_inv_f = combine_lamdas(self.forward_conv_lamda, lamda_invert)
l_inv_b = combine_lamdas(self.backward_conv_lamda, lamda_invert)
return virtual_gate_matrix(self.name, self.gates, self.v_gates, self._matrix, l_inv_f, l_inv_b)
def reduce(self, gates, v_gates = None):
'''
reduce size of the virtual gate matrix
Args:
gates (list<str>) : names of the gates to which the current matrix should be reduced.
v_gates (list<str>) : list with the names of the virtual gates (optional)
'''
v_gates = name_virtual_gates(v_gates, gates)
v_gate_matrix = np.eye(len(gates))
for i in range(len(gates)):
for j in range(len(gates)):
if gates[i] in self.gates:
v_gate_matrix[i, j] = self[v_gates[i],gates[j]]
return virtual_gate_matrix('dummy', gates, v_gates, v_gate_matrix)
def __getitem__(self, index):
if isinstance(index, tuple):
idx_1, idx_2 = index
idx_1 = self.__evaluate_index(idx_1, self.v_gates)
idx_2 = self.__evaluate_index(idx_2, self.gates)
return self.matrix[idx_1,idx_2]
else:
raise ValueError("wrong input foramt provided ['virtual_gate','gate'] expected).".format(v_gate))
def __setitem__(self, index, value):
self.last_update = time.time()
if isinstance(index, tuple):
idx_1, idx_2 = index
idx_1 = self.__evaluate_index(idx_1, self.v_gates)
idx_2 = self.__evaluate_index(idx_2, self.gates)
m = self.matrix
m[idx_1,idx_2] = value
self._matrix[:,:] = self.backward_conv_lamda(m)
self.save()
else:
raise ValueError("wrong input foramt provided ['virtual_gate','gate'] expected).".format(v_gate))
def __evaluate_index(self, idx, options):
if isinstance(idx, int) and idx >= len(options):
raise ValueError("gate out of range ({}), size of virtual matrix {}x{}".format(idx, len(options), len(options)))
if isinstance(idx, str):
if idx not in options:
raise ValueError("{} gate does not exist in virtual gate matrix".format(idx))
else:
idx = options.index(idx)
return idx
def save(self):
if self.name != 'dummy':
save(self)
def __len__(self):
return len(self.gates)
def __repr__(self):
descr = "Virtual gate matrix named {}\nContents:\n".format(self.name)
content = "\nGates : {}\nVirtual gates : {}\nMatrix :\n".format(self.gates, self.v_gates, self.matrix)
for row in self.matrix:
content += "{}\n".format(row)
return descr + content
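# Illustrative usage (a sketch, not part of the original file; gate names are made up).
# Using the reserved name 'dummy' skips the database save:
#   m = virtual_gate_matrix('dummy', ['P1', 'P2'], ['vP1', 'vP2'], np.eye(2))
#   m['vP1', 'P2'] = 0.1
#   m['vP1', 'P2']  # -> 0.1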
def lamda_invert(matrix):
return np.linalg.inv(matrix)
def lamda_norm(matrix_norm):
matrix_no_norm = np.empty(matrix_norm.shape)
for i in range(matrix_norm.shape[0]):
matrix_no_norm[i, :] = matrix_norm[i, :]/matrix_norm[i, i]
return matrix_no_norm
def lamda_unnorm(matrix_no_norm):
matrix_norm = np.empty(matrix_no_norm.shape)
for i in range(matrix_norm.shape[0]):
matrix_norm[i, :] = matrix_no_norm[i]/np.sum(matrix_no_norm[i, :])
return matrix_norm
def combine_lamdas(l1, l2):
def new_lamda(matrix):
return l1(l2(matrix))
return new_lamda
def load_virtual_gate(name, real_gates, virtual_gates=None):
conn = SQL_database_manager().conn_local
virtual_gate_queries.generate_table(conn)
virtual_gates = name_virtual_gates(virtual_gates, real_gates)
if virtual_gate_queries.check_var_in_table_exist(conn, name):
real_gate_db, virtual_gate_db, matrix_db = virtual_gate_queries.get_virtual_gate_matrix(conn, name)
entries_to_add = set(real_gates) - set(real_gate_db)
gates = real_gate_db + list(entries_to_add)
dummy_matrix = np.eye(len(gates))
dummy_matrix[:len(real_gate_db) , :len(real_gate_db)] = matrix_db
dummy_v_gates = virtual_gate_matrix('dummy', gates, name_virtual_gates(None, gates), dummy_matrix)
v_gate_matrix = np.eye(len(real_gates))
for i in range(len(real_gates)):
for j in range(len(real_gates)):
v_gate_matrix[i, j] = dummy_v_gates['v' + real_gates[i],real_gates[j]]
return virtual_gate_matrix(name, real_gates, virtual_gates, v_gate_matrix)
else:
return virtual_gate_matrix(name, real_gates, virtual_gates, np.eye(len(real_gates)))
def save(vg_matrix):
conn = SQL_database_manager().conn_local
if virtual_gate_queries.check_var_in_table_exist(conn, vg_matrix.name):
# merge in case there are more entries
real_gate_db, virtual_gate_db, matrix_db = virtual_gate_queries.get_virtual_gate_matrix(conn, vg_matrix.name)
all_gates = list(set(real_gate_db + vg_matrix.gates))
dummy_v_gates = virtual_gate_matrix('dummy', all_gates, name_virtual_gates(None, all_gates), np.eye(len(all_gates)))
for i in range(len(real_gate_db)):
for j in range(len(real_gate_db)):
dummy_v_gates['v' + real_gate_db[i], real_gate_db[j]] = matrix_db[i,j]
for i in range(len(vg_matrix.gates)):
for j in range(len(vg_matrix.gates)):
dummy_v_gates['v' + vg_matrix.gates[i], vg_matrix.gates[j]] = vg_matrix._matrix[i,j]
virtual_gate_queries.set_virtual_gate_matrix(conn, vg_matrix.name,
dummy_v_gates.gates, dummy_v_gates.v_gates, dummy_v_gates._matrix)
else:
virtual_gate_queries.set_virtual_gate_matrix(conn, vg_matrix.name,
vg_matrix.gates, vg_matrix.v_gates, vg_matrix._matrix)
def name_virtual_gates(v_gate_names, real_gates):
if v_gate_names is None:
v_gates = []
for i in real_gates:
v_gates += ['v' + i]
else:
v_gates = v_gate_names
return v_gates
| 36.244898
| 125
| 0.649352
| 1,019
| 7,104
| 4.19529
| 0.138371
| 0.072047
| 0.059649
| 0.015439
| 0.413099
| 0.34386
| 0.25076
| 0.208187
| 0.208187
| 0.185731
| 0
| 0.004123
| 0.248874
| 7,104
| 195
| 126
| 36.430769
| 0.797039
| 0.035473
| 0
| 0.20438
| 0
| 0
| 0.059671
| 0.006761
| 0
| 0
| 0
| 0
| 0
| 1
| 0.145985
| false
| 0
| 0.029197
| 0.036496
| 0.29927
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cc61601dbf922a6709fa7e843196b29dbf744599
| 54,648
|
py
|
Python
|
outlier-analyzer-wrapper/cal_outlier.py
|
vasu018/outlier-analyzers
|
a46102e90f7c87560efa2f33dff3b337561486b5
|
[
"Apache-2.0"
] | null | null | null |
outlier-analyzer-wrapper/cal_outlier.py
|
vasu018/outlier-analyzers
|
a46102e90f7c87560efa2f33dff3b337561486b5
|
[
"Apache-2.0"
] | null | null | null |
outlier-analyzer-wrapper/cal_outlier.py
|
vasu018/outlier-analyzers
|
a46102e90f7c87560efa2f33dff3b337561486b5
|
[
"Apache-2.0"
] | null | null | null |
import sys
import re
import json
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import MultiLabelBinarizer
from scipy.spatial.distance import cdist
from colorama import Fore, Style
from kneed import KneeLocator
import copy
import time
import pickle
import os
def error_msg(error_msg, arg):
"""
Helper function to display error message on the screen.
Input:
The error message along with its respective argument.
(Values include - filename, selected action).
Output:
The formatted error message on the screen along with the argument.
"""
print("****************************")
print(Fore.RED, end='')
print(error_msg,":", arg)
print(Style.RESET_ALL, end='')
print("****************************")
sys.exit(0)
def printINFO(info):
"""
Helper function to ask the user for Input.
Input:
The message that is to be displayed.
Output:
The formatted message on the screen.
"""
print(Fore.BLUE, end='')
print(info)
print(Style.RESET_ALL, end='')
# *****************************************************************************
# *****************************************************************************
# Helper Methods Start
def calculate_num_clusters(df, acl_weights):
"""
Calculates the optimal number of clusters using the elbow_graph approach.
Input:
The Pandas dataframe of the input file (ACL.json)
output:
The value of k that provides the least MSE.
"""
files = ['IP_Access_List', 'Route_Filter_List', 'VRF', 'AS_Path_Access_List',
'IKE_Phase1_Keys', 'IPsec_Phase2_Proposals', 'Routing_Policy']
k_select_vals = [41, 17, 42, 5, 3, 2, 58]
curr_file = file_name.split(".")[0]
file_index = files.index(curr_file)
return k_select_vals[file_index]  # NOTE: this early return bypasses the elbow-method analysis below
features = df[df.columns]
ran = min(len(df.columns), len(discrete_namedstructure))
if ran > 50:
k_range = range(1, 587)
else:
k_range = range(1, ran)
print(k_range)
k_range = range(1, 580)
distortions = []
np.random.seed(0)
clusters_list = []
f = open('distortions.txt', 'w')
for k in k_range:
print(k)
kmeans = KMeans(n_clusters=k).fit(features, None, sample_weight=acl_weights)
clusters_list.append(kmeans)
cluster_centers = kmeans.cluster_centers_
k_distance = cdist(features, cluster_centers, "euclidean")
distance = np.min(k_distance, axis=1)
distortion = np.sum(distance)/features.shape[0]
distortions.append(distortion)
f.write(str(distortion))
f.write("\n")
kn = KneeLocator(list(k_range), distortions, S=3.0, curve='convex', direction='decreasing')
print("Knee is: ", kn.knee)
plt.xlabel('k')
plt.ylabel('Distortion')
plt.title('The Elbow Method showing the optimal k')
plt.plot(k_range, distortions, 'bx-')
plt.vlines(kn.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')
plt.show()
if kn.knee is None:
if ran < 5:
return ran - 1
else:
return 5
return kn.knee
'''
for i in range(1, len(avg_within)):
if (avg_within[i-1] - avg_within[i]) < 1:
break
# return i-1 if len(avg_within) > 1 else 1
# return i - 1 if i > 1 else 1
'''
def perform_kmeans_clustering(df, ns_weights):
"""
To get a mapping of the rows into respective clusters generated using the K-means algorithm.
Input:
df:The Pandas data-frame of the input file (ACL.json)
ns_weights: The weights of each name structure which allows the weighted k-means algorithm to work.
Output:
Adding respective K-means cluster label to the input dataframe.
Example:
Row1 - Label 0 //Belongs to Cluster 0
Row2 - Label 0 //Belongs to Cluster 0
Row3 - Label 1 //Belongs to Cluster 1
"""
global k_select
k_select = calculate_num_clusters(df, ns_weights)
features = df[df.columns]
kmeans = KMeans(n_clusters=k_select)
kmeans.fit(features, None, sample_weight=ns_weights)
labels = kmeans.labels_
df["kmeans_cluster_number"] = pd.Series(labels)
def extract_keys(the_dict, prefix=''):
"""
Recursive approach to gather all the keys that have nested keys in the input file.
Input:
The dictionary file to find all the keys in.
Output:
All the keys found in the nested dictionary.
Example:
Consider {key1:value1, key2:{key3:value3}, key4:[value4], key5:[key6:{key7:value7}]}
The function returns key2, key5=key6
"""
key_list = []
for key, value in the_dict.items():
if len(prefix) == 0:
new_prefix = key
else:
new_prefix = prefix + '=' + key
try:
if type(value) == dict:
key_list.extend(extract_keys(value, new_prefix))
elif type(value) == list and type(value[0]) == dict:
key_list.extend(extract_keys(value[0], new_prefix))
elif type(value) == list and type(value[0]) != dict:
key_list.append(new_prefix)
else:
key_list.append(new_prefix)
except:
key_list.append(new_prefix)
return key_list
def get_uniques(data):
"""
A helper function to get unique elements in a List.
Input:
A list that we need to capture uniques from.
Output:
A dictionary with unique entries and count of occurrences.
"""
acl_count_dict = {}
for acl in data:
acl = json.dumps(acl)
if acl not in acl_count_dict:
acl_count_dict[acl] = 1
else:
value = acl_count_dict[acl]
value += 1
acl_count_dict[acl] = value
keys = []
values = []
for key, value in acl_count_dict.items():
keys.append(key)
values.append(value)
return keys, values
def overall_dict(data_final):
"""
Parses through the dictionary and appends the frequency with which the keys occur.
Input:
A nested dictionary.
Example:
{key1:{key2:value1, key3:value2, key4:{key5:value3}}
{key6:{key7:value2}
{key8:{key3:value3, key4:value5, key6:value3}}
Output:
Returns a new array with the nested keys appended along with a tuple containing the un-nested value along with
the frequency count.
[{
key1=key2:{'value1':1},
key1=key3:{'value2':2},
key1=key4=key5:{'value3':3},
key6=key7:{'value2':2},
key8=key3:{'value3':3},
key8=key4:{'value5':1},
key8=key6:{'value3':1}
}]
"""
overall_array = []
for data in data_final:
overall = {}
for item in data:
if item[0] is None:
continue
result = extract_keys(item[0])
for element in result:
value = item[0]
for key in element.split("="):
new_value = value[key]
if type(new_value) == list:
if len(new_value) != 0:
new_value = new_value[0]
else:
new_value = "#BUG#"
value = new_value
if element not in overall:
overall[element] = {}
if value not in overall[element]:
overall[element][value] = 1
else:
overall[element][value] += 1
overall_array.append(overall)
return overall_array
def get_overall_dict(data_final):
"""
Parses through the dictionary and appends the frequency with which the keys occur.
Input:
A nested dictionary.
Example:
{key1:{key2:value1, key3:value2, key4:{key5:value3}}
{key6:{key7:value2}
{key8:{key3:value3, key4:value5, key6:value3}}
Output:
Returns a new array with the nested keys appended along with a tuple containing the unnested value along with the frequency count.
[{
key1=key2:{'value1':1},
key1=key3:{'value2':2},
key1=key4=key5:{'value3':3},
key6=key7:{'value2':2},
key8=key3:{'value3':3},
key8=key4:{'value5':1},
key8=key6:{'value3':1}
}]
"""
overall_array = []
for data in data_final:
overall = {}
new_value = None
flag = 0
for item in data:
visited = {"lines=name":1}
if item[0] is None:
continue
result = extract_keys(item[0])
for element in result:
value = item[0]
for key in element.split("="):
if element not in visited:
visited[element] = 1
new_value = value[key]
flag = 0
if type(new_value) == list:
if len(new_value) > 0:
for list_data in new_value:
if element not in overall:
overall[element] = {}
temp = element
temp_val = list_data
temp = temp.split("=", 1)[-1]
while len(temp.split("=")) > 1:
temp_val = temp_val[temp.split("=")[0]]
temp = temp.split("=", 1)[-1]
list_key = temp
check = 0
try:
if type(temp_val[list_key]) == list:
if temp_val[list_key][0] not in overall[element]:
overall[element][temp_val[list_key][0]] = 1
check = 1
else:
if temp_val[list_key] not in overall[element]:
overall[element][temp_val[list_key]] = 1
check = 1
except:
dummy=0
'''
do nothing
'''
try:
if check == 0:
if type(temp_val[list_key]) == list:
if temp_val[list_key][0] in overall[element]:
overall[element][temp_val[list_key][0]] += 1
else:
if temp_val[list_key] in overall[element]:
overall[element][temp_val[list_key]] += 1
except:
dummy=0
flag = 1
value = new_value
else:
'''
Type is not list
'''
value = new_value
else:
if flag == 0:
if element not in overall:
overall[element] = {}
if new_value not in overall[element]:
overall[element][new_value] = 1
else:
overall[element][new_value] += 1
if flag == 0:
if element not in overall:
overall[element] = {}
if new_value not in overall[element]:
overall[element][new_value] = 1
else:
overall[element][new_value] += 1
overall_array.append(overall)
return overall_array
def calculate_z_score(arr):
"""
Calculates the Z-score (uses mean) (or) Modified Z-score (uses median) of data-points
Input:
Data points generated from parsing through the input file.
Also considers the Z_SCORE_FLAG that is set previously with 0 (default) using the Modified Z-score and 1 using Z-score.
Output:
The Z-score of given data-points array.
"""
if len(arr) == 1:
return arr
z_score = []
'''
Calculates the Z-score using mean. Generally used if distribution is normal (Bell curve).
'''
if Z_SCORE_FLAG:
mean = np.mean(arr)
std = np.std(arr)
if std == 0:
return np.ones(len(arr)) * 1000
for val in arr:
z_score.append((val - mean) / std)
'''
Modified Z-score approach.
Calculates the Z-score using median. Generally used if distribution is skewed.
'''
else:
median_y = np.median(arr)
medians = [np.abs(y - median_y) for y in arr]
med = np.median(medians)
median_absolute_deviation_y = np.median([np.abs(y - median_y) for y in arr])
if median_absolute_deviation_y == 0:
return np.ones(len(arr)) * 1000
z_score = [0.6745 * (y - median_y) / median_absolute_deviation_y for y in arr]
return z_score
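# Illustrative worked example (not from the original file), with Z_SCORE_FLAG = 0
# (modified z-score): for arr = [1, 2, 2, 9] the median is 2 and the median absolute
# deviation is 0.5, so the scores are roughly [-1.35, 0.0, 0.0, 9.44]; the value seen
# 9 times leans toward the signature side and the value seen once toward the bug side.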
def calculate_signature_d(overall_arr):
"""
Uses Z-score to generate the signatures of data-points and also maps points on level of significance (include for
signature calculation, include for bug calculation, no significance).
If Z-score is equal to 1000.0 or in between sig_threshold and bug_threshold, no-significance.
If Z-score is >= sig_threshold, include for signature calculation.
If Z-score is <= bug_threshold, include for bug calculation.
Input:
The individual master-signature generated for each Cluster.
Output:
An array containing dictionaries marked with tags that represent the action that needs to be performed on them.
"""
signature = {}
for key, value in overall_arr.items():
sig_threshold = 0.5
bug_threshold = -0.1
key_points = []
data_points = []
sig_values = []
for k, v in value.items():
key_points.append(k)
data_points.append(v)
if len(data_points) == 1:
sig_values.append((key_points[0], (data_points[0])))
'''
Check for two data points case
'''
else:
z_score = calculate_z_score(data_points)
if len(z_score) > 0:
avg_z_score = sum(z_score)/len(z_score)
bug_threshold = bug_threshold + (avg_z_score - sig_threshold)
for i in range(len(z_score)):
present_zscore = z_score[i]
if present_zscore == 1000.0:
sig_values.append((key_points[i], "*", (data_points[i])))
elif present_zscore >= sig_threshold:
sig_values.append((key_points[i], (data_points[i])))
elif present_zscore <= bug_threshold:
sig_values.append((key_points[i], "!", (data_points[i])))
elif (present_zscore < sig_threshold) and (present_zscore > bug_threshold):
sig_values.append((key_points[i], "*", (data_points[i])))
if key in signature:
signature[key].append(sig_values)
else:
signature[key] = []
signature[key] += sig_values
return signature
def results(data, signatures):
title = file_name.split(".")[0] + "_Results.txt"
if not os.path.exists(os.path.dirname(title)):
os.makedirs(os.path.dirname(title))
f = open(title, "w")
f.write(title + "\n")
f.write("\n")
totalBugs = 0
totalConformers = 0
for cluster_index, clustered_namedStructure in enumerate(data):
numBugs = 0
numConformers = 0
cluster_signature = signatures[cluster_index]
for namedStructure in clustered_namedStructure:
keys = extract_keys(namedStructure[0])
namedStructure = flatten_json((namedStructure[0]), '=')
isNamedStructureABug = False
newNamedStructure = {}
for key, value in namedStructure.items():
flag = 0
for index, char in enumerate(key):
if char in '0123456789':
flag = 1
if index == len(key)-1:
new_key = str(key[0:index-1])
newNamedStructure[new_key] = value
else:
new_key = str(key[0:index-1]) + str(key[index+1:len(key)])
newNamedStructure[new_key] = value
if not flag:
newNamedStructure[key] = value
flag = 0
for propertyKey, propertyValue in newNamedStructure.items():
try:
propValues = cluster_signature[propertyKey]
except KeyError:
print("EXCEPTION OCCURRED!")
print(propertyKey)
propValues = []  # avoid reusing a stale value (or a NameError) when the key is missing
for value in propValues:
if value[0] == propertyValue and value[1] == '!':
numBugs += 1
isNamedStructureABug = True
if isNamedStructureABug:
numBugs += 1
else:
numConformers += 1
numBugs = len(clustered_namedStructure) - numConformers
f.write("Cluster Index: " + str(cluster_index) + "\n")
f.write(" Number of elements in Cluster = " + str(len(clustered_namedStructure)) + "\n")
f.write(" Number of Bugs using Z-score: " + str(len(clustered_namedStructure) - numConformers) + "\n")
f.write(" Number of Conformers using Z-score: " + str(numConformers) + "\n")
f.write("\n")
totalBugs += numBugs
totalConformers += numConformers
print("Total Bugs = ", totalBugs)
print("Total Confomers = ", totalConformers)
f.write("\n")
f.write("\n")
f.write("Total Bugs using Z-score: " + str(totalBugs) + "\n")
f.write("Total Conformers using Z-score: " + str(totalConformers))
def transform_data(data):
"""
A helper function to extract nested keys from the ACL and to add the frequency of the repeated value. Helps score data.
Input:
An ACL in the form {key1:value1, key2:{key3:value3}, key4:[value4], key5:[key6:{key7:value7}]}.
Output:
Extracted nested keys from the extract_keys function along with the frequency count.
Example:
[
{key1:{key2:value1, key3:value2, key4:{key5:value3}}
{key6:{key7:value2}
{key8:{key3:value3, key4:value5, key6:value3}}
]
Returns a new array with the nested keys appended along with a tuple containing the unnested value along with the frequency count.
[{
key1=key2:{'value1':1},
key1=key3:{'value2':2},
key1=key4=key5:{'value3':3},
key6=key7:{'value2':2},
key8=key3:{'value3':3},
key8=key4:{'value5':1},
key8=key6:{'value3':3}
}]
"""
count = 1
overall = {}
flag = 0
i = 0
while i < count:
value = None
result = None
new_value = None
for item in data:
result = extract_keys(item)
for element in result:
value = item
for key in element.split("="):
if key in value:
new_value = value[key]
if (type(new_value) == list) and (len(new_value) > 1):
if flag == 0:
count = len(new_value)
flag = 1
try:
new_value = new_value[i]
except:
new_value = new_value[-1]
elif (type(new_value) == list) and (len(new_value) == 1):
new_value = new_value[0]
value = new_value
if element not in overall:
overall[element] = {}
if type(value) != dict and type(value) != list:
if value not in overall[element]:
overall[element][value] = 1
i += 1
return overall
def calculate_signature_score(signature):
"""
Calculates the signature score for each signature as the sum of all the weights in it but ignoring the weights marked with "*".
Input:
A signature that contains tags of whether or not the weight should be included in calculating the signature.
Output:
An array containing the weights of all the signatures that should be considered.
Example:
Consider [
{'key1=key2':['val1', 40], 'key3=key4':['val2':90]}, //40 + 90
{'key5=key6=key7':['val3', *, 20], 'key8=key9':['val4':80]}, //80
{'key10=key11':['val5', 40]} //40
Returns [130, 80, 40].
"""
score_arr = []
for sig in signature:
score = 0
for key, value in sig.items():
for val in value:
if (val[1] != "!") and (val[1] != "*"):
score += val[1]
elif val[1] == "!":
score += val[2]
score_arr.append(score)
return score_arr
def calculate_namedstructure_scores(data_final, all_signatures):
"""
Calculate the individual scores for each discrete-ACL. This includes calculating human_error scores,
signature_scores, and deviant scores.
Input:
data_final:
List of ACLs grouped into a Cluster.
Example:
[
[acl-1, acl-4, acl-5, acl-9], //Cluster-0
[acl-2, acl-3], //Cluster-1
[acl-7], //Cluster-2
[acl-6, acl-8] //Cluster-3
]
all_signatures:
Consolidated signature for each Cluster.
Output:
deviant_arr: Returns all deviant properties for the ACL. Empty list is returned if no deviant property
in the ACL.
count_arr: [[TODO]]
dev_score: Returns the deviant score for the deviant properties found. 0 if no deviant property.
acls_arr: [[TODO]]
sig_score: Returns the signature score of the ACL.
cluster_num: Returns the cluster number that the ACL belongs to.
acls_score: The score that is generated for each acl
human_errors_arr: Returns the human_error properties (IPValidity, DigitRepetition, PortRange) for each ACL and
empty list if no human_error properties present in the ACL.
human_error_score: Returns the score of the human error property calculated for the ACL. 0 is returned if
no human_error property exists in the ACL.
"""
deviant_arr = []
count_arr = []
acls_dict = {}
acls_arr = []
acls_score = []
sig_score = []
dev_score = []
cluster_num = []
human_errors_arr = []
human_errors_score = []
i = 0
for acl_list in data_final:
bug_count = 0
conformer_count = 0
signature = all_signatures[i]
for acl in acl_list:
flag = 0
if str(acl[0]) not in acls_dict:
acls_dict[str(acl[0])] = 1
acls_arr.append(acl[0])
cluster_num.append(i)
flag = 1
else:
print(acl[0])
print(acls_dict)
continue
sig_score.append(signature_scores[i])
deviant = []
count = 0
dev_c = 0
acl_c = 0
human_errors = []
human_error_category = {}
data = transform_data(acl)
for data_key, data_val in data.items():
if data_key in signature:
'''
Key Valid. Now check for actual Value
'''
for val in data_val.items():
(error_key, error_value), error_category = calculateHumanErrors(data_key, val[0], signature[data_key], file_name.split(".")[0])
if error_category:
human_errors.append((error_key, error_value))
if error_category not in human_error_category:
human_error_category[error_category] = 0
human_error_category[error_category] += 1
for sig_val in signature[data_key]:
if val[0] == sig_val[0]:
'''
value also present. Now check if value part of bug/sig/skip
'''
if sig_val[1] == "!":
dev_c += sig_val[2]
acl_c += sig_val[2]
deviant.append((data_key, sig_val[0]))
bug_count += 1
elif sig_val[1] == "*":
conformer_count += 1
continue
else:
conformer_count += 1
count += sig_val[1]
acl_c += sig_val[1]
else:
'''
Deviant Key
'''
if data_key != "lines=name":
deviant.append(data_key)
dev_c += data_val
acl_c += data_val
if flag == 1:
count_arr.append(count)
deviant_arr.append(deviant)
dev_score.append(dev_c)
acls_score.append(acl_c)
human_errors_arr.append(human_errors)
human_errors_score.append(calculate_human_error_score(human_error_category))
i += 1
return deviant_arr, count_arr, dev_score, acls_arr, sig_score, cluster_num, acls_score, human_errors_arr, human_errors_score
def checkIPValidity(ip_address):
"""
A reg-ex check to verify the validity of an IP address.
Input:
A list of IP addresses
Output:
A boolean representing the validity of the IP address.
Returns 'True' if all the IPs are valid and 'False' if any of the IP is invalid.
"""
try:
ip_address = ip_address.split(":")
for ip in ip_address:
IP_check = "^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])?(\/)?((3[01]|3[02]|[12][0-9]|[0-9])?)$"
match = re.match(IP_check, ip)
if not match:
return False
return True
except Exception as e:
print(e)
return True
def checkPortRange(port_range):
"""
A check to verify that the port range is specified correctly (elem0 <= elem1).
Input:
A string that contains two numbers separated by a '-'.
Output:
A boolean representing the validity of the range (elem0 <= elem1).
Example:
52108-52109 (True)
466 - 466 (True)
466 - 465 (False)
"""
try:
port_split = port_range.split("-")
if int(port_split[-1]) < int(port_split[0]):
return False
return True
except:
return True
def checkDigitRepetition(digit, signature):
"""
Checks for Digit repetition.
Input:
The value for the following keys: srcPorts, dstPorts, lengthRange
Output:
Returns True if there is any Human Error and the digit is repeated twice.
"""
try:
if type(digit) == str:
digit = float(digit.split(":")[0])
if digit == 0:
return False
for item in signature:
if type(item) == str:
item = int(item.split(":")[0])
if digit == (item*10+item%10):
print("--------", digit, item*10 + item%10)
return True
return False
except:
return False
def calculateHumanErrors(data_key, data, signature, namedStructure):
"""
Checks for simple human errors like entering invalid IP Addresses, incorrect port-ranges, and digit repetitions.
Input:
data_key: The nested keys calculated in the overall_dict and get_overall_dict methods.
Example: key1=key2=key4
data: The data value for the keys.
signature: The signature for the keys that was calculated in the calculate_signature_d method.
namedStructure: The type of the IP file.
Possible values: IP_Access_List, Route_Filter_List, Routing_Policy, VRF, others.
Output:
Returns the error and the category it belongs to.
Example:
key1=key2=key3 [1333.0.0.13] [1333.0.0.13] IP_Access_List
Returns:
key1=key2=key3 [1333.0.0.13] IP
"""
human_error = (None, None)
category = None
data_key = data_key.split("=")[-1]
signature_items = []
for sig_item in signature:
signature_items.append(sig_item[0])
if namedStructure == "IP_Access_List":
if data_key == "ipWildcard":
if not checkIPValidity(data):
'''
Invalid IP
'''
human_error = (data_key, data)
category = "IP"
elif data_key in ["dstPorts", "srcPorts"]:
if not checkPortRange(data):
'''
Invalid Ports Range
'''
human_error = (data_key, data)
category = "RANGE"
elif namedStructure == "Route_Filter_List":
if data_key == "ipWildcard":
if not checkIPValidity(data):
'''
Invalid IP
'''
human_error = (data_key, data)
category = "IP"
elif data_key == "lengthRange":
if not checkPortRange(data):
'''
Invalid Ports Range
'''
human_error = (data_key, data)
category = "RANGE"
elif namedStructure == "Routing_Policy":
if data_key == "communities":
if checkDigitRepetition(data, signature_items):
'''
Error Copying digits
'''
human_error = (data_key, data)
category = "DIGIT"
elif data_key == "ips":
if not checkIPValidity(data):
'''
Invalid IP
'''
human_error = (data_key, data)
category = "IP"
elif namedStructure == "VRF":
if data_key in ["administrativeCost", "remoteAs", "metric", "localAs", "referenceBandwidth", ]:
if checkDigitRepetition(data, signature_items):
'''
Error Copying digits
'''
human_error = (data_key, data)
category = "DIGIT"
elif data_key in ["peerAddress", "localIp", "routerId", "network"]:
if not checkIPValidity(data):
'''
Invalid IP
'''
human_error = (data_key, data)
category = "IP"
'''
Any Other namedStructure
'''
else:
try:
if re.search('IP|ip', data_key) and not re.search('[a-zA-Z]', data):
if not checkIPValidity(data):
'''
Invalid IP
'''
human_error = (data_key, data)
category = "IP"
elif not re.search("[a-zA-Z]", data):
if checkDigitRepetition(data, signature_items):
'''
Error Copying digits
'''
human_error = (data_key, data)
category = "DIGIT"
except:
pass
return human_error, category
def calculate_human_error_score(category_dict):
"""
Scores the human_errors that have been found, with IPValidity and DigitRepetition errors
weighted as 'high', i.e., 0.8, and PortRange errors weighted 'medium', i.e., 0.5.
Input:
A dictionary containing the count of the error occurrences.
Output:
A weighted sum of all the errors found.
"""
total_score = 0
low = 0.2
medium = 0.5
high = 0.8
weightage_dict = {"IP": high, "RANGE": medium, "DIGIT": high}
for category, count in category_dict.items():
if count != 0:
#print("* Human Error Found *")
total_score += weightage_dict[category]/np.log(1+count)
return round(total_score/len(category_dict), 2) if category_dict else total_score
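# Illustrative worked example (not from the original file):
#   calculate_human_error_score({"IP": 1, "DIGIT": 2})
#   # -> round((0.8/log(2) + 0.8/log(3)) / 2, 2) ~= 0.94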
def flatten_json(data, delimiter):
"""
Flattens a JSON file.
Input:
data:
A JSON dictionary of hierarchical format.
{key1: {key2: value2, key3: value3}, key4: {key5: value5, key6: [value6, value7, value8]}}
delimiter:
A parameter to separate the keys in order to facilitate easy splitting.
Output:
A flattened dictionary with keys separated by the delimiter parameter.
key1_key2:value2, key1_key3:value3, key4_key5:value5, key4_key6:value6, key4_key6:value7, key4_key6:value8
"""
out = {}
def flatten(data, name=''):
if type(data) is dict:
for key in data:
flatten(data[key], name + key + delimiter)
elif type(data) is list:
i = 0
for elem in data:
flatten(elem, name + str(i) + delimiter)
i += 1
else:
out[name[:-1]] = data
flatten(data)
return out
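# Illustrative usage (a sketch, not from the original file):
#   flatten_json({"a": {"b": 1}, "c": [2, 3]}, "_")
#   # -> {"a_b": 1, "c_0": 2, "c_1": 3}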
def encode_data(data):
"""
Converts categorical values into numeric values. We use MultiLabelBinarizer to encode categorical data.
This is done in order to pass the data into clustering and other similar algorithms that can only handle numerical data.
Flattens each ACL list and then encodes them.
Input:
A Python list that contains all discrete-ACLs.
Output:
A Python list after encoding.
"""
flattenedData = []
allKeys = []
for NS in data:
flattenedNamedStructure = flatten_json(NS, '_')
flattenedData.append(flattenedNamedStructure)
for key in flattenedNamedStructure.keys():
if key not in allKeys:
allKeys.append(key)
mergedData = []
for NS in flattenedData:
mergedNS = []
for key, value in NS.items():
mergedNS.append(str(value))
mergedData.append(mergedNS)
mlb = MultiLabelBinarizer()
data_T = mlb.fit_transform(mergedData)
print("MLb classes=")
print(mlb.classes_)
return data_T, mlb.classes_
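# Illustrative sketch (not from the original file; the tiny ACLs are made up):
# each named structure is flattened, reduced to the multiset of its stringified
# values, then one-hot encoded with MultiLabelBinarizer:
#   encode_data([{"action": "PERMIT"}, {"action": "DENY"}])
#   # -> (array([[0, 1], [1, 0]]), array(['DENY', 'PERMIT'], dtype=object))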
def export_clusters(data, acl_weight_mapper):
"""
Helper Method to verify authenticity of Clusters being formed.
Input:
The data that is sorted into list of Clusters.
Example:
[
[acl-1, acl-4, acl-5, acl-9], //Cluster-0
[acl-2, acl-3], //Cluster-1
[acl-7], //Cluster-2
[acl-6, acl-8] //Cluster-3
]
We also make use of acl_dict and node_name_dict dictionaries by searching for the ACL and
then getting the appropriate ACL_name and the nodes that the ACL is present in.
Output:
A csv file by the name of Generated_Clusters is written in the format:
Cluster-0 |||| Cluster-0 Names |||| Cluster-0 Nodes |||| Cluster-1 |||| Cluster-1 Names |||| Cluster-1 Nodes
acl-1 |||| permit tcp eq 51107 |||| st55in15hras |||| acl-2 |||| permit udp any eq 1200 |||| rt73ve11m5ar
acl-4 |||| permit tcp eq 51102 |||| st55in15hras, st55in17hras |||| acl-3 |||| permit udp any eq 120002 |||| rt73ve10m4ar
acl-5 |||| permit tcp eq 51100 |||| st55in17hras ||||
acl-9 |||| permit tcp eq 51109 |||| st55in17hras ||||
"""
column_labels = []
for index in range(len(data)):
column_labels.append("Cluster " + str(index))
column_labels.append("Cluster " + str(index) + " ACL Weights")
column_labels.append("Cluster " + str(index) + " Nodes")
data_to_export = pd.DataFrame(columns=column_labels)
for cluster_index, cluster_data in enumerate(data):
discrete_ACL_nodes = []
cluster_weights = []
for discrete_ACL in cluster_data:
temp = json.dumps(discrete_ACL[0], sort_keys=True)
temp_arr = []
try:
for node in namedstructure_node_mapper[temp]:
temp_arr.append(node)
discrete_ACL_nodes.append(temp_arr)
except:
discrete_ACL_nodes.append(None)
cluster_weights.append(acl_weight_mapper[temp])
cluster_data = pd.Series(cluster_data)
cluster_weights_series = pd.Series(cluster_weights)
discrete_ACL_nodes = pd.Series(discrete_ACL_nodes)
data_to_export["Cluster " + str(cluster_index)] = cluster_data
data_to_export["Cluster " + str(cluster_index) + " ACL Weights"] = cluster_weights_series
data_to_export["Cluster " + str(cluster_index) + " Nodes"] = discrete_ACL_nodes
file = file_name.split(".")[0]
print(file)
title = "Clusters_" + file + ".csv"
print(title)
data_to_export.to_csv(title)
def parse_data():
"""
A helper method to parse through the input configuration files and capture necessary information.
Input:
None. The file path parameter is read from the commandline arguments.
Output:
discrete_namedstructure: A list that contains stringified named-structures.
namedstructure_nod_mapper: A dictionary that contains the named-structure configuration as key and a list of
nodes it is a part of as value.
"""
df = pd.read_json(sys.argv[2], orient="index")
discrete_namedstructure = []
namedstructure_node_mapper = {} # Maps each discrete_acl with all the nodes that it belongs to
discrete_nodes = []
for column in df.columns:
for index, data in df[column].iteritems():
if data is not None:
if 'lines' in data[0]:
data_holder = 'lines'
data_to_look_under = data[0][data_holder]
elif 'statements' in data[0]:
data_holder = 'statements'
data_to_look_under = data[0][data_holder]
else:
data_to_look_under = data
for discrete_acl in data_to_look_under:
if 'name' in discrete_acl:
del discrete_acl['name']
discrete_acl = json.dumps(discrete_acl, sort_keys=True)
discrete_namedstructure.append(discrete_acl)
if discrete_acl in namedstructure_node_mapper:
nodes = namedstructure_node_mapper[discrete_acl]
if index not in nodes:
nodes.append(index)
namedstructure_node_mapper[discrete_acl] = nodes
else:
namedstructure_node_mapper[discrete_acl] = [index]
if index not in discrete_nodes:
discrete_nodes.append(index)
print("The number of discrete nodes in a network is: ", len(discrete_nodes))
return discrete_namedstructure, namedstructure_node_mapper
def perform_pca_analysis(encoded_data, column_names):
"""
A helper method to analyse the data using PCA
"""
pca = PCA()
pca.fit(encoded_data)
cumulative_variance = np.cumsum(np.round(pca.explained_variance_ratio_, decimals=8) * 100);
labels = [x for x in range(1, len(cumulative_variance) + 1)];
loadings = pd.DataFrame(pca.components_.T, columns=labels, index=column_names)
significance = {}
for index in loadings.index:
temp_list = loadings.loc[index]
sig = 0
for value in temp_list:
sig += value * value
significance[index] = sig
plt.plot(cumulative_variance)
plt.xlabel("N-components")
plt.ylabel("Cumulative Explained Variance")
plt.show()
sorted_significance = sorted(significance.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)
top_ten_attributes = []
for sigAttr in sorted_significance:
top_ten_attributes.append(sigAttr[0])
print("Top Ten Attributes:")
print(top_ten_attributes)
def silhouette_analysis(data, acl_weights):
"""
A helper method to perform an analysis of various scoring functions
"""
from sklearn.metrics import silhouette_score, davies_bouldin_score
k_range = range(2, 30)
elbow_scores = []
silhouette_scores = []
davies_bouldin_scores = []
elbow_file = open("elbow_scores.txt", "w")
silhouette_file = open("silhouette_scores.txt", "w")
davies_bouldin_file = open("davies_bouldin_scores.txt", "w")
for num_clusters in k_range:
print(num_clusters)
kmeans = KMeans(n_clusters=num_clusters)
cluster_labels = kmeans.fit_predict(data, None, sample_weight=acl_weights)
cluster_centers = kmeans.cluster_centers_
k_distance = cdist(data, cluster_centers, "euclidean")
distance = np.min(k_distance, axis=1)
distortion = np.sum(distance) / data.shape[0]
silhouette_avg = silhouette_score(data, cluster_labels)
davies_bouldin_avg = davies_bouldin_score(data, cluster_labels)
silhouette_scores.append(silhouette_avg)
davies_bouldin_scores.append(davies_bouldin_avg)
elbow_scores.append(distortion)
silhouette_file.write(str(silhouette_avg) + " ")
davies_bouldin_file.write(str(davies_bouldin_avg) + " ")
elbow_file.write(str(distortion) + " ")
kn_elbow = KneeLocator(list(k_range), elbow_scores, S=5.0, curve='convex', direction='decreasing')
plt.scatter(x=k_range, y=elbow_scores)
plt.xlabel("Range")
plt.ylabel("Elbow Score")
plt.vlines(kn_elbow.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')
plt.show()
kn_silhouette = KneeLocator(list(k_range), silhouette_scores, S=5.0, curve='convex', direction='increasing')
plt.scatter(x=k_range, y=silhouette_scores)
plt.xlabel("Range")
plt.ylabel("Silhouette Score")
plt.vlines(kn_silhouette.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')
plt.show()
kn_davies_bouldin = KneeLocator(list(k_range), davies_bouldin_scores, S=5.0, curve='convex', direction='decreasing')
plt.scatter(x=k_range, y=davies_bouldin_scores)
plt.xlabel("Range")
plt.ylabel("Davies Bouldin Score")
plt.vlines(kn_davies_bouldin.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')
plt.show()
'''
# Helper Methods End
# *****************************************************************************
# *****************************************************************************
'''
whitelistDict = {}
Z_SCORE_FLAG = 0
ACTION_FLAG = 0
k_select = 0
'''
Parsing Data
'''
try:
file_name = sys.argv[2].split("/")[-1]
network_name = "DATA_HERE_" + sys.argv[2].split("/")[-2]
print(network_name)
'''
Making Outlier Directory for Current Network
'''
if not os.path.exists(network_name):
os.makedirs(network_name)
flag_file = network_name + '/' + '.flag_' + file_name
if sys.argv[1] == "-j":
df = pd.read_json(sys.argv[2], orient="index")
try:
if sys.argv[4] == "-a":
ACTION_FLAG = 3
except:
ACTION_FLAG = 0
try:
Z_SCORE_FLAG = int(sys.argv[3])
except:
error_msg("Invalid Z-Score Argument sent", sys.argv[3])
f = open(flag_file,'w')
f.write('{}'.format(ACTION_FLAG))
f.close()
elif sys.argv[1] == "-e":
df = pd.read_json(sys.argv[2], orient= "index")
try:
with open(sys.argv[3], 'rb') as handle:
whitelistDict = pickle.load(handle)
except:
print("FileNotFoundError: Please check if file exists.")
ACTION_FLAG = 1
elif sys.argv[1] == "-d":
df = pd.read_json(sys.argv[2],orient = "index")
ACTION_FLAG = 2
else:
error_msg("Invalid Argument or flags sent", sys.argv[1])
except:
error_msg("Invalid File specified. Please check the input dataset", sys.argv[2])
outlier_filename = network_name + '/' + 'outlier_' + file_name
cluster_filename = network_name + '/' + '.cluster_' + file_name
sig_filename = network_name + '/' + '.sig_' + file_name
outlier_nodes_filename = network_name + '/' + '.outlier_nodes_' + file_name
print(outlier_filename, cluster_filename ,sig_filename, outlier_nodes_filename)
print("===========================================================")
print(Fore.BLUE, end='')
print("outlier-analyzer code started ...")
print(Style.RESET_ALL)
print(Fore.GREEN, end='')
start = time.time()
'''
Calculating outliers selected
'''
f = open(flag_file, 'r')
flag = f.readline()
f.close()
discrete_namedstructure, namedstructure_node_mapper = parse_data()
if (ACTION_FLAG == 0) or (ACTION_FLAG == 3):
mlb = MultiLabelBinarizer()
ns_weight_mapper = {}
data_for_clustering = []
namedstructure_weights = []
for ns in discrete_namedstructure:
if ns not in ns_weight_mapper:
ns_weight_mapper[ns] = 1
else:
ns_weight_mapper[ns] += 1
for ns, weight in ns_weight_mapper.items():
ns = json.loads(ns)
data_for_clustering.append(ns)
namedstructure_weights.append(weight)
encodedLists, column_names = encode_data(data_for_clustering)
df_enc = pd.DataFrame(encodedLists)
df_enc = df_enc.dropna(axis=1, how='any')
# perform_pca_analysis(encodedLists, column_names)
print("data encoding done...")
'''
Perform K-Means
'''
print("starting data clustering...")
perform_kmeans_clustering(df_enc, namedstructure_weights)
print("data clustering done...")
# silhouette_analysis(df_enc, acl_weights)
'''
Grouping data based on their Clusters
'''
cluster_range = np.arange(k_select)
data_final = []
data_final_enc = []
for index in cluster_range:
temp = []
temp_enc = []
for i in range(len(df_enc)):
if df_enc['kmeans_cluster_number'][i] == index:
temp.append([data_for_clustering[i]])
temp_enc.append([data_for_clustering[i]])
data_final.append(temp)
data_final_enc.append(temp_enc)
# export_clusters(data_final, acl_weight_mapper)
'''
Writing Clustered Data into a file
'''
with open(cluster_filename, 'w') as f:
f.write(json.dumps(data_final))
'''
Calculating Overall Structure per Cluster
'''
if ACTION_FLAG == 3:
overall_array_0 = overall_dict(data_final)
try:
overall_array = get_overall_dict(data_final)
except:
overall_array = overall_dict(data_final)
'''
Generating Signatures
'''
all_signatures = []
for i in range(len(overall_array)):
signature = calculate_signature_d(overall_array[i])
all_signatures.append(signature)
print("signature creation done...")
'''
Retuning Signature
'''
elif ACTION_FLAG == 1:
all_signatures = []
try:
with open(sig_filename, 'r') as f:
for item in f:
all_signatures.append(json.loads(item))
except FileNotFoundError:
print(Fore.RED, end='')
print("\nERROR: Calculate outliers on this data first!\n")
print(Style.RESET_ALL)
print("__________________________________")
print(Fore.RED, end='')
print("outlier-analyzer code failed #")
print(Style.RESET_ALL)
print("__________________________________")
sys.exit()
all_signatures = all_signatures[0]
wlDict = copy.deepcopy(whitelistDict['deviant'])
for edit_key, edit_value in whitelistDict['deviant']:
flag = 0
for signature in all_signatures:
if edit_key in signature:
for j in range(len(signature[edit_key])):
if edit_value in signature[edit_key][j][0]:
if signature[edit_key][j][1] == "!" or signature[edit_key][j][1] == "*":
try:
temp = (edit_value, signature[edit_key][j][2])
signature[edit_key][j] = temp
flag = 1
except Exception as e:
print(e)
if flag == 1:
wlDict.remove((edit_key, edit_value))
if wlDict:
print(Fore.RED, end='')
print("\nERROR : Specified Attributes {} either\n\tnot present or not a bug!".format(wlDict))
print(Style.RESET_ALL, end='')
print("__________________________________")
print(Fore.RED, end='')
print("outlier-analyzer code failed #")
print(Style.RESET_ALL, end='')
print("__________________________________")
sys.exit(0)
print("signature re-tuning done...")
data_final = []
with open(cluster_filename, 'r') as f:
for item in f:
data_final.append(json.loads(item))
data_final = data_final[0]
'''
Displaying the Outlier Nodes
'''
elif ACTION_FLAG == 2:
outlier_nodes_arr = []
try:
with open(outlier_nodes_filename, 'r') as f:
for item in f:
outlier_nodes_arr.append(json.loads(item))
except FileNotFoundError:
print(Fore.RED, end='')
print("\nERROR: Calculate outliers on this data first!\n")
print(Style.RESET_ALL)
print("__________________________________")
print(Fore.RED, end='')
print("outlier-analyzer code failed #")
print(Style.RESET_ALL)
print("__________________________________")
sys.exit()
print(Style.RESET_ALL)
print("########################")
print("Outlier Nodes are:")
outlier_nodes_arr = outlier_nodes_arr[0]
print(Fore.RED, end='')
print(*outlier_nodes_arr, sep="\n")
print(Style.RESET_ALL)
print("########################")
sys.exit(0)
'''
Scoring Signature
'''
signature_scores = calculate_signature_score(all_signatures)
print("signature scoring done...")
'''
Scoring ACLs
'''
deviant_arr, count_arr, dev_score, acls_arr, sig_score, cluster_num, acls_score, human_errors_arr, human_errors_score \
= calculate_namedstructure_scores(data_final, all_signatures)
print("acl scoring done...")
'''
Calculate outlier nodes
'''
count = 0
outlier_nodes = set()
for i in range(len(deviant_arr)):
if len(deviant_arr[i]) > 0:
count += 1
temp = json.dumps(acls_arr[i], sort_keys=True)
for item in namedstructure_node_mapper[temp]:
outlier_nodes.add(item)
with open(outlier_nodes_filename, 'w') as f:
f.write(json.dumps(list(outlier_nodes)))
'''
Writing all signatures to a hidden file
'''
with open(sig_filename, 'w') as f:
f.write(json.dumps(all_signatures))
nodes = []
for i in range(len(acls_arr)):
temp = json.dumps(acls_arr[i], sort_keys=True)
tempArr = []
try:
for item in namedstructure_node_mapper[temp]:
tempArr.append(item)
nodes.append(tempArr)
except:
nodes.append(None)
'''
Creating dataframe and exporting as a json file
'''
df_final = pd.DataFrame()
with open("deviant_array.txt", "w") as f:
print(deviant_arr, file=f)
print(human_errors_arr)
master_signatures = []
for i in range(len(data_final)):
for index in data_final[i]:
master_signatures.append(all_signatures[i])
# df_final['acl_name'] = acl_names
df_final['cluster_number'] = cluster_num
df_final['Conformer/Signature Definition'] = master_signatures
df_final['acl_structure'] = acls_arr
df_final['nodes'] = nodes
df_final['deviant_properties'] = deviant_arr
df_final['human_error_properties'] = human_errors_arr
df_final['human_error_score'] = human_errors_score
df_final['similarity_score'] = count_arr
df_final['acl_score'] = acls_score
df_final['max_sig_score'] = sig_score
outlier_flag = ['T' if len(deviant_prop)==0 else 'F' for deviant_prop in deviant_arr]
df_final['Outlier Flag'] = outlier_flag
df_final.to_json(outlier_filename, orient='split', index=False)
print(Style.RESET_ALL, end="")
end = time.time()
print(df_final)
print("###")
print(Fore.BLUE, end='')
print("OUTLIER-ANALYZER SUCCESSFUL #")
print("time to run : {} seconds".format(round(end - start), 3))
print(Style.RESET_ALL, end='')
print()
print("###########################################################")
print(outlier_nodes)
print(Fore.BLUE, end='')
print("\nTotal Outliers Count = {}".format(len(outlier_nodes)))
print(Style.RESET_ALL, end='')
print("\nTo view the detailed report, open the")
print("json file named: '{}'\n".format(outlier_filename))
print("###########################################################")
print()
sys.exit(0)
| 34.326633
| 170
| 0.549114
| 6,327
| 54,648
| 4.55113
| 0.111427
| 0.007293
| 0.007293
| 0.008752
| 0.308144
| 0.246327
| 0.223303
| 0.202153
| 0.184546
| 0.172009
| 0
| 0.021633
| 0.338512
| 54,648
| 1,591
| 171
| 34.348209
| 0.774932
| 0.20654
| 0
| 0.32319
| 0
| 0.001049
| 0.076594
| 0.019148
| 0
| 0
| 0
| 0.001257
| 0
| 1
| 0.027282
| false
| 0.001049
| 0.017838
| 0
| 0.07765
| 0.098636
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cc61b60657d94c8b13604d637733f5892011c4df
| 748
|
py
|
Python
|
war.py
|
hiraokusky/pronwar
|
bfc9afb958dcd0523578075a734350007d3f55ff
|
[
"MIT"
] | null | null | null |
war.py
|
hiraokusky/pronwar
|
bfc9afb958dcd0523578075a734350007d3f55ff
|
[
"MIT"
] | null | null | null |
war.py
|
hiraokusky/pronwar
|
bfc9afb958dcd0523578075a734350007d3f55ff
|
[
"MIT"
] | null | null | null |
import json
from collections import OrderedDict
import sys
# prons = sys.argv
prons = ['pron1.json', 'pron2.json','pron3.json','pron4.json']
def war(pron):
with open(pron, encoding='utf8') as f:
d_update = json.load(f, object_pairs_hook=OrderedDict)
synset = {}
for d in d_update:
if d['synset'] in synset:
if len(synset[d['synset']]['senses']) < len(d['senses']):  # keep the entry with the most senses
synset[d['synset']] = { 'lemma': d['lemma'], 'senses': d['senses'] }
else:
synset[d['synset']] = { 'lemma': d['lemma'], 'senses': d['senses'] }
return synset
for pron in prons:
synset = war(pron)
print(json.dumps(synset, indent=2, ensure_ascii=False))
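# Hedged example (illustrative only, not part of the original script): with two
# entries sharing a synset id, war() keeps the one with the larger senses list,
# e.g. the following d_update would reduce to the second entry:
#
#   d_update = [
#       {'synset': '01234', 'lemma': 'cat', 'senses': ['feline']},
#       {'synset': '01234', 'lemma': 'kitty', 'senses': ['feline', 'pet']},
#   ]
#   # -> {'01234': {'lemma': 'kitty', 'senses': ['feline', 'pet']}}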
| 29.92
| 85
| 0.580214
| 99
| 748
| 4.333333
| 0.444444
| 0.065268
| 0.090909
| 0.083916
| 0.172494
| 0.172494
| 0.172494
| 0.172494
| 0.172494
| 0
| 0
| 0.010753
| 0.254011
| 748
| 24
| 86
| 31.166667
| 0.758065
| 0.02139
| 0
| 0.2
| 0
| 0
| 0.167139
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.25
| 0
| 0.35
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cc62fe75b877b6b99c9222c7927faf2a48f691e2
| 506
|
py
|
Python
|
tkinter/grid-pack/example-1.py
|
whitmans-max/python-examples
|
881a8f23f0eebc76816a0078e19951893f0daaaa
|
[
"MIT"
] | 140
|
2017-02-21T22:49:04.000Z
|
2022-03-22T17:51:58.000Z
|
tkinter/grid-pack/example-1.py
|
whitmans-max/python-examples
|
881a8f23f0eebc76816a0078e19951893f0daaaa
|
[
"MIT"
] | 5
|
2017-12-02T19:55:00.000Z
|
2021-09-22T23:18:39.000Z
|
tkinter/grid-pack/example-1.py
|
whitmans-max/python-examples
|
881a8f23f0eebc76816a0078e19951893f0daaaa
|
[
"MIT"
] | 79
|
2017-01-25T10:53:33.000Z
|
2022-03-11T16:13:57.000Z
|
#!/usr/bin/env python3
'''set frame height 10%, 80%, 10%'''
import tkinter as tk
root = tk.Tk()
root.geometry('400x300')
header = tk.Frame(root, bg='green')
content = tk.Frame(root, bg='red')
footer = tk.Frame(root, bg='green')
root.columnconfigure(0, weight=1) # 100%
root.rowconfigure(0, weight=1) # 10%
root.rowconfigure(1, weight=8) # 80%
root.rowconfigure(2, weight=1) # 10%
header.grid(row=0, sticky='news')
content.grid(row=1, sticky='news')
footer.grid(row=2, sticky='news')
root.mainloop()
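# Worked note (illustrative, not part of the original example): with row weights
# 1:8:1 the 300 px window height is divided in proportion 1/10, 8/10, 1/10,
# i.e. roughly 30 px header, 240 px content and 30 px footer, matching the
# "10%, 80%, 10%" docstring above.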
| 21.083333
| 40
| 0.679842
| 82
| 506
| 4.195122
| 0.426829
| 0.061047
| 0.09593
| 0.113372
| 0.104651
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073661
| 0.114625
| 506
| 23
| 41
| 22
| 0.694196
| 0.136364
| 0
| 0
| 0
| 0
| 0.074941
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.071429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cc66060295d57944997a0a9eb2202003307a5a50
| 825
|
py
|
Python
|
server/dataplay/usersvc/service.py
|
data2068/dataplay3
|
6834bbf589cefa6007176da3577b5146eda70f52
|
[
"Apache-2.0"
] | 153
|
2019-04-19T22:14:43.000Z
|
2022-03-17T08:48:46.000Z
|
server/dataplay/usersvc/service.py
|
diandian11/dataplay3
|
59d41028e9fc1d64f5abc0ede93a4f8b9184854c
|
[
"Apache-2.0"
] | 10
|
2019-04-14T16:10:11.000Z
|
2021-01-04T02:54:13.000Z
|
server/dataplay/usersvc/service.py
|
diandian11/dataplay3
|
59d41028e9fc1d64f5abc0ede93a4f8b9184854c
|
[
"Apache-2.0"
] | 51
|
2019-04-14T16:02:52.000Z
|
2022-03-05T00:52:42.000Z
|
from sanic import Blueprint
from sanic import response
from sanic.log import logger
from sanic_openapi import doc
from .user import get_user, get_routes
user_svc = Blueprint('user_svc')
@user_svc.get('/currentUser', strict_slashes=True)
@doc.summary('get current user info')
async def user(request):
try:
user = get_user()
return response.json(user, status=200)
except Exception:
logger.exception('failed to get current user')
return response.json({}, status=500)
@user_svc.get('/auth_routes', strict_slashes=True)
@doc.summary('get authorized routes')
async def routes(request):
try:
routes = get_routes()
return response.json(routes, 200)
except Exception:
logger.exception('failed to get routes')
return response.json({}, status=500)
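# Hedged usage sketch (not part of this module): the blueprint above would
# typically be registered on a Sanic application elsewhere; the app name and
# port below are illustrative:
#
#   from sanic import Sanic
#   app = Sanic("dataplay")
#   app.blueprint(user_svc)
#   # app.run(host="0.0.0.0", port=8000)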
| 26.612903
| 53
| 0.699394
| 111
| 825
| 5.09009
| 0.315315
| 0.063717
| 0.127434
| 0.070796
| 0.417699
| 0.258407
| 0.152212
| 0.152212
| 0
| 0
| 0
| 0.018072
| 0.195152
| 825
| 30
| 54
| 27.5
| 0.832831
| 0
| 0
| 0.25
| 0
| 0
| 0.147879
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.208333
| 0
| 0.375
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cc6617c6b503c309c32e9fd739483b4b5f04738c
| 15,071
|
py
|
Python
|
dnabot/test_ot2_scripts/purification_template_2.8_test.py
|
BASIC-DNA-ASSEMBLY/dnabot
|
d53710d58a4ae3fb2e950f2ca21765fe25abc2e8
|
[
"MIT"
] | 3
|
2019-11-17T17:28:55.000Z
|
2019-11-19T19:24:58.000Z
|
dnabot/test_ot2_scripts/purification_template_2.8_test.py
|
BASIC-DNA-ASSEMBLY/dnabot
|
d53710d58a4ae3fb2e950f2ca21765fe25abc2e8
|
[
"MIT"
] | 2
|
2019-11-15T20:33:03.000Z
|
2019-11-16T12:41:55.000Z
|
dnabot/test_ot2_scripts/purification_template_2.8_test.py
|
BASIC-DNA-ASSEMBLY/dnabot
|
d53710d58a4ae3fb2e950f2ca21765fe25abc2e8
|
[
"MIT"
] | 7
|
2019-11-15T20:21:19.000Z
|
2019-11-25T11:53:27.000Z
|
from opentrons import protocol_api
# Rename to 'purification_template' and paste into 'template_ot2_scripts' folder in DNA-BOT to use
metadata = {
'apiLevel': '2.8',
'protocolName': 'purification_template',
'description': 'Implements magbead purification reactions for BASIC assembly using an opentrons OT-2'}
# example values produced by DNA-BOT for a single construct containing 5 parts, un-comment and run to test the template:
sample_number=8
ethanol_well='A3'
def run(protocol: protocol_api.ProtocolContext):
# added run function for API version 2
def magbead(
sample_number,
ethanol_well,
elution_buffer_well='A1',
sample_volume=30,
bead_ratio=1.8,
elution_buffer_volume=40,
incubation_time=5,
settling_time=2,
# if using Gen 2 magnetic module, need to change time! see: https://docs.opentrons.com/v2/new_modules.html
# "The GEN2 Magnetic Module uses smaller magnets than the GEN1 version...this means it will take longer for the GEN2 module to attract beads."
# Recommended Magnetic Module GEN2 bead attraction time:
# Total liquid volume <= 50 uL: 5 minutes
# this template was written with the Gen 1 magnetic module, as it is compatible with API version 2
drying_time=5,
elution_time=2,
sample_offset=0,
tiprack_type="opentrons_96_tiprack_300ul"):
"""
Selected args:
ethanol_well (str): well in reagent container containing ethanol.
elution_buffer_well (str): well in reagent container containing elution buffer.
sample_offset (int): offset the initial sample column by the specified value.
"""
### Constants
# Pipettes
PIPETTE_ASPIRATE_RATE = 25
PIPETTE_DISPENSE_RATE = 150
TIPS_PER_SAMPLE = 9
PIPETTE_TYPE = 'p300_multi_gen2'
# new constant for easier swapping between pipette types
# Tiprack
CANDIDATE_TIPRACK_SLOTS = ['3', '6', '9', '2', '5']
# Magnetic Module
MAGDECK_POSITION = '1'
# Mix Plate
MIX_PLATE_TYPE = '4ti_96_wellplate_200ul'
# modified from custom labware as API 2 doesn't support labware.create anymore, so the old add_labware script can't be used
# also acts as the type of plate loaded onto the magnetic module
MIX_PLATE_POSITION = '4'
# Reagents
REAGENT_CONTAINER_TYPE = 'brooksreservoirplate_12_wellplate_21000ul'
# modified from custom labware as API 2 doesn't support labware.create anymore, so the old add_labware script can't be used
REAGENT_CONTAINER_POSITION = '7'
# Beads
BEAD_CONTAINER_TYPE = '4ti_96_wellplate_200ul'
# modified from custom labware as API 2 doesn't support labware.create anymore, so the old add_labware script can't be used
# old plate type was '4ti0136_96_deep-well'
BEAD_CONTAINER_POSITION = '8'
# Settings
LIQUID_WASTE_WELL = 'A5'
BEADS_WELL = 'A1'
DEAD_TOTAL_VOL = 5
SLOW_HEAD_SPEEDS = {'x': 600 // 4, 'y': 400 // 4, 'z': 125 // 10, 'a': 125 // 10}
DEFAULT_HEAD_SPEEDS = {'x': 400, 'y': 400, 'z': 125, 'a': 100}
IMMOBILISE_MIX_REPS = 10
MAGDECK_HEIGHT = 20
AIR_VOL_COEFF = 0.1
ETHANOL_VOL = 150
WASH_TIME = 0.5
ETHANOL_DEAD_VOL = 50
ELUTION_MIX_REPS = 20
ELUTANT_SEP_TIME = 1
ELUTION_DEAD_VOL = 2
### Errors
if sample_number > 48:
raise ValueError('sample number cannot exceed 48')
### Loading Tiprack
# Calculates whether one/two/three/four/five tipracks are needed, which are in slots 3, 6, 9, 2, and 5 respectively
total_tips = sample_number * TIPS_PER_SAMPLE
tiprack_num = total_tips // 96 + (1 if total_tips % 96 > 0 else 0)
slots = CANDIDATE_TIPRACK_SLOTS[:tiprack_num]
tipracks = [protocol.load_labware(tiprack_type, slot) for slot in slots]
# changed to protocol.load_labware for API version 2
### Loading Pipettes
pipette = protocol.load_instrument(PIPETTE_TYPE, mount="left", tip_racks=tipracks)
# changed to protocol.load_labware for API version 2
# changed from P300_MULTI to PIPETTE_TYPE constant, which is set to p300_multi_gen2
# removed 'aspirate_flow_rate=PIPETTE_ASPIRATE_RATE, dispense_flow_rate=PIPETTE_DISPENSE_RATE'
# no longer taken as arguments in API version 2
pipette.aspirate_flow_rate=PIPETTE_ASPIRATE_RATE
pipette.dispense_flow_rate=PIPETTE_DISPENSE_RATE
# for reference: default aspirate/dispense flow rate for p300_multi_gen2 is 94 ul/s
### Define Labware
# Magnetic Module
MAGDECK = protocol.load_module('magdeck', MAGDECK_POSITION)
# changed to protocol.load_module for API version 2
# 'magdeck' is the gen 1 magnetic module, use 'magnetic module gen2' for the gen 2 magnetic module
# if using gen 2 module, need to change settling time! (see comments under Constants)
MAGDECK.disengage()
# disengages the magnets when it is turned on
mag_plate = MAGDECK.load_labware(MIX_PLATE_TYPE)
# old code:
# mag_plate = labware.load(MIX_PLATE_TYPE, MAGDECK_POSITION, share=True)
# changed to MAGDECK.load_labware for API version 2
# removed MAGDECK_POSITION and share=True as API version 2 uses ModuleContext.load_labware() to load plates directly onto the magnetic module
# Mix Plate
mix_plate = protocol.load_labware(MIX_PLATE_TYPE, MIX_PLATE_POSITION)
# changed to protocol.load_labware for API version 2
# Reagents
reagent_container = protocol.load_labware(REAGENT_CONTAINER_TYPE, REAGENT_CONTAINER_POSITION)
# changed to protocol.load_labware for API version 2
# Beads Container
bead_container = protocol.load_labware(BEAD_CONTAINER_TYPE, BEAD_CONTAINER_POSITION)
# changed to protocol.load_labware for API version 2
### Calculating Columns
# Total number of columns
col_num = sample_number // 8 + (1 if sample_number % 8 > 0 else 0)
# Columns containing samples in location 1 (magnetic module)
# generates a list of lists: [[A1, B1, C1...], [A2, B2, C2...]...]
samples = [col for col in mag_plate.columns()[sample_offset : col_num + sample_offset]]
# old code:
# samples = [col for col in mag_plate.cols()[0 + sample_offset : col_num + sample_offset]]
# load_labware needs to take 'columns' attribute instead of just 'cols' in API version 2
# removed '0 +'
# Columns to mix beads and samples in location 4 (mix plate)
mixing = [col for col in mix_plate.columns()[sample_offset:col_num + sample_offset]]
# old code:
# mixing = [col for col in mix_plate.columns()[0 + sample_offset:col_num + sample_offset]]
# load_labware needs to take 'columns' attribute instead of just 'cols' in API version 2
# removed '0 +'
# Columns to dispense output in location 1 (magnetic module)
# purified parts are dispensed 6 rows to the right of their initial location
# this is why the number of samples cannot exceed 48
output = [col for col in mag_plate.columns()[6 + sample_offset:col_num + 6 + sample_offset]]
# old code:
# output = [col for col in mag_plate.cols()[6 + sample_offset:col_num + 6 + sample_offset]]
# load_labware needs to take 'columns' attribute instead of just 'cols' in API version 2
### Defining Wells for Reagents, Liquid Waste, and Beads
liquid_waste = reagent_container.wells(LIQUID_WASTE_WELL)
ethanol = reagent_container.wells(ethanol_well)
elution_buffer = reagent_container.wells(elution_buffer_well)
beads = bead_container[BEADS_WELL]
# old code:
# beads = bead_container.wells(BEADS_WELL)
# removed .wells, which created a list containing a single well position instead of just a well position
### Define bead and mix volume
bead_volume = sample_volume * bead_ratio
if bead_volume / 2 > pipette.max_volume:
mix_vol = pipette.max_volume
else:
mix_vol = bead_volume / 2
total_vol = bead_volume + sample_volume + DEAD_TOTAL_VOL
### Steps
# Mix beads and parts
for target in range(int(len(samples))):
# Aspirate beads
pipette.pick_up_tip()
pipette.aspirate(bead_volume, beads)
protocol.max_speeds.update(SLOW_HEAD_SPEEDS)
# old code:
# robot.head_speed(**SLOW_HEAD_SPEEDS, combined_speed=max(SLOW_HEAD_SPEEDS.values()))
# robot.head_speed not used in API version 2
# replaced with protocol.max_speeds
# new code no longer uses the lower value between combined speed or specified speed
# just uses each axis' specified speed directly
# Aspirate samples
pipette.aspirate(sample_volume + DEAD_TOTAL_VOL, samples[target][0])
# old code:
# pipette.aspirate(sample_volume + DEAD_TOTAL_VOL, samples[target][0])
# TypeError: location should be a Well or Location, but it is [list of all wells in column 1]
# added [0] because samples[target] returned a list of every well in column 1
# the aspirate command for multi channel pipettes takes just one well (the well furthest from the door, row A) as the position of the pipette
# Transfer and mix on mix_plate
pipette.dispense(total_vol, mixing[target][0])
# similar to above, added [0] because samples[target] returned a list of every well in column 1 rather than just one well
pipette.mix(IMMOBILISE_MIX_REPS, mix_vol, mixing[target][0])
# similar to above, added [0] because samples[target] returned a list of every well in column 1 rather than just one well
pipette.blow_out()
# Dispose of tip
protocol.max_speeds.update(DEFAULT_HEAD_SPEEDS)
# old code:
# robot.head_speed(**DEFAULT_HEAD_SPEEDS, combined_speed=max(DEFAULT_HEAD_SPEEDS.values()))
# robot.head_speed not used in API version 2
# replaced with protocol.max_speeds
# new code no longer uses the lower value between combined speed or specified speed
# just uses each axis' specified speed directly
pipette.drop_tip()
# Immobilise sample
protocol.delay(minutes=incubation_time)
# old code:
# pipette.delay(minutes=incubation_time)
# API version 2 no longer has delay() for pipettes, it uses protocol.delay() to pause the entire protocol
# Transfer beads+samples back to magdeck
for target in range(int(len(samples))):
pipette.transfer(total_vol, mixing[target], samples[target], blow_out=True, blowout_location='destination well')
# added blowout_location=destination well because default location of blowout is waste in API version 2
# Engage MagDeck and incubate
MAGDECK.engage(height=MAGDECK_HEIGHT)
protocol.delay(minutes=settling_time)
# old code:
# pipette.delay(minutes=settling_time)
# API Version 2 no longer has delay() for pipettes, it uses protocol.delay() to pause the entire protocol
# Remove supernatant from magnetic beads
for target in samples:
pipette.transfer(total_vol, target, liquid_waste, blow_out=True)
# Wash beads twice with 70% ethanol
air_vol = pipette.max_volume * AIR_VOL_COEFF
for cycle in range(2):
for target in samples:
pipette.transfer(ETHANOL_VOL, ethanol, target, air_gap=air_vol)
protocol.delay(minutes=WASH_TIME)
# old code:
# pipette.delay(minutes=WASH_TIME)
# API Version 2 no longer has delay() for pipettes, it uses protocol.delay() to pause the entire protocol
for target in samples:
pipette.transfer(ETHANOL_VOL + ETHANOL_DEAD_VOL, target, liquid_waste, air_gap=air_vol)
# Dry at room temperature
protocol.delay(minutes=drying_time)
# old code:
# pipette.delay(minutes=drying_time)
# API Version 2 no longer has delay() for pipettes, it uses protocol.delay() to pause the entire protocol
# Disengage MagDeck
MAGDECK.disengage()
# Mix beads with elution buffer
if elution_buffer_volume / 2 > pipette.max_volume:
mix_vol = pipette.max_volume
else:
mix_vol = elution_buffer_volume / 2
for target in samples:
pipette.transfer(elution_buffer_volume, elution_buffer, target, mix_after=(ELUTION_MIX_REPS, mix_vol))
# Incubate at room temperature
protocol.delay(minutes=elution_time)
# old code:
# pipette.delay(minutes=elution_time)
# API Version 2 no longer has delay() for pipettes, it uses protocol.delay() to pause the entire protocol
# Engage MagDeck (remains engaged for DNA elution)
MAGDECK.engage(height=MAGDECK_HEIGHT)
protocol.delay(minutes=ELUTANT_SEP_TIME)
# old code:
# pipette.delay(minutes=ELUTANT_SEP_TIME)
# API Version 2 no longer has delay() for pipettes, it uses protocol.delay() to pause the entire protocol
# Transfer purified parts to a new well
for target, dest in zip(samples, output):
pipette.transfer(elution_buffer_volume - ELUTION_DEAD_VOL, target,
dest, blow_out=False)
# Disengage MagDeck
MAGDECK.disengage()
magbead(sample_number=sample_number, ethanol_well=ethanol_well)
# removed elution buffer well='A1', added that to where the function is defined
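# Worked example (illustrative, using the constants defined above): with
# sample_number=8 and TIPS_PER_SAMPLE=9, total_tips = 8 * 9 = 72, so
# tiprack_num = 72 // 96 + (1 if 72 % 96 > 0 else 0) = 1 and only slot '3'
# of CANDIDATE_TIPRACK_SLOTS is loaded with a tiprack.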
| 47.393082
| 159
| 0.618141
| 1,865
| 15,071
| 4.821984
| 0.19571
| 0.023352
| 0.025687
| 0.010119
| 0.433559
| 0.404426
| 0.340265
| 0.301901
| 0.278772
| 0.258312
| 0
| 0.022544
| 0.317165
| 15,071
| 317
| 160
| 47.542587
| 0.851326
| 0.457169
| 0
| 0.12931
| 0
| 0
| 0.045306
| 0.017235
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017241
| false
| 0
| 0.008621
| 0
| 0.025862
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cc6abadfa6473eb99280316b2542ed0cff6cc265
| 3,056
|
py
|
Python
|
Features/CAI.py
|
jcg/d-tailor
|
7ea83bcf7a2cda21eb8727575ff2b20ac8b49606
|
[
"BSD-2-Clause"
] | 14
|
2016-05-19T08:31:44.000Z
|
2021-08-05T08:56:56.000Z
|
Features/CAI.py
|
jcg/d-tailor
|
7ea83bcf7a2cda21eb8727575ff2b20ac8b49606
|
[
"BSD-2-Clause"
] | 1
|
2018-09-25T12:00:23.000Z
|
2018-12-10T18:42:31.000Z
|
Features/CAI.py
|
jcg/d-tailor
|
7ea83bcf7a2cda21eb8727575ff2b20ac8b49606
|
[
"BSD-2-Clause"
] | 4
|
2016-06-23T21:40:49.000Z
|
2021-02-02T03:05:35.000Z
|
'''
Created on Nov 16, 2011
@author: jcg
'''
from Features.Feature import Feature
import Functions
from uuid import uuid4
class CAI(Feature):
"""
CAI Feature
solution - solution where CAI should be computed
label - some label to append to the name
cai_range - start and end position to calculate CAI - a tuple in the form (start, end)
mutable_region - a list with all bases that can be mutated
cds_region - a pair with begin and end of CDSs - example: (0,100)
keep_aa - boolean option indicating if in the design mode amino acids should be kept
"""
def __init__(self, caiObject = None, solution = None, label="", args = { 'cai_range' : (0,59),
'mutable_region' : None,
'cds_region' : None,
'keep_aa' : True }):
if caiObject is None:  # create new instance
#General properties of feature
Feature.__init__(self, solution=solution, label=label)
#Specifics of this Feature
self.cai_range = args['cai_range']
self.sequence = solution.sequence[self.cai_range[0]:(self.cai_range[1]+1)]
self.mutable_region = args['mutable_region'] if 'mutable_region' in args else solution.mutable_region
self.cds_region = args['cds_region'] if 'cds_region' in args else solution.cds_region
self.keep_aa = args['keep_aa'] if 'keep_aa' in args else solution.keep_aa
self.set_scores()
self.set_level()
else: #copy instance
Feature.__init__(self, caiObject)
self.cai_range = caiObject.cai_range
self.sequence = caiObject.sequence
self.mutable_region = caiObject.mutable_region
self.cds_region = caiObject.cds_region
self.keep_aa = caiObject.keep_aa
self.codons_cai = caiObject.codons_cai
self.scores = caiObject.scores
def set_scores(self, scoring_function=Functions.analyze_cai):
self.scores[self.label+"CAI"] = scoring_function(self.sequence)
def mutate(self, operator=Functions.SimpleCAIOperator):
if not self.targetInstructions:
return None
new_seq = operator(self.solution.sequence, self.cai_range, self.keep_aa, self.mutable_region, self.cds_region, self.targetInstructions['direction'])
if not new_seq:
return None
return Solution.Solution(sol_id=str(uuid4().int), sequence=new_seq, cds_region = self.cds_region, mutable_region = self.mutable_region, parent=self.solution, design=self.solution.designMethod)
import Solution
| 51.79661
| 200
| 0.571008
| 341
| 3,056
| 4.920821
| 0.313783
| 0.085221
| 0.035757
| 0.045292
| 0.123957
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009073
| 0.350785
| 3,056
| 58
| 201
| 52.689655
| 0.836694
| 0.179647
| 0
| 0.055556
| 0
| 0
| 0.050061
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.111111
| 0
| 0.305556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cc6c1dd9f30f9947c241979383cf14113417045e
| 17,690
|
py
|
Python
|
ebot_main/scripts/task5.py
|
Jovian-Dsouza/sahayak_bot
|
3565e910218e73e95ed8ce9b9d92dfcbe537894e
|
[
"MIT"
] | 3
|
2021-10-02T10:36:48.000Z
|
2022-03-18T15:47:17.000Z
|
ebot_main/scripts/task5.py
|
Jovian-Dsouza/sahayak_bot
|
3565e910218e73e95ed8ce9b9d92dfcbe537894e
|
[
"MIT"
] | 2
|
2021-10-06T07:21:57.000Z
|
2022-03-04T03:20:27.000Z
|
ebot_main/scripts/task5.py
|
Jovian-Dsouza/sahayak_bot
|
3565e910218e73e95ed8ce9b9d92dfcbe537894e
|
[
"MIT"
] | 1
|
2021-09-06T16:34:45.000Z
|
2021-09-06T16:34:45.000Z
|
#! /usr/bin/env python
'''
This node uses the detection_info topic and performs the actual Ur5 arm manipulation
'''
import rospy
import random
from math import pi, sin, cos
from geometry_msgs.msg import Point, Quaternion, Pose, PointStamped, PoseStamped
from std_msgs.msg import Header
from object_msgs.msg import ObjectPose
from std_srvs.srv import Empty
from tf.transformations import quaternion_from_euler
from tf.transformations import euler_from_quaternion
from ebot_mani.srv import *
from testNav import Ebot
from perception.srv import *
transformPose = rospy.ServiceProxy('/get_transform_pose', GetTransformPose)
transformPoint = rospy.ServiceProxy('/get_transform_point', GetTransformPoint)
def TransformPoint(point, from_frame, to_frame):
req = GetTransformPointRequest()
req.point = point
req.from_frame = from_frame
req.to_frame = to_frame
return transformPoint(req).point
# width estimate = 0.2 + width of detection window (printed in terminal)
# w_dict uses real model names
w_dict = {'coke_can': 0.27086,
'battery': 0.26500,
'glue': 0.31,
'eYFi_board': 0.5,
'adhesive': 0.267674286664,
'water_glass': 0.2,
'robot_wheels': 0.26,
'FPGA_board': 0.3
}
def printReached(name):
print(">> " + name + " Reached")
def printPicked(name):
print(">> " + name + " Picked")
def printDropped(name, dropbox):
print(">> " + name + " Dropped in " + dropbox)
def printPoint(point):
p = point
print("create_point(%0.5f, %0.5f, %0.5f)" %
(p.x, p.y, p.z))
def create_point(x, y, z):
position = Point()
position.x = x
position.y = y
position.z = z
return position
def printPose(pose):
p = pose.position
q = pose.orientation
print("create_pose_quaternion(%0.5f, %0.5f, %0.5f, %0.5f, %0.5f, %0.5f, %0.5f)" %
(p.x, p.y, p.z, q.x, q.y, q.z, q.w))
def create_pose_quaternion(x, y, z, qx, qy, qz, qw):
'''
returns a Pose() object from the given x, y, z, qx, qy , qz, qw values
'''
pose = Pose()
pose.position.x = x
pose.position.y = y
pose.position.z = z
pose.orientation.x = qx
pose.orientation.y = qy
pose.orientation.z = qz
pose.orientation.w = qw
return pose
def orient_from_euler(roll, pitch, yaw):
'''
Input is roll, pitch, yaw
output is Quaternion pose.orientation
'''
q = quaternion_from_euler(roll, pitch, yaw)
o = Quaternion()
o.x, o.y, o.z, o.w = q[0], q[1], q[2], q[3]
return o
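# Illustrative example (not in the original script): a pure 90-degree yaw is
# orient_from_euler(0, 0, pi / 2), which gives approximately
# Quaternion(x=0, y=0, z=0.7071, w=0.7071).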
def createPoseStamped(point):
poseStamped = PoseStamped()
poseStamped.header.frame_id = 'base_link'
poseStamped.header.stamp = rospy.Time.now()
poseStamped.pose.position = point
poseStamped.pose.orientation.x = 0
poseStamped.pose.orientation.y = -0.7071
poseStamped.pose.orientation.z = 0
poseStamped.pose.orientation.w = 0.7071
return poseStamped
def pickupObject(object_name):
'''
Note : object_name should be the real model name and not the gazebo model name
'''
ur5.openGripper()
graspPose_pub.publish(createPoseStamped(detect.dict[object_name]))
if object_name == 'eYFi_board':
# TODO need a better way of finding the object's yaw angle instead of manually giving it
return ur5.graspObjectVertical(detect.dict[object_name], width=w_dict[object_name], yaw=pi/4).success
elif object_name == 'FPGA_board':
# return ur5.graspObjectVertical(detect.dict[object_name], width=w_dict[object_name], yaw=pi/3).success
return ur5.graspObjectHorizontal(detect.dict[object_name], width=w_dict[object_name], yaw=-pi/6)
else:
# .success
return ur5.graspObjectHorizontal(detect.dict[object_name], width=w_dict[object_name], yaw=0)
class Detect():
def __init__(self):
self.dict = {}
rospy.loginfo("waiting for detect service")
rospy.wait_for_service('/ebot/detect')
self.detectTable = rospy.ServiceProxy('/ebot/detectTable', Empty)
self.detect_service = rospy.ServiceProxy('/ebot/detect', Empty)
rospy.Subscriber("/detection_info", ObjectPose, self.detect_callback)
def print_detected(self):
for item in self.dict.keys():
print(">> " + item + " Identified")
def detect(self):
self.dict = {}
self.detect_service()
rospy.sleep(2)
self.print_detected()
def detect_callback(self, msg):
self.dict[msg.name] = msg.pose.pose.position
self.frame_id = msg.pose.header.frame_id
class Ur5():
def __init__(self):
rospy.loginfo("waiting for ur5_service")
rospy.wait_for_service('ebot_mani/set_named_pose')
rospy.wait_for_service('ebot_mani/set_pose')
rospy.wait_for_service('ebot_mani/set_gripper')
rospy.wait_for_service('ebot_mani/open_gripper')
rospy.wait_for_service('ebot_mani/grasp_object_vertical')
rospy.wait_for_service('ebot_mani/grasp_object_horizontal')
rospy.wait_for_service('ebot_mani/set_pose_relative')
rospy.loginfo("connected to services")
self.go_to_named_pose = rospy.ServiceProxy(
'ebot_mani/set_named_pose', SetNamedPose)
self.print_name_pose = rospy.ServiceProxy(
'/ebot_mani/print_name_pose', SetNamedPose)
self.go_to_pose = rospy.ServiceProxy('ebot_mani/set_pose', SetPose)
self.closeGripper = rospy.ServiceProxy(
'ebot_mani/set_gripper', SetGripper)
self.openGripper = rospy.ServiceProxy('ebot_mani/open_gripper', Empty)
self.graspObjectVerticalService = rospy.ServiceProxy(
'ebot_mani/grasp_object_vertical', GraspObject)
self.graspObjectHorizontalService = rospy.ServiceProxy(
'ebot_mani/grasp_object_horizontal', GraspObject)
self.set_pose_relative = rospy.ServiceProxy(
'ebot_mani/set_pose_relative', SetPose)
self.getCurrentPoseOdom = rospy.ServiceProxy(
'ebot_mani/get_current_pose_odom', GetPose)
self.set_pose_odom = rospy.ServiceProxy(
'ebot_mani/set_pose_odom', SetPose)
self.set_pose_wrist = rospy.ServiceProxy(
'ebot_mani/set_pose_wrist', SetPose)
self.align_wrist = rospy.ServiceProxy('ebot_mani/align_wrist', Empty)
self.set_pose_wrist_no_align = rospy.ServiceProxy(
'ebot_mani/set_pose_wrist_no_align', SetPose)
def go_to_pose_wrist(self, arg_pose):
req = SetPoseRequest()
req.pose = arg_pose
return self.set_pose_wrist(req).success
def go_to_pose_wrist_no_align(self, arg_pose):
req = SetPoseRequest()
req.pose = arg_pose
return self.set_pose_wrist_no_align(req).success
def go_to_pose_relative(self, arg_pose):
req = SetPoseRequest()
req.pose = arg_pose
return self.set_pose_relative(req).success
# def graspObjectHorizontal(self, point, width, yaw=0):
# req = GraspObjectRequest()
# req.point = point
# req.width = width
# req.yaw = yaw
# return self.graspObjectHorizontalService(req)
def graspObjectVerticalOld(self, point, width, yaw):
req = GraspObjectRequest()
req.point = point
req.width = width
req.yaw = yaw
return self.graspObjectVerticalService(req).success
def graspObjectVertical(self, point, width, yaw):
'''
Given the position of object within reach it grasps it.
Argument : position (Point msg)
'''
self.align_wrist()
req = GetTransformPointRequest()
req.point = point
req.from_frame = "base_link"
req.to_frame = "wrist_3_link"
point = transformPoint(req).point
graspPose = Pose()
graspPose.position = point
graspPose.position.x -= 0.25 * sin(yaw)
graspPose.position.y -= 0.15 # + 0.1
graspPose.position.z -= 0.12 # Should be 0.25 * sin(grasp_angle)
# Pose just Above the object
flag = self.go_to_pose_wrist(graspPose)
if flag == False:
rospy.logerr("Could not Reach grasp Pose")
return False
# Set grasping angle
if yaw != 0.0:
newOPose = Pose()
newOPose.orientation = orient_from_euler(0, 0, yaw)
flag = self.go_to_pose_wrist(newOPose)
if flag == False:
rospy.logerr("Could not Reach grasp Pose")
return False
newOPose = Pose()
newOPose.orientation = orient_from_euler(0.558505, 0, 0) # 32 deg
flag = self.go_to_pose_wrist_no_align(newOPose)
if flag == False:
rospy.logerr("Could not Reach grasp Pose")
return False
newOPose = Pose()
newOPose.position.z += 0.01
flag = self.go_to_pose_wrist(newOPose)
if flag == False:
rospy.logerr("Could not Reach grasp Pose")
return False
return flag
def graspObjectHorizontal(self, point, width, yaw):
'''
Given the position of object within reach it grasps it.
Argument : position (Point msg)
'''
self.align_wrist()
req = GetTransformPointRequest()
req.point = point
req.from_frame = "base_link"
req.to_frame = "wrist_3_link"
point = transformPoint(req).point
graspPose = Pose()
graspPose.position = point
graspPose.position.x -= 0.25 * sin(yaw)
graspPose.position.y -= 0.188 # + 0.1
graspPose.position.z -= 0.07 # Should be 0.25 * sin(grasp_angle)
# Pose just Above the object
flag = self.go_to_pose_wrist(graspPose)
if flag == False:
rospy.logerr("Could not Reach grasp Pose")
return False
# Set grasping angle
if yaw != 0.0:
newOPose = Pose()
newOPose.orientation = orient_from_euler(0, 0, yaw) # 32 deg
flag = self.go_to_pose_wrist(newOPose)
if flag == False:
rospy.logerr("Could not Reach grasp Pose")
return False
newOPose = Pose()
newOPose.orientation = orient_from_euler(0.558505, 0, 0) # 32 deg
flag = self.go_to_pose_wrist_no_align(newOPose)
if flag == False:
rospy.logerr("Could not Reach grasp Pose")
return False
# # #Grasp
self.closeGripper(width)
rospy.sleep(1)
newOPose = Pose()
newOPose.position.z = -0.09
flag = self.go_to_pose_wrist_no_align(newOPose)
if flag == False:
rospy.logerr("Could not Reach grasp Pose")
return False
return True
def main():
# maind()
getFPGA()
ur5.openGripper()
def maind():
ur5.go_to_named_pose("navPose")
# ebot.go_to_goal('store_table_fpga')
ebot.go_to_goal('store_table')
# ebot.go_to_goal_precise('store_table')
ebot.print_current_pose()
# detect.detectTable()
# ur5.go_to_named_pose("seeObjectJ")
ur5.go_to_named_pose("fpgaPoseOdom")
detect.detect()
object_name = 'FPGA_board'
pointBaseLink = detect.dict[object_name]
graspPose_pub.publish(createPoseStamped(pointBaseLink))
pointOdom = TransformPoint(pointBaseLink, 'base_link', 'odom')
ur5.go_to_named_pose("graspVerticalJ")
pose = Pose()
pose.position.z = 0.1
ur5.go_to_pose_relative(pose)
ebot.go_to_goal_precise('store_table_close')
ebot.go_to_waypoint_relative(0.4, 0 ,0)
pointBaseLink = TransformPoint(pointOdom,'odom', 'base_link')
graspPose_pub.publish(createPoseStamped(pointBaseLink))
detect.detectTable()
rospy.sleep(0.1)
flag = ur5.graspObjectVerticalOld(
pointBaseLink, width=w_dict[object_name], yaw=pi/3)
while flag == False:
ebot.go_to_waypoint_relative(0.2, 0, 0)
detect.detect()
flag = ur5.graspObjectVerticalOld(
pointBaseLink, width=w_dict[object_name], yaw=pi/3)
ur5.openGripper()
def getFPGAnew():
ur5.go_to_named_pose("navPose")
# ebot.go_to_goal('store_table_fpga')
ebot.go_to_goal('store_table')
ebot.go_to_goal_precise('store_table_fpga')
ebot.print_current_pose()
detect.detectTable()
ur5.go_to_named_pose("seeObjectJ")
ur5.go_to_named_pose("fpgaPoseOdom")
ebot.go_to_waypoint_relative(0.25, 0, 0)
ur5.go_to_named_pose("fpgaPoseOdom")
detect.detect()
detect.detectTable()
ur5.openGripper()
object_name = 'FPGA_board'
graspPose_pub.publish(createPoseStamped(detect.dict[object_name]))
printPoint(detect.dict[object_name])
ur5.go_to_named_pose("graspVerticalJ")
pose = Pose()
pose.position.z = 0.1
ur5.go_to_pose_relative(pose)
flag = ur5.graspObjectVerticalOld(
detect.dict[object_name], width=w_dict[object_name], yaw=pi/3)
while flag == False:
ebot.go_to_waypoint_relative(0.2, 0, 0)
detect.detect()
flag = ur5.graspObjectVerticalOld(
detect.dict[object_name], width=w_dict[object_name], yaw=pi/3)
ebot.go_to_pose_relative(-1, 0, 0, rospy.Duration(5))
ur5.go_to_named_pose("navPose")
ebot.go_to_goal("store_exit")
def getFPGA():
ur5.go_to_named_pose("navPose")
# ebot.go_to_goal('store_table_fpga')
ebot.go_to_goal('store_table')
ebot.go_to_goal_precise('store_table_fpga')
ebot.print_current_pose()
detect.detectTable()
ur5.go_to_named_pose("seeObjectJ")
ur5.go_to_named_pose("fpgaPoseOdom")
ebot.go_to_waypoint_relative(0.25, 0, 0)
ur5.go_to_named_pose("fpgaPoseOdom")
detect.detect()
detect.detectTable()
ur5.openGripper()
object_name = 'FPGA_board'
graspPose_pub.publish(createPoseStamped(detect.dict[object_name]))
printPoint(detect.dict[object_name])
ur5.go_to_named_pose("seeObjectJ")
flag = ur5.graspObjectVerticalOld(
detect.dict[object_name], width=w_dict[object_name], yaw=pi/3)
while flag == False:
ebot.go_to_waypoint_relative(0.2, 0, 0)
detect.detect()
flag = ur5.graspObjectVerticalOld(
detect.dict[object_name], width=w_dict[object_name], yaw=pi/3)
ebot.go_to_pose_relative(-1, 0, 0, rospy.Duration(5))
ur5.go_to_named_pose("navPose")
ebot.go_to_goal("store_exit")
def getGlue():
ur5.go_to_named_pose("navPose")
# TODO check if in meeting Room
# ebot.go_to_goal('meeting_entry')
# print("Entered room")
ebot.print_current_pose()
ebot.go_to_goal_precise('meeting_table')
ebot.go_to_goal('meeting_table')
print("Reached Goal")
ebot.print_current_pose()
ebot.applyBrakes()
detect.detectTable()
ur5.go_to_named_pose("meetingTable")
detect.detect()
pickupObject('glue')
ur5.go_to_named_pose("navPose")
ebot.releaseBrakes()
def enter_pantry():
ur5.go_to_named_pose("navPose")
ebot.go_to_goal('pantry_entry')
ebot.go_to_waypoint_relative(1.3, 0, 0)
printReached("pantry")
def getCoke():
enter_pantry()
ebot.go_to_goal_precise('pantry_table1')
ebot.go_to_goal('pantry_table1')
ebot.applyBrakes()
detect.detectTable()
ur5.go_to_named_pose("pantryTable1Odom")
detect.detect()
pickupObject('coke_can')
ur5.go_to_named_pose("navPoseOld")
ebot.releaseBrakes()
exit_pantry()
def exit_pantry():
# ebot.go_to_goal('pantry_exit')
# ebot.go_to_waypoint_relative(1.2,0,0)
# ebot.go_to_goal('pantry_exit_old')
ebot.go_to_goal_precise('pantry_exit')
ebot.set_yaw(pi/2)
ebot.go_to_waypoint_relative(1.2, 0, 0)
def dropbox3():
ebot.go_to_goal('research_entry')
ebot.print_current_pose()
ebot.go_to_goal('research_dropbox')
ebot.print_current_pose()
ebot.applyBrakes()
detect.detectTable()
ur5.go_to_named_pose("researchDropbox")
ur5.openGripper()
rospy.sleep(0.5)
ur5.go_to_named_pose("navPose")
ebot.releaseBrakes()
def exit_meeting():
ebot.go_to_goal_precise('meeting_exit')
ebot.go_to_goal('meeting_exit')
def enter_meeting():
ebot.go_to_goal('meeting_entry')
ebot.go_to_waypoint_relative(1, 0, 0)
def dropbox2():
ebot.go_to_goal_precise('meeting_dropbox')
# ebot.go_to_goal('meeting_dropbox')
ebot.print_current_pose()
detect.detectTable()
ur5.go_to_named_pose("researchDropboxJ")
ur5.go_to_named_pose("meetingDropboxOdom")
ur5.openGripper()
rospy.sleep(0.5)
ur5.go_to_named_pose("navPose")
# ebot.go_to_pose_relative(0.95,0,0)
def enter_conference_room():
ebot.go_to_goal('conference_entry')
ebot.go_to_waypoint_relative(1, 0, 0)
def dropbox1():
ur5.go_to_named_pose("navPose")
enter_conference_room()
ebot.go_to_goal('conference_dropbox')
ebot.print_current_pose()
ebot.applyBrakes()
detect.detectTable()
ur5.go_to_named_pose("conferenceDropbox")
ur5.openGripper()
rospy.sleep(0.5)
ur5.go_to_named_pose("navPose")
ebot.releaseBrakes()
exit_conference_room()
def exit_conference_room():
ebot.set_yaw(-3*pi/2)
ebot.go_to_waypoint_relative(1, 0, 0)
def subtask1():
getFPGA()
dropbox1()
def subtask2():
getCoke()
enter_meeting()
dropbox2()
def subtask3():
getGlue()
exit_meeting()
dropbox3()
if __name__ == '__main__':
rospy.init_node('grasping_node')
graspPose_pub = rospy.Publisher("/graspPose", PoseStamped, queue_size=1)
ur5 = Ur5()
ebot = Ebot()
detect = Detect()
# main()
getFPGA()
# subtask1()
# subtask2()
# subtask3()
# ebot.releaseBrakes()
| 28.624595
| 111
| 0.656981
| 2,307
| 17,690
| 4.789337
| 0.124404
| 0.032582
| 0.032582
| 0.036474
| 0.629559
| 0.582858
| 0.514436
| 0.493981
| 0.452801
| 0.43651
| 0
| 0.022219
| 0.229112
| 17,690
| 617
| 112
| 28.670989
| 0.788003
| 0.097456
| 0
| 0.46875
| 0
| 0.002404
| 0.119858
| 0.031831
| 0
| 0
| 0
| 0.003241
| 0
| 1
| 0.096154
| false
| 0
| 0.028846
| 0
| 0.182692
| 0.067308
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cc6ca3b5cf9fc18b22965b52903b9910ca3fbede
| 2,224
|
py
|
Python
|
attacks.py
|
giuscri/thesis
|
d7aa0a8476f53ad304495b437841af1a8d6c87d4
|
[
"MIT"
] | null | null | null |
attacks.py
|
giuscri/thesis
|
d7aa0a8476f53ad304495b437841af1a8d6c87d4
|
[
"MIT"
] | 10
|
2018-05-11T08:40:48.000Z
|
2018-06-29T16:14:27.000Z
|
attacks.py
|
giuscri/thesis
|
d7aa0a8476f53ad304495b437841af1a8d6c87d4
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from cleverhans.attacks import FastGradientMethod
from cleverhans.utils_keras import KerasModelWrapper
from keras.utils import to_categorical
import keras.backend as K
import numpy as np
from functools import lru_cache
from pickle import loads, dumps
from models import filter_correctly_classified_examples
@lru_cache()
def __fast_gradient_sign_tf_symbols(model, serializedX, serializedy_target):
X = loads(serializedX)
y_target = loads(serializedy_target)
cleverhans_model = KerasModelWrapper(model)
attack = FastGradientMethod(cleverhans_model)
X_sym = tf.placeholder(tf.float32, shape=model.input.shape)
eta_sym = tf.placeholder(tf.float32)
if y_target is not None:
one_hot_y_target_sym = tf.placeholder(tf.float32, shape=model.output.shape)
else:
one_hot_y_target_sym = None
kwargs = {"eps": eta_sym, "ord": np.inf, "clip_min": 0., "clip_max": 1.}
if y_target is not None:
kwargs["y_target"] = one_hot_y_target_sym
example_sym = attack.generate(X_sym, **kwargs)
return X_sym, one_hot_y_target_sym, example_sym, eta_sym
def adversarial_example(model, X, y_target=None, eta=0.15):
assert y_target is None or len(y_target) == len(X)
with_target = y_target is not None
if with_target:
num_classes = model.output.shape.as_list()[-1]
one_hot_y_target = to_categorical(y_target, num_classes=num_classes)
serializedX = dumps(X)
serializedy_target = dumps(y_target)
symbols = __fast_gradient_sign_tf_symbols(model, serializedX, serializedy_target)
X_sym, one_hot_y_target_sym, example_sym, eta_sym = symbols
session = K.get_session()
feed_dict = {X_sym: X, eta_sym: eta}
if with_target:
feed_dict[one_hot_y_target_sym] = one_hot_y_target
return session.run(example_sym, feed_dict=feed_dict)
def adversarial_score(model, X_test, y_test, eta=None, y_target=None):
X, y = filter_correctly_classified_examples(model, X_test, y_test)
adversarialX = adversarial_example(model, X, y_target, eta)
fooling_examples, _ = filter_correctly_classified_examples(model, adversarialX, y)
score = 1 - len(fooling_examples) / len(X)
return score
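# Hedged usage sketch (not part of this module; `model`, X_test and y_test are
# assumed to be a compiled Keras classifier and its test split, with inputs
# scaled to [0, 1]):
#
#   score = adversarial_score(model, X_test, y_test, eta=0.15)
#   print("fraction of correctly classified examples fooled:", score)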
| 34.215385
| 86
| 0.748201
| 330
| 2,224
| 4.70303
| 0.254545
| 0.090206
| 0.036082
| 0.06701
| 0.375644
| 0.246778
| 0.183634
| 0.121778
| 0.121778
| 0.121778
| 0
| 0.007023
| 0.167716
| 2,224
| 64
| 87
| 34.75
| 0.831442
| 0
| 0
| 0.085106
| 0
| 0
| 0.013489
| 0
| 0
| 0
| 0
| 0
| 0.021277
| 1
| 0.06383
| false
| 0
| 0.191489
| 0
| 0.319149
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cc749c4fcb4807611aed92e6a739be63d7aba80c
| 1,746
|
py
|
Python
|
demos/matplotlib/gradient_chart.py
|
0lru/p3ui
|
162c6c68f4a55ec109a593c8ced66a62520b5602
|
[
"MIT"
] | 21
|
2021-07-22T21:33:01.000Z
|
2022-02-12T15:17:46.000Z
|
demos/matplotlib/gradient_chart.py
|
0lru/p3ui
|
162c6c68f4a55ec109a593c8ced66a62520b5602
|
[
"MIT"
] | 3
|
2021-07-26T19:00:39.000Z
|
2021-12-12T09:29:09.000Z
|
demos/matplotlib/gradient_chart.py
|
0lru/p3ui
|
162c6c68f4a55ec109a593c8ced66a62520b5602
|
[
"MIT"
] | 2
|
2021-07-23T04:57:21.000Z
|
2021-12-15T22:51:45.000Z
|
from p3ui import *
import matplotlib.pyplot as plt
import numpy as np
def gradient_image(ax, extent, direction=0.3, cmap_range=(0, 1), **kwargs):
phi = direction * np.pi / 2
v = np.array([np.cos(phi), np.sin(phi)])
X = np.array([[v @ [1, 0], v @ [1, 1]],
[v @ [0, 0], v @ [0, 1]]])
a, b = cmap_range
X = a + (b - a) / X.max() * X
im = ax.imshow(X, extent=extent, interpolation='bicubic',
vmin=0, vmax=1, **kwargs)
return im
def gradient_bar(ax, x, y, width=0.5, bottom=0):
for left, top in zip(x, y):
right = left + width
gradient_image(ax, extent=(left, right, bottom, top),
cmap=plt.cm.Blues_r, cmap_range=(0, 0.8))
class GradientChart(MatplotlibSurface):
# https://matplotlib.org/stable/gallery/lines_bars_and_markers/bar_stacked.html#sphx-glr-gallery-lines-bars-and-markers-bar-stacked-py
def __init__(self, **kwargs):
width = kwargs.pop('width', (auto, 1, 1))
height = kwargs.pop('height', (auto, 1, 1))
super().__init__(width=width, height=height, **kwargs)
self._update()
def _update(self):
with self as figure:
np.random.seed(19680801)
figure.clear()
ax = figure.add_subplot()
ax.set(xlim=(0, 10), ylim=(0, 1), autoscale_on=False)
gradient_image(ax, direction=1, extent=(0, 1, 0, 1), transform=ax.transAxes,
cmap=plt.cm.RdYlGn, cmap_range=(0.2, 0.8), alpha=0.5)
N = 10
x = np.arange(N) + 0.15
y = np.random.rand(N)
gradient_bar(ax, x, y, width=0.7)
ax.set_aspect('auto')
async def update(self):
self._update()
| 34.92
| 138
| 0.555556
| 256
| 1,746
| 3.679688
| 0.402344
| 0.010616
| 0.047771
| 0.044586
| 0.121019
| 0.121019
| 0.121019
| 0
| 0
| 0
| 0
| 0.0456
| 0.284078
| 1,746
| 49
| 139
| 35.632653
| 0.708
| 0.075029
| 0
| 0.051282
| 0
| 0
| 0.013648
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102564
| false
| 0
| 0.076923
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cc757cd407448942e86aaa6d46fed30c9d25b05b
| 816
|
py
|
Python
|
Stack/StackOperations.py
|
prash-kr-meena/GoogleR
|
27aca71e51cc2442e604e07ab00406a98d8d63a4
|
[
"Apache-2.0"
] | null | null | null |
Stack/StackOperations.py
|
prash-kr-meena/GoogleR
|
27aca71e51cc2442e604e07ab00406a98d8d63a4
|
[
"Apache-2.0"
] | null | null | null |
Stack/StackOperations.py
|
prash-kr-meena/GoogleR
|
27aca71e51cc2442e604e07ab00406a98d8d63a4
|
[
"Apache-2.0"
] | null | null | null |
from collections import deque
"""
Operations
* Enqueue
* Dequeue
* Peek (first element)
* Size
* IsEmpty
* Print
"""
def demo_queue_operation_using_deque():
stack = deque()
# enqueue - inserting at the Right
stack.append(1)
stack.append(2)
stack.append(3)
stack.append(4)
stack.append(5)
stack.append(6)
print(stack) # Print
print(stack[-1]) # Last Element / Top
print(len(stack)) # Length
print(len(stack) == 0) # isEmpty
# pop - removing from the Right
stack.pop()
stack.pop()
print(stack) # Print
print(stack[0]) # First Element / Bottom
print(len(stack)) # Length
print(len(stack) == 0) # isEmpty
if __name__ == '__main__':
demo_queue_operation_using_deque()
pass
"""
deque([1, 2, 3, 4, 5, 6])
deque([1, 2, 3, 4])
4
"""
| 16.653061
| 42
| 0.610294
| 108
| 816
| 4.462963
| 0.37963
| 0.136929
| 0.107884
| 0.095436
| 0.427386
| 0.207469
| 0.207469
| 0.207469
| 0.207469
| 0.207469
| 0
| 0.030844
| 0.245098
| 816
| 48
| 43
| 17
| 0.751623
| 0.17402
| 0
| 0.363636
| 0
| 0
| 0.015152
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0.045455
| 0.045455
| 0
| 0.090909
| 0.363636
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cc7685e77b5702f2bcf63b723074355167f96c83
| 7,664
|
py
|
Python
|
app/data_model/find_songs.py
|
TemsyChen/Spotifinder
|
b069ffcd63bd7654e1afd51cde3288c9678d121a
|
[
"MIT"
] | null | null | null |
app/data_model/find_songs.py
|
TemsyChen/Spotifinder
|
b069ffcd63bd7654e1afd51cde3288c9678d121a
|
[
"MIT"
] | 3
|
2021-04-23T22:52:32.000Z
|
2021-04-26T04:02:51.000Z
|
app/data_model/find_songs.py
|
TemsyChen/Spotifinder
|
b069ffcd63bd7654e1afd51cde3288c9678d121a
|
[
"MIT"
] | 6
|
2021-04-28T21:14:10.000Z
|
2021-10-15T01:47:55.000Z
|
'''
Contains the implementation of the FindSongs class.
'''
from re import compile as rcompile
from zipfile import ZipFile
from os.path import dirname
import pandas as pd
from tensorflow.keras.models import load_model
from sklearn.neighbors import NearestNeighbors
from joblib import load
DIR = dirname(__file__)
rex = rcompile('[^a-zA-Z 0-9]')
tokenize = lambda x: rex.sub('', x.lower().replace(',', ' ').replace('-', ' '))
MODELS_DIR = DIR + '/../../models/'
DATA_DIR = DIR + '/../../data/'
ENCODER = 'encoder.h5'
FG_ENCODER = 'fg_encoder.h5'
ENCODER_PATH = MODELS_DIR + ENCODER + '.zip'
ENCODED_DTM = MODELS_DIR + 'encoded_dtm.pkl'
TFIDF = MODELS_DIR + 'tfidf.pkl'
FG_ENCODER_PATH = MODELS_DIR + FG_ENCODER
FG_ENCODED_DF = MODELS_DIR + 'fg_encoded_df.pkl'
GENRES_TFIDF = MODELS_DIR + 'genres_tfidf.pkl'
SCALER = MODELS_DIR + 'scaler.pkl'
TRACKS = DATA_DIR + 'tracks_genres_lyrics_en.csv.zip'
class FindSongs():
'''
This class implements 3 methods:
(1) find_song_entries - Given a song suggestion string containing partial/whole song name
and/or artist, returns a dataframe of possible matches
(2) find_song_entry - Given a song suggestion string returns either a dataframe of
possible matches (if the best_choice kw argument is False) or a single entry (if the
best_choice argument is True - this is the default value)
(3) get_recommendations - Given a song entry returns a dataframe of songs that are
similar.
'''
def __init__(self):
# Extract encoder.h5 from encoder.h5.zip
with ZipFile(ENCODER_PATH, 'r') as zipObj:
zipObj.extractall()
# Load the model saved in ../../models/encoder.h5
self.encoder = load_model(ENCODER)
# Load the TfIDF vectorizer saved in tfidf.pkl
self.tfidf = load(TFIDF)
# Load the encoded DTM saved in encoded_dtm.pkl
self.encoded_dtm = load(ENCODED_DTM)
# Fit NearestNeighbors on encoded DTM
self.nn = NearestNeighbors(n_neighbors=5, algorithm='ball_tree')
self.nn.fit(self.encoded_dtm)
# Numerical features associated with a song entry
self.features = [
'popularity', 'duration_ms', 'explicit', 'danceability',
'energy', 'key', 'loudness', 'mode', 'speechiness',
'acousticness', 'instrumentalness', 'liveness', 'valence',
'tempo', 'time_signature'
]
# Load the model saved in fg_encoder.h5
self.fg_encoder = load_model(FG_ENCODER_PATH)
# Load the TfIDF vectorizer for genres data saved in genres_tfidf.pkl
self.genres_tfidf = load(GENRES_TFIDF)
# The original DF is DTM generated by genres_tfidf from genres data
# in the dataset + Numerical features
# Load the encoded DF from fg_encoded_df.pkl
self.fg_encoded_df = load(FG_ENCODED_DF)
# Load the StandardScaler saved at scaler.pkl
self.scaler = load(SCALER)
# Fit NearestNeighbors on encoded DF
self.fg_nn = NearestNeighbors(n_neighbors=5, algorithm='ball_tree')
self.fg_nn.fit(self.fg_encoded_df)
# Load tracks_df from zipped csv file tracks_genres_lyrics_en.csv.zip
self.tracks_df = pd.read_csv(TRACKS)
# Get rid of superfluous columns and rows
self.tracks_df.drop(columns=['Unnamed: 0'], inplace=True)
self.tracks_df = self.tracks_df[self.tracks_df.genres.isna() == False]
def find_song_entries(self, sugg_str):
'''
Given sugg_str(a string containing part/whole of the
song's name and/or artist) returns a dataframe of
song entries that are the closest matches.
'''
# Vectorize the sugg_str by running it through tfidf
vec = self.tfidf.transform([tokenize(sugg_str)]).todense()
# Reduce dimensionality by running through encoder
encoded_vec = self.encoder.predict(vec)
# Get list of indices of entries that are closest to sugg_str
entries = self.nn.kneighbors(encoded_vec)[1][0].tolist()
# Get the list of indices of closest matches sorted in descending
# order of popularity i.e. the first entry will have the highest
# popularity value
entries = self.tracks_df.iloc[entries].popularity.\
sort_values(ascending=False).index.tolist()
# Return a dataframe containing the entries
return self.tracks_df.loc[entries]
def find_song_entry(self, sugg_str, best_choice=True):
'''
Given sugg_str(a string containing part/whole of the
song's name and/or artist) returns either a dataframe of
song entries that are the closest matches(best_choice=False)
or a single song entry(best_choice=True)
'''
# Get dataframe of song entries that are closest match
# to sugg_str which is a string containing part/whole
# of the song's name and/or artist.
df = self.find_song_entries(sugg_str)
# Convert sugg_str to a set of tokens
sugg_set = set(tokenize(sugg_str).split())
# Get the list of index values for the dataframe
choice = df.index.tolist()
if best_choice:
# The caller wants just one entry for the best match
# Given index value of a song entry row, returns a set of tokens from the combined
# name and artists columns.
# The array syntax ['name'] is used in place of the dot syntax .name because
# .name returns the value from the index column
name_artists = lambda x: set(tokenize(df.loc[x]['name']+' '+df.loc[x].artists).split())
# Given a set of tokens, it returns the length of its intersection with the sugg_set
# This is used as a measure how similar the input is to the sugg_set - the larger the
# return value, the greater the similarity
score_func = lambda x: len(sugg_set.intersection(x))
choices = [(y, name_artists(y)) for y in choice]
best_idx = 0
best_score = score_func(choices[0][1])
for idx, nm_art in enumerate(choices[1:]):
score = score_func(nm_art[1])
#print(f'{nm_art[1]}/{choices[best_idx][1]}/{sugg_set}:: {score}/{best_score}')
if score > best_score:
best_score = score
best_idx = idx+1
choice = choices[best_idx][0]
return df.loc[choice]
def get_recommendations(self, x):
'''
Given a song entry x, returns a dataframe of similar songs.
The similarity is determined based on the numerical features(detailed
in self.features) along with genres feature.
'''
# Convert the genres feature to a vector
gvec = self.genres_tfidf.transform([tokenize(x.genres)]).todense()
# Standardize the numerical features
fvec = self.scaler.transform([x[self.features]])
# Combine both vectors to create a single features vector
vec = [fvec.tolist()[0] + gvec.tolist()[0]]
# Perform dimensionality reduction by running through fg_encoder
encoded_vec = self.fg_encoder.predict(vec)
# Get the list of indices of entries that are closest to
# the input entry
entries = self.fg_nn.kneighbors(encoded_vec)[1][0].tolist()
# Sort the list of indices in descending order of popularity
entries = self.tracks_df.iloc[entries].popularity.\
sort_values(ascending=False).index.tolist()
# Return a dataframe containing the sorted list of entries.
return self.tracks_df.loc[entries]
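# --- Hedged usage sketch (added; not part of the original module) ---
# Assuming an instance of the recommender class defined above is available
# (the instance and class names below are hypothetical), the intended flow is:
# resolve a free-text suggestion to one track row, then rank similar tracks
# by popularity.
# recommender = Recommender()                      # hypothetical constructor
# song = recommender.find_song_entry('bohemian rhapsody queen')
# similar = recommender.get_recommendations(song)
# print(similar[['name', 'artists', 'popularity']])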
| 41.204301
| 99
| 0.656315
| 1,056
| 7,664
| 4.629735
| 0.233902
| 0.01575
| 0.02209
| 0.012272
| 0.250358
| 0.199836
| 0.170382
| 0.143792
| 0.136838
| 0.100839
| 0
| 0.005262
| 0.256133
| 7,664
| 185
| 100
| 41.427027
| 0.852307
| 0.429541
| 0
| 0.075949
| 0
| 0
| 0.081146
| 0.007464
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050633
| false
| 0
| 0.088608
| 0
| 0.189873
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cc7c0a236ab84be9186e7062cc86b20f502b6f05
| 7,270
|
py
|
Python
|
src/academia_tag_recommender/classwise_classifier.py
|
gefei-htw/academia_tag_recommender
|
aea56d36e16584824ef217d1f2caaee3414098f8
|
[
"MIT"
] | null | null | null |
src/academia_tag_recommender/classwise_classifier.py
|
gefei-htw/academia_tag_recommender
|
aea56d36e16584824ef217d1f2caaee3414098f8
|
[
"MIT"
] | null | null | null |
src/academia_tag_recommender/classwise_classifier.py
|
gefei-htw/academia_tag_recommender
|
aea56d36e16584824ef217d1f2caaee3414098f8
|
[
"MIT"
] | 1
|
2021-01-29T19:41:47.000Z
|
2021-01-29T19:41:47.000Z
|
"""This module handles classifier calculation."""
from academia_tag_recommender.definitions import MODELS_PATH
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer, recall_score
from sklearn.model_selection import StratifiedKFold
from joblib import dump, load
from pathlib import Path
import random
import numpy as np
DATA_FOLDER = Path(MODELS_PATH) / 'experimental_classifier' / 'classwise'
SAMPLE_RATIO = 1 / 25
RANDOM_STATE = 0
random.seed(RANDOM_STATE)
scorer = make_scorer(recall_score)
k_fold = StratifiedKFold(shuffle=True, random_state=RANDOM_STATE)
class ClasswiseClassifier:
"""The BR Classwise Classifier that is capable of of grid search and undersampling.
Attributes:
name: The experimental classifier's name as :class:`str`.
path: The path on disc where the individual base classifiers are stored as :class:`pathlib.Path`.
classifier_options: The options for grid search as :class:`list(ClassifierOption)`.
undersample: If True undersampling is used.
"""
def __init__(self, name, classifier_options, folder_path, undersample=False):
self.name = name
self.classifier_options = classifier_options
self.path = DATA_FOLDER / folder_path
Path.mkdir(self.path, exist_ok=True)
self.undersample = undersample
def fit(self, X, y):
"""Fit classifier to given data.
Args:
X:
The samples as :class:`list`.
y:
The label data as :class:`list`.
Returns:
The classifier as :class:`ClasswiseClassifier`.
"""
self._clfs = []
for y_i, _ in enumerate(y[0]):
y_train = y[:, y_i]
if self.undersample:
X_sample, y_sample = self._undersample(X, y_train)
else:
X_sample, y_sample = X, y_train
clf = self._choose_classifier(X_sample, y_sample)
path = self._dump_clf(clf, y_i)
self._clfs.append(path)
return self
def _positive_samples(self, X, y):
"""Extract only positive samples.
Args:
X:
The samples as :class:`list`.
y:
The label data as :class:`list`.
Returns:
The positive samples as :class:`list`.
"""
i_positive = [i for i, _ in enumerate(X) if y[i]]
return random.sample(i_positive, len(i_positive))
def _negative_samples(self, X, y, n_pos):
"""Extract negative samples with adjusted ratio to positive samples.
Args:
X:
The samples as :class:`list`.
y:
The label data as :class:`list`.
n_pos:
The number of positive samples as :class:`int`.
Returns:
The negative samples as :class:`list`.
"""
i_negative = [i for i, _ in enumerate(X) if not y[i]]
n_neg = min(len(i_negative), round(n_pos / SAMPLE_RATIO))
return random.sample(i_negative, n_neg)
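# Illustrative note (added): with SAMPLE_RATIO = 1/25, n_pos = 10 positive
# samples allow up to round(10 / (1/25)) = 250 negative samples, capped by
# the number of negative samples actually available.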
def _undersample(self, X, y):
"""Reduce X and y to an adjusted ratio of positive and negative samples.
Args:
X:
The samples as :class:`list`.
y:
The label data as :class:`list`.
Returns:
The adjusted samples as :class:`list`.
"""
i_pos = self._positive_samples(X, y)
i_neg = self._negative_samples(X, y, len(i_pos))
i = i_pos + i_neg
return np.array(X)[i], np.array(y)[i]
def _choose_classifier(self, X, y):
"""Find the best fitting classifier.
Args:
X:
The samples as :class:`list`.
y:
The label data as :class:`list`.
Returns:
The best classifier.
"""
X_train, X_test, y_train, y_test = train_test_split(
X, y, stratify=y, test_size=0.25, random_state=RANDOM_STATE)
clfs = [self._fit_clf(clf_option, X_train, y_train)
for clf_option in self.classifier_options]
clf = self._get_best_clf(clfs, X_test, y_test)
return clf
def _fit_clf(self, clf_option, X, y):
"""Train classifiers as defined by the option.
Args:
clf_option:
The classifier that should be trained as :class:`ClassifierOption`.
X:
The samples as :class:`list`.
y:
The label data as :class:`list`.
Returns:
The trained classifier.
"""
if clf_option.grid_search:
return GridSearchCV(clf_option.clf, clf_option.parameter, cv=k_fold, scoring=scorer).fit(X, y).best_estimator_
else:
return clf_option.clf.fit(X, y)
def _get_best_clf(self, clfs, X, y):
"""Calculate scores for each classifier and return best.
Args:
clfs:
The classifiers to choose from as :class:`list`.
X:
The samples as :class:`list`.
y:
The label data as :class:`list`.
Returns:
The best classifier.
"""
clf_scores = [(clf, self._score_clf(clf, X, y)) for clf in clfs]
clf = sorted(clf_scores, key=lambda x: x[1], reverse=True)[0][0]
return clf
def _score_clf(self, clf, X, y):
"""Calculate score using the predicted labels by given classifier.
Args:
clf:
The classifier to use.
X:
The samples as :class:`list`.
y:
The label data as :class:`list`.
Returns:
The score as :class:`float`.
"""
prediction = clf.predict(X)
score = recall_score(y, prediction)
return score
def _dump_clf(self, clf, i):
"""Store a classifier on the disc.
Args:
clf:
The classifier to store.
i:
Number of the label the classifier handles as :class:`int`.
Returns:
The path where the classifier was stored as :class:`pathlib.Path`.
"""
path = self.path / (self.name + '_classifier_' + str(i) + '.joblib')
dump(clf, path)
return path
def predict(self, X):
"""Predict labels based on X.
Args:
X:
The samples as :class:`list`.
Returns:
The prediction as :class:`list`.
"""
prediction = []
for path in self._clfs:
clf = load(path)
prediction.append(clf.predict(X))
return np.transpose(prediction)
def __str__(self):
return self.name
class ClassifierOption:
"""A classifier and optional gridsearch parameters.
Attributes:
clf: The classifier.
grid_search: If True gridsearch will be used.
parameter: The parameter to test while gridsearching.
"""
def __init__(self, clf, grid_search=False, parameter={}):
self.clf = clf
self.grid_search = grid_search
self.parameter = parameter
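# --- Hedged usage sketch (added; not part of the original module) ---
# A minimal way to wire the classes together; the estimators and the parameter
# grid below are illustrative assumptions, not part of this package.
# from sklearn.tree import DecisionTreeClassifier
# from sklearn.svm import LinearSVC
# options = [
#     ClassifierOption(DecisionTreeClassifier()),
#     ClassifierOption(LinearSVC(), grid_search=True, parameter={'C': [0.1, 1, 10]}),
# ]
# clf = ClasswiseClassifier('demo', options, 'demo_folder', undersample=True)
# clf.fit(X_train, y_train)   # y_train: binary indicator matrix, one column per label
# y_pred = clf.predict(X_test)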
| 31.068376
| 122
| 0.573315
| 889
| 7,270
| 4.528684
| 0.183352
| 0.0539
| 0.062842
| 0.053651
| 0.205912
| 0.148286
| 0.148286
| 0.132389
| 0.132389
| 0.132389
| 0
| 0.002277
| 0.335488
| 7,270
| 233
| 123
| 31.201717
| 0.831091
| 0.392572
| 0
| 0.048193
| 0
| 0
| 0.014096
| 0.006357
| 0
| 0
| 0
| 0
| 0
| 1
| 0.156627
| false
| 0
| 0.108434
| 0.012048
| 0.433735
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cc8419824483cd3b9a3d2944955bdaa815ca7106
| 2,332
|
py
|
Python
|
Lib/site-packages/node/tests/test_nodespace.py
|
Dr8Ninja/ShareSpace
|
7b445783a313cbdebb1938e824e98370a42def5f
|
[
"MIT"
] | 11
|
2015-04-02T17:47:44.000Z
|
2020-10-26T20:27:43.000Z
|
Lib/site-packages/node/tests/test_nodespace.py
|
Dr8Ninja/ShareSpace
|
7b445783a313cbdebb1938e824e98370a42def5f
|
[
"MIT"
] | 5
|
2017-01-18T11:05:42.000Z
|
2019-03-30T06:19:21.000Z
|
Lib/site-packages/node/tests/test_nodespace.py
|
Dr8Ninja/ShareSpace
|
7b445783a313cbdebb1938e824e98370a42def5f
|
[
"MIT"
] | 2
|
2015-09-15T06:50:22.000Z
|
2016-12-01T11:12:01.000Z
|
from node.behaviors import Adopt
from node.behaviors import DefaultInit
from node.behaviors import Nodespaces
from node.behaviors import Nodify
from node.behaviors import OdictStorage
from node.tests import NodeTestCase
from odict import odict
from plumber import plumbing
###############################################################################
# Mock objects
###############################################################################
@plumbing(
Adopt,
Nodespaces,
Nodify,
OdictStorage)
class NodespacesNode(odict):
pass
@plumbing(
Adopt,
Nodify,
DefaultInit,
OdictStorage)
class SomeNode(object):
pass
###############################################################################
# Tests
###############################################################################
class TestNodespace(NodeTestCase):
def test_Nodespaces(self):
node = NodespacesNode()
self.assertTrue(isinstance(node.nodespaces, odict))
self.assertEqual(node.nodespaces['__children__'], node)
child = node['__children__']['child'] = SomeNode()
self.assertEqual(node['child'], child)
self.assertTrue(node['__children__']['child'] is node['child'])
foo = node['__foo__'] = SomeNode()
self.assertEqual(node['__foo__'], foo)
child = node['__foo__']['child'] = SomeNode()
self.assertEqual(node['__foo__']['child'], child)
self.assertFalse(node['__foo__']['child'] is node['child'])
self.assertEqual(len(node.nodespaces), 2)
self.assertEqual(node.nodespaces['__children__'], node)
self.assertEqual(node.nodespaces['__foo__'], foo)
def __getitem__fails():
node['__inexistent__']
err = self.expectError(KeyError, __getitem__fails)
self.assertEqual(str(err), '\'__inexistent__\'')
def __getitem__fails2():
node['inexistent']
err = self.expectError(KeyError, __getitem__fails2)
self.assertEqual(str(err), '\'inexistent\'')
del node['child']
self.assertEqual(node.keys(), [])
self.assertEqual(list(node['__foo__'].keys()), ['child'])
del node['__foo__']
self.assertEqual(len(node.nodespaces), 1)
self.assertEqual(list(node.nodespaces.keys()), ['__children__'])
| 29.15
| 79
| 0.565609
| 206
| 2,332
| 6.029126
| 0.228155
| 0.157005
| 0.107085
| 0.092593
| 0.321256
| 0.141707
| 0.075684
| 0
| 0
| 0
| 0
| 0.002101
| 0.183533
| 2,332
| 79
| 80
| 29.518987
| 0.65021
| 0.007719
| 0
| 0.230769
| 0
| 0
| 0.096241
| 0
| 0
| 0
| 0
| 0
| 0.307692
| 1
| 0.057692
| false
| 0.038462
| 0.153846
| 0
| 0.269231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cc85a942cbf7b59c5567afa83b5cd9d0434c51dd
| 3,673
|
py
|
Python
|
extract/river/extract_rivers.py
|
parkermac/LO
|
09e0197de7f2166bfa835ec62018b7a8fbfa7379
|
[
"MIT"
] | 1
|
2022-01-31T23:12:22.000Z
|
2022-01-31T23:12:22.000Z
|
extract/river/extract_rivers.py
|
parkermac/LO
|
09e0197de7f2166bfa835ec62018b7a8fbfa7379
|
[
"MIT"
] | 1
|
2022-03-18T23:51:54.000Z
|
2022-03-21T18:02:44.000Z
|
extract/river/extract_rivers.py
|
parkermac/LO
|
09e0197de7f2166bfa835ec62018b7a8fbfa7379
|
[
"MIT"
] | null | null | null |
"""
Extract as-run river time series.
To test on mac:
run extract_rivers -gtx cas6_v3_lo8b -0 2019.07.04 -1 2019.07.04
To run on perigee:
run extract_rivers -gtx cas6_v3_lo8b -0 2018.01.01 -1 2018.01.10
run extract_rivers -gtx cas6_v3_lo8b -0 2018.01.01 -1 2018.12.31
Performance: takes 23 sec per year on perigee
Modified to include all NPZD tracers, and package the results as
an xarray Dataset.
***
NOTE: this is hard-coded to LiveOcean_output / [gtag] / riv2 so it is
pretty specific to the cas6_v3_lo8b run. Also, it expects to find all
the NPZDOC variables.
***
"""
from lo_tools import Lfun, zrfun
from lo_tools import extract_argfun as exfun
Ldir = exfun.intro() # this handles the argument passing
from datetime import datetime, timedelta
from time import time
import numpy as np
import pandas as pd
import xarray as xr
from pathlib import Path
ds0 = Ldir['ds0']
ds1 = Ldir['ds1']
tt0 = time()
# long list of variables to extract
vn_list = ['transport', 'salt', 'temp', 'oxygen',
'NO3', 'phytoplankton', 'zooplankton', 'detritus', 'Ldetritus',
'TIC', 'alkalinity']
print(' Doing river extraction for '.center(60,'='))
print(' gtag = ' + Ldir['gtag'])
outname = 'extraction_' + ds0 + '_' + ds1 + '.nc'
# make sure the output directory exists
out_dir = Ldir['LOo'] / 'pre' / 'river' / Ldir['gtag'] / 'Data_roms'
Lfun.make_dir(out_dir)
out_fn = out_dir / outname
out_fn.unlink(missing_ok=True)
dt0 = datetime.strptime(ds0, Lfun.ds_fmt)
dt1 = datetime.strptime(ds1, Lfun.ds_fmt)
ndays = (dt1-dt0).days + 1
# make mds_list: list of datestrings (e.g. 2017.01.01) to loop over
mds_list = []
mdt = dt0
while mdt <= dt1:
mds_list.append(datetime.strftime(mdt, Lfun.ds_fmt))
mdt = mdt + timedelta(days=1)
# get list of river names
# (this is a bit tricky because of NetCDF 3 limitations on strings, forcing them
# to be arrays of characters)
mds = mds_list[0]
fn = Path('/boildat1').absolute() / 'parker' / 'LiveOcean_output' / Ldir['gtag'] / ('f' + mds) / 'riv2' / 'rivers.nc'
ds = xr.open_dataset(fn)
rn = ds['river_name'].values
NR = rn.shape[1]
riv_name_list = []
for ii in range(NR):
a = rn[:,ii]
r = []
for l in a:
r.append(l.decode())
rr = ''.join(r)
riv_name_list.append(rr)
ds.close()
NT = len(mds_list)
nanmat = np.nan * np.ones((NT, NR))
v_dict = dict()
for vn in vn_list:
v_dict[vn] = nanmat.copy()
tt = 0
for mds in mds_list:
fn = Path('/boildat1').absolute() / 'parker' / 'LiveOcean_output' / Ldir['gtag'] / ('f' + mds) / 'riv2' / 'rivers.nc'
ds = xr.open_dataset(fn)
# The river transport is given at noon of a number of days surrounding the forcing date.
# Here we find the index of the time for the day "mds".
RT = pd.to_datetime(ds['river_time'].values)
mdt = datetime.strptime(mds, Lfun.ds_fmt) + timedelta(hours=12)
mask = RT == mdt
for vn in vn_list:
if vn == 'transport':
v_dict[vn][tt,:] = ds['river_' + vn][mask,:]
else:
# the rest of the variables allow for depth variation, but we
# don't use this, so, just use the bottom value
v_dict[vn][tt,:] = ds['river_' + vn][mask,0,:]
ds.close()
tt += 1
# make transport positive
v_dict['transport'] = np.abs(v_dict['transport'])
# store output in an xarray Dataset
mdt_list = [(datetime.strptime(item, Lfun.ds_fmt) + timedelta(hours=12)) for item in mds_list]
times = pd.Index(mdt_list)
x = xr.Dataset(coords={'time': times,'riv': riv_name_list})
for vn in vn_list:
v = v_dict[vn]
x[vn] = (('time','riv'), v)
x.to_netcdf(out_fn)
x.close()
print('Total time for extraction = %d seconds' % (time() - tt0))
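# --- Hedged usage sketch (added; not part of the original script) ---
# The extraction writes an xarray Dataset with 'time' and 'riv' coordinates and
# one variable per entry in vn_list, so reading it back might look like this
# (the river name below is a hypothetical example):
# import xarray as xr
# ds = xr.open_dataset(out_fn)                # e.g. extraction_2019.07.04_2019.07.04.nc
# q = ds['transport'].sel(riv='columbia')
# print(q.mean().values)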
| 28.253846
| 121
| 0.659406
| 600
| 3,673
| 3.925
| 0.371667
| 0.020807
| 0.019108
| 0.024204
| 0.177919
| 0.172399
| 0.139278
| 0.139278
| 0.107856
| 0.107856
| 0
| 0.038031
| 0.198203
| 3,673
| 129
| 122
| 28.472868
| 0.76163
| 0.319902
| 0
| 0.123288
| 0
| 0
| 0.151454
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.109589
| 0
| 0.109589
| 0.041096
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cc884c667af3244727045d36ce0f98e65a2527d2
| 6,361
|
py
|
Python
|
laia/models/kws/dortmund_phocnet.py
|
basbeu/PyLaia
|
d14458484b56622204b1730a7d53220c5d0f1bc1
|
[
"MIT"
] | 2
|
2020-09-10T13:31:17.000Z
|
2021-07-31T09:44:17.000Z
|
laia/models/kws/dortmund_phocnet.py
|
basbeu/PyLaia
|
d14458484b56622204b1730a7d53220c5d0f1bc1
|
[
"MIT"
] | 1
|
2020-12-06T18:11:52.000Z
|
2020-12-06T18:19:38.000Z
|
laia/models/kws/dortmund_phocnet.py
|
basbeu/PyLaia
|
d14458484b56622204b1730a7d53220c5d0f1bc1
|
[
"MIT"
] | 2
|
2020-04-20T13:40:56.000Z
|
2020-10-17T11:59:55.000Z
|
from __future__ import absolute_import
import math
import operator
from collections import OrderedDict
from functools import reduce
from typing import Union, Sequence, Optional
import torch
from laia.data import PaddedTensor
from laia.nn.pyramid_maxpool_2d import PyramidMaxPool2d
from laia.nn.temporal_pyramid_maxpool_2d import TemporalPyramidMaxPool2d
class Identity(torch.nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
def build_conv_model(unittest=False):
model = torch.nn.Sequential(
OrderedDict(
[
# conv1_1
("conv1_1", torch.nn.Conv2d(1, 64, kernel_size=3, padding=1)),
("relu1_1", Identity() if unittest else torch.nn.ReLU(inplace=True)),
# conv1_2
("conv1_2", torch.nn.Conv2d(64, 64, kernel_size=3, padding=1)),
("relu1_2", Identity() if unittest else torch.nn.ReLU(inplace=True)),
("maxpool1", torch.nn.MaxPool2d(2, ceil_mode=True)),
# conv2_1
("conv2_1", torch.nn.Conv2d(64, 128, kernel_size=3, padding=1)),
("relu2_1", Identity() if unittest else torch.nn.ReLU(inplace=True)),
# conv2_2
("conv2_2", torch.nn.Conv2d(128, 128, kernel_size=3, padding=1)),
("relu2_2", Identity() if unittest else torch.nn.ReLU(inplace=True)),
("maxpool2", torch.nn.MaxPool2d(2, ceil_mode=True)),
# conv3_1
("conv3_1", torch.nn.Conv2d(128, 256, kernel_size=3, padding=1)),
("relu3_1", Identity() if unittest else torch.nn.ReLU(inplace=True)),
# conv3_2
("conv3_2", torch.nn.Conv2d(256, 256, kernel_size=3, padding=1)),
("relu3_2", Identity() if unittest else torch.nn.ReLU(inplace=True)),
# conv3_3
("conv3_3", torch.nn.Conv2d(256, 256, kernel_size=3, padding=1)),
("relu3_3", Identity() if unittest else torch.nn.ReLU(inplace=True)),
# conv3_4
("conv3_4", torch.nn.Conv2d(256, 256, kernel_size=3, padding=1)),
("relu3_4", Identity() if unittest else torch.nn.ReLU(inplace=True)),
# conv3_5
("conv3_5", torch.nn.Conv2d(256, 256, kernel_size=3, padding=1)),
("relu3_5", Identity() if unittest else torch.nn.ReLU(inplace=True)),
# conv3_6
("conv3_6", torch.nn.Conv2d(256, 256, kernel_size=3, padding=1)),
("relu3_6", Identity() if unittest else torch.nn.ReLU(inplace=True)),
# conv4_1
("conv4_1", torch.nn.Conv2d(256, 512, kernel_size=3, padding=1)),
("relu4_1", Identity() if unittest else torch.nn.ReLU(inplace=True)),
# conv4_2
("conv4_2", torch.nn.Conv2d(512, 512, kernel_size=3, padding=1)),
("relu4_2", Identity() if unittest else torch.nn.ReLU(inplace=True)),
# conv4_3
("conv4_3", torch.nn.Conv2d(512, 512, kernel_size=3, padding=1)),
("relu4_3", Identity() if unittest else torch.nn.ReLU(inplace=True)),
]
)
)
return model
def size_after_conv(xs):
# type: (torch.Tensor) -> torch.Tensor
xs = xs.float()
xs = torch.ceil(xs / 2.0)
xs = torch.ceil(xs / 2.0)
return xs.long()
class DortmundPHOCNet(torch.nn.Module):
def __init__(
self, phoc_size, tpp_levels=range(1, 6), spp_levels=None, unittest=False
):
# type: (int, Optional[Sequence[int]], Optional[Sequence[int]], bool) -> None
super(DortmundPHOCNet, self).__init__()
assert tpp_levels or spp_levels
if tpp_levels is None:
tpp_levels = []
if spp_levels is None:
spp_levels = []
self.conv = build_conv_model(unittest=unittest)
self.tpp = TemporalPyramidMaxPool2d(levels=tpp_levels) if tpp_levels else None
self.spp = PyramidMaxPool2d(levels=spp_levels) if spp_levels else None
# Size after the temporal and spatial pooling layers
fc_input_dim = 512 * (sum(tpp_levels) + sum(4 ** (lv - 1) for lv in spp_levels))
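# (added note) e.g. with the default tpp_levels=range(1, 6) and spp_levels=None
# this gives 512 * (1 + 2 + 3 + 4 + 5) = 7680 input features for fc6.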
self.fc = torch.nn.Sequential(
OrderedDict(
[
("fc6", torch.nn.Linear(fc_input_dim, 4096)),
("relu6", Identity() if unittest else torch.nn.ReLU(inplace=True)),
("drop6", torch.nn.Dropout(p=0 if unittest else 0.5)),
("fc7", torch.nn.Linear(4096, 4096)),
("relu7", Identity() if unittest else torch.nn.ReLU(inplace=True)),
("drop7", torch.nn.Dropout(p=0 if unittest else 0.5)),
("fc8", torch.nn.Linear(4096, phoc_size)),
]
)
)
self.reset_parameters()
def reset_parameters(self):
# Initialize parameters as Caffe does
for name, param in self.named_parameters():
if name[-5:] == ".bias":
# Initialize bias to 0
param.data.fill_(0)
else:
# compute fan in
fan_in = reduce(operator.mul, param.size()[1:])
param.data.normal_(mean=0, std=math.sqrt(2.0 / fan_in))
return self
def forward(self, x):
# type: (Union[torch.Tensor, PaddedTensor]) -> torch.Tensor
x, xs = (x.data, x.sizes) if isinstance(x, PaddedTensor) else (x, None)
x = self.conv(x)
if xs is not None:
xs = size_after_conv(xs)
x = PaddedTensor(x, xs)
if self.tpp and self.spp:
x = torch.cat((self.tpp(x), self.spp(x)), dim=1)
else:
x = self.tpp(x) if self.tpp else self.spp(x)
return self.fc(x)
def convert_old_parameters(params):
"""Convert parameters from the old model to the new one."""
# type: (OrderedDict) -> OrderedDict
new_params = []
for k, v in params.items():
if k.startswith("conv"):
new_params.append(("conv.{}".format(k), v))
elif k.startswith("fc"):
new_params.append(("fc.{}".format(k), v))
else:
new_params.append((k, v))
return OrderedDict(new_params)
| 41.575163
| 88
| 0.564377
| 813
| 6,361
| 4.265683
| 0.184502
| 0.07872
| 0.068627
| 0.095156
| 0.395329
| 0.395329
| 0.372837
| 0.309977
| 0.309977
| 0.284602
| 0
| 0.056284
| 0.304512
| 6,361
| 152
| 89
| 41.848684
| 0.727622
| 0.076246
| 0
| 0.079646
| 0
| 0
| 0.042735
| 0
| 0
| 0
| 0
| 0
| 0.00885
| 1
| 0.070796
| false
| 0
| 0.088496
| 0.00885
| 0.230089
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cc8a2d2f86634a18b7bce0839a293dfedd5c4feb
| 1,541
|
py
|
Python
|
robocute/widget/__init__.py
|
kfields/robocute
|
f6f15ab74266053da5fe4ede3cc81310a62146e5
|
[
"MIT"
] | 1
|
2015-08-24T21:58:34.000Z
|
2015-08-24T21:58:34.000Z
|
robocute/widget/__init__.py
|
kfields/robocute
|
f6f15ab74266053da5fe4ede3cc81310a62146e5
|
[
"MIT"
] | null | null | null |
robocute/widget/__init__.py
|
kfields/robocute
|
f6f15ab74266053da5fe4ede3cc81310a62146e5
|
[
"MIT"
] | null | null | null |
import pyglet
from pyglet.gl import *
from robocute.node import *
from robocute.vu import *
from robocute.shape import Rect
class WidgetVu(Vu):
def __init__(self, node):
super().__init__(node)
#
self.content = Rect()
#
self.margin_top = 5
self.margin_bottom = 5
self.margin_left = 5
self.margin_right = 5
#
self.hspace = 5
self.vspace = 5
#
self.skin = None
def validate(self):
super().validate()
if not self.skin:
return
#else
self.skin.validate()
#
self.margin_left = self.skin.margin_left
self.margin_right = self.skin.margin_right
self.width = self.content.width + self.margin_left + self.margin_right
#
self.margin_top = self.skin.margin_top
self.margin_bottom = self.skin.margin_bottom
self.height = self.content.height + self.margin_bottom + self.margin_top
def draw(self, graphics):
super().draw(graphics)
if not self.skin:
return
#else
g = graphics.copy()
g.width = self.width
g.height = self.height
self.skin.draw(g)
class Widget(Node):
def __init__(self, items=None):
# Fall back to an empty list so add_item/remove_item work when no items are given.
self.items = items if items is not None else []
def add_item(self, item):
self.items.append(item)
def remove_item(self, item):
self.items.remove(item)
| 27.035088
| 81
| 0.541856
| 178
| 1,541
| 4.522472
| 0.241573
| 0.149068
| 0.069565
| 0.032298
| 0.181366
| 0.129193
| 0
| 0
| 0
| 0
| 0
| 0.006141
| 0.365996
| 1,541
| 57
| 82
| 27.035088
| 0.81781
| 0.010383
| 0
| 0.095238
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.119048
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cc8d3cb600d5b30b2c38ef88bbba0a6183336176
| 8,954
|
py
|
Python
|
sp3-diffusion/extract-convert.py
|
robfairh/npre555-cp03
|
2aea7ae2df4a720c5d09003f98192f8986a6d107
|
[
"BSD-3-Clause"
] | null | null | null |
sp3-diffusion/extract-convert.py
|
robfairh/npre555-cp03
|
2aea7ae2df4a720c5d09003f98192f8986a6d107
|
[
"BSD-3-Clause"
] | 2
|
2021-01-04T12:29:30.000Z
|
2021-02-01T11:13:45.000Z
|
sp3-diffusion/extract-convert.py
|
robfairh/npre555-cp03
|
2aea7ae2df4a720c5d09003f98192f8986a6d107
|
[
"BSD-3-Clause"
] | null | null | null |
# This script is based on moltres/python/extractSerpent2GCs.py
import os
import numpy as np
import argparse
import subprocess
import serpentTools as sT
def makePropertiesDir(
outdir,
filebase,
mapFile,
unimapFile,
serp1=False,
fromMain=False):
""" Takes in a mapping from branch names to material temperatures,
then makes a properties directory.
Serp1 means that the group transfer matrix is transposed."""
# the constants moltres looks for:
goodStuff = ['Tot', 'Sp0', 'Sp2', 'Fiss', 'Nsf', 'Kappa', 'Sp1', 'Sp3',
'Invv', 'Chit', 'Chip', 'Chid', 'BETA_EFF', 'lambda']
goodMap = dict([(thing, 'inf' + thing) for thing in goodStuff])
goodMap['BETA_EFF'] = 'betaEff'
goodMap['lambda'] = 'lambda'
# map material names to universe names from serpent
with open(unimapFile) as fh:
uniMap = []
for line in fh:
uniMap.append(tuple(line.split()))
# this now maps material names to serpent universes
uniMap = dict(uniMap)
# list of material names
inmats = list(uniMap.keys())
print("Making properties for materials:")
print(inmats)
coeList = dict([(mat, sT.read(mat + '.coe')) for mat in inmats])
# primary branch to temp mapping
branch2TempMapping = open(mapFile)
# Check if calculation uses 6 neutron precursor groups.
# This prevents writing of excess zeros. Check if any
# entries in the 7th and 8th group precursor positions
# are nonzero, if so, use 8 groups.
use8Groups = False
for line in branch2TempMapping:
item, temp = tuple(line.split())
for mat in inmats:
if mat in item:
currentMat = mat
break
strData = coeList[currentMat].branches[item].universes[
uniMap[currentMat], 0, 0, None].gc[goodMap['BETA_EFF']]
strData = strData[1:9]
if np.any(strData[-2:] != 0.0):
use8Groups = True
# Now loop through a second time
branch2TempMapping.close()
branch2TempMapping = open(mapFile)
for line in branch2TempMapping:
item, temp = tuple(line.split())
for mat in inmats:
if mat in item:
currentMat = mat
break
else:
print('Considered materials: {}'.format(inmats))
raise Exception(
"Couldn't find a material corresponding to branch {}".format(
item))
try:
totxsdata = coeList[currentMat].branches[item].universes[
uniMap[currentMat], 0, 0, None].infExp[goodMap['Tot']]
sp0xsdata = coeList[currentMat].branches[item].universes[
uniMap[currentMat], 0, 0, None].infExp[goodMap['Sp0']]
sp1xsdata = coeList[currentMat].branches[item].universes[
uniMap[currentMat], 0, 0, None].infExp[goodMap['Sp1']]
sp2xsdata = coeList[currentMat].branches[item].universes[
uniMap[currentMat], 0, 0, None].infExp[goodMap['Sp2']]
sp3xsdata = coeList[currentMat].branches[item].universes[
uniMap[currentMat], 0, 0, None].infExp[goodMap['Sp3']]
G = len(totxsdata)
remxs0g = totxsdata - sp0xsdata.reshape((G, G)).diagonal()
remxs1g = totxsdata - sp1xsdata.reshape((G, G)).diagonal()
remxs2g = totxsdata - sp2xsdata.reshape((G, G)).diagonal()
remxs3g = totxsdata - sp3xsdata.reshape((G, G)).diagonal()
with open(outdir + '/' + filebase + '_' + currentMat +
'_DIFFCOEFA.txt', 'a') as fh:
strData = 1./3./remxs1g
strData = ' '.join(
[str(dat) for dat in strData]) if isinstance(
strData, np.ndarray) else strData
fh.write(str(temp) + ' ' + strData)
fh.write('\n')
with open(outdir + '/' + filebase + '_' + currentMat +
'_DIFFCOEFB.txt', 'a') as fh:
strData = 9./35./remxs3g
strData = ' '.join(
[str(dat) for dat in strData]) if isinstance(
strData, np.ndarray) else strData
fh.write(str(temp) + ' ' + strData)
fh.write('\n')
with open(outdir + '/' + filebase + '_' + currentMat +
'_REMXSA.txt', 'a') as fh:
strData = remxs0g
strData = ' '.join(
[str(dat) for dat in strData]) if isinstance(
strData, np.ndarray) else strData
fh.write(str(temp) + ' ' + strData)
fh.write('\n')
with open(outdir + '/' + filebase + '_' + currentMat +
'_REMXSB.txt', 'a') as fh:
strData = remxs2g + 4./5*remxs0g
strData = ' '.join(
[str(dat) for dat in strData]) if isinstance(
strData, np.ndarray) else strData
fh.write(str(temp) + ' ' + strData)
fh.write('\n')
with open(outdir + '/' + filebase + '_' + currentMat +
'_COUPLEXSA.txt', 'a') as fh:
strData = 2*remxs0g
strData = ' '.join(
[str(dat) for dat in strData]) if isinstance(
strData, np.ndarray) else strData
fh.write(str(temp) + ' ' + strData)
fh.write('\n')
with open(outdir + '/' + filebase + '_' + currentMat +
'_COUPLEXSB.txt', 'a') as fh:
strData = 2./5*remxs0g
strData = ' '.join(
[str(dat) for dat in strData]) if isinstance(
strData, np.ndarray) else strData
fh.write(str(temp) + ' ' + strData)
fh.write('\n')
for coefficient in ['Chit', 'Chip', 'Chid', 'Fiss', 'Nsf', 'Sp0',
'Kappa', 'Invv', 'BETA_EFF', 'lambda']:
with open(outdir + '/' + filebase + '_' + currentMat +
'_' + coefficient.upper() + '.txt', 'a') as fh:
if coefficient == 'lambda' or coefficient == 'BETA_EFF':
strData = coeList[currentMat].branches[
item].universes[
uniMap[currentMat], 0, 0, None].gc[
goodMap[coefficient]]
# some additional formatting is needed here
strData = strData[1:9]
# Cut off group 7 and 8 precursor params in 6
# group calcs
if not use8Groups:
strData = strData[0:6]
else:
strData = coeList[currentMat].branches[
item].universes[
uniMap[currentMat], 0, 0, None].infExp[
goodMap[coefficient]]
strData = ' '.join(
[str(dat) for dat in strData]) if isinstance(
strData, np.ndarray) else strData
fh.write(str(temp) + ' ' + strData)
fh.write('\n')
except KeyError:
raise Exception('Check your mapping and secondary branch files.')
if __name__ == '__main__':
# make it act like a nice little terminal program
parser = argparse.ArgumentParser(
description='Extracts Serpent 2 group constants, \
and puts them in a directory suitable for moltres.')
parser.add_argument('outDir', metavar='o', type=str, nargs=1,
help='name of directory to write properties to.')
parser.add_argument('fileBase', metavar='f', type=str,
nargs=1, help='File base name to give moltres')
parser.add_argument(
'mapFile',
metavar='b',
type=str,
nargs=1,
help='File that maps branches to temperatures')
parser.add_argument(
'universeMap',
metavar='u',
type=str,
nargs=1,
help='File that maps material names to serpent universe')
parser.add_argument(
'--serp1',
dest='serp1',
action='store_true',
help='use this flag for serpent 1 group transfer matrices')
parser.set_defaults(serp1=False)
args = parser.parse_args()
# these are unpacked, so it fails if they weren't passed to the script
outdir = args.outDir[0]
fileBase = args.fileBase[0]
mapFile = args.mapFile[0]
unimapFile = args.universeMap[0]
makePropertiesDir(outdir, fileBase, mapFile, unimapFile, serp1=args.serp1,
fromMain=True)
print("Successfully made property files in directory {}.".format(outdir))
| 38.594828
| 78
| 0.52256
| 916
| 8,954
| 5.069869
| 0.272926
| 0.027132
| 0.042205
| 0.049957
| 0.447244
| 0.406546
| 0.372308
| 0.372308
| 0.359819
| 0.359819
| 0
| 0.016878
| 0.364753
| 8,954
| 231
| 79
| 38.761905
| 0.799578
| 0.094036
| 0
| 0.41954
| 0
| 0
| 0.096758
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005747
| false
| 0
| 0.028736
| 0
| 0.034483
| 0.022989
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cc8e5699c6d924eefec86f5ef76ba0da8a0749bf
| 63,581
|
py
|
Python
|
ms1searchpy/main.py
|
markmipt/ms1searchpy
|
1fae3ba9ca25ac151b34110d333820f0a063ee11
|
[
"Apache-2.0"
] | 6
|
2020-01-28T12:29:02.000Z
|
2022-02-01T14:43:44.000Z
|
ms1searchpy/main.py
|
markmipt/ms1searchpy
|
1fae3ba9ca25ac151b34110d333820f0a063ee11
|
[
"Apache-2.0"
] | 3
|
2021-07-30T01:28:05.000Z
|
2021-11-25T09:14:31.000Z
|
ms1searchpy/main.py
|
markmipt/ms1searchpy
|
1fae3ba9ca25ac151b34110d333820f0a063ee11
|
[
"Apache-2.0"
] | 2
|
2020-07-23T10:01:10.000Z
|
2021-05-04T12:46:04.000Z
|
import os
from . import utils
import numpy as np
from scipy.stats import scoreatpercentile
from scipy.optimize import curve_fit
from scipy import exp
import operator
from copy import copy, deepcopy
from collections import defaultdict, Counter
import re
from pyteomics import parser, mass, fasta, auxiliary as aux, achrom
try:
from pyteomics import cmass
except ImportError:
cmass = mass
import subprocess
from sklearn import linear_model
import tempfile
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from multiprocessing import Queue, Process, cpu_count
from itertools import chain
try:
import seaborn
seaborn.set(rc={'axes.facecolor':'#ffffff'})
seaborn.set_style('whitegrid')
except:
pass
from .utils import calc_sf_all, recalc_spc
import lightgbm as lgb
import pandas as pd
from sklearn.model_selection import train_test_split
from scipy.stats import zscore, spearmanr
import pandas as pd
from pyteomics import pepxml, achrom, auxiliary as aux, mass, fasta, mzid, parser
from pyteomics import electrochem
import numpy as np
import random
SEED = 42
from sklearn.model_selection import train_test_split
from os import path, mkdir
from collections import Counter, defaultdict
import warnings
import pylab as plt
warnings.formatwarning = lambda msg, *args, **kw: str(msg) + '\n'
import pandas as pd
from sklearn.model_selection import train_test_split, KFold
import os
from collections import Counter, defaultdict
from scipy.stats import scoreatpercentile
from sklearn.isotonic import IsotonicRegression
import warnings
import numpy as np
import matplotlib
import numpy
import pandas
import random
import sklearn
import matplotlib.pyplot as plt
from sklearn import (
feature_extraction, feature_selection, decomposition, linear_model,
model_selection, metrics, svm
)
import scipy
from scipy.stats import rankdata
from copy import deepcopy
import csv
from scipy.stats import rankdata
import lightgbm as lgb
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from itertools import chain
import time as timemodule
import ast
from sklearn import metrics
SEED = 50
def worker_RT(qin, qout, shift, step, RC=False, elude_path=False, ns=False, nr=False, win_sys=False):
pepdict = dict()
if elude_path:
outtrain_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
outtrain = open(outtrain_name, 'w')
outres_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
for seq, RT in zip(ns, nr):
outtrain.write(seq + '\t' + str(RT) + '\n')
outtrain.close()
outtest_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
outtest = open(outtest_name, 'w')
maxval = len(qin)
start = 0
while start + shift < maxval:
item = qin[start+shift]
outtest.write(item + '\n')
start += step
outtest.close()
subprocess.call([elude_path, '-t', outtrain_name, '-e', outtest_name, '-a', '-o', outres_name])
for x in open(outres_name).readlines()[3:]:
seq, RT = x.strip().split('\t')
pepdict[seq] = float(RT)
else:
maxval = len(qin)
start = 0
while start + shift < maxval:
item = qin[start+shift]
pepdict[item] = achrom.calculate_RT(item, RC)
start += step
if win_sys:
return pepdict
else:
qout.put(pepdict)
qout.put(None)
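# (added note) each worker strides through the shared peptide list as
# qin[shift::step], so `step` workers cover the whole list without overlap.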
def final_iteration(resdict, mass_diff, rt_diff, pept_prot, protsN, base_out_name, prefix, isdecoy, isdecoy_key, escore, fdr, nproc, fname=False):
n = nproc
prots_spc_basic = dict()
p1 = set(resdict['seqs'])
pep_pid = defaultdict(set)
pid_pep = defaultdict(set)
banned_dict = dict()
for pep, pid in zip(resdict['seqs'], resdict['ids']):
pep_pid[pep].add(pid)
pid_pep[pid].add(pep)
if pep in banned_dict:
banned_dict[pep] += 1
else:
banned_dict[pep] = 1
if len(p1):
prots_spc_final = dict()
prots_spc_copy = False
prots_spc2 = False
unstable_prots = set()
p0 = False
names_arr = False
tmp_spc_new = False
decoy_set = False
while 1:
if not prots_spc2:
best_match_dict = dict()
n_map_dict = defaultdict(list)
for k, v in protsN.items():
n_map_dict[v].append(k)
decoy_set = set()
for k in protsN:
if isdecoy_key(k):
decoy_set.add(k)
decoy_set = list(decoy_set)
prots_spc2 = defaultdict(set)
for pep, proteins in pept_prot.items():
if pep in p1:
for protein in proteins:
prots_spc2[protein].add(pep)
for k in protsN:
if k not in prots_spc2:
prots_spc2[k] = set([])
prots_spc2 = dict(prots_spc2)
unstable_prots = set(prots_spc2.keys())
top100decoy_N = sum([val for key, val in protsN.items() if isdecoy_key(key)])
names_arr = np.array(list(prots_spc2.keys()))
n_arr = np.array([protsN[k] for k in names_arr])
tmp_spc_new = dict((k, len(v)) for k, v in prots_spc2.items())
top100decoy_score_tmp = [tmp_spc_new.get(dprot, 0) for dprot in decoy_set]
top100decoy_score_tmp_sum = float(sum(top100decoy_score_tmp))
tmp_spc = tmp_spc_new
prots_spc = tmp_spc_new
if not prots_spc_copy:
prots_spc_copy = deepcopy(prots_spc)
for idx, v in enumerate(decoy_set):
if v in unstable_prots:
top100decoy_score_tmp_sum -= top100decoy_score_tmp[idx]
top100decoy_score_tmp[idx] = prots_spc.get(v, 0)
top100decoy_score_tmp_sum += top100decoy_score_tmp[idx]
p = float(sum(top100decoy_score_tmp)) / top100decoy_N
p = top100decoy_score_tmp_sum / top100decoy_N
n_change = set(protsN[k] for k in unstable_prots)
for n_val in n_change:
for k in n_map_dict[n_val]:
v = prots_spc[k]
if n_val not in best_match_dict or v > prots_spc[best_match_dict[n_val]]:
best_match_dict[n_val] = k
n_arr_small = []
names_arr_small = []
v_arr_small = []
for k, v in best_match_dict.items():
n_arr_small.append(k)
names_arr_small.append(v)
v_arr_small.append(prots_spc[v])
prots_spc_basic = dict()
all_pvals = calc_sf_all(np.array(v_arr_small), n_arr_small, p)
for idx, k in enumerate(names_arr_small):
prots_spc_basic[k] = all_pvals[idx]
if not p0:
p0 = float(p)
prots_spc_tmp = dict()
v_arr = np.array([prots_spc[k] for k in names_arr])
all_pvals = calc_sf_all(v_arr, n_arr, p)
for idx, k in enumerate(names_arr):
prots_spc_tmp[k] = all_pvals[idx]
sortedlist_spc = sorted(prots_spc_tmp.items(), key=operator.itemgetter(1))[::-1]
with open(base_out_name + '_proteins_full_noexclusion.tsv', 'w') as output:
output.write('dbname\tscore\tmatched peptides\ttheoretical peptides\n')
for x in sortedlist_spc:
output.write('\t'.join((x[0], str(x[1]), str(prots_spc_copy[x[0]]), str(protsN[x[0]]))) + '\n')
best_prot = utils.keywithmaxval(prots_spc_basic)
best_score = prots_spc_basic[best_prot]
unstable_prots = set()
if best_prot not in prots_spc_final:
prots_spc_final[best_prot] = best_score
banned_pids = set()
for pep in prots_spc2[best_prot]:
for pid in pep_pid[pep]:
banned_pids.add(pid)
for pid in banned_pids:
for pep in pid_pep[pid]:
banned_dict[pep] -= 1
if banned_dict[pep] == 0:
for bprot in pept_prot[pep]:
tmp_spc_new[bprot] -= 1
unstable_prots.add(bprot)
else:
v_arr = np.array([prots_spc[k] for k in names_arr])
all_pvals = calc_sf_all(v_arr, n_arr, p)
for idx, k in enumerate(names_arr):
prots_spc_basic[k] = all_pvals[idx]
for k, v in prots_spc_basic.items():
if k not in prots_spc_final:
prots_spc_final[k] = v
break
prot_fdr = aux.fdr(prots_spc_final.items(), is_decoy=isdecoy)
if prot_fdr >= 12.5 * fdr:
v_arr = np.array([prots_spc[k] for k in names_arr])
all_pvals = calc_sf_all(v_arr, n_arr, p)
for idx, k in enumerate(names_arr):
prots_spc_basic[k] = all_pvals[idx]
for k, v in prots_spc_basic.items():
if k not in prots_spc_final:
prots_spc_final[k] = v
break
prots_spc_basic2 = copy(prots_spc_final)
prots_spc_final = dict()
prots_spc_final2 = dict()
if n == 0:
try:
n = cpu_count()
except NotImplementedError:
n = 1
if n == 1 or os.name == 'nt':
qin = []
qout = []
for mass_koef in range(10):
rtt_koef = mass_koef
qin.append((mass_koef, rtt_koef))
qout = worker(qin, qout, mass_diff, rt_diff, resdict, protsN, pept_prot, isdecoy_key, isdecoy, fdr, prots_spc_basic2, True)
for item, item2 in qout:
if item2:
prots_spc_copy = item2
for k in protsN:
if k not in prots_spc_final:
prots_spc_final[k] = [item.get(k, 0.0), ]
else:
prots_spc_final[k].append(item.get(k, 0.0))
else:
qin = Queue()
qout = Queue()
for mass_koef in range(10):
rtt_koef = mass_koef
qin.put((mass_koef, rtt_koef))
for _ in range(n):
qin.put(None)
procs = []
for proc_num in range(n):
p = Process(target=worker, args=(qin, qout, mass_diff, rt_diff, resdict, protsN, pept_prot, isdecoy_key, isdecoy, fdr, prots_spc_basic2))
p.start()
procs.append(p)
for _ in range(n):
for item, item2 in iter(qout.get, None):
if item2:
prots_spc_copy = item2
for k in protsN:
if k not in prots_spc_final:
prots_spc_final[k] = [item.get(k, 0.0), ]
else:
prots_spc_final[k].append(item.get(k, 0.0))
for p in procs:
p.join()
for k in prots_spc_final.keys():
prots_spc_final[k] = np.mean(prots_spc_final[k])
prots_spc = deepcopy(prots_spc_final)
sortedlist_spc = sorted(prots_spc.items(), key=operator.itemgetter(1))[::-1]
with open(base_out_name + '_proteins_full.tsv', 'w') as output:
output.write('dbname\tscore\tmatched peptides\ttheoretical peptides\n')
for x in sortedlist_spc:
output.write('\t'.join((x[0], str(x[1]), str(prots_spc_copy[x[0]]), str(protsN[x[0]]))) + '\n')
checked = set()
for k, v in list(prots_spc.items()):
if k not in checked:
if isdecoy_key(k):
if prots_spc.get(k.replace(prefix, ''), -1e6) > v:
del prots_spc[k]
checked.add(k.replace(prefix, ''))
else:
if prots_spc.get(prefix + k, -1e6) > v:
del prots_spc[k]
checked.add(prefix + k)
filtered_prots = aux.filter(prots_spc.items(), fdr=fdr, key=escore, is_decoy=isdecoy, remove_decoy=True, formula=1, full_output=True, correction=1)
if len(filtered_prots) < 1:
filtered_prots = aux.filter(prots_spc.items(), fdr=fdr, key=escore, is_decoy=isdecoy, remove_decoy=True, formula=1, full_output=True, correction=0)
identified_proteins = 0
for x in filtered_prots:
identified_proteins += 1
print('TOP 5 identified proteins:')
print('dbname\tscore\tnum matched peptides\tnum theoretical peptides')
for x in filtered_prots[:5]:
print('\t'.join((str(x[0]), str(x[1]), str(int(prots_spc_copy[x[0]])), str(protsN[x[0]]))))
print('results:%s;number of identified proteins = %d' % (base_out_name, identified_proteins, ))
# print('R=', r)
with open(base_out_name + '_proteins.tsv', 'w') as output:
output.write('dbname\tscore\tmatched peptides\ttheoretical peptides\n')
for x in filtered_prots:
output.write('\t'.join((x[0], str(x[1]), str(prots_spc_copy[x[0]]), str(protsN[x[0]]))) + '\n')
if fname:
fig = plt.figure(figsize=(16, 12))
DPI = fig.get_dpi()
fig.set_size_inches(2000.0/float(DPI), 2000.0/float(DPI))
df0 = pd.read_table(os.path.splitext(fname)[0].replace('.features', '') + '.features' + '.tsv')
# Features RT distribution
# TODO add matched features and matched to 1% FDR proteins features
ax = fig.add_subplot(3, 1, 1)
bns = np.arange(0, df0['rtApex'].max() + 1, 1)
ax.hist(df0['rtApex'], bins = bns)
ax.set_xlabel('RT, min', size=16)
ax.set_ylabel('# features', size=16)
# Features mass distribution
# TODO add matched features and matched to 1% FDR proteins features
ax = fig.add_subplot(3, 1, 2)
bns = np.arange(0, df0['massCalib'].max() + 6, 5)
ax.hist(df0['massCalib'], bins = bns)
ax.set_xlabel('neutral mass, Da', size=16)
ax.set_ylabel('# features', size=16)
# Features intensity distribution
# TODO add matched features and matched to 1% FDR proteins features
ax = fig.add_subplot(3, 1, 3)
bns = np.arange(np.log10(df0['intensityApex'].min()) - 0.5, np.log10(df0['intensityApex'].max()) + 0.5, 0.5)
ax.hist(np.log10(df0['intensityApex']), bins = bns)
ax.set_xlabel('log10(Intensity)', size=16)
ax.set_ylabel('# features', size=16)
plt.savefig(base_out_name + '.png')
def noisygaus(x, a, x0, sigma, b):
return a * exp(-(x - x0) ** 2 / (2 * sigma ** 2)) + b
def calibrate_mass(bwidth, mass_left, mass_right, true_md):
bbins = np.arange(-mass_left, mass_right, bwidth)
H1, b1 = np.histogram(true_md, bins=bbins)
b1 = b1 + bwidth
b1 = b1[:-1]
popt, pcov = curve_fit(noisygaus, b1, H1, p0=[1, np.median(true_md), 1, 1])
mass_shift, mass_sigma = popt[1], abs(popt[2])
return mass_shift, mass_sigma, pcov[0][0]
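# (added note) calibrate_mass fits a Gaussian-plus-offset (noisygaus) to the
# histogram of observed mass errors; below it is called as, e.g.,
# calibrate_mass(0.001, mass_left, mass_right, true_md), and matches within
# 3 sigma of the fitted shift are kept.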
def calibrate_RT_gaus(bwidth, mass_left, mass_right, true_md):
bbins = np.arange(-mass_left, mass_right, bwidth)
H1, b1 = np.histogram(true_md, bins=bbins)
b1 = b1 + bwidth
b1 = b1[:-1]
popt, pcov = curve_fit(noisygaus, b1, H1, p0=[1, np.median(true_md), bwidth * 5, 1])
mass_shift, mass_sigma = popt[1], abs(popt[2])
return mass_shift, mass_sigma, pcov[0][0]
def process_file(args):
utils.seen_target.clear()
utils.seen_decoy.clear()
args = utils.prepare_decoy_db(args)
return process_peptides(args)
def peptide_processor(peptide, **kwargs):
seqm = peptide
results = []
m = cmass.fast_mass(seqm, aa_mass=kwargs['aa_mass']) + kwargs['aa_mass'].get('Nterm', 0) + kwargs['aa_mass'].get('Cterm', 0)
acc_l = kwargs['acc_l']
acc_r = kwargs['acc_r']
dm_l = acc_l * m / 1.0e6
if acc_r == acc_l:
dm_r = dm_l
else:
dm_r = acc_r * m / 1.0e6
start = nmasses.searchsorted(m - dm_l)
end = nmasses.searchsorted(m + dm_r)
for i in range(start, end):
peak_id = ids[i]
I = Is[i]
massdiff = (m - nmasses[i]) / m * 1e6
mods = 0
results.append((seqm, massdiff, mods, i))
return results
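# (added note) the tolerance window is in parts per million of the peptide mass:
# for acc_l = acc_r = 10 ppm and m = 1000 Da, dm = 10 * 1000 / 1e6 = 0.01 Da, so
# features whose neutral mass falls within +/- 0.01 Da of m are treated as matches.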
def prepare_peptide_processor(fname, args):
global nmasses
global rts
global charges
global ids
global Is
global Scans
global Isotopes
global mzraw
global avraw
global imraw
min_ch = args['cmin']
max_ch = args['cmax']
min_isotopes = args['i']
min_scans = args['sc']
print('Reading spectra ...')
df_features = utils.iterate_spectra(fname, min_ch, max_ch, min_isotopes, min_scans)
# Sort by neutral mass
df_features = df_features.sort_values(by='massCalib')
nmasses = df_features['massCalib'].values
rts = df_features['rtApex'].values
charges = df_features['charge'].values
ids = df_features['id'].values
Is = df_features['intensityApex'].values
Scans = df_features['nScans'].values
Isotopes = df_features['nIsotopes'].values
mzraw = df_features['mz'].values
avraw = np.zeros(len(df_features))
if len(set(df_features['FAIMS'])) > 1:
imraw = df_features['FAIMS'].values
else:
imraw = df_features['ion_mobility'].values
print('Number of peptide isotopic clusters: %d' % (len(nmasses), ))
fmods = args['fmods']
aa_mass = mass.std_aa_mass
if fmods:
for mod in fmods.split(','):
m, aa = mod.split('@')
if aa == '[':
aa_mass['Nterm'] = float(m)
elif aa == ']':
aa_mass['Cterm'] = float(m)
else:
aa_mass[aa] += float(m)
acc_l = args['ptol']
acc_r = args['ptol']
return {'aa_mass': aa_mass, 'acc_l': acc_l, 'acc_r': acc_r, 'args': args}, df_features
def peptide_processor_iter_isoforms(peptide, **kwargs):
out = []
out.append(peptide_processor(peptide, **kwargs))
return out
def get_results(ms1results):
resdict = dict()
labels = [
'seqs',
'md',
'mods',
'iorig',
# 'rt',
# 'ids',
# 'Is',
# 'Scans',
# 'Isotopes',
# 'mzraw',
# 'av',
# 'ch',
# 'im',
]
for label, val in zip(labels, zip(*ms1results)):
resdict[label] = np.array(val)
return resdict
def filter_results(resultdict, idx):
tmp = dict()
for label in resultdict:
tmp[label] = resultdict[label][idx]
return tmp
def process_peptides(args):
fname = args['file']
fdr = args['fdr'] / 100
min_isotopes_calibration = args['ci']
try:
outpath = args['outpath']
except:
outpath = False
if outpath:
base_out_name = os.path.splitext(os.path.join(outpath, os.path.basename(fname)))[0]
else:
base_out_name = os.path.splitext(fname)[0]
out_log = open(base_out_name + '_log.txt', 'w')
out_log.close()
out_log = open(base_out_name + '_log.txt', 'w')
elude_path = args['elude']
elude_path = elude_path.strip()
deeplc_path = args['deeplc']
deeplc_path = deeplc_path.strip()
calib_path = args['pl']
calib_path = calib_path.strip()
if calib_path and args['ts']:
args['ts'] = 0
print('Two-stage RT prediction does not work with list of MS/MS identified peptides...')
args['enzyme'] = utils.get_enzyme(args['e'])
ms1results = []
peps = utils.peptide_gen(args)
kwargs, df_features = prepare_peptide_processor(fname, args)
func = peptide_processor_iter_isoforms
print('Running the search ...')
for y in utils.multimap(1, func, peps, **kwargs):
for result in y:
if len(result):
ms1results.extend(result)
prefix = args['prefix']
protsN, pept_prot = utils.get_prot_pept_map(args)
resdict = get_results(ms1results)
del ms1results
resdict['mc'] = np.array([parser.num_sites(z, args['enzyme']) for z in resdict['seqs']])
isdecoy = lambda x: x[0].startswith(prefix)
isdecoy_key = lambda x: x.startswith(prefix)
escore = lambda x: -x[1]
e_ind = np.array([Isotopes[iorig] for iorig in resdict['iorig']]) >= min_isotopes_calibration
# e_ind = resdict['Isotopes'] >= min_isotopes_calibration
# e_ind = resdict['Isotopes'] >= 1
resdict2 = filter_results(resdict, e_ind)
e_ind = resdict2['mc'] == 0
resdict2 = filter_results(resdict2, e_ind)
p1 = set(resdict2['seqs'])
if len(p1):
prots_spc2 = defaultdict(set)
for pep, proteins in pept_prot.items():
if pep in p1:
for protein in proteins:
prots_spc2[protein].add(pep)
for k in protsN:
if k not in prots_spc2:
prots_spc2[k] = set([])
prots_spc = dict((k, len(v)) for k, v in prots_spc2.items())
names_arr = np.array(list(prots_spc.keys()))
v_arr = np.array(list(prots_spc.values()))
n_arr = np.array([protsN[k] for k in prots_spc])
top100decoy_score = [prots_spc.get(dprot, 0) for dprot in protsN if isdecoy_key(dprot)]
top100decoy_N = [val for key, val in protsN.items() if isdecoy_key(key)]
p = np.mean(top100decoy_score) / np.mean(top100decoy_N)
print('p=%s' % (np.mean(top100decoy_score) / np.mean(top100decoy_N)))
prots_spc = dict()
all_pvals = calc_sf_all(v_arr, n_arr, p)
for idx, k in enumerate(names_arr):
prots_spc[k] = all_pvals[idx]
checked = set()
for k, v in list(prots_spc.items()):
if k not in checked:
if isdecoy_key(k):
if prots_spc.get(k.replace(prefix, ''), -1e6) > v:
del prots_spc[k]
checked.add(k.replace(prefix, ''))
else:
if prots_spc.get(prefix + k, -1e6) > v:
del prots_spc[k]
checked.add(prefix + k)
filtered_prots = aux.filter(prots_spc.items(), fdr=0.05, key=escore, is_decoy=isdecoy, remove_decoy=True, formula=1,
full_output=True)
identified_proteins = 0
for x in filtered_prots:
identified_proteins += 1
print('results for default search: number of identified proteins = %d' % (identified_proteins, ))
print('Running mass recalibration...')
e_ind = resdict['mc'] == 0
resdict2 = filter_results(resdict, e_ind)
true_md = []
true_isotopes = []
true_seqs = []
true_prots = set(x[0] for x in filtered_prots)
for pep, proteins in pept_prot.items():
if any(protein in true_prots for protein in proteins):
true_seqs.append(pep)
e_ind = np.in1d(resdict2['seqs'], true_seqs)
true_seqs = resdict2['seqs'][e_ind]
true_md.extend(resdict2['md'][e_ind])
true_md = np.array(true_md)
# true_isotopes.extend(resdict2['Isotopes'][e_ind])
true_isotopes.extend(np.array([Isotopes[iorig] for iorig in resdict2['iorig']])[e_ind])
true_isotopes = np.array(true_isotopes)
true_intensities = np.array([Is[iorig] for iorig in resdict2['iorig']])[e_ind]
# true_intensities = np.array(resdict2['Is'][e_ind])
# true_rt = np.array(resdict2['rt'][e_ind])
# true_mz = np.array(resdict2['mzraw'][e_ind])
true_rt = np.array([rts[iorig] for iorig in resdict2['iorig']])[e_ind]
true_mz = np.array([mzraw[iorig] for iorig in resdict2['iorig']])[e_ind]
df1 = pd.DataFrame()
df1['mass diff'] = true_md
df1['mz'] = true_mz
df1['RT'] = true_rt
df1['Intensity'] = true_intensities
df1['seqs'] = true_seqs
df1['orig_md'] = true_md
mass_left = args['ptol']
mass_right = args['ptol']
try:
mass_shift, mass_sigma, covvalue = calibrate_mass(0.001, mass_left, mass_right, true_md)
except:
mass_shift, mass_sigma, covvalue = calibrate_mass(0.01, mass_left, mass_right, true_md)
print('Calibrated mass shift: ', mass_shift)
print('Calibrated mass sigma in ppm: ', mass_sigma)
out_log.write('Calibrated mass shift: %s\n' % (mass_shift, ))
out_log.write('Calibrated mass sigma in ppm: %s\n' % (mass_sigma, ))
e_all = abs(resdict['md'] - mass_shift) / (mass_sigma)
r = 3.0
e_ind = e_all <= r
resdict = filter_results(resdict, e_ind)
zs_all = e_all[e_ind] ** 2
e_ind = np.array([Isotopes[iorig] for iorig in resdict['iorig']]) >= min_isotopes_calibration
resdict2 = filter_results(resdict, e_ind)
e_ind = resdict2['mc'] == 0
resdict2 = filter_results(resdict2, e_ind)
p1 = set(resdict2['seqs'])
prots_spc2 = defaultdict(set)
for pep, proteins in pept_prot.items():
if pep in p1:
for protein in proteins:
prots_spc2[protein].add(pep)
for k in protsN:
if k not in prots_spc2:
prots_spc2[k] = set([])
prots_spc = dict((k, len(v)) for k, v in prots_spc2.items())
names_arr = np.array(list(prots_spc.keys()))
v_arr = np.array(list(prots_spc.values()))
n_arr = np.array([protsN[k] for k in prots_spc])
top100decoy_score = [prots_spc.get(dprot, 0) for dprot in protsN if isdecoy_key(dprot)]
top100decoy_N = [val for key, val in protsN.items() if isdecoy_key(key)]
p = np.mean(top100decoy_score) / np.mean(top100decoy_N)
print('p=%s' % (np.mean(top100decoy_score) / np.mean(top100decoy_N)))
prots_spc = dict()
all_pvals = calc_sf_all(v_arr, n_arr, p)
for idx, k in enumerate(names_arr):
prots_spc[k] = all_pvals[idx]
checked = set()
for k, v in list(prots_spc.items()):
if k not in checked:
if isdecoy_key(k):
if prots_spc.get(k.replace(prefix, ''), -1e6) > v:
del prots_spc[k]
checked.add(k.replace(prefix, ''))
else:
if prots_spc.get(prefix + k, -1e6) > v:
del prots_spc[k]
checked.add(prefix + k)
filtered_prots = aux.filter(prots_spc.items(), fdr=0.05, key=escore, is_decoy=isdecoy, remove_decoy=True, formula=1,
full_output=True)
identified_proteins = 0
for x in filtered_prots:
identified_proteins += 1
print('results for default search after mass calibration: number of identified proteins = %d' % (identified_proteins, ))
print('Running RT prediction...')
e_ind = np.array([Isotopes[iorig] for iorig in resdict['iorig']]) >= min_isotopes_calibration
# e_ind = resdict['Isotopes'] >= min_isotopes_calibration
# e_ind = resdict['Isotopes'] >= 1
resdict2 = filter_results(resdict, e_ind)
e_ind = resdict2['mc'] == 0
resdict2 = filter_results(resdict2, e_ind)
true_seqs = []
true_rt = []
true_isotopes = []
true_prots = set(x[0] for x in filtered_prots)#[:5])
for pep, proteins in pept_prot.items():
if any(protein in true_prots for protein in proteins):
true_seqs.append(pep)
e_ind = np.in1d(resdict2['seqs'], true_seqs)
true_seqs = resdict2['seqs'][e_ind]
true_rt.extend(np.array([rts[iorig] for iorig in resdict2['iorig']])[e_ind])
# true_rt.extend(resdict2['rt'][e_ind])
true_rt = np.array(true_rt)
true_isotopes.extend(np.array([Isotopes[iorig] for iorig in resdict2['iorig']])[e_ind])
# true_isotopes.extend(resdict2['Isotopes'][e_ind])
true_isotopes = np.array(true_isotopes)
e_all = abs(resdict2['md'][e_ind] - mass_shift) / (mass_sigma)
zs_all_tmp = e_all ** 2
e_ind = true_isotopes >= min_isotopes_calibration
true_seqs = true_seqs[e_ind]
true_rt = true_rt[e_ind]
true_isotopes = true_isotopes[e_ind]
zs_all_tmp = zs_all_tmp[e_ind]
e_ind = np.argsort(zs_all_tmp)
true_seqs = true_seqs[e_ind]
true_rt = true_rt[e_ind]
true_isotopes = true_isotopes[e_ind]
true_seqs = true_seqs[:2500]
true_rt = true_rt[:2500]
true_isotopes = true_isotopes[:2500]
best_seq = defaultdict(list)
newseqs = []
newRTs = []
for seq, RT in zip(true_seqs, true_rt):
best_seq[seq].append(RT)
for k, v in best_seq.items():
newseqs.append(k)
newRTs.append(np.median(v))
true_seqs = np.array(newseqs)
true_rt = np.array(newRTs)
if calib_path:
df1 = pd.read_csv(calib_path, sep='\t')
true_seqs2 = df1['peptide'].values
true_rt2 = df1['RT exp'].values
else:
true_seqs2 = true_seqs
true_rt2 = true_rt
if args['ts'] != 2 and deeplc_path:
outtrain_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
outtrain = open(outtrain_name, 'w')
outcalib_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
outcalib = open(outcalib_name, 'w')
outres_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
ns = true_seqs
nr = true_rt
print('Peptides used for RT prediction: %d' % (len(ns), ))
ns2 = true_seqs2
nr2 = true_rt2
outtrain.write('seq,modifications,tr\n')
for seq, RT in zip(ns2, nr2):
mods_tmp = '|'.join([str(idx+1)+'|Carbamidomethyl' for idx, aa in enumerate(seq) if aa == 'C'])
outtrain.write(seq + ',' + str(mods_tmp) + ',' + str(RT) + '\n')
outtrain.close()
outcalib.write('seq,modifications,tr\n')
for seq, RT in zip(ns, nr):
mods_tmp = '|'.join([str(idx+1)+'|Carbamidomethyl' for idx, aa in enumerate(seq) if aa == 'C'])
outcalib.write(seq + ',' + str(mods_tmp) + ',' + str(RT) + '\n')
outcalib.close()
subprocess.call([deeplc_path, '--file_pred', outcalib_name, '--file_cal', outtrain_name, '--file_pred_out', outres_name])
pepdict = dict()
train_RT = []
train_seq = []
for x in open(outres_name).readlines()[1:]:
_, seq, _, RTexp, RT = x.strip().split(',')
pepdict[seq] = float(RT)
train_seq.append(seq)
train_RT.append(float(RTexp))
train_RT = np.array(train_RT)
RT_pred = np.array([pepdict[s] for s in train_seq])
rt_diff_tmp = RT_pred - train_RT
RT_left = -min(rt_diff_tmp)
RT_right = max(rt_diff_tmp)
try:
start_width = (scoreatpercentile(rt_diff_tmp, 95) - scoreatpercentile(rt_diff_tmp, 5)) / 100
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(start_width, RT_left, RT_right, rt_diff_tmp)
except:
start_width = (scoreatpercentile(rt_diff_tmp, 95) - scoreatpercentile(rt_diff_tmp, 5)) / 50
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(start_width, RT_left, RT_right, rt_diff_tmp)
if np.isinf(covvalue):
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(0.1, RT_left, RT_right, rt_diff_tmp)
if np.isinf(covvalue):
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(1.0, RT_left, RT_right, rt_diff_tmp)
print('Calibrated RT shift: ', XRT_shift)
print('Calibrated RT sigma: ', XRT_sigma)
aa, bb, RR, ss = aux.linear_regression(RT_pred, train_RT)
else:
if args['ts'] != 2 and elude_path:
outtrain_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
outtrain = open(outtrain_name, 'w')
outcalib_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
outcalib = open(outcalib_name, 'w')
outres_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
ns = true_seqs
nr = true_rt
print('Peptides used for RT prediction: %d' % (len(ns), ))
ns2 = true_seqs2
nr2 = true_rt2
for seq, RT in zip(ns, nr):
outtrain.write(seq + '\t' + str(RT) + '\n')
outtrain.close()
for seq, RT in zip(ns, nr):
outcalib.write(seq + '\t' + str(RT) + '\n')
outcalib.close()
subprocess.call([elude_path, '-t', outtrain_name, '-e', outcalib_name, '-a', '-g', '-o', outres_name])
pepdict = dict()
train_RT = []
train_seq = []
for x in open(outres_name).readlines()[3:]:
seq, RT, RTexp = x.strip().split('\t')
pepdict[seq] = float(RT)
train_seq.append(seq)
train_RT.append(float(RTexp))
train_RT = np.array(train_RT)
RT_pred = np.array([pepdict[s] for s in train_seq])
rt_diff_tmp = RT_pred - train_RT
RT_left = -min(rt_diff_tmp)
RT_right = max(rt_diff_tmp)
start_width = (scoreatpercentile(rt_diff_tmp, 95) - scoreatpercentile(rt_diff_tmp, 5)) / 50
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(start_width, RT_left, RT_right, rt_diff_tmp)
if np.isinf(covvalue):
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(0.1, RT_left, RT_right, rt_diff_tmp)
if np.isinf(covvalue):
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(1.0, RT_left, RT_right, rt_diff_tmp)
print('Calibrated RT shift: ', XRT_shift)
print('Calibrated RT sigma: ', XRT_sigma)
aa, bb, RR, ss = aux.linear_regression(RT_pred, train_RT)
else:
ns = true_seqs
nr = true_rt
ns2 = true_seqs2
nr2 = true_rt2
RC = achrom.get_RCs_vary_lcp(ns2, nr2)
RT_pred = np.array([achrom.calculate_RT(s, RC) for s in ns])
train_RT = nr
aa, bb, RR, ss = aux.linear_regression(RT_pred, nr)
rt_diff_tmp = RT_pred - nr
RT_left = -min(rt_diff_tmp)
RT_right = max(rt_diff_tmp)
start_width = (scoreatpercentile(rt_diff_tmp, 95) - scoreatpercentile(rt_diff_tmp, 5)) / 50
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(start_width, RT_left, RT_right, rt_diff_tmp)
if np.isinf(covvalue):
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(0.1, RT_left, RT_right, rt_diff_tmp)
if np.isinf(covvalue):
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(1.0, RT_left, RT_right, rt_diff_tmp)
print('Calibrated RT shift: ', XRT_shift)
print('Calibrated RT sigma: ', XRT_sigma)
print(aa, bb, RR, ss)
best_sigma = XRT_sigma
RT_sigma = XRT_sigma
else:
print('No matches found')
if args['ts']:
print('Running second stage RT prediction...')
ns = np.array(ns)
nr = np.array(nr)
idx = np.abs((rt_diff_tmp) - XRT_shift) <= 3 * XRT_sigma
ns = ns[idx]
nr = nr[idx]
if deeplc_path:
outtrain_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
outtrain = open(outtrain_name, 'w')
outres_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
print('Peptides used for RT prediction: %d' % (len(ns), ))
ll = len(ns)
ns = ns[:ll]
nr = nr[:ll]
outtrain.write('seq,modifications,tr\n')
for seq, RT in zip(ns, nr):
mods_tmp = '|'.join([str(idx+1)+'|Carbamidomethyl' for idx, aa in enumerate(seq) if aa == 'C'])
outtrain.write(seq + ',' + str(mods_tmp) + ',' + str(RT) + '\n')
outtrain.close()
subprocess.call([deeplc_path, '--file_pred', outtrain_name, '--file_cal', outtrain_name, '--file_pred_out', outres_name])
pepdict = dict()
train_RT = []
train_seq = []
for x in open(outres_name).readlines()[1:]:
_, seq, _, RTexp, RT = x.strip().split(',')
pepdict[seq] = float(RT)
train_seq.append(seq)
train_RT.append(float(RTexp))
train_RT = np.array(train_RT)
RT_pred = np.array([pepdict[s] for s in train_seq])
rt_diff_tmp = RT_pred - train_RT
RT_left = -min(rt_diff_tmp)
RT_right = max(rt_diff_tmp)
try:
start_width = (scoreatpercentile(rt_diff_tmp, 95) - scoreatpercentile(rt_diff_tmp, 5)) / 100
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(start_width, RT_left, RT_right, rt_diff_tmp)
except:
start_width = (scoreatpercentile(rt_diff_tmp, 95) - scoreatpercentile(rt_diff_tmp, 5)) / 50
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(start_width, RT_left, RT_right, rt_diff_tmp)
if np.isinf(covvalue):
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(0.1, RT_left, RT_right, rt_diff_tmp)
if np.isinf(covvalue):
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(1.0, RT_left, RT_right, rt_diff_tmp)
print('Calibrated RT shift: ', XRT_shift)
print('Calibrated RT sigma: ', XRT_sigma)
aa, bb, RR, ss = aux.linear_regression(RT_pred, train_RT)
else:
if elude_path:
outtrain_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
outtrain = open(outtrain_name, 'w')
outres_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
print('Peptides used for RT prediction: %d' % (len(ns), ))
ll = len(ns)
ns = ns[:ll]
nr = nr[:ll]
for seq, RT in zip(ns, nr):
outtrain.write(seq + '\t' + str(RT) + '\n')
outtrain.close()
subprocess.call([elude_path, '-t', outtrain_name, '-e', outtrain_name, '-a', '-g', '-o', outres_name])
pepdict = dict()
train_RT = []
train_seq = []
for x in open(outres_name).readlines()[3:]:
seq, RT, RTexp = x.strip().split('\t')
pepdict[seq] = float(RT)
train_seq.append(seq)
train_RT.append(float(RTexp))
train_RT = np.array(train_RT)
RT_pred = np.array([pepdict[s] for s in train_seq])
rt_diff_tmp = RT_pred - train_RT
RT_left = -min(rt_diff_tmp)
RT_right = max(rt_diff_tmp)
start_width = (scoreatpercentile(rt_diff_tmp, 95) - scoreatpercentile(rt_diff_tmp, 5)) / 50
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(start_width, RT_left, RT_right, rt_diff_tmp)
if np.isinf(covvalue):
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(0.1, RT_left, RT_right, rt_diff_tmp)
if np.isinf(covvalue):
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(1.0, RT_left, RT_right, rt_diff_tmp)
print('Calibrated RT shift: ', XRT_shift)
print('Calibrated RT sigma: ', XRT_sigma)
aa, bb, RR, ss = aux.linear_regression(RT_pred, train_RT)
else:
RC = achrom.get_RCs_vary_lcp(ns, nr)
RT_pred = np.array([achrom.calculate_RT(s, RC) for s in ns])
aa, bb, RR, ss = aux.linear_regression(RT_pred, nr)
rt_diff_tmp = RT_pred - nr
RT_left = -min(rt_diff_tmp)
RT_right = max(rt_diff_tmp)
start_width = (scoreatpercentile(rt_diff_tmp, 95) - scoreatpercentile(rt_diff_tmp, 5)) / 50
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(start_width, RT_left, RT_right, rt_diff_tmp)
if np.isinf(covvalue):
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(0.1, RT_left, RT_right, rt_diff_tmp)
if np.isinf(covvalue):
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(1.0, RT_left, RT_right, rt_diff_tmp)
print('Calibrated RT shift: ', XRT_shift)
print('Calibrated RT sigma: ', XRT_sigma)
print(aa, bb, RR, ss)
best_sigma = XRT_sigma
RT_sigma = XRT_sigma
out_log.write('Calibrated RT shift: %s\n' % (XRT_shift, ))
out_log.write('Calibrated RT sigma: %s\n' % (XRT_sigma, ))
out_log.close()
p1 = set(resdict['seqs'])
n = args['nproc']
if deeplc_path:
pepdict = dict()
outtrain_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
outtrain = open(outtrain_name, 'w')
outres_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
outtrain.write('seq,modifications,tr\n')
for seq, RT in zip(ns, nr):
mods_tmp = '|'.join([str(idx+1)+'|Carbamidomethyl' for idx, aa in enumerate(seq) if aa == 'C'])
outtrain.write(seq + ',' + str(mods_tmp) + ',' + str(RT) + '\n')
outtrain.close()
outtest_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
outtest = open(outtest_name, 'w')
outtest.write('seq,modifications\n')
for seq in p1:
mods_tmp = '|'.join([str(idx+1)+'|Carbamidomethyl' for idx, aa in enumerate(seq) if aa == 'C'])
outtest.write(seq + ',' + str(mods_tmp) + '\n')
outtest.close()
if args['deeplc_library']:
print('Using deeplc library...')
subprocess.call([deeplc_path, '--file_pred', outtest_name, '--file_cal', outtrain_name, '--file_pred_out', outres_name, '--use_library', args['deeplc_library'], '--write_library'])
else:
subprocess.call([deeplc_path, '--file_pred', outtest_name, '--file_cal', outtrain_name, '--file_pred_out', outres_name])
for x in open(outres_name).readlines()[1:]:
_, seq, _, RT = x.strip().split(',')
pepdict[seq] = float(RT)
else:
if n == 1 or os.name == 'nt':
qin = list(p1)
qout = []
if elude_path:
pepdict = worker_RT(qin, qout, 0, 1, False, elude_path, ns, nr, True)
else:
pepdict = worker_RT(qin, qout, 0, 1, RC, False, False, False, True)
else:
qin = list(p1)
qout = Queue()
procs = []
for i in range(n):
if elude_path:
p = Process(target=worker_RT, args=(qin, qout, i, n, False, elude_path, ns, nr))
else:
p = Process(target=worker_RT, args=(qin, qout, i, n, RC, False, False, False))
p.start()
procs.append(p)
pepdict = dict()
for _ in range(n):
for item in iter(qout.get, None):
for k, v in item.items():
pepdict[k] = v
for p in procs:
p.join()
rt_pred = np.array([pepdict[s] for s in resdict['seqs']])
rt_diff = np.array([rts[iorig] for iorig in resdict['iorig']]) - rt_pred
# rt_diff = resdict['rt'] - rt_pred
e_all = (rt_diff) ** 2 / (RT_sigma ** 2)
r = 9.0
e_ind = e_all <= r
resdict = filter_results(resdict, e_ind)
rt_diff = rt_diff[e_ind]
rt_pred = rt_pred[e_ind]
with open(base_out_name + '_protsN.tsv', 'w') as output:
output.write('dbname\ttheor peptides\n')
for k, v in protsN.items():
output.write('\t'.join((k, str(v))) + '\n')
with open(base_out_name + '_PFMs.tsv', 'w') as output:
output.write('sequence\tmass diff\tRT diff\tpeak_id\tIntensity\tnScans\tnIsotopes\tproteins\tm/z\tRT\taveragineCorr\tcharge\tion_mobility\n')
# for seq, md, rtd, peak_id, I, nScans, nIsotopes, mzr, rtr, av, ch, im in zip(resdict['seqs'], resdict['md'], rt_diff, resdict['ids'], resdict['Is'], resdict['Scans'], resdict['Isotopes'], resdict['mzraw'], resdict['rt'], resdict['av'], resdict['ch'], resdict['im']):
for seq, md, rtd, iorig in zip(resdict['seqs'], resdict['md'], rt_diff, resdict['iorig']):
peak_id = ids[iorig]
I = Is[iorig]
nScans = Scans[iorig]
nIsotopes = Isotopes[iorig]
mzr = mzraw[iorig]
rtr = rts[iorig]
av = avraw[iorig]
ch = charges[iorig]
im = imraw[iorig]
output.write('\t'.join((seq, str(md), str(rtd), str(peak_id), str(I), str(nScans), str(nIsotopes), ';'.join(pept_prot[seq]), str(mzr), str(rtr), str(av), str(ch), str(im))) + '\n')
e_ind = resdict['mc'] == 0
resdict = filter_results(resdict, e_ind)
rt_diff = rt_diff[e_ind]
rt_pred = rt_pred[e_ind]
mass_diff = (resdict['md'] - mass_shift) / (mass_sigma)
rt_diff = (np.array([rts[iorig] for iorig in resdict['iorig']]) - rt_pred) / RT_sigma
# rt_diff = (resdict['rt'] - rt_pred) / RT_sigma
prefix = 'DECOY_'
isdecoy = lambda x: x[0].startswith(prefix)
isdecoy_key = lambda x: x.startswith(prefix)
escore = lambda x: -x[1]
SEED = 42
# Hyperparameter grid
param_grid = {
'boosting_type': ['gbdt', ],
'num_leaves': list(range(10, 1000)),
'learning_rate': list(np.logspace(np.log10(0.001), np.log10(0.05), base = 10, num = 1000)),
'metric': ['rmse', ],
'verbose': [-1, ],
'num_threads': [args['nproc'], ],
}
def get_X_array(df, feature_columns):
return df.loc[:, feature_columns].values
def get_Y_array_pfms(df):
return df.loc[:, 'decoy'].values
def get_features_pfms(dataframe):
feature_columns = dataframe.columns
columns_to_remove = []
banned_features = {
'iorig',
'ids',
'seqs',
'decoy',
'preds',
'av',
'Scans',
'proteins',
'peptide',
'md',
}
for feature in feature_columns:
if feature in banned_features:
columns_to_remove.append(feature)
feature_columns = feature_columns.drop(columns_to_remove)
return feature_columns
def objective_pfms(df, hyperparameters, iteration, threshold=0):
"""Objective function for grid and random search. Returns
the cross validation score from a set of hyperparameters."""
all_res = []
groups = df['peptide']
ix = df.index.values
unique = np.unique(groups)
np.random.RandomState(SEED).shuffle(unique)
result = []
for split in np.array_split(unique, 3):
mask = groups.isin(split)
train, test = ix[~mask], ix[mask]
train_df = df.iloc[train]
test_df = df.iloc[test]
feature_columns = get_features_pfms(df)
model = get_cat_model_final_pfms(train_df, hyperparameters, feature_columns)
df.loc[mask, 'preds'] = model.predict(get_X_array(df.loc[mask, :], feature_columns))
train_df = df.iloc[train]
test_df = df.iloc[test]
fpr, tpr, thresholds = metrics.roc_curve(get_Y_array_pfms(test_df), test_df['preds'])
shr_v = metrics.auc(fpr, tpr)
# shr_v = len(aux.filter(test_df, fdr=0.25, key='preds', is_decoy='decoy'))
all_res.append(shr_v)
# print(shr_v)
if shr_v < threshold:
all_res = [0, ]
break
shr_v = np.mean(all_res)
# print(shr_v)
# print('\n')
return [shr_v, hyperparameters, iteration, all_res]
def random_search_pfms(df, param_grid, out_file, max_evals):
"""Random search for hyperparameter optimization.
Writes the result of each search iteration to a csv file."""
threshold = 0
# Dataframe for results
results = pd.DataFrame(columns = ['sharpe', 'params', 'iteration', 'all_res'],
index = list(range(max_evals)))
for i in range(max_evals):
print('%d/%d' % (i+1, max_evals))
# Choose random hyperparameters
random_params = {k: random.sample(v, 1)[0] for k, v in param_grid.items()}
# Evaluate randomly selected hyperparameters
eval_results = objective_pfms(df, random_params, i, threshold)
results.loc[i, :] = eval_results
threshold = max(threshold, np.mean(eval_results[3]) - 3 * np.std(eval_results[3]))
# open connection (append option) and write results
of_connection = open(out_file, 'a')
writer = csv.writer(of_connection)
writer.writerow(eval_results)
# make sure to close connection
of_connection.close()
# Sort with best score on top
results.sort_values('sharpe', ascending = False, inplace = True)
results.reset_index(inplace = True)
return results
def get_cat_model_pfms(df, hyperparameters, feature_columns, train, test):
feature_columns = list(feature_columns)
dtrain = lgb.Dataset(get_X_array(train, feature_columns), get_Y_array_pfms(train), feature_name=feature_columns, free_raw_data=False)
dvalid = lgb.Dataset(get_X_array(test, feature_columns), get_Y_array_pfms(test), feature_name=feature_columns, free_raw_data=False)
np.random.seed(SEED)
evals_result = {}
model = lgb.train(hyperparameters, dtrain, num_boost_round=5000, valid_sets=(dvalid,), valid_names=('valid',), verbose_eval=False,
early_stopping_rounds=20, evals_result=evals_result)
return model
def get_cat_model_final_pfms(df, hyperparameters, feature_columns):
feature_columns = list(feature_columns)
train = df
dtrain = lgb.Dataset(get_X_array(train, feature_columns), get_Y_array_pfms(train), feature_name=feature_columns, free_raw_data=False)
np.random.seed(SEED)
model = lgb.train(hyperparameters, dtrain, num_boost_round=100)
return model
df1 = pd.DataFrame()
for k in resdict.keys():
df1[k] = resdict[k]
df1['ids'] = df1['iorig'].apply(lambda x: ids[x])
df1['Is'] = df1['iorig'].apply(lambda x: Is[x])
df1['Scans'] = df1['iorig'].apply(lambda x: Scans[x])
df1['Isotopes'] = df1['iorig'].apply(lambda x: Isotopes[x])
df1['mzraw'] = df1['iorig'].apply(lambda x: mzraw[x])
df1['rt'] = df1['iorig'].apply(lambda x: rts[x])
df1['av'] = df1['iorig'].apply(lambda x: avraw[x])
df1['ch'] = df1['iorig'].apply(lambda x: charges[x])
df1['im'] = df1['iorig'].apply(lambda x: imraw[x])
df1['mass_diff'] = mass_diff
df1['rt_diff'] = rt_diff
df1['decoy'] = df1['seqs'].apply(lambda x: all(z.startswith(prefix) for z in pept_prot[x]))
df1['peptide'] = df1['seqs']
mass_dict = {}
pI_dict = {}
charge_dict = {}
for pep in set(df1['peptide']):
try:
mass_dict[pep] = mass.fast_mass2(pep)
pI_dict[pep] = electrochem.pI(pep)
charge_dict[pep] = electrochem.charge(pep, pH=7.0)
except:
mass_dict[pep] = 0
pI_dict[pep] = 0
charge_dict[pep] = 0
df1['plen'] = df1['peptide'].apply(lambda z: len(z))
df1['mass'] = df1['peptide'].apply(lambda x: mass_dict[x])
df1['pI'] = df1['peptide'].apply(lambda x: pI_dict[x])
df1['charge_theor'] = df1['peptide'].apply(lambda x: charge_dict[x])
df1['rt_diff_abs'] = df1['rt_diff'].abs()
df1['rt_diff_abs_pdiff'] = df1['rt_diff_abs'] - df1.groupby('ids')['rt_diff_abs'].transform('median')
df1['rt_diff_abs_pnorm'] = df1['rt_diff_abs'] / (df1.groupby('ids')['rt_diff_abs'].transform('sum') + 1e-2)
df1['id_count'] = df1.groupby('ids')['mass_diff'].transform('count')
df1['seq_count'] = df1.groupby('peptide')['mass_diff'].transform('count')
df1t5 = df1.sort_values(by='Is', ascending=False).copy()
df1t5 = df1t5.drop_duplicates(subset='peptide', keep='first')
if args['ml']:
print('Start Machine Learning on PFMs...')
print('Features used for machine learning: ', get_features_pfms(df1))
MAX_EVALS = 25
out_file = 'test_randomCV_PFMs_2.tsv'
of_connection = open(out_file, 'w')
writer = csv.writer(of_connection)
# Write column names
headers = ['auc', 'params', 'iteration', 'all_res']
writer.writerow(headers)
of_connection.close()
random_results = random_search_pfms(df1, param_grid, out_file, MAX_EVALS)
random_results = pd.read_csv(out_file)
random_results = random_results[random_results['auc'] != 'auc']
random_results['params'] = random_results['params'].apply(lambda x: ast.literal_eval(x))
convert_dict = {'auc': float,
}
random_results = random_results.astype(convert_dict)
bestparams = random_results.sort_values(by='auc',ascending=False)['params'].values[0]
bestparams['num_threads'] = args['nproc']
print(random_results.sort_values(by='auc',ascending=False)['auc'].values[0])
groups = df1['peptide']
ix = df1.index.values
unique = np.unique(groups)
np.random.RandomState(SEED).shuffle(unique)
result = []
for split in np.array_split(unique, 3):
mask = groups.isin(split)
train, test = ix[~mask], ix[mask]
train_df = df1.iloc[train]
test_df = df1.iloc[test]
feature_columns = list(get_features_pfms(train_df))
model = get_cat_model_final_pfms(train_df, bestparams, feature_columns)
df1.loc[test, 'preds'] = model.predict(get_X_array(test_df, feature_columns))
else:
df1['preds'] = np.power(df1['mass_diff'], 2) + np.power(df1['rt_diff'], 2)
df1['qpreds'] = pd.qcut(df1['preds'], 10, labels=range(10))
df1['proteins'] = df1['seqs'].apply(lambda x: ';'.join(pept_prot[x]))
df1.to_csv(base_out_name + '_PFMs_ML.tsv', sep='\t', index=False)
resdict['qpreds'] = df1['qpreds'].values
resdict['ids'] = df1['ids'].values
mass_diff = resdict['qpreds']
rt_diff = resdict['qpreds']
p1 = set(resdict['seqs'])
prots_spc2 = defaultdict(set)
for pep, proteins in pept_prot.items():
if pep in p1:
for protein in proteins:
prots_spc2[protein].add(pep)
for k in protsN:
if k not in prots_spc2:
prots_spc2[k] = set([])
prots_spc = dict((k, len(v)) for k, v in prots_spc2.items())
names_arr = np.array(list(prots_spc.keys()))
v_arr = np.array(list(prots_spc.values()))
n_arr = np.array([protsN[k] for k in prots_spc])
top100decoy_score = [prots_spc.get(dprot, 0) for dprot in protsN if isdecoy_key(dprot)]
top100decoy_N = [val for key, val in protsN.items() if isdecoy_key(key)]
p = np.mean(top100decoy_score) / np.mean(top100decoy_N)
print('p=%s' % (p, ))
prots_spc = dict()
all_pvals = calc_sf_all(v_arr, n_arr, p)
for idx, k in enumerate(names_arr):
prots_spc[k] = all_pvals[idx]
final_iteration(resdict, mass_diff, rt_diff, pept_prot, protsN, base_out_name, prefix, isdecoy, isdecoy_key, escore, fdr, args['nproc'], fname)
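# The random search above scores every candidate hyperparameter set with a
# 3-fold split that is grouped by peptide, so the same peptide sequence never
# ends up in both the train and the test fold. Below is a minimal,
# self-contained sketch of that grouping idea; the function name and the toy
# data are illustrative only and are not part of the pipeline above.
def _grouped_threefold_splits_example():
    import numpy as np
    import pandas as pd

    df = pd.DataFrame({
        'peptide': ['PEPTIDEA', 'PEPTIDEA', 'PEPTIDEB', 'PEPTIDEC', 'PEPTIDEC', 'PEPTIDED'],
        'score': [0.1, 0.2, 0.5, 0.3, 0.4, 0.9],
    })
    groups = df['peptide']
    ix = df.index.values
    unique = np.unique(groups)
    np.random.RandomState(42).shuffle(unique)
    splits = []
    for split in np.array_split(unique, 3):
        mask = groups.isin(split)
        # all rows that share a peptide stay on the same side of the split
        train, test = ix[~mask], ix[mask]
        splits.append((train, test))
    return splits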
def worker(qin, qout, mass_diff, rt_diff, resdict, protsN, pept_prot, isdecoy_key, isdecoy, fdr, prots_spc_basic2, win_sys=False):
for item in (iter(qin.get, None) if not win_sys else qin):
mass_koef, rtt_koef = item
e_ind = mass_diff <= mass_koef
resdict2 = filter_results(resdict, e_ind)
features_dict = dict()
for pep in set(resdict2['seqs']):
for bprot in pept_prot[pep]:
prot_score = prots_spc_basic2[bprot]
if prot_score > features_dict.get(pep, [-1, ])[-1]:
features_dict[pep] = (bprot, prot_score)
prots_spc_basic = dict()
p1 = set(resdict2['seqs'])
pep_pid = defaultdict(set)
pid_pep = defaultdict(set)
banned_dict = dict()
for pep, pid in zip(resdict2['seqs'], resdict2['ids']):
# for pep, pid in zip(resdict2['seqs'], [ids[iorig] for iorig in resdict2['iorig']]):
pep_pid[pep].add(pid)
pid_pep[pid].add(pep)
if pep in banned_dict:
banned_dict[pep] += 1
else:
banned_dict[pep] = 1
if len(p1):
prots_spc_final = dict()
prots_spc_copy = False
prots_spc2 = False
unstable_prots = set()
p0 = False
names_arr = False
tmp_spc_new = False
decoy_set = False
while 1:
if not prots_spc2:
best_match_dict = dict()
n_map_dict = defaultdict(list)
for k, v in protsN.items():
n_map_dict[v].append(k)
decoy_set = set()
for k in protsN:
if isdecoy_key(k):
decoy_set.add(k)
decoy_set = list(decoy_set)
prots_spc2 = defaultdict(set)
for pep, proteins in pept_prot.items():
if pep in p1:
for protein in proteins:
if protein == features_dict[pep][0]:
prots_spc2[protein].add(pep)
for k in protsN:
if k not in prots_spc2:
prots_spc2[k] = set([])
prots_spc2 = dict(prots_spc2)
unstable_prots = set(prots_spc2.keys())
top100decoy_N = sum([val for key, val in protsN.items() if isdecoy_key(key)])
names_arr = np.array(list(prots_spc2.keys()))
n_arr = np.array([protsN[k] for k in names_arr])
tmp_spc_new = dict((k, len(v)) for k, v in prots_spc2.items())
top100decoy_score_tmp = [tmp_spc_new.get(dprot, 0) for dprot in decoy_set]
top100decoy_score_tmp_sum = float(sum(top100decoy_score_tmp))
tmp_spc = tmp_spc_new
prots_spc = tmp_spc_new
if not prots_spc_copy:
prots_spc_copy = deepcopy(prots_spc)
for idx, v in enumerate(decoy_set):
if v in unstable_prots:
top100decoy_score_tmp_sum -= top100decoy_score_tmp[idx]
top100decoy_score_tmp[idx] = prots_spc.get(v, 0)
top100decoy_score_tmp_sum += top100decoy_score_tmp[idx]
p = top100decoy_score_tmp_sum / top100decoy_N
if not p0:
p0 = float(p)
n_change = set(protsN[k] for k in unstable_prots)
for n_val in n_change:
for k in n_map_dict[n_val]:
v = prots_spc[k]
if n_val not in best_match_dict or v > prots_spc[best_match_dict[n_val]]:
best_match_dict[n_val] = k
n_arr_small = []
names_arr_small = []
v_arr_small = []
for k, v in best_match_dict.items():
n_arr_small.append(k)
names_arr_small.append(v)
v_arr_small.append(prots_spc[v])
prots_spc_basic = dict()
all_pvals = calc_sf_all(np.array(v_arr_small), n_arr_small, p)
for idx, k in enumerate(names_arr_small):
prots_spc_basic[k] = all_pvals[idx]
best_prot = utils.keywithmaxval(prots_spc_basic)
best_score = prots_spc_basic[best_prot]
unstable_prots = set()
if best_prot not in prots_spc_final:
prots_spc_final[best_prot] = best_score
banned_pids = set()
for pep in prots_spc2[best_prot]:
for pid in pep_pid[pep]:
banned_pids.add(pid)
for pid in banned_pids:
for pep in pid_pep[pid]:
banned_dict[pep] -= 1
if banned_dict[pep] == 0:
best_prot_val = features_dict[pep][0]
for bprot in pept_prot[pep]:
if bprot == best_prot_val:
tmp_spc_new[bprot] -= 1
unstable_prots.add(bprot)
else:
v_arr = np.array([prots_spc[k] for k in names_arr])
all_pvals = calc_sf_all(v_arr, n_arr, p)
for idx, k in enumerate(names_arr):
prots_spc_basic[k] = all_pvals[idx]
for k, v in prots_spc_basic.items():
if k not in prots_spc_final:
prots_spc_final[k] = v
break
try:
prot_fdr = aux.fdr(prots_spc_final.items(), is_decoy=isdecoy)
except ZeroDivisionError:
prot_fdr = 100.0
if prot_fdr >= 12.5 * fdr:
v_arr = np.array([prots_spc[k] for k in names_arr])
all_pvals = calc_sf_all(v_arr, n_arr, p)
for idx, k in enumerate(names_arr):
prots_spc_basic[k] = all_pvals[idx]
for k, v in prots_spc_basic.items():
if k not in prots_spc_final:
prots_spc_final[k] = v
break
if mass_koef == 9:
item2 = prots_spc_copy
else:
item2 = False
if not win_sys:
qout.put((prots_spc_final, item2))
else:
qout.append((prots_spc_final, item2))
if not win_sys:
qout.put(None)
else:
return qout
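# calibrate_RT_gaus is called above with several fallback start widths, and its
# (shift, sigma, covariance) output drives the retries whenever the covariance
# comes back infinite. Its implementation is not shown in this file, so the
# sketch below only illustrates the general idea it is assumed to follow:
# histogram the RT prediction errors and fit a Gaussian to estimate a global
# shift and sigma. The function name and all details here are illustrative.
def _fit_rt_error_gaussian_example(rt_diff, nbins=100):
    import numpy as np
    from scipy.optimize import curve_fit

    def gauss(x, a, mu, sigma):
        return a * np.exp(-(x - mu) ** 2 / (2 * sigma ** 2))

    counts, edges = np.histogram(rt_diff, bins=nbins)
    centers = (edges[:-1] + edges[1:]) / 2
    p0 = [counts.max(), np.median(rt_diff), np.std(rt_diff)]
    popt, pcov = curve_fit(gauss, centers, counts, p0=p0, maxfev=10000)
    shift, sigma = popt[1], abs(popt[2])
    # a non-finite covariance matrix signals a failed fit, analogous to the
    # np.isinf(covvalue) checks performed above
    return shift, sigma, pcov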
| 37.466706
| 276
| 0.564225
| 8,352
| 63,581
| 4.07136
| 0.073994
| 0.030349
| 0.014557
| 0.004117
| 0.68142
| 0.633425
| 0.601517
| 0.593195
| 0.578138
| 0.553111
| 0
| 0.018811
| 0.316069
| 63,581
| 1,696
| 277
| 37.488797
| 0.76316
| 0.029112
| 0
| 0.614855
| 0
| 0.000766
| 0.05699
| 0.006033
| 0
| 0
| 0
| 0.00059
| 0
| 1
| 0.015314
| false
| 0.000766
| 0.04977
| 0.002297
| 0.078867
| 0.032159
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cc930b4f7f09f8fa1727fc2d776abafb2a109c6e
| 5,991
|
py
|
Python
|
tests/test_contrib/test_prepredict.py
|
uricod/yellowbrick
|
6fb2e9b7e5b2998c6faa4dcca81a4b0f91bf29b4
|
[
"Apache-2.0"
] | 1
|
2017-03-03T03:26:54.000Z
|
2017-03-03T03:26:54.000Z
|
tests/test_contrib/test_prepredict.py
|
uricod/yellowbrick
|
6fb2e9b7e5b2998c6faa4dcca81a4b0f91bf29b4
|
[
"Apache-2.0"
] | 1
|
2021-11-10T18:06:19.000Z
|
2021-11-10T18:06:19.000Z
|
tests/test_contrib/test_prepredict.py
|
uricod/yellowbrick
|
6fb2e9b7e5b2998c6faa4dcca81a4b0f91bf29b4
|
[
"Apache-2.0"
] | null | null | null |
# tests.test_contrib.test_prepredict
# Test the prepredict estimator.
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Mon Jul 12 07:07:33 2021 -0400
#
# ID: test_prepredict.py [] benjamin@bengfort.com $
"""
Test the prepredict estimator.
"""
##########################################################################
## Imports
##########################################################################
import pytest
from io import BytesIO
from tests.fixtures import Dataset, Split
from tests.base import IS_WINDOWS_OR_CONDA, VisualTestCase
from sklearn.naive_bayes import GaussianNB
from sklearn.cluster import MiniBatchKMeans
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split as tts
from sklearn.datasets import make_classification, make_regression, make_blobs
from yellowbrick.contrib.prepredict import *
from yellowbrick.regressor import PredictionError
from yellowbrick.classifier import ClassificationReport
import numpy as np
# Set random state
np.random.seed()
##########################################################################
## Fixtures
##########################################################################
@pytest.fixture(scope="class")
def multiclass(request):
"""
Creates a random multiclass classification dataset fixture
"""
X, y = make_classification(
n_samples=500,
n_features=20,
n_informative=8,
n_redundant=2,
n_classes=6,
n_clusters_per_class=3,
random_state=87,
)
X_train, X_test, y_train, y_test = tts(X, y, test_size=0.2, random_state=93)
dataset = Dataset(Split(X_train, X_test), Split(y_train, y_test))
request.cls.multiclass = dataset
@pytest.fixture(scope="class")
def continuous(request):
"""
Creates a random continuous regression dataset fixture
"""
X, y = make_regression(
n_samples=500,
n_features=22,
n_informative=8,
random_state=42,
noise=0.2,
bias=0.2,
)
X_train, X_test, y_train, y_test = tts(X, y, test_size=0.2, random_state=11)
# Set a class attribute for regression
request.cls.continuous = Dataset(Split(X_train, X_test), Split(y_train, y_test))
@pytest.fixture(scope="class")
def blobs(request):
"""
Create a random blobs clustering dataset fixture
"""
X, y = make_blobs(
n_samples=1000, n_features=12, centers=6, shuffle=True, random_state=42
)
# Set a class attribute for blobs
request.cls.blobs = Dataset(X, y)
##########################################################################
## Tests
##########################################################################
@pytest.mark.usefixtures("multiclass")
@pytest.mark.usefixtures("continuous")
@pytest.mark.usefixtures("blobs")
class TestPrePrePredictEstimator(VisualTestCase):
"""
Pre-predict contrib tests.
"""
@pytest.mark.xfail(
IS_WINDOWS_OR_CONDA,
reason="image comparison failure on Conda 3.8 and 3.9 with RMS 19.307",
)
def test_prepredict_classifier(self):
"""
Test the prepredict estimator with classification report
"""
# Make prepredictions
X, y = self.multiclass.X, self.multiclass.y
y_pred = GaussianNB().fit(X.train, y.train).predict(X.test)
# Create prepredict estimator with prior predictions
estimator = PrePredict(y_pred, CLASSIFIER)
assert estimator.fit(X.train, y.train) is estimator
assert estimator.predict(X.train) is y_pred
assert estimator.score(X.test, y.test) == pytest.approx(0.41, rel=1e-3)
# Test that a visualizer works with the pre-predictions.
viz = ClassificationReport(estimator)
viz.fit(None, y.train)
viz.score(None, y.test)
viz.finalize()
self.assert_images_similar(viz)
def test_prepredict_regressor(self):
"""
Test the prepredict estimator with a prediction error plot
"""
# Make prepredictions
X, y = self.continuous.X, self.continuous.y
y_pred = LinearRegression().fit(X.train, y.train).predict(X.test)
# Create prepredict estimator with prior predictions
estimator = PrePredict(y_pred, REGRESSOR)
assert estimator.fit(X.train, y.train) is estimator
assert estimator.predict(X.train) is y_pred
assert estimator.score(X.test, y.test) == pytest.approx(0.9999983124154966, rel=1e-2)
# Test that a visualizer works with the pre-predictions.
viz = PredictionError(estimator)
viz.fit(X.train, y.train)
viz.score(X.test, y.test)
viz.finalize()
self.assert_images_similar(viz, tol=10.0)
def test_prepredict_clusterer(self):
"""
Test the prepredict estimator with a silhouette visualizer
"""
X = self.blobs.X
y_pred = MiniBatchKMeans(random_state=831).fit(X).predict(X)
# Create prepredict estimator with prior predictions
estimator = PrePredict(y_pred, CLUSTERER)
assert estimator.fit(X) is estimator
assert estimator.predict(X) is y_pred
assert estimator.score(X) == pytest.approx(0.5477478541994333, rel=1e-2)
# NOTE: there is currently no cluster visualizer that can take advantage of
# the prepredict utility since they all require learned attributes.
def test_load(self):
"""
Test the various ways that prepredict loads data
"""
# Test callable
ppe = PrePredict(lambda: self.multiclass.y.test)
assert ppe._load() is self.multiclass.y.test
# Test file-like object, assume that str and pathlib.Path work similarly
f = BytesIO()
np.save(f, self.continuous.y.test)
f.seek(0)
ppe = PrePredict(f)
assert np.array_equal(ppe._load(), self.continuous.y.test)
# Test direct array-like completed in other tests.
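# The tests above exercise PrePredict through fixtures; the helper below is a
# minimal standalone sketch of the same pattern (predictions computed ahead of
# time, then handed to a visualizer through PrePredict). The dataset and model
# are illustrative; only PrePredict, CLASSIFIER and ClassificationReport come
# from the imports already used in this module.
def _prepredict_standalone_example():
    X, y = make_classification(n_samples=200, n_classes=3, n_informative=4, random_state=0)
    X_train, X_test, y_train, y_test = tts(X, y, test_size=0.2, random_state=0)
    y_pred = GaussianNB().fit(X_train, y_train).predict(X_test)

    # Wrap the precomputed predictions so the visualizer never refits a model.
    estimator = PrePredict(y_pred, CLASSIFIER)
    viz = ClassificationReport(estimator)
    viz.fit(None, y_train)   # fit is a no-op for the wrapped estimator
    viz.score(None, y_test)  # scoring uses the stored predictions
    return viz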
| 32.037433
| 93
| 0.624604
| 726
| 5,991
| 5.044077
| 0.269972
| 0.019115
| 0.037684
| 0.0355
| 0.358274
| 0.278263
| 0.259694
| 0.232933
| 0.232933
| 0.209995
| 0
| 0.02437
| 0.212318
| 5,991
| 186
| 94
| 32.209677
| 0.751642
| 0.223335
| 0
| 0.139785
| 0
| 0
| 0.025112
| 0
| 0
| 0
| 0
| 0
| 0.139785
| 1
| 0.075269
| false
| 0
| 0.139785
| 0
| 0.225806
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cc93993829d4a8f1dc6a847d22783d8908c2b006
| 2,259
|
py
|
Python
|
examples/metrics/fc/example_idtxl_wrapper_multi.py
|
HelmchenLabSoftware/mesostat-dev
|
8baa7120b892fe0df893cdcf0f20f49876643d75
|
[
"MIT"
] | null | null | null |
examples/metrics/fc/example_idtxl_wrapper_multi.py
|
HelmchenLabSoftware/mesostat-dev
|
8baa7120b892fe0df893cdcf0f20f49876643d75
|
[
"MIT"
] | null | null | null |
examples/metrics/fc/example_idtxl_wrapper_multi.py
|
HelmchenLabSoftware/mesostat-dev
|
8baa7120b892fe0df893cdcf0f20f49876643d75
|
[
"MIT"
] | null | null | null |
import os, sys
import numpy as np
import matplotlib.pyplot as plt
# Export library path
rootname = "mesoscopic-functional-connectivity"
thispath = os.path.dirname(os.path.abspath(__file__))
rootpath = os.path.join(thispath[:thispath.index(rootname)], rootname)
print("Appending project path", rootpath)
sys.path.append(rootpath)
from codes.lib.info_metrics.info_metrics_generic import parallel_metric_2d
from codes.lib.models.test_lib import dynsys
from codes.lib.sweep_lib import DataSweep
############################
# Parameters
############################
# DynSys parameters
dynsysParam = {
'nNode' : 4, # Number of variables
'nData' : 4000, # Number of timesteps
'nTrial' : 20, # Number of trials
'dt' : 50, # ms, timestep
'tau' : 500, # ms, timescale of each mesoscopic area
'inpT' : 100, # Period of input oscillation
'inpMag' : 0.0, # Magnitude of the periodic input
'std' : 0.2, # STD of neuron noise
}
# IDTxl parameters
idtxlParam = {
'dim_order' : 'rps',
'cmi_estimator' : 'JidtGaussianCMI',
'max_lag_sources' : 5,
'min_lag_sources' : 1,
'window' : 50
}
############################
# Data
############################
nSweep = 10
data = dynsys(dynsysParam) #[trial x channel x time]
print("Generated data of shape", data.shape)
methods = ['BivariateTE', 'MultivariateTE']
dataSweep1 = DataSweep(data, idtxlParam, nSweepMax=nSweep)
timeIdxs = dataSweep1.get_target_time_idxs()
# print(timeIdxs)
#
# from codes.lib.sweep_lib import Sweep2D
#
# sweeper = Sweep2D(dataSweep1.iterator(), methods, idtxlParam["dim_order"], parTarget=True)
#
# for i, (method, data, iTrg) in enumerate(sweeper.iterator()):
# print(i, method, data.shape, iTrg)
results = parallel_metric_2d(dataSweep1.iterator(), "idtxl", methods, idtxlParam, nCore=None)
fig, ax = plt.subplots(nrows=nSweep, ncols=2)
fig.suptitle("TE computation for several windows of the data")
for iMethod, method in enumerate(methods):
ax[0][iMethod].set_title(method)
print(results[method].shape)
for iSweep in range(nSweep):
ax[iSweep][0].set_ylabel("time="+str(timeIdxs[iSweep]))
ax[iSweep][iMethod].imshow(results[method][iSweep][0])
plt.show()
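# DataSweep's internals are not part of this example file, but the 'window'
# parameter and the nSweepMax argument suggest that it slides a fixed-length
# window over the time axis of the [trial x channel x time] array. The helper
# below is only an illustrative sketch of that slicing idea; it is not the
# actual DataSweep implementation.
def example_time_windows(data3D, window=50, nSweepMax=10):
    nTrial, nChannel, nTime = data3D.shape
    starts = np.linspace(0, nTime - window, nSweepMax).astype(int)
    # Each element is a [trial x channel x window] slab starting at a different time index
    return [data3D[:, :, s:s + window] for s in starts]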
| 29.723684
| 93
| 0.660912
| 279
| 2,259
| 5.258065
| 0.512545
| 0.02454
| 0.03272
| 0.023177
| 0.035446
| 0.035446
| 0
| 0
| 0
| 0
| 0
| 0.019702
| 0.168659
| 2,259
| 76
| 94
| 29.723684
| 0.761448
| 0.234617
| 0
| 0
| 0
| 0
| 0.169279
| 0.021317
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.136364
| 0
| 0.136364
| 0.068182
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cc97881f01338acc3697d0a0591a9ee19e6dbad9
| 1,747
|
py
|
Python
|
project.py
|
blendacosta/CIP_FinalProject
|
8f50298d1b35cffe4569f5a78a158d6d699fa532
|
[
"MIT"
] | null | null | null |
project.py
|
blendacosta/CIP_FinalProject
|
8f50298d1b35cffe4569f5a78a158d6d699fa532
|
[
"MIT"
] | null | null | null |
project.py
|
blendacosta/CIP_FinalProject
|
8f50298d1b35cffe4569f5a78a158d6d699fa532
|
[
"MIT"
] | null | null | null |
'''
A Gameplay Mechanic [TS4]
By CozyGnomes (https://cozygnomes.tumblr.com/)
This is an existing document that contains several things you can do in your gameplay on TS4.
This program comes with the intention of automatically generating the related phrase without
having to search for each number (as instructed in the original document).
Project by Blenda C
'''
import random
FILE_NAME = 'gpmechanic.txt'
def get_mechanics():
# read the file containing the mechanics
list_mechanics = []
with open(FILE_NAME, encoding='utf-8') as file:
for line in file: # for-each loop gives lines one at a time
list_mechanics.append(line.strip()) # strip removes whitespace at the start or end
return list_mechanics
def introduction():
# gives the introduction and instructions to the user
print("")
print("A Gameplay Mechanic [TS4] by CozyGnomes")
print("Every time you press enter, a new suggestion for your gameplay will be generated. (to exit enter 0)")
def main():
introduction()
list_mechanics = get_mechanics()
chosen_value = random.choice(list_mechanics) # comes with ‘import random’
print("You should...")
print('')
print(chosen_value)
# here is the verification if the user wants to generate an alternative mechanic or wants to leave
while True:
user_input = input("")
if user_input == '':
print("or...")
chosen_value = random.choice(list_mechanics)
print(chosen_value)
elif user_input == '0':
break
# once generated, a final message
print('')
print("Now it's time to do it! Good luck!")
if __name__ == '__main__':
main()
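def _make_example_mechanics_file(path='gpmechanic_example.txt'):
    # The real gpmechanic.txt is not included here; this helper only writes a
    # hypothetical three-line file in the format get_mechanics() expects
    # (one suggestion per line) so the script can be tried end to end.
    suggestions = [
        'Have your Sim adopt a stray pet.',
        'Throw a dinner party for the neighbours.',
        'Switch careers after the next promotion.',
    ]
    with open(path, 'w', encoding='utf-8') as file:
        file.write('\n'.join(suggestions) + '\n')
    return path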
| 37.170213
| 112
| 0.661133
| 233
| 1,747
| 4.849785
| 0.506438
| 0.069027
| 0.030089
| 0.035398
| 0.120354
| 0.120354
| 0
| 0
| 0
| 0
| 0
| 0.004601
| 0.253578
| 1,747
| 47
| 113
| 37.170213
| 0.861963
| 0.39439
| 0
| 0.193548
| 0
| 0.032258
| 0.218656
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096774
| false
| 0
| 0.032258
| 0
| 0.16129
| 0.322581
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cc9d27fc8106d84826a9bc91142a53251a35c56a
| 62,936
|
py
|
Python
|
niimpy/preprocess.py
|
CxAalto/niimpy
|
ffd20f9c6aba1671d9035f47715d1649ced0e6e7
|
[
"MIT"
] | 5
|
2021-11-23T12:05:23.000Z
|
2022-02-11T12:57:50.000Z
|
niimpy/preprocess.py
|
niima-project/niimpy
|
975470507b1f8836d9e29d43601e345612b06a62
|
[
"MIT"
] | 62
|
2021-07-16T09:17:18.000Z
|
2022-03-16T11:27:50.000Z
|
niimpy/preprocess.py
|
niima-project/niimpy
|
975470507b1f8836d9e29d43601e345612b06a62
|
[
"MIT"
] | 6
|
2021-09-07T13:06:57.000Z
|
2022-03-14T11:26:30.000Z
|
################################################################################
# This is the main file for preprocessing smartphone sensor data #
# #
# Contributors: Anna Hakala & Ana Triana #
################################################################################
import niimpy
import numpy as np
import pandas as pd
from pandas import Series
import matplotlib.pyplot as plt
import seaborn as sns
import time
import datetime
import pytz
import niimpy.aalto
# backwards compatibility aliases
from .screen import screen_off, screen_duration
def date_range(df, begin, end):
"""Extract out a certain date range from a DataFrame.
Extract out a certain data range from a dataframe. The index must be the
dates, and the index must be sorted.
"""
# TODO: is this needed? Do normal pandas operation, timestamp
# checking is not really needed (and limits the formats that can
# be used, pandas can take more than pd.Timestamp)
if(begin!=None):
assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
else:
begin = df.index[0]
if(end!= None):
assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
else:
end = df.index[-1]
df_new = df.loc[begin:end]
return df_new
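# A minimal usage sketch for date_range: the frame only needs a sorted
# DatetimeIndex, and begin/end are pandas Timestamps. The toy data below is
# illustrative.
def _date_range_example():
    idx = pd.date_range('2020-01-01', periods=5, freq='D')
    df = pd.DataFrame({'value': range(5)}, index=idx)
    return date_range(df, pd.Timestamp('2020-01-02'), pd.Timestamp('2020-01-04'))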
# Above this point is function that should *stay* in preprocess.py
# Below this is functions that may or may not be moved.
def get_subjects(database):
""" Returns a list of the subjects in the database
Parameters
----------
database: database
"""
# TODO: deprecate, user should do ['user'].unique() on dataframe themselves
assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
questions = database.raw(table='AwareHyksConverter', user=niimpy.ALL)
subjects=list(questions.user.unique())
return subjects
def get_phq9(database,subject):
""" Returns the phq9 scores from the databases per subject
Parameters
----------
database: database
user: string
Returns
-------
phq9: Dataframe with the phq9 score
"""
# TODO: Most of this logic can be moved to sum_survey_cores
assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
assert isinstance(subject, str),"user not given in string format"
phq9 = niimpy.aalto.phq9_raw(database)
phq9 = phq9[phq9['user']==subject]
phq9 = phq9.drop(['user','source'],axis=1)
phq9 = phq9.sort_index()
phq9 = phq9.reset_index().drop_duplicates(subset=['index','id'],keep='first').set_index('index')
phq9 = phq9.groupby(phq9.index)['answer'].sum()
phq9 = phq9.to_frame()
return phq9
#surveys
def daily_affect_variability(questions, subject=None):
""" Returns two DataFrames corresponding to the daily affect variability and
mean daily affect, both measures defined in the OLO paper available in
10.1371/journal.pone.0110907. In brief, the mean daily affect computes the
mean of each of the 7 questions (e.g. sad, cheerful, tired) asked in a
Likert scale from 0 to 7. Conversely, the daily affect variability computes
the standard deviation of each of the 7 questions.
NOTE: This function aggregates data by day.
Parameters
----------
questions: DataFrame with subject data (or database for backwards compatibility)
subject: string, optional (backwards compatibility only, in the future do filtering before).
Returns
-------
DLA_mean: mean of the daily affect
DLA_std: standard deviation of the daily affect
"""
# TODO: The daily summary (mean/std) seems useful, can we generalize?
# Backwards compatibility if a database was passed
if isinstance(questions, niimpy.database.Data1):
questions = questions.raw(table='AwareHyksConverter', user=subject)
# Maintain backwards compatibility in the case subject was passed and
# questions was *not* a dataframe.
elif isinstance(subject, string):
questions = questions[questions['user'] == subject]
questions=questions[(questions['id']=='olo_1_1') | (questions['id']=='olo_1_2') | (questions['id']=='olo_1_3') | (questions['id']=='olo_1_4') | (questions['id']=='olo_1_5') | (questions['id']=='olo_1_6') | (questions['id']=='olo_1_7') | (questions['id']=='olo_1_8')]
questions['answer']=pd.to_numeric(questions['answer'])
questions = questions.drop(['device', 'time', 'user'], axis=1)
if (pd.Timestamp.tzname(questions.index[0]) != 'EET'):
if pd.Timestamp.tzname(questions.index[0]) != 'EEST':
questions.index = pd.to_datetime(questions.index).tz_localize('Europe/Helsinki')
questions=questions.drop_duplicates(subset=['datetime','id'],keep='first')
questions=questions.pivot_table(index='datetime', columns='id', values='answer')
questions=questions.rename(columns={'olo_1_1': 'cheerful', 'olo_1_2': 'tired','olo_1_3': 'content', 'olo_1_4': 'nervous','olo_1_5': 'tranquil', 'olo_1_6': 'sad', 'olo_1_7': 'excited', 'olo_1_8': 'active'})
questions = questions.reset_index()
DLA = questions.copy()
questions['date_minus_time'] = questions['datetime'].apply( lambda questions : datetime.datetime(year=questions.year, month=questions.month, day=questions.day))
questions.set_index(questions["date_minus_time"],inplace=True)
DLA_std = questions.resample('D').std()#), how='std')
DLA_std=DLA_std.rename(columns={'date_minus_time': 'datetime'})
DLA_std.index = pd.to_datetime(DLA_std.index).tz_localize('Europe/Helsinki')
DLA_mean = questions.resample('D').mean()
DLA_mean=DLA_mean.rename(columns={'date_minus_time': 'datetime'})
DLA_mean.index = pd.to_datetime(DLA_mean.index).tz_localize('Europe/Helsinki')
return DLA_std, DLA_mean
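# The core of daily_affect_variability is a per-day mean and standard deviation
# of the Likert answers. Below is a self-contained sketch of that aggregation
# on toy data; the column names are illustrative.
def _daily_mean_std_example():
    idx = pd.to_datetime(['2020-01-01 09:00', '2020-01-01 18:00', '2020-01-02 10:00'])
    answers = pd.DataFrame({'cheerful': [3, 5, 2], 'sad': [1, 2, 6]}, index=idx)
    daily_mean = answers.resample('D').mean()
    daily_std = answers.resample('D').std()
    return daily_std, daily_mean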
#Ambient Noise
def ambient_noise(noise, subject, begin=None, end=None):
""" Returns a Dataframe with 5 possible computations regarding the noise
ambient plug-in: average decibels, average frequency, number of times when
there was noise in the day, number of times when there was a loud noise in
the day (>70dB), and number of times when the noise matched the speech noise
level and frequency (65Hz < freq < 255Hz and dB>50 )
NOTE: This function aggregates data by day.
Parameters
----------
noise: DataFrame with subject data (or database for backwards compatibility)
subject: string, optional (backwards compatibility only, in the future do filtering before).
begin: datetime, optional
end: datetime, optional
Returns
-------
avg_noise: Dataframe
"""
# TODO: move to niimpy.noise
# TODO: add arguments for frequency/decibels/silence columns
# Backwards compatibility if a database was passed
if isinstance(noise, niimpy.database.Data1):
noise = noise.raw(table='AwareAmbientNoise', user=subject)
# Maintain backwards compatibility in the case subject was passed and
# questions was *not* a dataframe.
elif isinstance(subject, string):
noise = noise[noise['user'] == subject]
# Shrink the dataframe down to only what we need
noise = noise[['double_frequency', 'is_silent', 'double_decibels', 'datetime']]
# Extract the data range (In the future should be done before this function
# is called.)
if begin is not None or end is not None:
noise = date_range(noise, begin, end)
noise['is_silent']=pd.to_numeric(noise['is_silent'])
loud = noise[noise.double_decibels>70] #check if environment was noisy
speech = noise[noise['double_frequency'].between(65, 255)]
speech = speech[speech.is_silent==0] #check if there was a conversation
silent=noise[noise.is_silent==0] #these are the moments when there is noise in the environment
avg_noise=noise.resample('D', on='datetime').mean() #average noise
avg_noise=avg_noise.drop(['is_silent'],axis=1)
if not silent.empty:
silent=silent.resample('D', on='datetime').count()
silent = silent.drop(['double_decibels','double_frequency','datetime'],axis=1)
silent=silent.rename(columns={'is_silent':'noise'})
avg_noise = avg_noise.merge(silent, how='outer', left_index=True, right_index=True)
if not loud.empty:
loud=loud.resample('D', on='datetime').count()
loud = loud.drop(['double_decibels','double_frequency','datetime'],axis=1)
loud=loud.rename(columns={'is_silent':'loud'})
avg_noise = avg_noise.merge(loud, how='outer', left_index=True, right_index=True)
if not speech.empty:
speech=speech.resample('D', on='datetime').count()
speech = speech.drop(['double_decibels','double_frequency','datetime'],axis=1)
speech=speech.rename(columns={'is_silent':'speech'})
avg_noise = avg_noise.merge(speech, how='outer', left_index=True, right_index=True)
return avg_noise
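# ambient_noise classifies each sample with three rules: above 70 dB counts as
# loud, 65-255 Hz with the silence flag cleared counts as speech-like, and any
# sample with the silence flag cleared counts as noise. Below is a small sketch
# of those same thresholds on toy data; the column names follow the Aware table
# used above.
def _noise_thresholds_example():
    noise = pd.DataFrame({
        'double_decibels': [30, 75, 55],
        'double_frequency': [1000, 120, 200],
        'is_silent': [1, 0, 0],
    })
    loud = noise[noise.double_decibels > 70]
    speech = noise[noise['double_frequency'].between(65, 255) & (noise.is_silent == 0)]
    noisy = noise[noise.is_silent == 0]
    return loud, speech, noisy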
#Application
def shutdown_info(database,subject,begin=None,end=None):
""" Returns a DataFrame with the timestamps of when the phone has shutdown.
NOTE: This is a helper function created originally to preprocess the application
info data
Parameters
----------
database: Niimpy database
user: string
begin: datetime, optional
end: datetime, optional
Returns
-------
shutdown: Dataframe
"""
bat = niimpy.read._get_dataframe(database, table='AwareBattery', user=subject)
bat = niimpy.filter_dataframe(bat, begin=begin, end=end)
# TODO: move to niimpy.battery
if 'datetime' in bat.columns:
bat = bat[['battery_status', 'datetime']]
else:
bat = bat[['battery_status']]
bat=bat.loc[begin:end]
bat['battery_status']=pd.to_numeric(bat['battery_status'])
shutdown = bat[bat['battery_status'].between(-3, 0, inclusive=False)]
return shutdown
def get_seconds(time_delta):
""" Converts the timedelta to seconds
NOTE: This is a helper function
Parameters
----------
time_delta: Timedelta
"""
return time_delta.dt.seconds
def app_duration(database,subject,begin=None,end=None,app_list_path=None):
""" Returns two DataFrames contanining the duration and number of events per
group of apps, e.g. number of times a person used communication apps like
WhatsApp, Telegram, Messenger, sms, etc. and for how long these apps were
used in a day (in seconds).
Parameters
----------
database: Niimpy database
user: string
begin: datetime, optional
end: datetime, optional
app_list_path: path to the csv file where the apps are classified into groups
Returns
-------
duration: Dataframe
count: Dataframe
"""
assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
assert isinstance(subject, str),"user not given in string format"
app = database.raw(table='AwareApplicationNotifications', user=subject)
if(begin!=None):
assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
else:
begin = app.iloc[0]['datetime']
if(end!= None):
assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
else:
end = app.iloc[len(app)-1]['datetime']
if(app_list_path==None):
app_list_path = '/m/cs/scratch/networks-nima/ana/niima-code/Datastreams/Phone/apps_group.csv'
app = app.drop(columns=['device','user','time','defaults','sound','vibrate'])
app=app.loc[begin:end]
#Classify the apps into groups
app_list=pd.read_csv(app_list_path)
app['group']=np.nan
for index, row in app.iterrows():
group=app_list.isin([row['application_name']]).any()
group=group.reset_index()
if (not any(group[0])):
app.loc[index,'group']=10
else:
app.loc[index,'group']=group.index[group[0] == True].tolist()[0]
#Insert missing data due to phone being shut down
shutdown = shutdown_info(database,subject,begin,end)
if not shutdown.empty:
shutdown['group']=11
shutdown['battery_status'] = 'off'
app = app.merge(shutdown, how='outer', left_index=True, right_index=True)
app['application_name'] = app['application_name'].replace(np.nan, 'off', regex=True)
app['group_x'] = app['group_x'].replace(np.nan, 11, regex=True)
app = app.drop(['battery_status','group_y'], axis=1)
dates=app.datetime_x.combine_first(app.datetime_y)
app['datetime']=dates
app = app.drop(['datetime_x','datetime_y'], axis=1)
app=app.rename(columns={'group_x':'group'})
#Insert missing data due to the screen being off
screen=screen_off(database,subject,begin,end)
if not screen.empty:
app = app.merge(screen, how='outer', left_index=True, right_index=True)
app['application_name'] = app['application_name'].replace(np.nan, 'off', regex=True)
app['group'] = app['group'].replace(np.nan, 11, regex=True)
del app['screen_status']
dates=app.datetime_x.combine_first(app.datetime_y)
app['datetime']=dates
app = app.drop(['datetime_x','datetime_y'], axis=1)
#Insert missing data caught by sms but unknown cause
sms = database.raw(table='AwareMessages', user=subject)
sms = sms.drop(columns=['device','user','time','trace'])
sms = sms.drop_duplicates(subset=['datetime','message_type'],keep='first')
sms = sms[sms.message_type=='outgoing']
sms = sms.loc[begin:end]
if not sms.empty:
app = app.merge(sms, how='outer', left_index=True, right_index=True)
app['application_name'] = app['application_name'].replace(np.nan, 'sms', regex=True)
app['group'] = app['group'].replace(np.nan, 2, regex=True)
del app['message_type']
dates=app.datetime_x.combine_first(app.datetime_y)
app['datetime']=dates
app = app.drop(['datetime_x','datetime_y'], axis=1)
#Insert missing data caught by calls but unknown cause
call = database.raw(table='AwareCalls', user=subject)
if not call.empty:
call = call.drop(columns=['device','user','time','trace'])
call = call.drop_duplicates(subset=['datetime','call_type'],keep='first')
call['call_duration'] = pd.to_timedelta(call.call_duration.astype(int), unit='s')
call = call.loc[begin:end]
dummy = call.datetime+call.call_duration
dummy = pd.Series.to_frame(dummy)
dummy['finish'] = dummy[0]
dummy = dummy.set_index(0)
call = call.merge(dummy, how='outer', left_index=True, right_index=True)
dates=call.datetime.combine_first(call.finish)
call['datetime']=dates
call = call.drop(columns=['call_duration','finish'])
app = app.merge(call, how='outer', left_index=True, right_index=True)
app.group = app.group.fillna(2)
app.application_name = app.application_name.fillna('call')
dates=app.datetime_x.combine_first(app.datetime_y)
app['datetime']=dates
app = app.drop(columns=['datetime_x','datetime_y','call_type'])
#Calculate the app duration per group
app['duration']=np.nan
app['duration']=app['datetime'].diff()
app['duration'] = app['duration'].shift(-1)
app['datetime'] = app['datetime'].dt.floor('d')
duration=pd.pivot_table(app,values='duration',index='datetime', columns='group', aggfunc=np.sum)
count=pd.pivot_table(app,values='duration',index='datetime', columns='group', aggfunc='count')
duration.columns = duration.columns.map({0.0: 'sports', 1.0: 'games', 2.0: 'communication', 3.0: 'social_media', 4.0: 'news', 5.0: 'travel', 6.0: 'shop', 7.0: 'entretainment', 8.0: 'work_study', 9.0: 'transportation', 10.0: 'other', 11.0: 'off'})
count.columns = count.columns.map({0.0: 'sports', 1.0: 'games', 2.0: 'communication', 3.0: 'social_media', 4.0: 'news', 5.0: 'travel', 6.0: 'shop', 7.0: 'entretainment', 8.0: 'work_study', 9.0: 'transportation', 10.0: 'other', 11.0: 'off'})
duration = duration.apply(get_seconds,axis=1)
return duration, count
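# app_duration derives durations by differencing consecutive event timestamps
# (each event is assumed to last until the next one starts) and then pivots
# them per day and per app group. Below is a compact sketch of that derivation
# on toy data; the group labels are illustrative and the timedeltas are
# converted to seconds, mirroring the get_seconds step above.
def _event_duration_pivot_example():
    events = pd.DataFrame({
        'datetime': pd.to_datetime(['2020-01-01 10:00', '2020-01-01 10:05', '2020-01-01 10:20']),
        'group': ['communication', 'social_media', 'communication'],
    })
    events['duration'] = events['datetime'].diff().shift(-1).dt.total_seconds()
    events['day'] = events['datetime'].dt.floor('d')
    return pd.pivot_table(events, values='duration', index='day', columns='group', aggfunc=np.sum)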
#Communication
def call_info(database,subject,begin=None,end=None):
""" Returns a DataFrame contanining the duration and number of events per
type of calls (outgoing, incoming, missed). The Dataframe summarizes the
duration of the incoming/outgoing calls in seconds, number of those events,
and how long (in seconds) the person has spoken to the top 5 contacts (most
frequent)
Parameters
----------
database: Niimpy database
user: string
begin: datetime, optional
end: datetime, optional
Returns
-------
duration: Dataframe
"""
assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
assert isinstance(subject, str),"user not given in string format"
call = database.raw(table='AwareCalls', user=subject)
if(begin!=None):
assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
else:
begin = call.iloc[0]['datetime']
if(end!= None):
assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
else:
end = call.iloc[len(call)-1]['datetime']
call = call.drop(columns=['device','user','time'])
call = call.loc[begin:end]
call['datetime'] = call['datetime'].dt.floor('d')
call['call_duration']=pd.to_numeric(call['call_duration'])
duration = call.groupby(['datetime']).sum()
missed_calls = call.loc[(call['call_type'] == 'missed')].groupby(['datetime']).count()
outgoing_calls = call.loc[(call['call_type'] == 'outgoing')].groupby(['datetime']).count()
incoming_calls = call.loc[(call['call_type'] == 'incoming')].groupby(['datetime']).count()
duration['call_missed'] = missed_calls['call_type']
duration['call_outgoing'] = outgoing_calls['call_type']
duration['call_incoming'] = incoming_calls['call_type']
duration2 = call.pivot_table(index='datetime', columns='call_type', values='call_duration',aggfunc='sum')
if ('incoming' in duration2.columns):
duration2 = duration2.rename(columns={'incoming': 'call_incoming_duration'})
if ('outgoing' in duration2.columns):
duration2 = duration2.rename(columns={'outgoing': 'call_outgoing_duration'})
if ('missed' in duration2.columns):
duration2 = duration2.drop(columns=['missed'])
duration = duration.merge(duration2, how='outer', left_index=True, right_index=True)
duration = duration.fillna(0)
if ('missed_y' in duration.columns):
duration = duration.drop(columns=['missed_y'])
#duration.columns = ['total_call_duration', 'call_missed', 'call_outgoing', 'call_incoming', 'call_incoming_duration', 'call_outgoing_duration']
#Now compute per-contact durations for the most frequent contacts
trace = call.groupby(['trace']).count()
trace = trace.sort_values(by=['call_type'], ascending=False)
top5 = trace.index.values.tolist()[:5]
call['frequent']=0
call = call.reset_index()
call = call.rename(columns={'index': 'date'})
for index, row in call.iterrows():
if (call.loc[index,'trace'] in top5):
call.loc[index,'frequent']=1
call['frequent'] = call['frequent'].astype(str)
duration2 = call.pivot_table(index='date', columns=['call_type','frequent'], values='call_duration',aggfunc='sum')
duration2.columns = ['_'.join(col) for col in duration2.columns]
duration2 = duration2.reset_index()
#duration2.columns = ['datetime','incoming_0','incoming_1','missed_0','missed_1','outgoing_0','outgoing_1']
duration2['datetime'] = duration2['date'].dt.floor('d')
duration2 = duration2.groupby(['datetime']).sum()
if ('incoming_0' in duration2.columns):
duration2 = duration2.drop(columns=['incoming_0'])
if ('missed_0' in duration2.columns):
duration2 = duration2.drop(columns=['missed_0'])
if ('missed_1' in duration2.columns):
duration2 = duration2.drop(columns=['missed_1'])
if ('outgoing_0' in duration2.columns):
duration2 = duration2.drop(columns=['outgoing_0'])
duration = duration.merge(duration2, how='outer', left_index=True, right_index=True)
duration = duration.rename(columns={'incoming_1': 'incoming_duration_top5', 'outgoing_1': 'outgoing_duration_top5'})
return duration
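# call_info flags the five most frequent contacts by counting rows per 'trace'
# (the hashed contact id) and then marks every call to one of those contacts.
# Below is a minimal sketch of that flagging step on toy data; the helper name
# and values are illustrative.
def _top_contacts_flag_example(top_n=5):
    calls = pd.DataFrame({'trace': ['a', 'a', 'b', 'c', 'a', 'b'],
                          'call_duration': [60, 30, 10, 5, 20, 40]})
    counts = calls.groupby('trace').size().sort_values(ascending=False)
    top = counts.index[:top_n].tolist()
    calls['frequent'] = calls['trace'].isin(top).astype(int)
    return calls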
def sms_info(database,subject,begin=None,end=None):
""" Returns a DataFrame contanining the number of events per type of messages
SMS (outgoing, incoming). The Dataframe summarizes the number of the
incoming/outgoing sms and how many of those correspond to the top 5 contacts
(most frequent with whom the subject exchanges texts)
Parameters
----------
database: Niimpy database
user: string
begin: datetime, optional
end: datetime, optional
Returns
-------
sms_stats: Dataframe
"""
assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
assert isinstance(subject, str),"user not given in string format"
sms = database.raw(table='AwareMessages', user=subject)
if(begin!=None):
assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
else:
begin = sms.iloc[0]['datetime']
if(end!= None):
assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
else:
end = sms.iloc[len(sms)-1]['datetime']
sms = sms.drop(columns=['device','user','time'])
sms['datetime'] = sms['datetime'].dt.floor('d')
sms = sms.loc[begin:end]
if (len(sms)>0):
sms_stats = sms.copy()
sms_stats['dummy'] = 1
sms_stats = sms_stats.pivot_table(index='datetime', columns='message_type', values='dummy',aggfunc='sum')
#Now compute per-contact counts to find the most frequent contacts
trace = sms.groupby(['trace']).count()
trace = trace.sort_values(by=['message_type'], ascending=False)
top5 = trace.index.values.tolist()[:5]
sms['frequent']=0
sms = sms.reset_index()
sms = sms.rename(columns={'index': 'date'})
for index, row in sms.iterrows():
if (sms.loc[index,'trace'] in top5):
sms.loc[index,'frequent']=1
sms['frequent'] = sms['frequent'].astype(str)
sms['dummy']=1
dummy = sms.pivot_table(index='date', columns=['message_type','frequent'], values='dummy',aggfunc='sum')
dummy.columns = ['_'.join(col) for col in dummy.columns]
dummy = dummy.reset_index()
dummy['datetime'] = dummy['date'].dt.floor('d')
dummy = dummy.groupby(['datetime']).sum()
if ('incoming_0' in dummy.columns):
dummy = dummy.drop(columns=['incoming_0'])
if ('outgoing_0' in dummy.columns):
dummy = dummy.drop(columns=['outgoing_0'])
sms_stats = sms_stats.merge(dummy, how='outer', left_index=True, right_index=True)
sms_stats = sms_stats.rename(columns={'incoming_1': 'sms_incoming_top5', 'outgoing_1': 'sms_outgoing_top5'})
sms_stats = sms_stats.fillna(0)
if ('incoming' in sms_stats.columns):
sms_stats = sms_stats.rename(columns={'incoming': 'sms_incoming'})
if ('outgoing' in sms_stats.columns):
sms_stats = sms_stats.rename(columns={'outgoing': 'sms_outgoing'})
return sms_stats
else:
sms_stats = pd.DataFrame()
return sms_stats
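#Usage sketch (illustrative; same `db`/subject assumptions as the call_info
#example above):
#    texts = sms_info(db, 'subject_01')
#    #columns include sms_incoming/sms_outgoing and, when present,
#    #sms_incoming_top5/sms_outgoing_top5 for the five most frequent contacts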
def sms_duration(database,subject,begin,end):
""" Returns a DataFrame contanining the duration per type of messages SMS
(outgoing, incoming). The Dataframe summarizes the calculated duration of
the incoming/outgoing sms and the lags (i.e. the period between receiving a
message and reading/writing a reply).
NOTE: The foundation of this function is still weak and needs discussion
Parameters
----------
database: Niimpy database
user: string
begin: datetime, optional
end: datetime, optional
Returns
-------
reading: Dataframe
writing: Dataframe
"""
assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
assert isinstance(subject, str),"user not given in string format"
app = database.raw(table='AwareApplicationNotifications', user=subject)
if(begin!=None):
assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
else:
begin = app.iloc[0]['datetime']
if(end!= None):
assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
else:
end = app.iloc[len(app)-1]['datetime']
app = app.drop(columns=['device','user','time','defaults','sound','vibrate'])
#Insert missing data due to phone being shut down
shutdown = shutdown_info(database,subject,begin,end)
shutdown=shutdown.rename(columns={'battery_status':'application_name'})
shutdown['application_name'] = 'off'
app = app.merge(shutdown, how='outer', left_index=True, right_index=True)
app['application_name_x'] = app['application_name_x'].replace(np.nan, 'off', regex=True)
del app['application_name_y']
dates=app.datetime_x.combine_first(app.datetime_y)
app['datetime']=dates
app = app.drop(['datetime_x','datetime_y'],axis=1)
app=app.rename(columns={'application_name_x':'application_name'})
#Insert missing data due to the screen being off
screen=screen_off(database,subject,begin,end)
app = app.merge(screen, how='outer', left_index=True, right_index=True)
app['application_name'] = app['application_name'].replace(np.nan, 'off', regex=True)
del app['screen_status']
dates=app.datetime_x.combine_first(app.datetime_y)
app['datetime']=dates
app = app.drop(['datetime_x','datetime_y'],axis=1)
app = app.drop_duplicates(subset=['datetime','application_name'],keep='first')
#Insert missing data caught by sms but unknown cause
sms = database.raw(table='AwareMessages', user=subject)
sms = sms.drop(columns=['device','user','time','trace'])
sms = sms.drop_duplicates(subset=['datetime','message_type'],keep='first')
#sms = sms[sms.message_type=='outgoing']
app = app.merge(sms, how='outer', left_index=True, right_index=True)
app.loc[app['application_name'].isnull(),'application_name'] = app['message_type']
del app['message_type']
dates=app.datetime_x.combine_first(app.datetime_y)
app['datetime']=dates
app = app.drop(['datetime_x','datetime_y'],axis=1)
#Calculate the app duration
app['duration']=np.nan
app['duration']=app['datetime'].diff()
app['duration'] = app['duration'].shift(-1)
#Select the text applications only
sms_app_name = ['Messages','Mensajería','MensajerÃa','Viestit','incoming','outgoing']
app = app[app['application_name'].isin(sms_app_name)]
sms_app_name = ['Messages','Mensajería','MensajerÃa','Viestit']
app['application_name'].loc[(app['application_name'].isin(sms_app_name))] = 'messages'
app['group']=np.nan
for i in range(len(app)-1):
if (app.application_name[i]=='incoming' and app.application_name[i+1]=='messages'):
app.group[i+1]=1
elif (app.application_name[i]=='messages' and app.application_name[i+1]=='outgoing'):
app.group[i+1]=2
else:
app.group[i+1]=0
app['lags'] = app['datetime'].diff()
app['datetime'] = app['datetime'].dt.floor('d')
app=app.loc[begin:end]
reading = app.loc[(app['group']==1)]
if (len(reading)>0):
reading = pd.pivot_table(reading,values=['duration','lags'],index='datetime', columns='application_name', aggfunc=np.sum)
reading.columns = ['reading_duration','reading_lags']
reading = reading.apply(get_seconds,axis=1)
writing = app.loc[(app['group']==2)]
if (len(writing)>0):
for i in range(len(writing)-1):
if (writing.lags[i].seconds<15 or writing.lags[i].seconds>120):
writing.lags[i] = datetime.datetime.strptime('00:05', "%M:%S") - datetime.datetime.strptime("00:00", "%M:%S")
del writing['duration']
writing = writing.rename(columns={'lags':'writing_duration'})
writing = pd.pivot_table(writing,values='writing_duration',index='datetime', columns='application_name', aggfunc=np.sum)
writing = writing.apply(get_seconds,axis=1)
return reading, writing
def communication_info(database,subject,begin=None,end=None):
""" Returns a DataFrame contanining all the information extracted from
communication's events (calls, sms, and communication apps like WhatsApp,
Telegram, Messenger, etc.). Regarding calls, this function contains the
duration of the incoming/outgoing calls in seconds, number of those events,
and how long (in seconds) the person has spoken to the top 5 contacts (most
frequent). Regarding the SMSs, this function contains the number of incoming
    /outgoing events, and the top 5 contacts (most frequent). Additionally, we
    also include the calculated duration of the incoming/outgoing SMSs and the
    lags (i.e. the period between receiving a message and reading/writing a
    reply). Regarding the apps, the duration of communication events is summarized.
This function also sums all the different durations (calls, SMSs, apps) and
provides the duration (in seconds) that a person spent communicating during
the day.
Parameters
----------
database: Niimpy database
user: string
begin: datetime, optional
end: datetime, optional
Returns
-------
call_summary: Dataframe
"""
assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
assert isinstance(subject, str),"user not given in string format"
app = database.raw(table='AwareApplicationNotifications', user=subject)
if(begin!=None):
assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
else:
begin = app.iloc[0]['datetime']
if(end!= None):
assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
else:
end = app.iloc[len(app)-1]['datetime']
duration_app, count_app = app_duration(database,subject,begin,end)
call_summary = call_info(database,subject,begin,end)
sms_summary = sms_info(database,subject,begin,end)
#reading, writing = sms_duration(database,subject,begin,end)
if (not sms_summary.empty):
call_summary = call_summary.merge(sms_summary, how='outer', left_index=True, right_index=True)
call_summary = call_summary.fillna(0)
    #Now let's see if there is any info from the apps worth bringing back
if ('communication' in duration_app.columns): #2 is the number for communication apps
comm_app = duration_app['communication']#.dt.seconds
comm_app = comm_app.fillna(0)
comm_app = comm_app.to_frame()
    if ('social_media' in duration_app.columns): #check for social media apps
social_app = duration_app['social_media']#.dt.seconds
social_app = social_app.fillna(0)
social_app = social_app.to_frame()
try:
social_app
try:
comm_app
comm_app = comm_app.merge(social_app, how='outer', left_index=True, right_index=True)
except NameError:
comm_app = social_app
except NameError:
pass
try:
comm_app
call_summary = call_summary.merge(comm_app, how='outer', left_index=True, right_index=True)
except NameError:
pass
call_summary = call_summary.fillna(0)
if ('communication' in call_summary.columns):
call_summary['total_comm_duration'] = call_summary['call_duration']+call_summary['communication']
if (('social_media' in call_summary.columns) and ('communication' in call_summary.columns)):
call_summary['total_comm_duration'] = call_summary['call_duration']+call_summary['social_media']+call_summary['communication']
if ('communication' in call_summary.columns):
call_summary=call_summary.rename(columns={'communication':'comm_apps_duration'})
if ('social_media' in call_summary.columns):
call_summary=call_summary.rename(columns={'social_media':'social_apps_duration'})
#Now let's see if there is any info from the sms duration
'''if (len(reading)>0):
reading['reading_duration'] = reading['reading_duration']#.dt.seconds
reading['reading_lags'] = reading['reading_lags']#.dt.seconds
call_summary = call_summary.merge(reading, how='outer', left_index=True, right_index=True)
call_summary = call_summary.fillna(0)
call_summary['total_comm_duration'] = call_summary['total_comm_duration']+call_summary['reading_duration']
if (len(writing)>0):
writing=writing.rename(columns={'outgoing':'writing_duration'})
writing['writing_duration'] = writing['writing_duration']#.dt.seconds
call_summary = call_summary.merge(writing, how='outer', left_index=True, right_index=True)
call_summary = call_summary.fillna(0)
call_summary['total_comm_duration'] = call_summary['total_comm_duration']+call_summary['writing_duration']'''
return call_summary
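#Usage sketch (illustrative; same assumptions as above). communication_info
#combines the call, SMS and app summaries computed by the functions above:
#    comm = communication_info(db, 'subject_01')
#    #'total_comm_duration' is only added when communication app data is present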
#Occurrences
def occurrence_call_sms(database,subject,begin=None,end=None):
""" Returns a DataFrame contanining the number of events that occur in a
day for call and sms. The events are binned in 12-minutes, i.e. if there is
an event at 11:05 and another one at 11:45, 2 occurences happened in one
hour. Then, the sum of these occurences yield the number per day.
Parameters
----------
database: Niimpy database
user: string
begin: datetime, optional
end: datetime, optional
Returns
-------
event: Dataframe
"""
assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
assert isinstance(subject, str),"user not given in string format"
call = database.raw(table='AwareCalls', user=subject)
if not call.empty:
if(begin!=None):
assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
else:
begin = call.iloc[0]['datetime']
if(end!= None):
assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
else:
end = call.iloc[len(call)-1]['datetime']
call = call.drop(columns=['device','user','time'])
call = call.loc[begin:end]
sms = database.raw(table='AwareMessages', user=subject)
if not sms.empty:
if(begin!=None):
assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
else:
            begin = sms.iloc[0]['datetime']
if(end!= None):
assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
else:
            end = sms.iloc[len(sms)-1]['datetime']
sms = sms.drop(columns=['device','user','time'])
sms = sms.loc[begin:end]
if not call.empty:
if not sms.empty:
call_sms = call.merge(sms, how='outer', left_index=True, right_index=True)
times = pd.DatetimeIndex.to_series(call_sms.index,keep_tz=True)
else:
times = pd.DatetimeIndex.to_series(call.index,keep_tz=True)
if not sms.empty:
if not call.empty:
call_sms = sms.merge(call, how='outer', left_index=True, right_index=True)
times = pd.DatetimeIndex.to_series(call_sms.index,keep_tz=True)
else:
times = pd.DatetimeIndex.to_series(sms.index,keep_tz=True)
event=niimpy.util.occurrence(times)
event = event.groupby(['day']).sum()
event = event.drop(columns=['hour'])
return event
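#Usage sketch (illustrative; same assumptions as above):
#    events_per_day = occurrence_call_sms(db, 'subject_01')
#    #one row per day, counting the occupied 12-minute bins as computed by
#    #niimpy.util.occurrence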
def occurrence_call_sms_apps(database,subject,begin=None,end=None,app_list_path=None,comm_app_list_path=None):
""" Returns a DataFrame contanining the number of events that occur in a
day for calls, sms, and communication apps. The events are binned in
12-minutes, i.e. if there is an event at 11:05 and another one at 11:45, 2
occurences happened in one hour. Then, the sum of these occurences yield the
number per day.
Parameters
----------
database: Niimpy database
user: string
begin: datetime, optional
end: datetime, optional
app_list_path: path to the file where the apps are classified into groups
comm_app_list_path:path to the file where the communication apps are listed
Returns
-------
event: Dataframe
"""
assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
assert isinstance(subject, str),"user not given in string format"
call = database.raw(table='AwareCalls', user=subject)
if not call.empty:
if(begin!=None):
assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
else:
begin = call.iloc[0]['datetime']
if(end!= None):
assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
else:
end = call.iloc[len(call)-1]['datetime']
call = call.drop(columns=['device','user','time'])
call = call.loc[begin:end]
sms = database.raw(table='AwareMessages', user=subject)
if not sms.empty:
if(begin!=None):
assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
else:
begin = sms.iloc[0]['datetime']
if(end!= None):
assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
else:
end = sms.iloc[len(sms)-1]['datetime']
sms = sms.drop(columns=['device','user','time'])
sms = sms.loc[begin:end]
app = database.raw(table='AwareApplicationNotifications', user=subject)
if (app_list_path==None):
app_list_path='/m/cs/scratch/networks-nima/ana/niima-code/Datastreams/Phone/apps_group.csv'
if (comm_app_list_path==None):
comm_app_list_path='/m/cs/scratch/networks-nima/ana/niima-code/Datastreams/Phone/comm_apps.csv'
if not app.empty:
if(begin!=None):
assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
else:
begin = app.iloc[0]['datetime']
if(end!= None):
assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
else:
end = app.iloc[len(app)-1]['datetime']
app = app.drop(columns=['device','user','time','defaults','sound','vibrate'])
app = app.loc[begin:end]
app_list=pd.read_csv(app_list_path)
app['group']=np.nan
for index, row in app.iterrows():
group=app_list.isin([row['application_name']]).any()
group=group.reset_index()
if (not any(group[0])):
app.loc[index,'group']=10
else:
app.loc[index,'group']=group.index[group[0] == True].tolist()[0]
app = app.loc[app['group'] == 2]
comm_app_list = pd.read_csv(comm_app_list_path)
comm_app_list = comm_app_list['Communication'].tolist()
app = app[~app.application_name.isin(comm_app_list)]
if not call.empty:
if not sms.empty:
event = call.merge(sms, how='outer', left_index=True, right_index=True)
else:
event = call
else:
if not sms.empty:
event = sms
else:
event= pd.DataFrame()
if not app.empty:
if not event.empty:
event = event.merge(app, how='outer', left_index=True, right_index=True)
else:
event=app
if not event.empty:
times = pd.DatetimeIndex.to_series(event.index,keep_tz=True)
event=niimpy.util.occurrence(times)
event = event.groupby(['day']).sum()
event = event.drop(columns=['hour'])
return event
def occurrence_call_sms_social(database,subject,begin=None,end=None,app_list_path=None,comm_app_list_path=None):
""" Returns a DataFrame contanining the number of events that occur in a
day for calls, sms, and social and communication apps. The events are binned
in 12-minutes, i.e. if there is an event at 11:05 and another one at 11:45,
2 occurences happened in one hour. Then, the sum of these occurences yield
the number per day.
Parameters
----------
database: Niimpy database
user: string
begin: datetime, optional
end: datetime, optional
app_list_path: path to the file where the apps are classified into groups
comm_app_list_path:path to the file where the communication apps are listed
Returns
-------
event: Dataframe
"""
assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
assert isinstance(subject, str),"user not given in string format"
call = database.raw(table='AwareCalls', user=subject)
if not call.empty:
if(begin!=None):
assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
else:
begin = call.iloc[0]['datetime']
if(end!= None):
assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
else:
end = call.iloc[len(call)-1]['datetime']
call = call.drop(columns=['device','user','time'])
call = call.loc[begin:end]
sms = database.raw(table='AwareMessages', user=subject)
if not sms.empty:
if(begin!=None):
assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
else:
begin = sms.iloc[0]['datetime']
if(end!= None):
assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
else:
end = sms.iloc[len(sms)-1]['datetime']
sms = sms.drop(columns=['device','user','time'])
sms = sms.loc[begin:end]
app = database.raw(table='AwareApplicationNotifications', user=subject)
if(app_list_path==None):
app_list_path='/m/cs/scratch/networks-nima/ana/niima-code/Datastreams/Phone/apps_group.csv'
if (comm_app_list_path==None):
comm_app_list_path='/m/cs/scratch/networks-nima/ana/niima-code/Datastreams/Phone/comm_apps.csv'
if not app.empty:
if(begin!=None):
assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
else:
begin = app.iloc[0]['datetime']
if(end!= None):
assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
else:
end = app.iloc[len(app)-1]['datetime']
app = app.drop(columns=['device','user','time','defaults','sound','vibrate'])
app = app.loc[begin:end]
app_list=pd.read_csv(app_list_path)
app['group']=np.nan
for index, row in app.iterrows():
group=app_list.isin([row['application_name']]).any()
group=group.reset_index()
if (not any(group[0])):
app.loc[index,'group']=10
else:
app.loc[index,'group']=group.index[group[0] == True].tolist()[0]
app = app.loc[(app['group'] == 2) | (app['group'] == 3)]
comm_app_list = pd.read_csv(comm_app_list_path)
comm_app_list = comm_app_list['Communication'].tolist()
app = app[~app.application_name.isin(comm_app_list)]
if not call.empty:
if not sms.empty:
event = call.merge(sms, how='outer', left_index=True, right_index=True)
else:
event = call
else:
if not sms.empty:
event = sms
else:
event= pd.DataFrame()
if not app.empty:
if not event.empty:
event = event.merge(app, how='outer', left_index=True, right_index=True)
else:
event=app
if not event.empty:
times = pd.DatetimeIndex.to_series(event.index,keep_tz=True)
event=niimpy.util.occurrence(times)
event = event.groupby(['day']).sum()
event = event.drop(columns=['hour'])
event.index = pd.to_datetime(event.index).tz_localize('Europe/Helsinki')
return event
#Location
def location_data(database,subject,begin=None,end=None):
""" Reads the readily, preprocessed location data from the right database.
The data already contains the aggregation of the GPS data (more info here:
https://github.com/digitraceslab/koota-server/blob/master/kdata/converter.py).
Parameters
----------
database: Niimpy database
user: string
begin: datetime, optional
end: datetime, optional
Returns
-------
location: Dataframe
"""
assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
assert isinstance(subject, str),"user not given in string format"
location = database.raw(table='AwareLocationDay', user=subject)
location = location.drop(['device','user'],axis=1)
location=location.drop_duplicates(subset=['day'],keep='first')
location['day']=pd.to_datetime(location['day'], format='%Y-%m-%d')
location=location.set_index('day')
location.index = pd.to_datetime(location.index).tz_localize('Europe/Helsinki')
if(begin!=None):
assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
else:
begin = location.index[0]
if(end!= None):
assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
else:
end = location.index[-1]
location=location.loc[begin:end]
return location
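#Usage sketch (illustrative; same `db`/subject assumptions as above). The
#location features are already aggregated per day upstream, so they can be
#used directly:
#    loc = location_data(db, 'subject_01')
#    print(loc.head())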
#Battery
def get_battery_data(battery, user=None, start = None, end = None):
""" Returns a DataFrame with battery data for a user.
Parameters
----------
battery: DataFrame with battery data
user: string, optional
start: datetime, optional
end: datetime, optional
"""
assert isinstance(battery, pd.core.frame.DataFrame), "data is not a pandas DataFrame"
if(user!= None):
assert isinstance(user, str),"user not given in string format"
battery_data = battery[(battery['user']==user)]
else:
battery_data = battery
if(start!=None):
start = pd.to_datetime(start)
else:
start = battery_data.iloc[0]['datetime']
if(end!= None):
end = pd.to_datetime(end)
else:
end = battery_data.iloc[len(battery_data)-1]['datetime']
battery_data = battery_data[(battery_data['datetime']>=start) & (battery_data['datetime']<=end)]
battery_data['battery_level'] = pd.to_numeric(battery_data['battery_level'])
#df['column'].fillna(pd.Timedelta(seconds=0))
#df.dropna()
battery_data = battery_data.drop_duplicates(subset=['datetime','user','device'],keep='last')
battery_data = battery_data.drop(['user','device','time','datetime'],axis=1)
return battery_data
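#Usage sketch (illustrative): unlike the functions above, the battery helpers
#work on a plain pandas DataFrame. Assuming `battery` was read beforehand and
#has 'user', 'device', 'time', 'datetime' and 'battery_level' columns:
#    levels = get_battery_data(battery, user='subject_01',
#                              start='2019-01-01', end='2019-02-01')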
def battery_occurrences(battery_data, user=None, start=None, end=None, battery_status = False, days= 0, hours=6, minutes=0, seconds=0,milli=0, micro=0, nano=0):
""" Returns a dataframe showing the amount of battery data points found between a given interval and steps.
The default interval is 6 hours.
Parameters
----------
battery_data: Dataframe
user: string, optional
start: datetime, optional
end: datetime, optional
battery_status: boolean, optional
"""
assert isinstance(battery_data, pd.core.frame.DataFrame), "data is not a pandas DataFrame"
assert isinstance(user, (type(None), str)),"user not given in string format"
    if(user!= None):
        occurrence_data = battery_data[(battery_data['user']==user)]
    else:
        occurrence_data = battery_data
occurrence_data = occurrence_data.drop_duplicates(subset=['datetime','device'],keep='last')
if(start==None):
start = occurrence_data.iloc[0]['datetime']
start = pd.to_datetime(start)
td = pd.Timedelta(days=days,hours=hours,minutes=minutes,seconds=seconds,milliseconds=milli,microseconds=micro,nanoseconds=nano)
delta = start+td
if(end==None):
end = occurrence_data.iloc[len(occurrence_data)-1]['datetime']
end = pd.to_datetime(end)
idx_range = np.floor((end-start)/td).astype(int)
idx = pd.date_range(start, periods = idx_range, freq=td)
if ((battery_status == True) & ('battery_status' in occurrence_data.columns)):
occurrences = pd.DataFrame(np.nan, index = idx,columns=list(['start','end','occurrences','battery_status']))
for i in range(idx_range):
idx_dat = occurrence_data.loc[(occurrence_data['datetime']>start) & (occurrence_data['datetime']<=delta)]
battery_status = occurrence_data.loc[(occurrence_data['datetime']>start) & (occurrence_data['datetime']<=delta) & ((occurrence_data['battery_status']=='-1')|(occurrence_data['battery_status']=='-2')|(occurrence_data['battery_status']=='-3'))]
occurrences.iloc[i] = [start, delta,len(idx_dat), len(battery_status)]
start = start + td
delta = start + td
else:
occurrences = pd.DataFrame(np.nan, index = idx,columns=list(['start','end','occurrences']))
for i in range(idx_range):
idx_dat = occurrence_data.loc[(occurrence_data['datetime']>start) & (occurrence_data['datetime']<=delta)]
occurrences.iloc[i] = [start, delta,len(idx_dat)]
start = start + td
delta = start + td
return occurrences
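#Usage sketch (illustrative; `battery` as assumed above): counting battery
#samples in the default 6-hour windows, or in custom 1-hour windows:
#    occ = battery_occurrences(battery, user='subject_01')
#    occ_hourly = battery_occurrences(battery, user='subject_01', hours=1)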
def battery_gaps(data, min_duration_between = None):
'''Returns a DataFrame including all battery data and showing the delta between
consecutive battery timestamps. The minimum size of the considered deltas can be decided
with the min_duration_between parameter.
Parameters
----------
data: dataframe with date index
min_duration_between: Timedelta, for example, pd.Timedelta(hours=6)
'''
assert isinstance(data, pd.core.frame.DataFrame), "data is not a pandas DataFrame"
assert isinstance(data.index, pd.core.indexes.datetimes.DatetimeIndex), "data index is not DatetimeIndex"
gaps = data.copy()
gaps['tvalue'] = gaps.index
gaps['delta'] = (gaps['tvalue']-gaps['tvalue'].shift()).fillna(pd.Timedelta(seconds=0))
if(min_duration_between!=None):
gaps = gaps[gaps['delta']>=min_duration_between]
return gaps
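#Usage sketch (illustrative): battery_gaps needs a DatetimeIndex, so the
#timestamps are assumed to be in the index here (`battery_indexed` is a
#hypothetical name for such a frame):
#    gaps = battery_gaps(battery_indexed, min_duration_between=pd.Timedelta(hours=6))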
def battery_charge_discharge(data):
'''Returns a DataFrame including all battery data and showing the charge/discharge between each timestamp.
Parameters
----------
data: dataframe with date index
'''
assert isinstance(data, pd.core.frame.DataFrame), "data is not a pandas DataFrame"
assert isinstance(data.index, pd.core.indexes.datetimes.DatetimeIndex), "data index is not DatetimeIndex"
charge = data.copy()
charge['battery_level'] = pd.to_numeric(charge['battery_level'])
charge['tvalue'] = charge.index
charge['tdelta'] = (charge['tvalue']-charge['tvalue'].shift()).fillna(pd.Timedelta(seconds=0))
charge['bdelta'] = (charge['battery_level']-charge['battery_level'].shift()).fillna(0)
charge['charge/discharge']= ((charge['bdelta'])/((charge['tdelta']/ pd.Timedelta(seconds=1))))
return charge
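#Usage sketch (illustrative; same `battery_indexed` assumption as above). The
#'charge/discharge' column is the battery-level change per second, so negative
#values indicate discharging periods:
#    rates = battery_charge_discharge(battery_indexed)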
def find_real_gaps(battery_data,other_data,start=None, end=None, days= 0, hours=6, minutes=0, seconds=0,milli=0, micro=0, nano=0):
""" Returns a dataframe showing the gaps found both in the battery data and the other data.
The default interval is 6 hours.
Parameters
----------
battery_data: Dataframe
other_data: Dataframe
The data you want to compare with
start: datetime, optional
end: datetime, optional
"""
assert isinstance(battery_data, pd.core.frame.DataFrame), "battery_data is not a pandas DataFrame"
assert isinstance(other_data, pd.core.frame.DataFrame), "other_data is not a pandas DataFrame"
assert isinstance(battery_data.index, pd.core.indexes.datetimes.DatetimeIndex), "battery_data index is not DatetimeIndex"
assert isinstance(other_data.index, pd.core.indexes.datetimes.DatetimeIndex), "other_data index is not DatetimeIndex"
if(start!=None):
start = pd.to_datetime(start)
else:
start = battery_data.index[0] if (battery_data.index[0]<= other_data.index[0]) else other_data.index[0]
if(end!=None):
end = pd.to_datetime(end)
else:
end = battery_data.index[-1] if (battery_data.index[-1]>= other_data.index[-1]) else other_data.index[-1]
battery = battery_occurrences(battery_data, start=start,end=end,days=days,hours=hours,minutes=minutes,seconds=seconds,milli=milli,micro=micro,nano=nano)
battery.rename({'occurrences': 'battery_occurrences'}, axis=1, inplace = True)
other = battery_occurrences(other_data, start=start,days=days,hours=hours,minutes=minutes,seconds=seconds,milli=milli,micro=micro,nano=nano)
mask = (battery['battery_occurrences']==0)&(other['occurrences']==0)
gaps = pd.concat([battery[mask],other[mask]['occurrences']],axis=1, sort=False)
return gaps
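#Usage sketch (illustrative): comparing the battery stream against another
#sensor stream (here a hypothetical `screen_indexed` DataFrame, also with a
#DatetimeIndex) to find 6-hour windows where both are silent:
#    real_gaps = find_real_gaps(battery_indexed, screen_indexed, hours=6)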
def find_non_battery_gaps(battery_data,other_data,start=None, end=None, days= 0, hours=6, minutes=0, seconds=0,milli=0, micro=0, nano=0):
""" Returns a dataframe showing the gaps found only in the other data.
The default interval is 6 hours.
Parameters
----------
battery_data: Dataframe
other_data: Dataframe
The data you want to compare with
start: datetime, optional
end: datetime, optional
"""
assert isinstance(battery_data, pd.core.frame.DataFrame), "battery_data is not a pandas DataFrame"
assert isinstance(other_data, pd.core.frame.DataFrame), "other_data is not a pandas DataFrame"
assert isinstance(battery_data.index, pd.core.indexes.datetimes.DatetimeIndex), "battery_data index is not DatetimeIndex"
assert isinstance(other_data.index, pd.core.indexes.datetimes.DatetimeIndex), "other_data index is not DatetimeIndex"
if(start!=None):
start = pd.to_datetime(start)
else:
start = battery_data.index[0] if (battery_data.index[0]<= other_data.index[0]) else other_data.index[0]
if(end!=None):
end = pd.to_datetime(end)
else:
end = battery_data.index[-1] if (battery_data.index[-1]>= other_data.index[-1]) else other_data.index[-1]
battery = battery_occurrences(battery_data, start=start,end=end,days=days,hours=hours,minutes=minutes,seconds=seconds,milli=milli,micro=micro,nano=nano)
battery.rename({'occurrences': 'battery_occurrences'}, axis=1, inplace = True)
other = battery_occurrences(other_data, start=start,days=days,hours=hours,minutes=minutes,seconds=seconds,milli=milli,micro=micro,nano=nano)
mask = (battery['battery_occurrences']>10)&(other['occurrences']==0)
gaps = pd.concat([battery[mask],other[mask]['occurrences']],axis=1, sort=False)
return gaps
def find_battery_gaps(battery_data,other_data,start=None, end=None, days= 0, hours=6, minutes=0, seconds=0,milli=0, micro=0, nano=0):
""" Returns a dataframe showing the gaps found only in the battery data.
The default interval is 6 hours.
Parameters
----------
battery_data: Dataframe
other_data: Dataframe
The data you want to compare with
start: datetime, optional
end: datetime, optional
"""
assert isinstance(battery_data, pd.core.frame.DataFrame), "battery_data is not a pandas DataFrame"
assert isinstance(other_data, pd.core.frame.DataFrame), "other_data is not a pandas DataFrame"
assert isinstance(battery_data.index, pd.core.indexes.datetimes.DatetimeIndex), "battery_data index is not DatetimeIndex"
assert isinstance(other_data.index, pd.core.indexes.datetimes.DatetimeIndex), "other_data index is not DatetimeIndex"
if(start!=None):
start = pd.to_datetime(start)
else:
start = battery_data.index[0] if (battery_data.index[0]<= other_data.index[0]) else other_data.index[0]
if(end!=None):
end = pd.to_datetime(end)
else:
end = battery_data.index[-1] if (battery_data.index[-1]>= other_data.index[-1]) else other_data.index[-1]
battery = battery_occurrences(battery_data, start=start,end=end,days=days,hours=hours,minutes=minutes,seconds=seconds,milli=milli,micro=micro,nano=nano)
battery.rename({'occurrences': 'battery_occurrences'}, axis=1, inplace = True)
other = battery_occurrences(other_data, start=start,end=end,days=days,hours=hours,minutes=minutes,seconds=seconds,milli=milli,micro=micro,nano=nano)
mask = (battery['battery_occurrences']==0)&(other['occurrences']>0)
gaps = pd.concat([battery[mask],other[mask]['occurrences']],axis=1, sort=False)
return gaps
def missing_data_format(question,keep_values=False):
""" Returns a series of timestamps in the right format to allow missing data visualization
.
Parameters
----------
question: Dataframe
"""
question['date'] = question.index
question['date'] = question['date'].apply( lambda question : datetime.datetime(year=question.year, month=question.month, day=question.day))
question = question.drop_duplicates(subset=['date'],keep='first')
question = question.set_index(['date'])
if (keep_values == False):
question['answer'] = 1
question = question.T.squeeze()
return question
def screen_missing_data(database,subject,begin=None,end=None):
""" Returns a DataFrame contanining the percentage (range [0,1]) of loss data
calculated based on the transitions of screen status. In general, if
screen_status(t) == screen_status(t+1), we declared we have at least one
missing point.
Parameters
----------
database: Niimpy database
user: string
begin: datetime, optional
end: datetime, optional
Returns
-------
count: Dataframe
"""
assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
assert isinstance(subject, str),"usr not given in string format"
screen = database.raw(table='AwareScreen', user=subject)
if(begin!=None):
assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
else:
begin = screen.iloc[0]['datetime']
if(end!= None):
assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
else:
end = screen.iloc[len(screen)-1]['datetime']
screen=screen.drop_duplicates(subset=['datetime'],keep='first')
screen = screen.drop(['device','user','time'],axis=1)
screen=screen.loc[begin:end]
screen['screen_status']=pd.to_numeric(screen['screen_status'])
#Include the missing points that are due to shutting down the phone
shutdown = shutdown_info(database,subject,begin,end)
shutdown=shutdown.rename(columns={'battery_status':'screen_status'})
shutdown['screen_status']=0
screen = screen.merge(shutdown, how='outer', left_index=True, right_index=True)
screen['screen_status'] = screen.fillna(0)['screen_status_x'] + screen.fillna(0)['screen_status_y']
screen = screen.drop(['screen_status_x','screen_status_y'],axis=1)
dates=screen.datetime_x.combine_first(screen.datetime_y)
screen['datetime']=dates
screen = screen.drop(['datetime_x','datetime_y'],axis=1)
#Detect missing data points
screen['missing']=0
screen['next']=screen['screen_status'].shift(-1)
screen['dummy']=screen['screen_status']-screen['next']
screen['missing'] = np.where(screen['dummy']==0, 1, 0)
screen['missing'] = screen['missing'].shift(1)
screen = screen.drop(['dummy','next'], axis=1)
screen = screen.fillna(0)
screen['datetime'] = screen['datetime'].apply( lambda screen : datetime.datetime(year=screen.year, month=screen.month, day=screen.day))
screen = screen.drop(['screen_status'], axis=1)
count=pd.pivot_table(screen,values='missing',index='datetime', aggfunc='count')
count = screen.groupby(['datetime','missing'])['missing'].count().unstack(fill_value=0)
count['missing'] = count[1.0]/(count[0.0]+count[1.0])
count = count.drop([0.0,1.0], axis=1)
if (pd.Timestamp.tzname(count.index[0]) != 'EET'):
if pd.Timestamp.tzname(count.index[0]) != 'EEST':
count.index = pd.to_datetime(count.index).tz_localize('Europe/Helsinki')
return count
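#Usage sketch (illustrative; same `db`/subject assumptions as above). The
#returned 'missing' column is the daily fraction of screen transitions flagged
#as lost:
#    lost = screen_missing_data(db, 'subject_01')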
def missing_noise(database,subject,begin=None,end=None):
""" Returns a Dataframe with the estimated missing data from the ambient
noise sensor.
NOTE: This function aggregates data by day.
Parameters
----------
database: Niimpy database
user: string
begin: datetime, optional
end: datetime, optional
Returns
-------
avg_noise: Dataframe
"""
assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
assert isinstance(subject, str),"user not given in string format"
noise = database.raw(table='AwareAmbientNoise', user=subject)
if(begin!=None):
assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
else:
begin = noise.iloc[0]['datetime']
if(end!= None):
assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
else:
end = noise.iloc[len(noise)-1]['datetime']
noise = noise.drop(['device','user','time','double_silence_threshold','double_rms','blob_raw','is_silent','double_frequency'],axis=1)
noise = noise.loc[begin:end]
noise['duration'] = noise['datetime'].diff()
noise['duration'] = get_seconds(noise['duration'])
noise = noise.iloc[1:]
shutdown = shutdown_info(database,subject,begin,end)
shutdown=shutdown.rename(columns={'battery_status':'duration'})
noise = noise.merge(shutdown, how='outer', left_index=True, right_index=True)
noise['duration_x'] = noise.fillna(0)['duration_x'] + noise.fillna(0)['duration_y']
noise=noise.rename(columns={'duration_x':'duration'})
dates=noise.datetime_x.combine_first(noise.datetime_y)
noise['datetime']=dates
noise = noise.drop(['datetime_x','datetime_y'],axis=1)
noise=noise.drop(['double_decibels', 'duration_y'],axis=1)
noise['missing'] = np.where(noise['duration']>=1860, 1, 0) #detect the missing points
    noise['dummy'] = noise.missing.shift(-2) #assumes that every time the cellphone shuts down, two timestamps are generated with -1 in the battery_health
noise['dummy'] = noise.dummy*noise.duration
noise['dummy'] = noise.dummy.shift(2)
noise['missing'] = np.where(noise['missing']==1, np.round(noise['duration']/1800), 0) #calculate the number of datapoints missing
noise = noise.drop(noise[noise.dummy==-1].index) #delete those missing datapoints due to the phone being shut down
noise = noise.drop(['duration', 'datetime', 'dummy'],axis=1)
return noise
| 42.182306
| 270
| 0.668981
| 8,346
| 62,936
| 4.936856
| 0.072849
| 0.030677
| 0.014805
| 0.015678
| 0.664855
| 0.624275
| 0.597845
| 0.573866
| 0.544839
| 0.531199
| 0
| 0.010402
| 0.191909
| 62,936
| 1,491
| 271
| 42.210597
| 0.799717
| 0.204319
| 0
| 0.528455
| 0
| 0.005807
| 0.195521
| 0.013224
| 0
| 0
| 0
| 0.004024
| 0.091754
| 1
| 0.030197
| false
| 0.002323
| 0.012776
| 0
| 0.074332
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cc9e3bc94eb629f76a855b39c6d9abe102c7440e
| 15,570
|
py
|
Python
|
analog-voltage-control/analogVoltageController.py
|
jsbangsund/measurement-automation-tools
|
d2c2fd58b3a6884945081cb9a9ad87366da4a10e
|
[
"MIT"
] | 2
|
2018-09-27T09:47:47.000Z
|
2022-03-24T09:53:04.000Z
|
analog-voltage-control/analogVoltageController.py
|
jsbangsund/measurement-automation-tools
|
d2c2fd58b3a6884945081cb9a9ad87366da4a10e
|
[
"MIT"
] | null | null | null |
analog-voltage-control/analogVoltageController.py
|
jsbangsund/measurement-automation-tools
|
d2c2fd58b3a6884945081cb9a9ad87366da4a10e
|
[
"MIT"
] | null | null | null |
# imports
import visa
import numpy as np
import os
import csv
import time
import datetime
import tkinter as tk
from tkinter.filedialog import askopenfilename, askdirectory
from tkinter.ttk import Frame, Button, Style,Treeview, Scrollbar, Checkbutton
from functools import partial
import serial
# This app uses an arduino to output two analog voltage channels from 0 to ~3.3V
# These output voltages are used to control flow rate on mass flow controllers
class VoltageController(Frame):
def __init__(self,parent):
#### USER DEFINED
self.arduinoAddress = 'COM5'
self.window_title = "Mass Flow Control"
self.channels = ["A","B"]
self.V_calibration = {i:None for i in self.channels} # initialize correction factor
self.show_keithley = True
self.smu_address_default = ""
self.smu_address = ""
self.complianceV=5
self.max_V_out = 3.2467 # Measured maximum output voltage
self.upper_reference_V = 4.097 # Measured reference output from LM4040
#### End user defined parameters
self.arduino = serial.Serial(self.arduinoAddress,9600)
Frame.__init__(self, parent)
self.parent = parent
self.configure_gui()
def configure_gui(self):
# Master Window
self.parent.title(self.window_title)
self.style = Style()
self.style.theme_use("default")
# Test Mode Frame
frame_setpoints=Frame(self)
frame_setpoints.pack()
self.s_setpoints = {}
self.b_set_setpoints = {}
self.l_actual_flow = {}
self.l_integer = {}
tk.Label(frame_setpoints,text="Setpoint").grid(row=0,column=1,sticky=tk.W,padx=1, pady=1)
tk.Label(frame_setpoints,text="Actual").grid(row=0,column=2,sticky=tk.W,padx=1, pady=1)
tk.Label(frame_setpoints,text="Integer").grid(row=0,column=3,sticky=tk.W,padx=1, pady=1)
for i,ch in enumerate(self.channels):
self.s_setpoints[ch] = tk.StringVar()
tk.Label(frame_setpoints,text="Channel " + str(ch) + " (SCCM)"
).grid(row=i+1,column=0,sticky=tk.W,padx=1, pady=1)#.pack(side=tk.LEFT)
tk.Entry(frame_setpoints,textvariable=self.s_setpoints[ch],width=10
).grid(row=i+1,column=1,sticky=tk.W)
self.l_actual_flow[ch] = tk.Label(frame_setpoints,text="00.00")
self.l_actual_flow[ch].grid(row=i+1,column=2,sticky=tk.W,padx=1, pady=1)
self.l_integer[ch] = tk.Label(frame_setpoints,text="0000")
self.l_integer[ch].grid(row=i+1,column=3,sticky=tk.W,padx=1, pady=1)
self.b_set_setpoints[ch] = Button(
frame_setpoints,text="Set",
command=partial(self.set_setpoint,ch))
self.b_set_setpoints[ch].grid(row=i+1,column=4,sticky=tk.W,padx=1, pady=1)
# Source control buttons
frame_buttons=Frame(self)
frame_buttons.pack()
# Turn on all sources
tk.Button(frame_buttons,text="Turn Sources On", bg="lime",
command=self.turn_on_sources).grid(row=0,column=0,sticky=tk.W,padx=1, pady=1)
# Set all sources to zero
tk.Button(frame_buttons,text="Set Sources to 0", bg="red",
command=self.turn_off_sources).grid(row=0,column=1,sticky=tk.W,padx=1, pady=1)
# Functions for measuring with Keithley
if self.show_keithley:
self.rm = visa.ResourceManager()
self.resources = self.rm.list_resources()
self.configure_keithley_widgets()
# Style Configuration
Style().configure("defaultState.TButton", foreground='black', background='light grey')
Style().configure("onState.TButton", foreground='black', background='red')
Style().map("onState.TButton",
background=[('disabled', 'grey'),
('pressed', 'red3'),
('active', 'red2')])
self.pack(fill=tk.BOTH, expand=1)
def configure_keithley_widgets(self):
frame_keithley = Frame(self)
frame_keithley.pack()
self.l_smu_address = tk.Label(frame_keithley, text='Pick SMU address:')
self.l_smu_address.grid(row=0, column=0, sticky=tk.W)
self.s_smu_address = tk.StringVar()
self.s_smu_address.set(self.smu_address)
self.o_smu_address = tk.OptionMenu(
frame_keithley, self.s_smu_address,*self.resources,
command=self.connect_to_smu)
self.o_smu_address.grid(row=0,column=1, sticky=tk.W)
self.configure_resource_optionmenu()
##### Connect buttons
#self.c_connect = Frame(self.c_top)
#self.c_connect.pack(fill=X, expand=True)
self.b_connect = tk.Button(frame_keithley, command=self.connect_to_smu)
self.b_connect.configure(text="Connect", background= "yellow")
self.b_connect.grid(row=0, column=2, sticky=tk.E, padx=5)
self.b_calibrate_channelA = tk.Button(frame_keithley, command=partial(self.calibrate_channels,"A"))
self.b_calibrate_channelA.configure(text="Calibrate A", background= "grey")
self.b_calibrate_channelA.grid(row=0, column=3, sticky=tk.E, padx=5)
self.b_calibrate_channelB = tk.Button(frame_keithley, command=partial(self.calibrate_channels,"B"))
self.b_calibrate_channelB.configure(text="Calibrate B", background= "grey")
self.b_calibrate_channelB.grid(row=0, column=4, sticky=tk.E, padx=5)
tk.Label(frame_keithley, text='Voltage Reading:').grid(row=1,column=0,padx=5,sticky=tk.E)
self.l_voltage = tk.Label(frame_keithley, text='000.0 mV')
self.l_voltage.grid(row=1,column=1,padx=5,sticky=tk.W)
def configure_resource_optionmenu(self):
# Only display keithley or GPIB addresses
# Keithley addresses have form USB0::0x05E6::0x26##::7 digit SN::INSTR
self.display_resources = []
for resource in self.resources:
if ('USB0::0x05E6::0x26' in resource) or ('GPIB0' in resource):
# Add the resource address and vendor info to the option menu
hardware_info = self.get_hardware_label(resource)
if not hardware_info=='Unknown':
self.display_resources.append(resource + '--' + hardware_info)
# https://stackoverflow.com/questions/28412496/updating-optionmenu-from-list
menu = self.o_smu_address["menu"]
menu.delete(0, "end")
for string in self.display_resources:
menu.add_command(label=string,
command=lambda value=string.split('--')[0]: self.s_smu_address.set(value))
# reset address to default
self.s_smu_address.set(self.smu_address_default)
def get_hardware_label(self,resource):
# Check for known hardware types and make a label
try:
r = self.rm.open_resource(resource)
hardware_info = r.query("*IDN?")
if 'OK\r\n' in hardware_info:
# The "OK\r\n" message is sent as a handshake from Obis lasers
# Turn hand-shaking off and then ask for the info again
r.write('system:communicate:handshaking OFF')
hardware_info = r.query("*IDN?")
# Check for known instruments
if 'Keithley' in hardware_info:
model_number = hardware_info.split(',')[1].split(' Model ')[1]
serial_number = hardware_info.split(',')[2][1:]
label = 'Keithley ' + model_number + ' SN: ' + serial_number
elif 'Stanford' in hardware_info:
label = 'Lock-in Amplifier ' + hardware_info.split(',')[1]
elif 'HEWLETT' in hardware_info:
label = 'Parameter Analyzer ' + hardware_info.split(',')[1]
elif 'Coherent' in hardware_info:
wavelength = r.query('system:information:wavelength?')
if 'OK' in wavelength:
r.write('system:communicate:handshaking OFF')
wavelength = r.query('system:information:wavelength?')
label = 'Coherent ' + wavelength.strip() + 'nm laser'
else:
label = 'Unknown: ' + hardware_info.strip()
r.close()
except Exception as e:
#print(e)
label='Unknown'
return label
def connect_to_smu(self):
self.smu_address = self.s_smu_address.get()
self.keithley = self.rm.open_resource(self.smu_address)
self.initializeKeithley(self.keithley)
print('keithley connected')
self.b_connect.configure(background='green2')
def calibrate_channels(self,ch):
print("calibrating channel " + ch)
setpoints = np.arange(0,4096,1)
voltages = np.zeros(setpoints.shape)
self.keithley.write('smua.source.leveli=0')
self.keithley.write('smua.source.output=1')
self.keithley.write('smua.measure.autorangev=1')
for i,setpt in enumerate(setpoints):
self.arduino.write((str(ch)+str(setpt)+'\n').encode())
self.l_actual_flow[ch].configure(text=str(setpt))
time.sleep(0.2)
voltages[i] = self.readVoltage()
self.l_voltage.configure(text="{:.2f}".format(voltages[i]*1e3) + " mV")
self.parent.update()
np.savetxt('Channel'+ch+'_calibration.csv',np.vstack((setpoints,voltages)).T,
delimiter=',',header='Setpoints,Voltages')
def prep_measure_stability(self,ch):
self.start_time = time.time()
self.set_setpoint(ch) # Turn on desired set-point
self.file = 'Channel'+ch+'_stability.csv'
header="Elapsed Time (hr),Output Voltage (V)\n"
with open(self.file, 'a') as f:
f.write(header)
self.measure_stability(ch)
def measure_stability(self,ch):
voltage = self.readVoltage() # read voltage
        self.onTime = time.time() - self.start_time # record elapsed on-time (start_time is set in prep_measure_stability)
        self.l_voltage.configure(text="{:.2f}".format(voltage*1e3) + " mV")
# Update file
with open(self.file, 'a') as f:
f.write(str(self.onTime/3600.0)+','+
str(voltage)+'\n')
# Update once every 20 seconds
self.parent.after(int(20 * 1000), partial(self.measure_stability,ch))
def set_setpoint(self,ch):
integer,actual_flow = self.convert_sccm_to_int(float(self.s_setpoints[ch].get()),ch)
self.l_actual_flow[ch].configure(text='{:.2f}'.format(float(actual_flow)))
self.l_integer[ch].configure(text=str(integer))
self.arduino.write((str(ch)+str(integer)+'\n').encode())
def convert_sccm_to_int(self,sccm,ch):
# Get calibration if not yet loaded
if self.V_calibration[ch] is None:
self.V_calibration[ch] = np.genfromtxt("Channel"+ch+"_calibration.csv",
delimiter=',',skip_header=1)
# Maximum SCCM output is 200
# the upper reference output voltage is given by the LM4040
sccm_per_volt = 200 / self.upper_reference_V
V_out = sccm / sccm_per_volt # Needed output voltage
if V_out > self.max_V_out:
print('Maximum output voltage exceeded')
V_out = self.max_V_out
idx_min = np.abs(V_out - self.V_calibration[ch][:,1]).argmin()
integer = int(self.V_calibration[ch][idx_min,0])
#V_out * 4096 / self.max_V_out / MCF
actual_flow=self.V_calibration[ch][idx_min,1]*sccm_per_volt
return integer,actual_flow
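    # Worked example (illustrative, using the constants set in __init__): with
    # upper_reference_V = 4.097 the scale factor is 200 / 4.097, i.e. roughly
    # 48.8 SCCM per volt, so a 100 SCCM setpoint needs about 2.05 V at the
    # output. The calibration table is then searched for the integer whose
    # measured voltage is closest to that target, and the flow actually
    # achievable with that integer is reported back.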
def turn_off_sources(self):
for ch in self.channels:
self.arduino.write((str(ch)+str(0)+'\n').encode())
def turn_on_sources(self):
for ch in self.channels:
self.set_setpoint(ch)
def initializeKeithley(self,keithley):
keithley.write('reset()')
keithley.timeout = 4000 # ms
keithley.write('errorqueue.clear()')
ch = 'a'
keithley.write( 'smu'+ch+'.reset()')
keithley.write( 'smu'+ch+'.measure.count=20')
keithley.write( 'smu'+ch+'.measure.nplc=1')
keithley.write( 'smu'+ch+'.nvbuffer1.appendmode=0')
keithley.write( 'smu'+ch+'.nvbuffer1.clear()')
keithley.write( 'smu'+ch+'.source.func=0') # 0 is output_DCAMPS, 1 is output_DCVOLTS
keithley.write( 'smu'+ch+'.source.limitv='+str(self.complianceV))
keithley.write( 'smu'+ch+'.source.leveli=0')
keithley.write( 'smu'+ch+'.source.output=0')
keithley.write( 'smu'+ch+'.measure.autorangev=1')
ch = 'b'
keithley.write( 'smu'+ch+'.reset()')
keithley.write( 'smu'+ch+'.measure.count=10')
keithley.write( 'smu'+ch+'.measure.nplc=1')
keithley.write( 'smu'+ch+'.nvbuffer1.appendmode=0')
keithley.write( 'smu'+ch+'.nvbuffer1.clear()')
keithley.write( 'smu'+ch+'.source.func=1') # 0 is output_DCAMPS, 1 is output_DCVOLTS
keithley.write( 'smu'+ch+'.source.levelv=0')
keithley.write( 'smu'+ch+'.measure.autorangei=1')
keithley.write( 'smu'+ch+'.source.output=1')
print('keithley initialized')
def turnCurrentOn(self,I):
print('current turned on')
self.keithley.write( 'smua.source.leveli='+str(I))
self.keithley.write( 'smua.source.output=1')
def turnCurrentOff(self):
print('current turned off')
self.keithley.write( 'smua.source.output=0')
def turnVoltageOn(self,V):
#print('voltage turned on')
self.keithley.write( 'smua.source.func=1') # 0 is output_DCAMPS, 1 is output_DCVOLTS
self.keithley.write( 'smua.source.levelv='+str(V))
self.keithley.write( 'smua.source.output=1')
def turnVoltageOff(self):
#print('voltage turned off')
self.keithley.write( 'smua.source.levelv=0')
self.keithley.write( 'smua.source.func=0') # 0 is output_DCAMPS, 1 is output_DCVOLTS
self.keithley.write( 'smua.source.output=0')
# reads the voltage from keithley of the specified device
def readVoltage(self):
self.keithley.write('smua.nvbuffer1.clear()')
self.keithley.write('smua.measure.v(smua.nvbuffer1)')
sig = self.keithley.query('printbuffer(1,smua.nvbuffer1.n,smua.nvbuffer1)')
sig=[float(v) for v in sig.split(',')]
return np.mean(sig)
def readCurrent(self):
self.keithley.write('smua.measure.autorangei=1')
self.keithley.write('smua.nvbuffer1.clear()')
self.keithley.write('smua.measure.i(smua.nvbuffer1)')
sig = self.keithley.query('printbuffer(1,smua.nvbuffer1.n,smua.nvbuffer1)')
sig=[float(v) for v in sig.split(',')]
return np.mean(sig)
# reads device photodiode signal measured by keithley from channel b
def keithleyDiodeRead(self,keithley):
holder = []
for x in range(0,self.keithleyReadingCount):
keithley.write('smub.nvbuffer1.clear()')
keithley.write('smub.measure.i(smub.nvbuffer1)')
sig = keithley.query('printbuffer(1,smub.nvbuffer1.n,smub.nvbuffer1)')
sig=[float(v) for v in sig.split(',')]
holder.append(sig)
return np.mean(holder),np.std(holder)
def main():
root = tk.Tk()
app = VoltageController(root)
root.mainloop()
#app.keithley.close()
app.arduino.close()
if __name__ == '__main__':
main()
| 48.504673
| 107
| 0.617213
| 2,014
| 15,570
| 4.65144
| 0.189176
| 0.055508
| 0.032451
| 0.036507
| 0.371477
| 0.30316
| 0.230252
| 0.197267
| 0.162468
| 0.126921
| 0
| 0.020344
| 0.248619
| 15,570
| 321
| 108
| 48.504673
| 0.780409
| 0.107065
| 0
| 0.134831
| 0
| 0
| 0.135457
| 0.039879
| 0
| 0
| 0.000722
| 0
| 0
| 1
| 0.082397
| false
| 0
| 0.041199
| 0
| 0.146067
| 0.033708
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cca08eaaa789f720d1578ebb6f7b99357f1739e1
| 7,298
|
py
|
Python
|
marble-plotter-dous/src/main/python/server.py
|
paser4se/marble
|
35da5ac8ff24ff7f30a135cbabf57f60f06f54e3
|
[
"Apache-2.0"
] | 2
|
2017-06-05T13:06:06.000Z
|
2021-06-23T13:53:33.000Z
|
marble-plotter-dous/src/main/python/server.py
|
paser4se/marble
|
35da5ac8ff24ff7f30a135cbabf57f60f06f54e3
|
[
"Apache-2.0"
] | 35
|
2015-07-21T15:09:24.000Z
|
2020-07-08T09:06:08.000Z
|
marble-plotter-dous/src/main/python/server.py
|
paser4se/marble
|
35da5ac8ff24ff7f30a135cbabf57f60f06f54e3
|
[
"Apache-2.0"
] | 5
|
2015-06-16T09:40:39.000Z
|
2020-11-05T08:13:57.000Z
|
# Rest Server
from flask import Flask, jsonify, abort, request
# Eureka client
from eureka.client import EurekaClient
# Background tasks
import threading
import atexit
import logging
import socket
import netifaces as ni
import sys
import os
import time
# Plotter libs
from io import BytesIO
import pymongo
from pymongo import MongoClient
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import base64
import datetime
DATABASE_NAME = 'marble'
POSTS_COLLECTION = 'posts'
PROCESSED_POSTS_COLLECTION = 'processed_posts'
pool_time = 5 # Seconds
# variables that are accessible from anywhere
commonDataStruct = {}
# lock to control access to variable
dataLock = threading.Lock()
# thread handler
yourThread = threading.Thread()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Global variables
app_name = "plotter-dous"
try:
ni.ifaddresses('eth0')
app_ip = ni.ifaddresses('eth0')[2][0]['addr']
except Exception:
app_ip = "localhost"
app_host = socket.getfqdn()
app_port = 8084
secure_app_port = 8443
eureka_url = "http://registry:1111/eureka/"
def create_app():
app = Flask(__name__)
def interrupt():
global yourThread
yourThread.cancel()
def doStuff():
global commonDataStruct
global yourThread
with dataLock:
# TODO: Handle what happens when eureka goes down
try:
commonDataStruct['ec'].heartbeat()
except Exception:
logger.info("Registering to Eureka...")
try:
commonDataStruct['ec'].register(initial_status="UP")
logger.info("Registered to Eureka.")
commonDataStruct['ec'].heartbeat()
except Exception as e:
logger.warning(
"Caught exception while trying to register in Eureka: " + str(e) + ". Will retry again shortly.")
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(
exc_tb.tb_frame.f_code.co_filename)[1]
print((exc_type, fname, exc_tb.tb_lineno))
# Set the next thread to happen
yourThread = threading.Timer(pool_time, doStuff, ())
yourThread.start()
def doStuffStart():
# Do initialisation stuff here
# no spaces or underscores, this needs to be url-friendly
commonDataStruct['ec'] = EurekaClient(app_name,
ip_address=app_ip,
eureka_url=eureka_url,
eureka_domain_name="",
data_center="MyOwn",
port=app_port,
secure_port=None,
use_dns=False,
region="none",
prefer_same_zone=False,
context="",
host_name=app_host,
vip_address=app_name,
secure_vip_address=app_name)
global yourThread
# Create your thread
yourThread = threading.Timer(pool_time, doStuff, ())
yourThread.start()
# Initiate
doStuffStart()
# When you kill Flask (SIGTERM), clear the trigger for the next thread
atexit.register(interrupt)
return app
class ChartResponse(object):
def __init__(self, name, description="", type="Image", customType=None, jobId=None, options={}, data={}, images={}):
self.id = None
self.name = name
self.description = description
self.type = type
self.customType = customType
self.jobId = jobId
self.options = options
self.data = data
self.images = images
self.createdAt = None
def plotTopic(topicName, options):
polarity = None
chartName = options['title']
chartDescription = options['description']
client = MongoClient('mongodb', 27017)
db = client[DATABASE_NAME]
posts_collection = db.get_collection(POSTS_COLLECTION)
processed_posts_collection = db.get_collection(PROCESSED_POSTS_COLLECTION)
invalid_plot = False
if (options['type'] == "scatter"):
logger.debug("Plotting scatter.")
collection = options.get('collection', PROCESSED_POSTS_COLLECTION)
point_size = options.get('point_size', 2)
color = options.get('color', 'green')
y_axis_field = options.get('y_axis_field', 'polarity')
y_min = options.get('y_min', None)
y_max = options.get('y_max', None)
if (collection == POSTS_COLLECTION):
posts = posts_collection.find(
{'topicName': topicName}).sort('createdAt', pymongo.ASCENDING)
else:
posts = processed_posts_collection.find(
{'topicName': topicName}).sort('createdAt', pymongo.ASCENDING)
dates_axis = []
y_axis = []
for post in posts:
if (y_axis_field in post):
dates_axis.append(post['createdAt'])
y_axis.append(post[y_axis_field])
dates = [pd.to_datetime(d) for d in dates_axis]
fig = plt.figure(1, figsize=(11, 6))
plt.title(chartName)
plt.xlabel('createdAt')
plt.ylabel(y_axis_field)
# the scatter plot:
axScatter = plt.subplot(111)
axScatter.scatter(x=dates, y=y_axis, s=point_size, color=color)
# set axes range
plt.xlim(dates[0], dates[len(dates) - 1])
if y_min == None:
y_min = min(y_axis)
if y_max == None:
y_max = max(y_axis)
plt.ylim(y_min, y_max)
my_plot = plt.gcf()
imgdata = BytesIO()
# my_plot.show()
my_plot.savefig(imgdata, format='png')
encoded_chart = base64.b64encode(imgdata.getvalue())
else:
invalid_plot = True
client.close()
if invalid_plot:
return None
singleChart = {
"id": None,
"name": chartName,
"description": chartDescription,
"type": "Figure List",
"customType": "",
"jobId": None,
"options": {},
"data": {},
"figures": [encoded_chart.decode('ascii')],
#"figures": [],
"createdAt": None
}
response = {
"charts": [
singleChart
]
}
return response
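# Usage sketch (illustrative values): the /api/plot endpoint defined below
# expects a JSON body shaped like this, where only type "scatter" is currently
# handled by plotTopic:
#   POST /api/plot
#   {"topicName": "Apple Microsoft",
#    "options": {"type": "scatter", "title": "Polarity over time",
#                "description": "example plot", "y_axis_field": "polarity"}}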
app = create_app()
@app.route('/api/plot', methods=['POST'])
def process():
print(request)
    if not request.json or 'topicName' not in request.json or 'options' not in request.json:
abort(400)
response = plotTopic(
request.json['topicName'], request.json.get('options', {}))
if (response != None):
return jsonify(response), 200
else:
return "", 500
if __name__ == '__main__':
app.run(host="0.0.0.0", port=app_port)
# plotTopic("Apple Microsoft", {
# 'title': 'Titlte', 'description': 'Dscription'})
#input("Press Enter to continue...")
| 29.075697
| 121
| 0.565908
| 766
| 7,298
| 5.227154
| 0.368146
| 0.037463
| 0.02997
| 0.025475
| 0.111139
| 0.05994
| 0.05994
| 0.05994
| 0.032967
| 0
| 0
| 0.010654
| 0.331187
| 7,298
| 250
| 122
| 29.192
| 0.80967
| 0.084955
| 0
| 0.106145
| 0
| 0
| 0.082507
| 0
| 0
| 0
| 0
| 0.004
| 0
| 1
| 0.039106
| false
| 0
| 0.100559
| 0
| 0.173184
| 0.011173
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cca2be577b6cb5b5ef2f77c8c187c1f9904195fb
| 2,687
|
py
|
Python
|
django_docutils/favicon/rst/transforms/favicon.py
|
tony/django-docutils
|
ed3e089728507a0f579a62bcb182f283bc59929c
|
[
"MIT"
] | 10
|
2017-04-28T00:19:10.000Z
|
2020-07-22T15:27:09.000Z
|
django_docutils/favicon/rst/transforms/favicon.py
|
tony/django-docutils
|
ed3e089728507a0f579a62bcb182f283bc59929c
|
[
"MIT"
] | 231
|
2017-01-17T04:47:51.000Z
|
2022-03-30T03:03:42.000Z
|
django_docutils/favicon/rst/transforms/favicon.py
|
tony/django-docutils
|
ed3e089728507a0f579a62bcb182f283bc59929c
|
[
"MIT"
] | 1
|
2019-01-25T14:42:15.000Z
|
2019-01-25T14:42:15.000Z
|
import tldextract
from django.db.models import Q
from docutils import nodes
from docutils.transforms import Transform
from django_docutils.favicon.models import get_favicon_model
from ..nodes import icon
Favicon = get_favicon_model()
def resolve_favicon(url):
"""Given a URL to a website, see if a Favicon exists in db.
URL will be resolved to a fqdn for a key lookup.
:param url: URL to any page on a website
:type url: str
:returns: Full Storage based favicon url path, or None
:rtype: str|None
"""
# e.g. forums.bbc.co.uk
fqdn = tldextract.extract(url).fqdn
try:
return Favicon.objects.get(domain=fqdn).favicon.url
except (ValueError, Favicon.DoesNotExist):
return None
class FaviconTransform(Transform):
#: run after based.app.references.rst.transforms.xref
default_priority = 20
def apply(self):
q = Q()
# first run: iterate through references, extract FQDNs, add to query
for node in self.document.traverse(plain_references):
q.add(Q(domain__exact=tldextract.extract(node['refuri']).fqdn), Q.OR)
# pull all fqdns with a favicon
favicons = Favicon.objects.filter(q)
for node in self.document.traverse(plain_references):
fqdn = tldextract.extract(node['refuri']).fqdn
try:
favicon_url = next( # Find favicon matching fqdn
(f.favicon.url for f in favicons if f.domain == fqdn), None
)
except ValueError: # no favicon exists for fqdn
favicon_url = None
if favicon_url:
nodecopy = node.deepcopy()
ico = icon(
'',
'',
style=f'background-image: url({favicon_url})',
classes=['ico'],
)
nodecopy.insert(0, ico)
node.replace_self(nodecopy)
def plain_references(node):
"""Docutils traversal: Only return references with URI's, skip xref's
If a nodes.reference already has classes, it's an icon class from xref,
so skip that.
If a nodes.reference has no 'refuri', it's junk, skip.
Docutils node.traverse condition callback
:returns: True if it's a URL we want to lookup favicons for
:rtype: bool
"""
if isinstance(node, nodes.reference):
# skip nodes already with xref icon classes or no refuri
no_classes = 'classes' not in node or not node['classes']
has_refuri = 'refuri' in node
if no_classes and has_refuri and node['refuri'].startswith('http'):
return True
return False
| 30.885057
| 81
| 0.61965
| 351
| 2,687
| 4.683761
| 0.37037
| 0.048662
| 0.018248
| 0.015815
| 0.091241
| 0.053528
| 0.053528
| 0.053528
| 0
| 0
| 0
| 0.001583
| 0.294753
| 2,687
| 86
| 82
| 31.244186
| 0.865963
| 0.314477
| 0
| 0.133333
| 0
| 0
| 0.045737
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.133333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cca383b23457a782befbce9c96268d9161502ad0
| 1,733
|
py
|
Python
|
xnmt/model_context.py
|
marcintustin/xnmt
|
f315fc5e493d25746bbde46d2c89cea3410d43df
|
[
"Apache-2.0"
] | null | null | null |
xnmt/model_context.py
|
marcintustin/xnmt
|
f315fc5e493d25746bbde46d2c89cea3410d43df
|
[
"Apache-2.0"
] | null | null | null |
xnmt/model_context.py
|
marcintustin/xnmt
|
f315fc5e493d25746bbde46d2c89cea3410d43df
|
[
"Apache-2.0"
] | null | null | null |
import dynet as dy
import os
from xnmt.serializer import Serializable
class ModelContext(Serializable):
yaml_tag = u'!ModelContext'
def __init__(self):
self.dropout = 0.0
self.weight_noise = 0.0
self.default_layer_dim = 512
self.dynet_param_collection = None
self.serialize_params = ["dropout", "weight_noise", "default_layer_dim"]
def update(self, other):
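# copy the serializable hyperparameters (dropout, weight_noise, default_layer_dim) over from another context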
for param in self.serialize_params:
setattr(self, param, getattr(other, param))
class PersistentParamCollection(object):
def __init__(self, model_file, save_num_checkpoints=1):
self.model_file = model_file
self.param_col = dy.Model()
self.is_saved = False
assert save_num_checkpoints >= 1 or (model_file is None and save_num_checkpoints==0)
if save_num_checkpoints>0: self.data_files = [self.model_file + '.data']
for i in range(1,save_num_checkpoints):
self.data_files.append(self.model_file + '.data.' + str(i))
def revert_to_best_model(self):
self.param_col.populate(self.model_file + '.data')
def save(self, fname=None):
if fname: assert fname == self.data_files[0], "%s != %s" % (fname + '.data', self.data_files[0])
if not self.is_saved:
self.remove_existing_history()
self.shift_safed_checkpoints()
self.param_col.save(self.data_files[0])
self.is_saved = True
def remove_existing_history(self):
for fname in self.data_files[1:]:
if os.path.exists(fname):
os.remove(fname)
def shift_safed_checkpoints(self):
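# rotate existing checkpoints one slot back (.data -> .data.1, .data.1 -> .data.2, ...) before a new save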
for i in range(len(self.data_files)-1)[::-1]:
if os.path.exists(self.data_files[i]):
os.rename(self.data_files[i], self.data_files[i+1])
def load_from_data_file(self, datafile):
self.param_col.populate(datafile)
| 38.511111
| 100
| 0.709175
| 262
| 1,733
| 4.435115
| 0.293893
| 0.068847
| 0.111876
| 0.04389
| 0.025818
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013167
| 0.16734
| 1,733
| 44
| 101
| 39.386364
| 0.7921
| 0
| 0
| 0
| 0
| 0
| 0.045009
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 1
| 0.190476
| false
| 0
| 0.071429
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cca508227383d8fc79a193435ae1d2995d0eb8b7
| 3,100
|
py
|
Python
|
utils.py
|
Akshayvm98/Django-School
|
723d52db2cd3bc7665680a3adaf8687f97836d48
|
[
"MIT"
] | 26
|
2015-08-04T00:13:27.000Z
|
2021-03-19T01:01:14.000Z
|
utils.py
|
Akshayvm98/Django-School
|
723d52db2cd3bc7665680a3adaf8687f97836d48
|
[
"MIT"
] | null | null | null |
utils.py
|
Akshayvm98/Django-School
|
723d52db2cd3bc7665680a3adaf8687f97836d48
|
[
"MIT"
] | 28
|
2015-01-19T15:10:15.000Z
|
2020-10-27T11:22:21.000Z
|
import csv
from django.http import HttpResponse, HttpResponseForbidden
from django.template.defaultfilters import slugify
from django.db.models.loading import get_model
def export(qs, fields=None):
model = qs.model
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename=%s.csv' % slugify(model.__name__)
writer = csv.writer(response)
# Write headers to CSV file
if fields:
headers = fields
else:
headers = []
for field in model._meta.fields:
headers.append(field.name)
writer.writerow(headers)
# Write data to CSV file
for obj in qs:
row = []
for field in headers:
if field in headers:
val = getattr(obj, field)
if callable(val):
val = val()
row.append(val)
writer.writerow(row)
# Return CSV file to browser as download
return response
def admin_list_export(request, model_name, app_label, queryset=None, fields=None, list_display=True):
"""
Put the following line in your urls.py BEFORE your admin include
(r'^admin/(?P<app_label>[\d\w]+)/(?P<model_name>[\d\w]+)/csv/', 'util.csv_view.admin_list_export'),
"""
if not request.user.is_staff:
return HttpResponseForbidden()
if not queryset:
model = get_model(app_label, model_name)
queryset = model.objects.all()
filters = dict()
for key, value in request.GET.items():
if key not in ('ot', 'o'):
filters[str(key)] = str(value)
if len(filters):
queryset = queryset.filter(**filters)
if not fields and list_display:
from django.contrib import admin
ld = admin.site._registry[queryset.model].list_display
if ld and len(ld) > 0:
fields = ld
'''
if not fields:
if list_display and len(queryset.model._meta.admin.list_display) > 1:
fields = queryset.model._meta.admin.list_display
else:
fields = None
'''
return export(queryset, fields)
"""
Create your own change_list.html for your admin view and put something like this in it:
{% block object-tools %}
<ul class="object-tools">
<li><a href="csv/{%if request.GET%}?{{request.GET.urlencode}}{%endif%}" class="addlink">Export to CSV</a></li>
{% if has_add_permission %}
<li><a href="add/{% if is_popup %}?_popup=1{% endif %}" class="addlink">{% blocktrans with cl.opts.verbose_name|escape as name %}Add {{ name }}{% endblocktrans %}</a></li>
{% endif %}
</ul>
{% endblock %}
"""
import datetime
from django.http import HttpResponseRedirect #, HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
def mail(request, app_label, model_name):
context = { 'app_label':app_label, 'model_name':model_name, }
return render_to_response('mail.html', context, context_instance=RequestContext(request))
| 36.470588
| 179
| 0.639032
| 397
| 3,100
| 4.874055
| 0.355164
| 0.041344
| 0.020155
| 0.026357
| 0.034109
| 0.034109
| 0
| 0
| 0
| 0
| 0
| 0.001279
| 0.243226
| 3,100
| 84
| 180
| 36.904762
| 0.823529
| 0.086129
| 0
| 0
| 0
| 0
| 0.041222
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057692
| false
| 0
| 0.192308
| 0
| 0.326923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ccae60674a2d5d72a4d3496efd86de39ff78e0dc
| 13,807
|
py
|
Python
|
src/python/pants/backend/docker/util_rules/docker_build_context_test.py
|
asherf/pants
|
e010b93c4123b4446a631cac5db0b7ea15634686
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/docker/util_rules/docker_build_context_test.py
|
asherf/pants
|
e010b93c4123b4446a631cac5db0b7ea15634686
|
[
"Apache-2.0"
] | 14
|
2021-05-03T13:54:41.000Z
|
2022-03-30T10:20:58.000Z
|
src/python/pants/backend/docker/util_rules/docker_build_context_test.py
|
asherf/pants
|
e010b93c4123b4446a631cac5db0b7ea15634686
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
from typing import Any, ContextManager
import pytest
from pants.backend.docker.subsystems.dockerfile_parser import rules as parser_rules
from pants.backend.docker.target_types import DockerImageTarget
from pants.backend.docker.util_rules.docker_build_args import docker_build_args
from pants.backend.docker.util_rules.docker_build_context import (
DockerBuildContext,
DockerBuildContextRequest,
DockerVersionContext,
)
from pants.backend.docker.util_rules.docker_build_context import rules as context_rules
from pants.backend.docker.util_rules.docker_build_env import docker_build_environment_vars
from pants.backend.docker.util_rules.dockerfile import rules as dockerfile_rules
from pants.backend.python import target_types_rules
from pants.backend.python.goals import package_pex_binary
from pants.backend.python.goals.package_pex_binary import PexBinaryFieldSet
from pants.backend.python.target_types import PexBinary
from pants.backend.python.util_rules import pex_from_targets
from pants.backend.shell.target_types import ShellSourcesGeneratorTarget, ShellSourceTarget
from pants.backend.shell.target_types import rules as shell_target_types_rules
from pants.core.goals.package import BuiltPackage
from pants.core.target_types import FilesGeneratorTarget
from pants.core.target_types import rules as core_target_types_rules
from pants.engine.addresses import Address
from pants.engine.fs import Snapshot
from pants.engine.internals.scheduler import ExecutionError
from pants.testutil.pytest_util import no_exception
from pants.testutil.rule_runner import QueryRule, RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
rule_runner = RuleRunner(
rules=[
*context_rules(),
*core_target_types_rules(),
*dockerfile_rules(),
*package_pex_binary.rules(),
*parser_rules(),
*pex_from_targets.rules(),
*shell_target_types_rules(),
*target_types_rules.rules(),
docker_build_args,
docker_build_environment_vars,
QueryRule(BuiltPackage, [PexBinaryFieldSet]),
QueryRule(DockerBuildContext, (DockerBuildContextRequest,)),
],
target_types=[
DockerImageTarget,
FilesGeneratorTarget,
PexBinary,
ShellSourcesGeneratorTarget,
ShellSourceTarget,
],
)
return rule_runner
def assert_build_context(
rule_runner: RuleRunner,
address: Address,
*,
expected_files: list[str],
expected_version_context: dict[str, dict[str, str]] | None = None,
pants_args: list[str] | None = None,
runner_options: dict[str, Any] | None = None,
) -> None:
if runner_options is None:
runner_options = {}
runner_options.setdefault("env_inherit", set()).update({"PATH", "PYENV_ROOT", "HOME"})
rule_runner.set_options(pants_args or [], **runner_options)
context = rule_runner.request(
DockerBuildContext,
[
DockerBuildContextRequest(
address=address,
build_upstream_images=False,
)
],
)
snapshot = rule_runner.request(Snapshot, [context.digest])
assert sorted(expected_files) == sorted(snapshot.files)
if expected_version_context is not None:
assert context.version_context == DockerVersionContext.from_dict(expected_version_context)
def test_file_dependencies(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
# img_A -> files_A
# img_A -> img_B
"src/a/BUILD": dedent(
"""\
docker_image(name="img_A", dependencies=[":files_A", "src/b:img_B"])
files(name="files_A", sources=["files/**"])
"""
),
"src/a/Dockerfile": "FROM base",
"src/a/files/a01": "",
"src/a/files/a02": "",
# img_B -> files_B
"src/b/BUILD": dedent(
"""\
docker_image(name="img_B", dependencies=[":files_B"])
files(name="files_B", sources=["files/**"])
"""
),
"src/b/Dockerfile": "FROM base",
"src/b/files/b01": "",
"src/b/files/b02": "",
# Mixed
"src/c/BUILD": dedent(
"""\
docker_image(name="img_C", dependencies=["src/a:files_A", "src/b:files_B"])
"""
),
"src/c/Dockerfile": "FROM base",
}
)
# We want files_B in build context for img_B
assert_build_context(
rule_runner,
Address("src/b", target_name="img_B"),
expected_files=["src/b/Dockerfile", "src/b/files/b01", "src/b/files/b02"],
)
# We want files_A in build context for img_A, but not files_B
assert_build_context(
rule_runner,
Address("src/a", target_name="img_A"),
expected_files=["src/a/Dockerfile", "src/a/files/a01", "src/a/files/a02"],
)
# Mixed.
assert_build_context(
rule_runner,
Address("src/c", target_name="img_C"),
expected_files=[
"src/c/Dockerfile",
"src/a/files/a01",
"src/a/files/a02",
"src/b/files/b01",
"src/b/files/b02",
],
)
def test_files_out_of_tree(rule_runner: RuleRunner) -> None:
# src/a:img_A -> res/static:files
rule_runner.write_files(
{
"src/a/BUILD": dedent(
"""\
docker_image(name="img_A", dependencies=["res/static:files"])
"""
),
"res/static/BUILD": dedent(
"""\
files(name="files", sources=["!BUILD", "**/*"])
"""
),
"src/a/Dockerfile": "FROM base",
"res/static/s01": "",
"res/static/s02": "",
"res/static/sub/s03": "",
}
)
assert_build_context(
rule_runner,
Address("src/a", target_name="img_A"),
expected_files=[
"src/a/Dockerfile",
"res/static/s01",
"res/static/s02",
"res/static/sub/s03",
],
)
def test_packaged_pex_path(rule_runner: RuleRunner) -> None:
# This test is here to ensure that we catch if there is any change in the generated path where
# built pex binaries go, as we rely on that for dependency inference in the Dockerfile.
rule_runner.write_files(
{
"src/docker/BUILD": """docker_image(dependencies=["src/python/proj/cli:bin"])""",
"src/docker/Dockerfile": """FROM python""",
"src/python/proj/cli/BUILD": """pex_binary(name="bin", entry_point="main.py")""",
"src/python/proj/cli/main.py": """print("cli main")""",
}
)
assert_build_context(
rule_runner,
Address("src/docker", target_name="docker"),
expected_files=["src/docker/Dockerfile", "src.python.proj.cli/bin.pex"],
)
def test_version_context_from_dockerfile(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/docker/BUILD": "docker_image()",
"src/docker/Dockerfile": dedent(
"""\
FROM python:3.8
FROM alpine as interim
FROM interim
FROM scratch:1-1 as output
"""
),
}
)
assert_build_context(
rule_runner,
Address("src/docker"),
expected_files=["src/docker/Dockerfile"],
expected_version_context={
"baseimage": {"tag": "3.8"},
"stage0": {"tag": "3.8"},
"interim": {"tag": "latest"},
"stage2": {"tag": "latest"},
"output": {"tag": "1-1"},
},
)
def test_synthetic_dockerfile(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/docker/BUILD": dedent(
"""\
docker_image(
instructions=[
"FROM python:3.8",
"FROM alpine as interim",
"FROM interim",
"FROM scratch:1-1 as output",
]
)
"""
),
}
)
assert_build_context(
rule_runner,
Address("src/docker"),
expected_files=["src/docker/Dockerfile.docker"],
expected_version_context={
"baseimage": {"tag": "3.8"},
"stage0": {"tag": "3.8"},
"interim": {"tag": "latest"},
"stage2": {"tag": "latest"},
"output": {"tag": "1-1"},
},
)
def test_shell_source_dependencies(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/docker/BUILD": dedent(
"""\
docker_image(dependencies=[":entrypoint", ":shell"])
shell_source(name="entrypoint", source="entrypoint.sh")
shell_sources(name="shell", sources=["scripts/**/*.sh"])
"""
),
"src/docker/Dockerfile": "FROM base",
"src/docker/entrypoint.sh": "",
"src/docker/scripts/s01.sh": "",
"src/docker/scripts/s02.sh": "",
"src/docker/scripts/random.file": "",
}
)
assert_build_context(
rule_runner,
Address("src/docker"),
expected_files=[
"src/docker/Dockerfile",
"src/docker/entrypoint.sh",
"src/docker/scripts/s01.sh",
"src/docker/scripts/s02.sh",
],
)
def test_build_arg_defaults_from_dockerfile(rule_runner: RuleRunner) -> None:
# Test that only explicitly defined build args in the BUILD file or pants configuration use the
# environment for its values.
rule_runner.write_files(
{
"src/docker/BUILD": dedent(
"""\
docker_image(
extra_build_args=[
"base_version",
]
)
"""
),
"src/docker/Dockerfile": dedent(
"""\
ARG base_name=python
ARG base_version=3.8
FROM ${base_name}:${base_version}
ARG NO_DEF
ENV opt=${NO_DEF}
"""
),
}
)
assert_build_context(
rule_runner,
Address("src/docker"),
runner_options={
"env": {
"base_name": "no-effect",
"base_version": "3.9",
},
},
expected_files=["src/docker/Dockerfile"],
expected_version_context={
"baseimage": {"tag": "${base_version}"},
"stage0": {"tag": "${base_version}"},
"build_args": {
# `base_name` is not listed here, as it was not an explicitly defined build arg.
"base_version": "3.9",
},
},
)
@pytest.mark.parametrize(
"dockerfile_arg_value, extra_build_arg_value, expect",
[
pytest.param(None, None, no_exception(), id="No args defined"),
pytest.param(
None,
"",
pytest.raises(ExecutionError, match=r"variable 'MY_ARG' is undefined"),
id="No default value for build arg",
),
pytest.param(None, "some default value", no_exception(), id="Default value for build arg"),
pytest.param("", None, no_exception(), id="No build arg defined, and ARG without default"),
pytest.param(
"",
"",
pytest.raises(ExecutionError, match=r"variable 'MY_ARG' is undefined"),
id="No default value from ARG",
),
pytest.param(
"", "some default value", no_exception(), id="Default value for build arg, ARG present"
),
pytest.param(
"some default value", None, no_exception(), id="No build arg defined, only ARG"
),
pytest.param("some default value", "", no_exception(), id="Default value from ARG"),
pytest.param(
"some default value",
"some other default",
no_exception(),
id="Default value for build arg, ARG default",
),
],
)
def test_undefined_env_var_behavior(
rule_runner: RuleRunner,
dockerfile_arg_value: str | None,
extra_build_arg_value: str | None,
expect: ContextManager,
) -> None:
dockerfile_arg = ""
if dockerfile_arg_value is not None:
dockerfile_arg = "ARG MY_ARG"
if dockerfile_arg_value:
dockerfile_arg += f"={dockerfile_arg_value}"
extra_build_args = ""
if extra_build_arg_value is not None:
extra_build_args = 'extra_build_args=["MY_ARG'
if extra_build_arg_value:
extra_build_args += f"={extra_build_arg_value}"
extra_build_args += '"],'
rule_runner.write_files(
{
"src/docker/BUILD": dedent(
f"""\
docker_image(
{extra_build_args}
)
"""
),
"src/docker/Dockerfile": dedent(
f"""\
FROM python:3.8
{dockerfile_arg}
"""
),
}
)
with expect:
assert_build_context(
rule_runner,
Address("src/docker"),
expected_files=["src/docker/Dockerfile"],
)
| 32.640662
| 99
| 0.557833
| 1,453
| 13,807
| 5.084652
| 0.150723
| 0.046021
| 0.030319
| 0.032756
| 0.483487
| 0.407282
| 0.376827
| 0.347185
| 0.285869
| 0.250406
| 0
| 0.008836
| 0.319693
| 13,807
| 422
| 100
| 32.718009
| 0.777707
| 0.050916
| 0
| 0.347962
| 0
| 0
| 0.209768
| 0.062193
| 0
| 0
| 0
| 0
| 0.040752
| 1
| 0.031348
| false
| 0
| 0.081505
| 0
| 0.115987
| 0.003135
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ccb0f6edadd14edc784ea01c369c3b62583b31ca
| 5,100
|
py
|
Python
|
src/rescuexport/dal.py
|
karlicoss/rescuexport
|
69f5275bfa7cb39a1ba74b99312b605ba340916a
|
[
"MIT"
] | 6
|
2019-11-28T10:56:53.000Z
|
2022-01-10T21:07:40.000Z
|
src/rescuexport/dal.py
|
karlicoss/rescuexport
|
69f5275bfa7cb39a1ba74b99312b605ba340916a
|
[
"MIT"
] | null | null | null |
src/rescuexport/dal.py
|
karlicoss/rescuexport
|
69f5275bfa7cb39a1ba74b99312b605ba340916a
|
[
"MIT"
] | 1
|
2020-12-08T14:16:53.000Z
|
2020-12-08T14:16:53.000Z
|
#!/usr/bin/env python3
import logging
from pathlib import Path
import json
from datetime import datetime, timedelta
from typing import Set, Sequence, Any, Iterator
from dataclasses import dataclass
from .exporthelpers.dal_helper import PathIsh, Json, Res, datetime_naive
from .exporthelpers.logging_helper import LazyLogger
logger = LazyLogger(__package__)
seconds = int
_DT_FMT = '%Y-%m-%dT%H:%M:%S'
@dataclass
class Entry:
dt: datetime_naive
'''
Ok, it definitely seems local, by the looks of the data.
https://www.rescuetime.com/apidoc#analytic-api-reference
"defined by the user’s selected time zone" -- not sure what it means, but another clue I suppose
Note that the manual export has something like -08:00, but the time is the same as local -- doesn't make any sense...
'''
duration_s: seconds
activity: str
@classmethod
def from_row(cls, row: Json) -> 'Entry':
# COL_DT = 0
# COL_DUR = 1
# COL_ACTIVITY = 3
# todo I think cols are fixed so could speed up lookup? Not really necessary at the moment though
COL_DT = 'Date'
COL_DUR = 'Time Spent (seconds)'
COL_ACTIVITY = 'Activity'
dt_s = row[COL_DT]
dur = row[COL_DUR]
activity = row[COL_ACTIVITY]
dt = datetime.strptime(dt_s, _DT_FMT)
return cls(dt=dt, duration_s=dur, activity=activity)
class DAL:
def __init__(self, sources: Sequence[PathIsh]) -> None:
# todo not sure if should sort -- probably best to rely on get_files?
self.sources = [p if isinstance(p, Path) else Path(p) for p in sources]
def raw_entries(self) -> Iterator[Res[Json]]:
# todo rely on more_itertools for it?
emitted: Set[Any] = set()
last = None
for src in self.sources:
# todo parse in multiple processes??
try:
j = json.loads(src.read_text())
except Exception as e:
ex = RuntimeError(f'While processing {src}')
ex.__cause__ = e
yield ex
continue
headers = j['row_headers']
rows = j['rows']
total = len(rows)
unique = 0
for row in rows:
frow = tuple(row) # freeze for hashing
if frow in emitted:
continue
drow = dict(zip(headers, row))
if last is not None and drow['Date'] < last['Date']: # pylint: disable=unsubscriptable-object
yield RuntimeError(f'Expected\n{drow}\nto be later than\n{last}')
# TODO ugh, for couple of days it was pretty bad, lots of duplicated entries..
# for now, just ignore it
else:
yield drow
emitted.add(frow)
unique += 1
last = drow
logger.debug(f"{src}: filtered out {total - unique:<6} of {total:<6}. Grand total: {len(emitted)}")
def entries(self) -> Iterator[Res[Entry]]:
for row in self.raw_entries():
if isinstance(row, Exception):
yield row
continue
cur = Entry.from_row(row)
yield cur
from typing import Iterable
# todo quick test (dal helper aided: check that DAL can handle fake data)
def fake_data_generator(rows=100, seed=123) -> Json:
# todo ok, use faker/mimesis here??
from random import Random
r = Random(seed)
def row_gen():
base = datetime(year=2000, month=1, day=1)
cur = base
emitted = 0
i = 0
while emitted < rows:
i += 1
sleeping = 1 <= cur.hour <= 8
if sleeping:
cur = cur + timedelta(hours=2)
continue
# do something during that period
duration = r.randint(10, 500)
if r.choice([True, False]):
emitted += 1
yield [
cur.strftime(_DT_FMT),
duration,
1,
f'Activity {i % 10}',
f'Category {i % 3}',
i % 2,
]
cur += timedelta(seconds=duration)
return {
"notes": "data is an array of arrays (rows), column names for rows in row_headers",
"row_headers": ["Date", "Time Spent (seconds)", "Number of People", "Activity", "Category", "Productivity"],
"rows": list(row_gen())
}
def main() -> None:
# todo adapt for dal_helper?
import argparse
p = argparse.ArgumentParser()
p.add_argument('path', type=Path)
args = p.parse_args()
files = list(sorted(args.path.glob('*.json')))
model = DAL(files)
count = 0
for x in model.entries():
if isinstance(x, Exception):
logger.error(x)
else:
count += 1
if count % 10000 == 0:
logger.info('Processed %d entries', count)
# print(x)
if __name__ == '__main__':
main()
| 31.097561
| 122
| 0.550392
| 631
| 5,100
| 4.353407
| 0.432647
| 0.009829
| 0.011649
| 0.016017
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014799
| 0.350784
| 5,100
| 163
| 123
| 31.288344
| 0.81486
| 0.123137
| 0
| 0.053097
| 0
| 0.00885
| 0.110596
| 0
| 0
| 0
| 0
| 0.006135
| 0
| 1
| 0.061947
| false
| 0
| 0.097345
| 0
| 0.221239
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ccb20b99e03872ed73a498ac6c05de75181ce2b4
| 1,287
|
py
|
Python
|
MuonAnalysis/MuonAssociators/python/muonHLTL1Match_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
MuonAnalysis/MuonAssociators/python/muonHLTL1Match_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
MuonAnalysis/MuonAssociators/python/muonHLTL1Match_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
from MuonAnalysis.MuonAssociators.muonL1Match_cfi import *
muonHLTL1Match = cms.EDProducer("HLTL1MuonMatcher",
muonL1MatcherParameters,
# Reconstructed muons
src = cms.InputTag("muons"),
# L1 Muon collection, and preselection on that collection
matched = cms.InputTag("patTrigger"),
# Requests to select the object
matchedCuts = cms.string('coll("hltL1extraParticles")'),
# 90% compatible with documentation at SWGuidePATTrigger#Module_Configuration_AN1
# andOr = cms.bool( False ), # if False, do the 'AND' of the conditions below; otherwise, do the OR
# filterIdsEnum = cms.vstring( '*' ),
# filterIds = cms.vint32( 0 ),
# filterLabels = cms.vstring( '*' ),
# pathNames = cms.vstring( '*' ),
# collectionTags = cms.vstring( 'hltL1extraParticles' ),
resolveAmbiguities = cms.bool( True ), # if True, no more than one reco object can be matched to the same L1 object; precedence is given to the reco ones coming first in the list
# Fake filter labels for the object propagated to the second muon station
setPropLabel = cms.string("propagatedToM2"),
# Write extra ValueMaps
writeExtraInfo = cms.bool(True),
)
| 40.21875
| 186
| 0.679876
| 143
| 1,287
| 6.097902
| 0.664336
| 0.045872
| 0.025229
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015075
| 0.226884
| 1,287
| 31
| 187
| 41.516129
| 0.861307
| 0.574981
| 0
| 0
| 0
| 0
| 0.144
| 0.054
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.181818
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ccb315a349fc8cff12751a9ddd2a5e9db7858230
| 632
|
py
|
Python
|
Curso_em_Video_py3/ex082.py
|
Rodrigo98Matos/Projetos_py
|
6428e2c09d28fd8a717743f4434bc788e7d7d3cc
|
[
"MIT"
] | 1
|
2021-05-11T12:39:43.000Z
|
2021-05-11T12:39:43.000Z
|
Curso_em_Video_py3/ex082.py
|
Rodrigo98Matos/Projetos_py
|
6428e2c09d28fd8a717743f4434bc788e7d7d3cc
|
[
"MIT"
] | null | null | null |
Curso_em_Video_py3/ex082.py
|
Rodrigo98Matos/Projetos_py
|
6428e2c09d28fd8a717743f4434bc788e7d7d3cc
|
[
"MIT"
] | null | null | null |
lista = list()
while True:
lista.append(int(input("Digite um número inteiro:\t")))
while True:
p = str(input("Digitar mais números?\t").strip())[0].upper()
if p in 'SN':
break
else:
print("\033[31mDigite uma opção válida!\033[m")
if p == 'N':
break
par = list()
impar = list()
for n in lista:
if n % 2 == 0:
par.append(n)
else:
impar.append(n)
par.sort()
impar.sort()
if 0 in par:
par.remove(0)
print(f"Entre os números \033[32m{lista}\033[m, os números pares são: \033[33m{par}\033[m e os números ímpares são: \033[34m{impar}\033[m!")
| 26.333333
| 140
| 0.568038
| 102
| 632
| 3.519608
| 0.480392
| 0.044568
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.079741
| 0.265823
| 632
| 23
| 141
| 27.478261
| 0.693966
| 0
| 0
| 0.26087
| 0
| 0.043478
| 0.349684
| 0.06962
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.086957
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ccb69ce91869d886c281dd5c097515346b3bb141
| 844
|
py
|
Python
|
pyskel_bc/cli.py
|
smnorris/pyskel_bc
|
d8bdec3e15da6268c9b6a0f3be1fdd6af9737d21
|
[
"Apache-2.0"
] | null | null | null |
pyskel_bc/cli.py
|
smnorris/pyskel_bc
|
d8bdec3e15da6268c9b6a0f3be1fdd6af9737d21
|
[
"Apache-2.0"
] | null | null | null |
pyskel_bc/cli.py
|
smnorris/pyskel_bc
|
d8bdec3e15da6268c9b6a0f3be1fdd6af9737d21
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Skeleton of a CLI
import click
import pyskel_bc
@click.command('pyskel_bc')
@click.argument('count', type=int, metavar='N')
def cli(count):
"""Echo a value `N` number of times"""
for i in range(count):
click.echo(pyskel_bc.has_legs)
| 30.142857
| 76
| 0.740521
| 133
| 844
| 4.669173
| 0.646617
| 0.096618
| 0.041868
| 0.05153
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011445
| 0.171801
| 844
| 27
| 77
| 31.259259
| 0.876967
| 0.726303
| 0
| 0
| 0
| 0
| 0.070755
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.285714
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ccb6b1cc30ddbfae5e44890354c45b22ed3bc398
| 757
|
py
|
Python
|
solutions/python3/558.py
|
sm2774us/amazon_interview_prep_2021
|
f580080e4a6b712b0b295bb429bf676eb15668de
|
[
"MIT"
] | 42
|
2020-08-02T07:03:49.000Z
|
2022-03-26T07:50:15.000Z
|
solutions/python3/558.py
|
ajayv13/leetcode
|
de02576a9503be6054816b7444ccadcc0c31c59d
|
[
"MIT"
] | null | null | null |
solutions/python3/558.py
|
ajayv13/leetcode
|
de02576a9503be6054816b7444ccadcc0c31c59d
|
[
"MIT"
] | 40
|
2020-02-08T02:50:24.000Z
|
2022-03-26T15:38:10.000Z
|
class Solution:
def intersect(self, q1, q2):
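# Logical OR of two quad-trees: a True leaf wins outright, otherwise recurse into the four children and collapse them into a single leaf when they all agree.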
if q1.isLeaf:
return q1.val and q1 or q2
elif q2.isLeaf:
return q2.val and q2 or q1
else:
tLeft = self.intersect(q1.topLeft, q2.topLeft)
tRight = self.intersect(q1.topRight, q2.topRight)
bLeft = self.intersect(q1.bottomLeft, q2.bottomLeft)
bRight = self.intersect(q1.bottomRight, q2.bottomRight)
if tLeft.isLeaf and tRight.isLeaf and bLeft.isLeaf and bRight.isLeaf and tLeft.val == tRight.val == bLeft.val == bRight.val:
node = Node(tLeft.val, True, None, None, None, None)
else:
node = Node(False, False, tLeft, tRight, bLeft, bRight)
return node
| 47.3125
| 136
| 0.587847
| 97
| 757
| 4.587629
| 0.278351
| 0.116854
| 0.134831
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034682
| 0.314399
| 757
| 16
| 137
| 47.3125
| 0.822736
| 0
| 0
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0
| 0
| 0.3125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ccba7105c97f04202293ba5c9e3bb9286a1e1c30
| 1,745
|
py
|
Python
|
tests/test_main.py
|
giulionf/GetOldTweets3
|
038b8fed7da27300e6c611d3c0fd617588075a58
|
[
"MIT"
] | null | null | null |
tests/test_main.py
|
giulionf/GetOldTweets3
|
038b8fed7da27300e6c611d3c0fd617588075a58
|
[
"MIT"
] | null | null | null |
tests/test_main.py
|
giulionf/GetOldTweets3
|
038b8fed7da27300e6c611d3c0fd617588075a58
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import time
import GetOldTweets3 as Got3
if sys.version_info[0] < 3:
raise Exception("Python 2.x is not supported. Please upgrade to 3.x")
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
def test_username():
tweet_criteria = Got3.manager.TweetCriteria() \
.set_username('barackobama') \
.set_max_tweets(1)
tweet = Got3.manager.TweetManager.get_tweets(tweet_criteria)[0]
assert tweet.username == 'BarackObama'
def test_query_search():
tweet_criteria = Got3.manager.TweetCriteria().set_query_search('#europe #refugees') \
.set_since("2015-05-01") \
.set_until("2015-09-30") \
.set_max_tweets(1)
tweet = Got3.manager.TweetManager.get_tweets(tweet_criteria)[0]
assert tweet.hashtags.lower() == '#europe #refugees'
def test_mass_fetch_concurrent():
time1 = time.time()
tweet_criteria = Got3.manager.TweetCriteria().set_query_search('#europe #refugees') \
.set_since("2015-05-01") \
.set_until("2015-09-30") \
.set_max_tweets(500)
tweets = Got3.manager.ConcurrentTweetManager.get_tweets(tweet_criteria, worker_count=25)
print("Time Needed Concurrent: {} Secs".format((time.time() - time1)))
assert len(tweets) <= 1000
def test_mass_fetch_non_concurrent():
time1 = time.time()
tweet_criteria = Got3.manager.TweetCriteria().set_query_search('#europe #refugees') \
.set_since("2015-05-01") \
.set_until("2015-09-30") \
.set_max_tweets(500)
tweets = Got3.manager.TweetManager.get_tweets(tweet_criteria)
print("Time Needed Non Concurrent: {} Secs".format((time.time() - time1)))
assert len(tweets) <= 1000
| 33.557692
| 92
| 0.681948
| 228
| 1,745
| 5.004386
| 0.346491
| 0.091148
| 0.059597
| 0.084137
| 0.638913
| 0.638913
| 0.603856
| 0.574058
| 0.574058
| 0.574058
| 0
| 0.060732
| 0.169628
| 1,745
| 51
| 93
| 34.215686
| 0.726708
| 0.024642
| 0
| 0.5
| 0
| 0
| 0.157647
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 1
| 0.105263
| false
| 0
| 0.105263
| 0
| 0.210526
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ccbaa690673254ea1cda4e119201ebd6a7af3c57
| 421
|
py
|
Python
|
extsum/__main__.py
|
streof/extsum
|
45949a6a6aff65ceaf2a4bdc70f5b4d9660fca5f
|
[
"MIT"
] | null | null | null |
extsum/__main__.py
|
streof/extsum
|
45949a6a6aff65ceaf2a4bdc70f5b4d9660fca5f
|
[
"MIT"
] | null | null | null |
extsum/__main__.py
|
streof/extsum
|
45949a6a6aff65ceaf2a4bdc70f5b4d9660fca5f
|
[
"MIT"
] | null | null | null |
import extsum as ext
URL = "https://i.picsum.photos/id/42/1/1.jpg"
# Uncomment for random Picsum photo
# URL = "https://picsum.photos/1/1"
if __name__ == '__main__':
# Init
photo = ext.Load(URL)
photo_parsed = ext.Parse(photo)
# Print found ID (if any)
id_found = photo_parsed.find_id()
if id_found is None:
print("Couldn't find any ID")
else:
print(f"Found ID {id_found}")
| 23.388889
| 45
| 0.631829
| 67
| 421
| 3.761194
| 0.507463
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018519
| 0.230404
| 421
| 17
| 46
| 24.764706
| 0.759259
| 0.228029
| 0
| 0
| 0
| 0
| 0.2625
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ccbaec8553da0b692edf4bc23f5c78797c64aa03
| 931
|
py
|
Python
|
rplugin/python3/denite/kind/lab_browse.py
|
lighttiger2505/denite-lab
|
611e5e081d049d79999a8c0a0f38c2466d8ca970
|
[
"MIT"
] | 1
|
2018-02-26T15:27:03.000Z
|
2018-02-26T15:27:03.000Z
|
rplugin/python3/denite/kind/lab_browse.py
|
lighttiger2505/denite-lab
|
611e5e081d049d79999a8c0a0f38c2466d8ca970
|
[
"MIT"
] | null | null | null |
rplugin/python3/denite/kind/lab_browse.py
|
lighttiger2505/denite-lab
|
611e5e081d049d79999a8c0a0f38c2466d8ca970
|
[
"MIT"
] | null | null | null |
import subprocess
from .base import Base
class Kind(Base):
def __init__(self, vim):
super().__init__(vim)
self.name = 'lab_browse'
self.default_action = 'lab_browse'
def action_lab_browse(self, context):
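# run `lab browse <candidate>` for each selected target, with the denite context path as the working directory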
for target in context['targets']:
iid = target['word']
command = ['lab', 'browse', iid]
process = subprocess.Popen(command,
cwd=context['path'],
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
output, err_output = process.communicate(timeout=15)
except subprocess.TimeoutExpired:
process.kill()
output, err_output = process.communicate()
exit_code = process.returncode
if exit_code != 0:
print('error')
| 30.032258
| 64
| 0.525242
| 87
| 931
| 5.413793
| 0.574713
| 0.076433
| 0.055202
| 0.093418
| 0.140127
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005199
| 0.380236
| 931
| 30
| 65
| 31.033333
| 0.811092
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.208333
| 0.041667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ccbbb657919673062f15afd66c5a069d9e36de11
| 3,358
|
py
|
Python
|
Betsy/Betsy/modules/plot_sample_pca.py
|
jefftc/changlab
|
11da8c415afefcba0b0216238387c75aeb3a56ac
|
[
"MIT"
] | 9
|
2017-01-13T02:38:41.000Z
|
2021-04-08T00:44:39.000Z
|
Betsy/Betsy/modules/plot_sample_pca.py
|
jefftc/changlab
|
11da8c415afefcba0b0216238387c75aeb3a56ac
|
[
"MIT"
] | null | null | null |
Betsy/Betsy/modules/plot_sample_pca.py
|
jefftc/changlab
|
11da8c415afefcba0b0216238387c75aeb3a56ac
|
[
"MIT"
] | 4
|
2017-01-05T16:25:25.000Z
|
2019-12-12T20:07:38.000Z
|
from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, antecedents, out_attributes, user_options, num_cores,
outfile):
from genomicode import filelib
from genomicode import parallel
from Betsy import module_utils as mlib
in_data = antecedents
metadata = {}
## data_node, cls_node = antecedents
## a, b, c = read_label_file.read(cls_node.identifier)
## if len(a) > 1:
## colors = []
## for i in range(5):
## colors.append(cm.hot(i / 5.0, 1))
## colors.append(cm.autumn(i / 5.0, i))
## colors.append(cm.cool(i / 5.0, i))
## colors.append(cm.jet(i / 5.0, i))
## colors.append(cm.spring(i / 5.0, i))
## colors.append(cm.prism(i / 5.0, i))
## colors.append(cm.summer(i / 5.0, i))
## colors.append(cm.winter(i / 5.0, i))
## opts = [colors[int(i)] for i in b]
## legend = [c[int(i)] for i in b]
## plot_pca(data_node.identifier, outfile, opts, legend)
#num_genes = mlib.get_user_option(
# user_options, "pca_num_genes", type=int)
#assert num_genes >= 5 and num_genes < 1E5
#metadata["num_genes"] = num_genes
pcaplot = mlib.get_config("pcaplot", which_assert_file=True)
prism_file = "prism.txt"
row_pc_file = "row_components.txt"
col_pc_file = "col_components.txt"
sq = parallel.quote
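# assemble the pcaplot command line; file arguments are shell-quoted, then the command is joined and run via parallel.sshell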
cmd = [
sq(pcaplot),
"--label",
#"-g", num_genes,
"--prism_file", prism_file,
"--row_pc_file", row_pc_file,
"--col_pc_file", col_pc_file,
sq(in_data.identifier),
sq(outfile),
]
cmd = " ".join(map(str, cmd))
parallel.sshell(cmd)
metadata["commands"] = [cmd]
filelib.assert_exists_nz(outfile)
return metadata
def name_outfile(self, antecedents, user_options):
return "pca.png"
## def plot_pca(filename, result_fig, opts='b', legend=None):
## import arrayio
## from genomicode import jmath, mplgraph
## from genomicode import filelib
## R = jmath.start_R()
## jmath.R_equals(filename, 'filename')
## M = arrayio.read(filename)
## labels = M._col_names['_SAMPLE_NAME']
## data = M.slice()
## jmath.R_equals(data, 'X')
## R('NUM.COMPONENTS <- 2')
## R('S <- svd(X)')
## R('U <- S$u[,1:NUM.COMPONENTS]')
## R('D <- S$d[1:NUM.COMPONENTS]')
## # Project the data onto the first 2 components.
## R('x <- t(X) %*% U %*% diag(D)')
## x1 = R['x'][0:M.ncol()]
## x2 = R['x'][M.ncol():]
## xlabel = 'Principal Component 1'
## ylabel = 'Principal Component 2'
## if len(opts) > 1:
## fig = mplgraph.scatter(
## x1, x2, xlabel=xlabel, ylabel=ylabel, color=opts,
## legend=legend)
## else:
## fig = mplgraph.scatter(
## x1, x2, xlabel=xlabel, ylabel=ylabel, color=opts,
## label=labels)
## fig.savefig(result_fig)
## assert filelib.exists_nz(result_fig), 'the plot_pca.py fails'
| 32.921569
| 76
| 0.532758
| 418
| 3,358
| 4.114833
| 0.313397
| 0.055814
| 0.065116
| 0.016279
| 0.153488
| 0.139535
| 0.126744
| 0.063953
| 0.063953
| 0.063953
| 0
| 0.015686
| 0.316557
| 3,358
| 101
| 77
| 33.247525
| 0.733769
| 0.584276
| 0
| 0
| 0
| 0
| 0.087665
| 0
| 0
| 0
| 0
| 0
| 0.060606
| 1
| 0.090909
| false
| 0
| 0.121212
| 0.030303
| 0.30303
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ccbc5e88899c66f97d3d1a9a12e0ddc302dbc43e
| 1,098
|
py
|
Python
|
scripts/seg_vs_dog_plots.py
|
AbigailMcGovern/platelet-segmentation
|
46cd87b81fc44473b07a2bebed1e6134b2582348
|
[
"BSD-3-Clause"
] | 1
|
2022-02-01T23:40:38.000Z
|
2022-02-01T23:40:38.000Z
|
scripts/seg_vs_dog_plots.py
|
AbigailMcGovern/platelet-segmentation
|
46cd87b81fc44473b07a2bebed1e6134b2582348
|
[
"BSD-3-Clause"
] | 3
|
2021-03-12T02:03:15.000Z
|
2021-03-31T00:39:05.000Z
|
scripts/seg_vs_dog_plots.py
|
AbigailMcGovern/platelet-segmentation
|
46cd87b81fc44473b07a2bebed1e6134b2582348
|
[
"BSD-3-Clause"
] | 1
|
2021-04-06T23:23:32.000Z
|
2021-04-06T23:23:32.000Z
|
from plots import plot_experiment_APs, plot_experiment_no_diff, experiment_VI_plots
import os
# paths, names, title, out_dir, out_name
data_dir = '/Users/amcg0011/Data/pia-tracking/dl-results/210512_150843_seed_z-1_y-1_x-1_m_centg'
suffix = 'seed_z-1_y-1_x-1_m_centg'
out_dir = os.path.join(data_dir, 'DL-vs-Dog')
ap_paths = [os.path.join(data_dir, suffix + '_validation_AP.csv'),
os.path.join(data_dir, 'DoG-segmentation_average_precision.csv')]
nd_paths = [os.path.join(data_dir, 'seed_z-1_y-1_x-1_m_centg_validation_metrics.csv'),
os.path.join(data_dir, 'DoG-segmentation_metrics.csv')]
vi_paths = [
os.path.join(data_dir, 'seed_z-1_y-1_x-1_m_centgvalidation_VI.csv'),
os.path.join(data_dir, 'seed_z-1_y-1_x-1_m_centgvalidation_VI_DOG-seg.csv')
]
#plot_experiment_APs(ap_paths, ['DL', 'DoG'], 'Average precision: DL vs Dog', out_dir, 'AP_DL-vs-Dog')
#plot_experiment_no_diff(nd_paths, ['DL', 'DoG'], 'Number difference: DL vs Dog', out_dir, 'ND_DL-vs-Dog')
experiment_VI_plots(vi_paths, ['DL', 'DoG'], 'VI Subscores: DL vs DoG', 'VI_DL-vs-DoG', out_dir)
| 64.588235
| 106
| 0.736794
| 202
| 1,098
| 3.638614
| 0.247525
| 0.07619
| 0.095238
| 0.133333
| 0.429932
| 0.353742
| 0.319728
| 0.319728
| 0.22449
| 0.17415
| 0
| 0.0316
| 0.106557
| 1,098
| 17
| 107
| 64.588235
| 0.717635
| 0.222222
| 0
| 0
| 0
| 0.071429
| 0.443008
| 0.364277
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ccc14d33e30db584c95153ccde86ea32ce7c48be
| 8,331
|
py
|
Python
|
wavelet_prosody_toolkit/wavelet_prosody_toolkit/cwt_global_spectrum.py
|
eugenemfu/TTS_HW
|
34b3a32da2904578ddbd86bfd9529798cc3a1e9f
|
[
"BSD-3-Clause"
] | 115
|
2019-08-06T08:34:33.000Z
|
2022-02-15T09:44:40.000Z
|
wavelet_prosody_toolkit/wavelet_prosody_toolkit/cwt_global_spectrum.py
|
eugenemfu/TTS_HW
|
34b3a32da2904578ddbd86bfd9529798cc3a1e9f
|
[
"BSD-3-Clause"
] | 11
|
2019-08-13T15:27:07.000Z
|
2022-03-28T15:59:39.000Z
|
wavelet_prosody_toolkit/wavelet_prosody_toolkit/cwt_global_spectrum.py
|
eugenemfu/TTS_HW
|
34b3a32da2904578ddbd86bfd9529798cc3a1e9f
|
[
"BSD-3-Clause"
] | 32
|
2019-01-30T12:00:15.000Z
|
2022-03-28T10:06:39.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
AUTHOR
- Antti Suni <antti.suni@helsinki.fi>
- Sébastien Le Maguer <lemagues@tcd.ie>
DESCRIPTION
usage: cwt_global_spectrum.py [-h] [-v] [-o OUTPUT]
[-P]
input_file
Tool for extracting global wavelet spectrum of speech envelope
introduced for second language fluency estimation in the following paper:
@inproceedings{suni2019characterizing,
title={Characterizing second language fluency with global wavelet spectrum},
author={Suni, Antti and Kallio, Heini and Benu{\v{s}}, {\v{S}}tefan and {\v{S}}imko, Juraj},
booktitle={International Congress of Phonetic Sciences},
pages={1947--1951},
year={2019},
organization={Australasian Speech Science and Technology Association Inc.}
}
positional arguments:
input_file Input signal or F0 file
optional arguments:
-h, --help show this help message and exit
-v, --verbosity increase output verbosity
-o OUTPUT, --output OUTPUT
output directory for analysis or filename for synthesis.
(Default: input_file directory [Analysis] or <input_file>.f0 [Synthesis])
-P, --plot Plot the results
You should be able to see a peak around 4Hz, corresponding to the syllable rate.
For longer speech files, lower frequency peaks related to phrasing should appear.
Synthetic test file with 8Hz, 4Hz and 1Hz components is included in sample directory.
LICENSE
See https://github.com/asuni/wavelet_prosody_toolkit/blob/master/LICENSE.txt
"""
# System/default
import sys
import os
# Arguments
import argparse
# Messaging/logging
import traceback
import time
import logging
# Math/plot
import numpy as np
import matplotlib.ticker
import matplotlib.pyplot as plt
# Libraries
from wavelet_prosody_toolkit.prosody_tools import cwt_utils as cwt_utils
from wavelet_prosody_toolkit.prosody_tools import misc as misc
from wavelet_prosody_toolkit.prosody_tools import energy_processing as energy_processing
###############################################################################
# global constants
###############################################################################
LEVEL = [logging.WARNING, logging.INFO, logging.DEBUG]
###############################################################################
# Functions
###############################################################################
def calc_global_spectrum(wav_file, period=5, n_scales=60, plot=False):
"""
"""
# Extract signal envelope, scale and normalize
(fs, waveform) = misc.read_wav(wav_file)
waveform = misc.resample(waveform, fs, 16000)
energy = energy_processing.extract_energy(waveform, min_freq=30, method="hilbert")
energy[energy<0] = 0
energy = np.cbrt(energy+0.1)
params = misc.normalize_std(energy)
# perform continuous wavelet transform on the envelope with a Morlet wavelet
# increase period to get a sharper spectrum
matrix, scales, freq = cwt_utils.cwt_analysis(params, first_freq = 16, num_scales = n_scales, scale_distance = 0.1,period=period, mother_name="Morlet",apply_coi=True)
# power, arbitrary scaling to prevent underflow
p_matrix = (abs(matrix)**2).astype('float32')*1000.0
power_spec = np.nanmean(p_matrix,axis=1)
if plot:
f, wave_pics = plt.subplots(1, 2, gridspec_kw = {'width_ratios':[5, 1]}, sharey=True)
f.subplots_adjust(hspace=10)
f.subplots_adjust(wspace=0)
wave_pics[0].set_ylim(0, n_scales)
wave_pics[0].set_xlabel("Time(m:s)")
wave_pics[0].set_ylabel("Frequency(Hz)")
wave_pics[1].set_xlabel("power")
wave_pics[1].tick_params(labelright=True)
fname = os.path.basename(wav_file)
title = "CWT Morlet(p="+str(period)+") global spectrum, "+ fname
wave_pics[0].contourf(p_matrix, 100)
wave_pics[0].set_title(title, loc="center")
wave_pics[0].plot(params*3, color="white",alpha=0.5)
freq_labels = [round(x,3)
if (np.isclose(x, round(x)) or
(x < 2 and np.isclose(x*100., round(x*100))) or
(x < 0.5 and np.isclose(x*10000., round(x*10000))))
else ""
for x in list(freq)]
wave_pics[0].set_yticks(np.linspace(0, len(freq_labels)-1, len(freq_labels)))
wave_pics[0].set_yticklabels(freq_labels)
formatter = matplotlib.ticker.FuncFormatter(lambda ms, x: time.strftime('%M:%S', time.gmtime(ms // 200)))
wave_pics[0].xaxis.set_major_formatter(formatter)
wave_pics[1].grid(axis="y")
wave_pics[1].plot(power_spec,np.linspace(0,len(power_spec), len(power_spec)),"-")
plt.show()
return (power_spec, freq)
###############################################################################
# Main function
###############################################################################
def main():
"""Main entry function
"""
global args
period = 5
n_scales = 60
# Compute the global spectrum
(power_spec, freq) = calc_global_spectrum(args.wav_file, period, n_scales, args.plot)
# save spectrum and associated frequencies for further processing
output_dir = os.path.dirname(args.wav_file)
if args.output_dir is not None:
output_dir = args.output_dir
os.makedirs(output_dir, exist_ok=True)
basename = os.path.join(output_dir, os.path.splitext(os.path.basename(args.wav_file))[0])
np.savetxt(basename+".spec.txt", power_spec, fmt="%.5f", newline= " ")
np.savetxt(basename+".freqs.txt", freq, fmt="%.5f", newline= " ")
###############################################################################
# Envelopping
###############################################################################
if __name__ == '__main__':
try:
parser = argparse.ArgumentParser(description="")
# Add options
parser.add_argument("-l", "--log_file", default=None,
help="Logger file")
parser.add_argument("-o", "--output_dir", default=None, type=str,
help="The output directory (if not defined, use the same directory than the wave file)")
parser.add_argument("-P", "--plot", default=False, action="store_true",
help="Plot the results")
parser.add_argument("-v", "--verbosity", action="count", default=0,
help="increase output verbosity")
# Add arguments
parser.add_argument("wav_file", help="The input wave file")
# Parsing arguments
args = parser.parse_args()
# create logger and formatter
logger = logging.getLogger()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Verbose level => logging level
log_level = args.verbosity
if (args.verbosity >= len(LEVEL)):
log_level = len(LEVEL) - 1
logger.setLevel(log_level)
logging.warning("verbosity level is too high, I'm gonna assume you're taking the highest (%d)" % log_level)
else:
logger.setLevel(LEVEL[log_level])
# create console handler
ch = logging.StreamHandler()
ch.setFormatter(formatter)
logger.addHandler(ch)
# create file handler
if args.log_file is not None:
fh = logging.FileHandler(args.log_file)
logger.addHandler(fh)
# Debug time
start_time = time.time()
logger.info("start time = " + time.asctime())
# Running main function <=> run application
main()
# Debug time
logging.info("end time = " + time.asctime())
logging.info('TOTAL TIME IN MINUTES: %02.2f' %
((time.time() - start_time) / 60.0))
# Exit program
sys.exit(0)
except KeyboardInterrupt as e: # Ctrl-C
raise e
except SystemExit: # sys.exit()
pass
except Exception as e:
logging.error('ERROR, UNEXPECTED EXCEPTION')
logging.error(str(e))
traceback.print_exc(file=sys.stderr)
sys.exit(-1)
else:
print("usage: cwt_global_spectrum.py <audiofile>")
| 35.451064
| 171
| 0.591886
| 996
| 8,331
| 4.822289
| 0.370482
| 0.023319
| 0.016864
| 0.014991
| 0.043514
| 0.026858
| 0.026858
| 0
| 0
| 0
| 0
| 0.018619
| 0.226383
| 8,331
| 234
| 172
| 35.602564
| 0.72661
| 0.271396
| 0
| 0.018349
| 0
| 0
| 0.114477
| 0.004088
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018349
| false
| 0.009174
| 0.110092
| 0
| 0.137615
| 0.018349
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ccc2302b262199c91606683ac0c0ea01d97056a4
| 1,649
|
py
|
Python
|
betahex/training/supervised.py
|
StarvingMarvin/betahex
|
0626cf4d003e94423f34f3d83149702a5557ddb8
|
[
"MIT"
] | 2
|
2019-03-17T07:09:14.000Z
|
2020-05-04T17:40:51.000Z
|
betahex/training/supervised.py
|
StarvingMarvin/betahex
|
0626cf4d003e94423f34f3d83149702a5557ddb8
|
[
"MIT"
] | null | null | null |
betahex/training/supervised.py
|
StarvingMarvin/betahex
|
0626cf4d003e94423f34f3d83149702a5557ddb8
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from tensorflow.contrib import learn
from betahex.features import Features
from betahex.training.common import make_train_model, make_policy_input_fn, accuracy
from betahex.models import MODEL
tf.logging.set_verbosity(tf.logging.INFO)
def main(unused_argv):
# Load training and eval data
feat = Features(13, MODEL['features'])
model_fn = make_train_model(
feat,
policy_filters=MODEL['filters'],
policy_shape=MODEL['shape'],
learning_rate=2e-3,
learn_rate_decay=.98,
optimizer="Adam",
regularization_scale=MODEL['regularization_scale']
)
config = learn.RunConfig(
save_checkpoints_steps=1000,
save_checkpoints_secs=None,
save_summary_steps=100
)
est = learn.Estimator(
model_fn=model_fn,
model_dir="data/tf/models/supervised/%s-l2e-3-d.98adam" % MODEL['name'],
config=config
)
train_in = make_policy_input_fn(feat, ["data/tf/features/train.tfrecords"], 64)
eval_in = make_policy_input_fn(feat, ["data/tf/features/eval.tfrecords"], 32)
fouls = 0
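# count degenerate evaluations (near-zero accuracy or exploded loss) and stop early after a few of them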
for i in range(40):
est.fit(
input_fn=train_in,
steps=2000
)
metrics = {
"accuracy":
learn.MetricSpec(
metric_fn=accuracy, prediction_key="classes")
}
eval_result = est.evaluate(input_fn=eval_in, metrics=metrics, steps=200)
if eval_result['accuracy'] < 1e-2 or eval_result['loss'] > 16:
fouls += 1
if fouls > 3:
break
if __name__ == '__main__':
tf.app.run()
| 25.765625
| 84
| 0.627653
| 205
| 1,649
| 4.8
| 0.463415
| 0.035569
| 0.045732
| 0.051829
| 0.075203
| 0.075203
| 0.075203
| 0.075203
| 0.075203
| 0
| 0
| 0.030604
| 0.266828
| 1,649
| 63
| 85
| 26.174603
| 0.783292
| 0.016374
| 0
| 0
| 0
| 0
| 0.116667
| 0.065432
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021277
| false
| 0
| 0.106383
| 0
| 0.12766
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ccc373e2e0f33ced1981ea3d8ae722d60f288cc2
| 9,747
|
py
|
Python
|
MeterToTransPairingScripts.py
|
sandialabs/distribution-system-model-calibration
|
55493cc03b8ebcc5a0f2e7d2ff9092cb2e608f90
|
[
"BSD-3-Clause"
] | 1
|
2021-11-12T21:30:35.000Z
|
2021-11-12T21:30:35.000Z
|
MeterTransformerPairing/MeterToTransPairingScripts.py
|
sandialabs/distribution-system-model-calibration
|
55493cc03b8ebcc5a0f2e7d2ff9092cb2e608f90
|
[
"BSD-3-Clause"
] | null | null | null |
MeterTransformerPairing/MeterToTransPairingScripts.py
|
sandialabs/distribution-system-model-calibration
|
55493cc03b8ebcc5a0f2e7d2ff9092cb2e608f90
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
BSD 3-Clause License
Copyright 2021 National Technology & Engineering Solutions of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights in this software.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
##############################################################################
# Import standard Python libraries
import sys
import numpy as np
#import datetime
#from copy import deepcopy
from pathlib import Path
import pandas as pd
# Import custom libraries
import M2TUtils
import M2TFuncs
###############################################################################
###############################################################################
# Input Data Notes
# custIDInput: list of str (customers) - the list of customer IDs as strings
# transLabelsTrue: numpy array of int (1,customers) - the transformer labels for each customer as integers. This is the ground truth transformer labels
# transLabelsErrors: numpy array of int (1,customers) - the transformer labels for each customer which may contain errors.
# In the sample data, customer_3 transformer was changed from 1 to 2 and customer_53 transformer was changed from 23 to 22
# voltageInput: numpy array of float (measurements,customers) - the raw voltage AMI measurements for each customer in Volts
# pDataInput: numpy array of float (measurements, customers) - the real power measurements for each customer in Watts
# qDataInput: numpy array of float (measurements, customers) - the reactive power measurements for each customer in VAr
# Note that the indexing of all variables above should match in the customer index, i.e. custIDInput[0], transLabelsInput[0,0], voltageInput[:,0], pDataInput[:,0], and qDataInput[:,0] should all be the same customer
###############################################################################
# Load Sample data
currentDirectory = Path.cwd()
filePath = Path(currentDirectory.parent,'SampleData')
filename = Path(filePath,'VoltageData_AMI.npy')
voltageInput = np.load(filename)
filename = Path(filePath,'RealPowerData_AMI.npy')
pDataInput = np.load(filename)
filename = Path(filePath,'ReactivePowerData_AMI.npy')
qDataInput = np.load(filename)
filename = Path(filePath,'TransformerLabelsTrue_AMI.npy')
transLabelsTrue = np.load(filename)
filename = Path(filePath,'TransformerLabelsErrors_AMI.npy')
transLabelsErrors = np.load(filename)
filename = Path(filePath,'CustomerIDs_AMI.npy')
custIDInput = list(np.load(filename))
###############################################################################
###############################################################################
# Data pre-processing
# Convert the raw voltage measurements into per unit and difference (delta voltage) representation
vPU = M2TUtils.ConvertToPerUnit_Voltage(voltageInput)
vDV = M2TUtils.CalcDeltaVoltage(vPU)
##############################################################################
#
# Error Flagging Section - Correlation Coefficient Analysis
# Calculate CC Matrix
ccMatrix,noVotesIndex,noVotesIDs = M2TUtils.CC_EnsMedian(vDV,windowSize=384,custID=custIDInput)
# The function CC_EnsMedian takes the median CC across windows in the dataset.
# This is mainly done to deal with the issue of missing measurements in the dataset
# If your data does not have missing measurements you could use numpy.corrcoef directly
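# A minimal sketch of that alternative (kept commented out so behaviour is unchanged),
# assuming vDV has no missing measurements and customers are columns as elsewhere in
# this script; numpy.corrcoef treats rows as variables by default, hence rowvar=False:
# ccMatrixDirect = np.corrcoef(vDV, rowvar=False)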
# Do a sweep of possible CC Thresholds and rank the flagged results
notMemberVector = [0.25,0.26,0.27,0.28,0.29,0.30,0.31,0.32,0.33,0.34,0.35,0.36,0.37,0.38,0.39,0.4,0.41,0.42,0.43,0.44,0.45,0.46,0.47,0.48,0.49,0.50,0.51,0.52,0.53,0.54,0.55,0.56,0.57,0.58,0.59,0.60,0.61,0.62,0.63,0.64,0.65,0.66,0.67,0.68,0.69,0.70,0.71,0.72,0.73,0.74,0.75,0.76,0.78,0.79,0.80,0.81,0.82,0.83,0.84,0.85,0.86,0.87,0.88,0.90,0.91]
allFlaggedTrans, allNumFlagged, rankedFlaggedTrans, rankedTransThresholds = M2TFuncs.RankFlaggingBySweepingThreshold(transLabelsErrors,notMemberVector,ccMatrix)
# Plot the number of flagged transformers for all threshold values
M2TUtils.PlotNumFlaggedTrans_ThresholdSweep(notMemberVector,allNumFlagged,transLabelsErrors,savePath=-1)
# The main output from this Error Flagging section is rankedFlaggedTrans which
# contains the list of flagged transformers ranked by correlation coefficient.
# Transformers at the beginning of the list were flagged with lower CC, indicating
# higher confidence that those transformers do indeed have errors.
##############################################################################
#
# Transformer Assignment Section - Linear Regression Steps
#
# Calculate the pairwise linear regression
r2Affinity,rDist,xDist,regRDistIndiv,regXDistIndiv,mseMatrix = M2TUtils.ParamEst_LinearRegression(voltageInput,pDataInput,qDataInput)
additiveFactor = 0.02
minMSE, mseThreshold = M2TUtils.FindMinMSE(mseMatrix,additiveFactor)
#This sets the mse threshold based on adding a small amount to the smallest MSE value in the pairwise MSE matrix
# Alternatively you could set the mse threshold manually
#mseThreshold = 0.3
# Plot CDF for adjusted reactance distance
replacementValue = np.max(np.max(xDist))
xDistAdjusted = M2TFuncs.AdjustDistFromThreshold(mseMatrix,xDist,mseThreshold, replacementValue)
# Select a particular set of ranked results using a correlation coefficient threshold
notMemberThreshold=0.5
flaggingIndex = np.where(np.array(notMemberVector)==notMemberThreshold)[0][0]
flaggedTrans = allFlaggedTrans[flaggingIndex]
predictedTransLabels,allChangedIndices,allChangedOrgTrans,allChangedPredTrans = M2TFuncs.CorrectFlaggedTransErrors(flaggedTrans,transLabelsErrors,custIDInput,ccMatrix,notMemberThreshold, mseMatrix,xDistAdjusted,reactanceThreshold=0.046)
# predictedTransLabels: numpy array of int (1,customers) - the predicted labels
# for each customer. Positive labels will be unchanged from the original
# set of transformer labels. Negative labels will be new transformer groupings
# which should be the correct groups of customers served by a particular
# transformer but will require mapping back to a particular physical transformer.
# In the sample data customer_4 was injected with an incorrect label and should now be grouped with customer_5 and customer_6
# customer_53 was also injected with an incorrect label and should now be grouped with customer_54 and customer_55
print('Meter to Transformer Pairing Algorithm Results')
M2TUtils.PrettyPrintChangedCustomers(predictedTransLabels,transLabelsErrors,custIDInput)
# This function calculates the two transformer-level accuracy metrics that we have been using
# incorrectTrans is a list of incorrect transformers, where incorrect means customers were added to or omitted from the correct grouping
# This defines Transformer Accuracy, i.e. the number of correct transformers out of the total transformers
# incorrectPairedIDs lists the customers from the incorrect transformers, which allows us to define
# Customer Pairing Accuracy, i.e. the number of customers in fully correct groupings (no customers added or omitted from the grouping)
incorrectTrans,incorrectPairedIndices, incorrectPairedIDs= M2TUtils.CalcTransPredErrors(predictedTransLabels,transLabelsTrue,custIDInput,singleCustMarker=-999)
print('')
print('Ground Truth Results')
print('Transformers with incorrect groupings:')
print(incorrectTrans)
# In the sample data, these will be empty because all customers were correctly grouped together by their service transformer.
# Write output to a csv file
df = pd.DataFrame()
df['customer ID'] = custIDInput
df['Original Transformer Labels (with errors)'] = transLabelsErrors[0,:]
df['Predicted Transformer Labels'] = predictedTransLabels[0,:]
df['Actual Transformer Labels'] = transLabelsTrue[0,:]
df.to_csv('outputs_PredictedTransformerLabels.csv')
print('Predicted transformer labels written to outputs_PredictedTransformerLabels.csv')
df = pd.DataFrame()
df['Ranked Flagged Transformers'] = flaggedTrans
df.to_csv('outputs_RankedFlaggedTransformers.csv')
print('Flagged and ranked transformers written to outputs_RankedFlaggedTransformers.csv')
| 48.492537
| 344
| 0.733149
| 1,245
| 9,747
| 5.719679
| 0.395181
| 0.006319
| 0.010111
| 0.015447
| 0.124982
| 0.10771
| 0.074287
| 0.053082
| 0.053082
| 0.053082
| 0
| 0.032297
| 0.13912
| 9,747
| 200
| 345
| 48.735
| 0.816351
| 0.560993
| 0
| 0.037736
| 0
| 0
| 0.184976
| 0.07601
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.113208
| 0
| 0.113208
| 0.132075
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ccc51ab14667f22cd9b5eaba86b2724ac98a38f4
| 10,726
|
py
|
Python
|
mcdc_tnt/pyk_kernels/all/advance.py
|
jpmorgan98/MCDC-TNT-2
|
c437596097caa9af56df95213e7f64db38aac40e
|
[
"BSD-3-Clause"
] | 1
|
2022-02-26T02:12:12.000Z
|
2022-02-26T02:12:12.000Z
|
mcdc_tnt/pyk_kernels/all/advance.py
|
jpmorgan98/MCDC-TNT-2
|
c437596097caa9af56df95213e7f64db38aac40e
|
[
"BSD-3-Clause"
] | null | null | null |
mcdc_tnt/pyk_kernels/all/advance.py
|
jpmorgan98/MCDC-TNT-2
|
c437596097caa9af56df95213e7f64db38aac40e
|
[
"BSD-3-Clause"
] | 1
|
2022-02-09T22:39:42.000Z
|
2022-02-09T22:39:42.000Z
|
import math
import numpy as np
import pykokkos as pk
@pk.workload
class Advance_cycle:
def __init__(self, num_part, p_pos_x, p_pos_y, p_pos_z, p_dir_y, p_dir_z, p_dir_x, p_mesh_cell, p_speed, p_time, dx, mesh_total_xsec, L, p_dist_travled, p_end_trans, rands):
self.p_pos_x: pk.View1D[pk.double] = p_pos_x
self.p_pos_y: pk.View1D[pk.double] = p_pos_y
self.p_pos_z: pk.View1D[pk.double] = p_pos_z
self.p_dir_y: pk.View1D[pk.double] = p_dir_y
self.p_dir_z: pk.View1D[pk.double] = p_dir_z
self.p_dir_x: pk.View1D[pk.double] = p_dir_x
self.p_mesh_cell: pk.View1D[int] = p_mesh_cell
self.p_speed: pk.View1D[pk.double] = p_speed
self.p_time: pk.View1D[pk.double] = p_time
self.dx: pk.double = dx
self.L: pk.double = L
#print(dx)
#print(L)
self.num_part: int = num_part
self.mesh_total_xsec: pk.View1D[pk.double] = mesh_total_xsec
self.p_dist_travled: pk.View1D[pk.double] = p_dist_travled
self.p_end_trans: pk.View1D[int] = p_end_trans
self.rands: pk.View1D[pk.double] = rands
@pk.main
def run(self):
pk.parallel_for(self.num_part, self.advanceCycle_wu)
@pk.workunit
def advanceCycle_wu(self, i: int):
kicker: pk.double = 1e-8
if (self.p_end_trans[i] == 0):
if (self.p_pos_x[i] < 0): #exited lhs
self.p_end_trans[i] = 1
elif (self.p_pos_x[i] >= self.L): #exited rhs
self.p_end_trans[i] = 1
else:
dist: pk.double = -math.log(self.rands[i]) / self.mesh_total_xsec[self.p_mesh_cell[i]]
#pk.printf('%d %f %f %f\n', i, dist, rands[i], mesh_total_xsec[p_mesh_cell[i]])
#p_dist_travled[i] = dist
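# dist above is a free-flight distance sampled from an exponential distribution
# whose rate is the total macroscopic cross section of the particle's current cell
# (inverse-CDF sampling: -ln(U) / Sigma_t).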
x_loc: pk.double = (self.p_dir_x[i] * dist) + self.p_pos_x[i]
LB: pk.double = self.p_mesh_cell[i] * self.dx
RB: pk.double = LB + self.dx
if (x_loc < LB): #move particle into cell at left
self.p_dist_travled[i] = (LB - self.p_pos_x[i])/self.p_dir_x[i] + kicker
self.p_mesh_cell[i] -= 1
elif (x_loc > RB): #move particle into cell at right
self.p_dist_travled[i] = (RB - self.p_pos_x[i])/self.p_dir_x[i] + kicker
self.p_mesh_cell[i] += 1
else: #move particle in cell
self.p_dist_travled[i] = dist
self.p_end_trans[i] = 1
#pk.printf('%d: x pos before step %f\n', i, p_pos_x[i])
self.p_pos_x[i] = self.p_dir_x[i]*self.p_dist_travled[i] + self.p_pos_x[i]
self.p_pos_y[i] = self.p_dir_y[i]*self.p_dist_travled[i] + self.p_pos_y[i]
self.p_pos_z[i] = self.p_dir_z[i]*self.p_dist_travled[i] + self.p_pos_z[i]
#pk.printf('%d: x pos after step: %f should be: %f\n', i, p_pos_x[i], (temp_x))
self.p_time[i] += dist/self.p_speed[i]
@pk.workload
class DistTraveled:
def __init__(self, num_part, max_mesh_index, mesh_dist_traveled_pk, mesh_dist_traveled_squared_pk, p_dist_travled, mesh, p_end_trans, clever_out):
self.num_part: int = num_part
self.max_mesh_index: int = max_mesh_index
self.mesh_dist_traveled_pk: pk.View1D[pk.double] = mesh_dist_traveled_pk
self.mesh_dist_traveled_squared_pk: pk.View1D[pk.double] = mesh_dist_traveled_squared_pk
self.p_dist_travled: pk.View1D[pk.double] = p_dist_travled
self.mesh: pk.View1D[int] = mesh
self.p_end_trans: pk.View1D[int] = p_end_trans
self.clever_out: pk.View1D[int] = clever_out
@pk.main
def distTraveled_main(self):
end_flag: int = 1
cur_cell: int = 0
summer: int = 0
#pk.printf('1 %d\n', cur_cell)
#pk.printf('3 %f\n', mesh_dist_traveled_pk[cur_cell])
for i in range(self.num_part):
cur_cell = int(self.mesh[i])
if (0 < cur_cell) and (cur_cell < self.max_mesh_index):
self.mesh_dist_traveled_pk[cur_cell] += self.p_dist_travled[i]
self.mesh_dist_traveled_squared_pk[cur_cell] += self.p_dist_travled[i]**2
if self.p_end_trans[i] == 0:
end_flag = 0
summer += self.p_end_trans[i]
self.clever_out[0] = end_flag
self.clever_out[1] = summer
#@pk.workunit
#def CellSum
# for i in range(num_parts)
#@profile
def Advance(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, dx, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time,
num_part, mesh_total_xsec, mesh_dist_traveled, mesh_dist_traveled_squared, L):
max_mesh_index = int(len(mesh_total_xsec)-1)
p_end_trans: pk.View1D[int] = pk.View([num_part], int) #flag
p_end_trans.fill(0)
p_dist_travled: pk.View1D[pk.double] = pk.View([num_part], pk.double)
clever_out: pk.View1D[int] = pk.View([4], int)
end_flag = 0
cycle_count = 0
while end_flag == 0:
#allocate randoms
summer = 0
rands_np = np.random.random([num_part])
rands = pk.from_numpy(rands_np)
#vector of indicies for particle transport
p = pk.RangePolicy(pk.get_default_space(), 0, num_part)
p_dist_travled.fill(0)
pre_p_mesh = p_mesh_cell
L = float(L)
#space = pk.ExecutionSpace.OpenMP
pk.execute(pk.ExecutionSpace.OpenMP, Advance_cycle(num_part, p_pos_x, p_pos_y, p_pos_z, p_dir_y, p_dir_z, p_dir_x, p_mesh_cell, p_speed, p_time, dx, mesh_total_xsec, L, p_dist_travled, p_end_trans, rands))#pk for number still in transport
pk.execute(pk.ExecutionSpace.OpenMP,
DistTraveled(num_part, max_mesh_index, mesh_dist_traveled, mesh_dist_traveled_squared, p_dist_travled, pre_p_mesh, p_end_trans, clever_out))
end_flag = clever_out[0]
summer = clever_out[1]
#print(cycle_count)
if (cycle_count > int(1e3)):
print("************ERROR**********")
print(" Max itter hit")
print(p_end_trans)
print()
print()
return()
cycle_count += 1
print("Advance Complete:......{1}% ".format(cycle_count, int(100*summer/num_part)), end = "\r")
print()
@pk.workload
class StillIn:
def __init__(self, p_pos_x, surface_distances, p_alive, num_part, clever_out):
self.p_pos_x: pk.View1D[pk.double] = p_pos_x
self.clever_out: pk.View1D[int] = clever_out
self.surface_distances: pk.View1D[pk.double] = surface_distances
self.p_alive: pk.View1D[int] = p_alive
self.num_part: int = num_part
@pk.main
def run(self):
tally_left: int = 0
tally_right: int = 0
for i in range(self.num_part):
#exit at left
if self.p_pos_x[i] <= 0:
tally_left += 1
self.p_alive[i] = 0
elif self.p_pos_x[i] >= 1:
tally_right += 1
self.p_alive[i] = 0
self.clever_out[0] = tally_left
self.clever_out[1] = tally_right
def speedTestAdvance():
# Position
num_part = int(1e8)
phase_parts = num_part
p_pos_x_np = np.zeros(phase_parts, dtype=float)
p_pos_y_np = np.zeros(phase_parts, dtype=float)
p_pos_z_np = np.zeros(phase_parts, dtype=float)
p_pos_x = pk.from_numpy(p_pos_x_np)
p_pos_y = pk.from_numpy(p_pos_y_np)
p_pos_z = pk.from_numpy(p_pos_z_np)
# Direction
p_dir_x_np = np.zeros(phase_parts, dtype=float)
p_dir_y_np = np.zeros(phase_parts, dtype=float)
p_dir_z_np = np.zeros(phase_parts, dtype=float)
p_dir_x = pk.from_numpy(p_dir_x_np)
p_dir_y = pk.from_numpy(p_dir_y_np)
p_dir_z = pk.from_numpy(p_dir_z_np)
# Speed
p_speed_np = np.zeros(phase_parts, dtype=float)
p_speed = pk.from_numpy(p_speed_np)
# Time
p_time_np = np.zeros(phase_parts, dtype=float)
p_time = pk.from_numpy(p_time_np)
# Region
p_mesh_cell_np = np.zeros(phase_parts, dtype=np.int32)
p_mesh_cell = pk.from_numpy(p_mesh_cell_np)
# Flags
p_alive_np = np.full(phase_parts, False, dtype=np.int32)
p_alive = pk.from_numpy(p_alive_np)
# NOTE: dx, mesh_total_xsec, mesh_dist_traveled, mesh_dist_traveled_squared and
# surface_distances are assumed to be set up before this call; they are not defined
# in this speed-test sketch. Advance is the function defined earlier in this module.
Advance(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, dx, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, num_part, mesh_total_xsec, mesh_dist_traveled, mesh_dist_traveled_squared, surface_distances[len(surface_distances)-1])
"""
def test_Advance():
L = 1
dx = .25
N_m = 4
num_part = 6
p_pos_x = np.array([-.01, 0, .1544, .2257, .75, 1.1])
p_pos_y = 2.1*np.ones(num_part)
p_pos_z = 3.4*np.ones(num_part)
p_mesh_cell = np.array([-1, 0, 0, 1, 3, 4], dtype=int)
p_dir_x = np.ones(num_part)
p_dir_x[0] = -1
p_dir_y = np.zeros(num_part)
p_dir_z = np.zeros(num_part)
p_speed = np.ones(num_part)
p_time = np.zeros(num_part)
p_alive = np.ones(num_part, bool)
p_alive[5] = False
particle_speed = 1
mesh_total_xsec = np.array([0.1,1,.1,100])
mesh_dist_traveled_squared = np.zeros(N_m)
mesh_dist_traveled = np.zeros(N_m)
[p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, mesh_dist_traveled, mesh_dist_traveled_squared] = Advance(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, dx, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, num_part, mesh_total_xsec, mesh_dist_traveled, mesh_dist_traveled_squared, L)
assert (np.sum(mesh_dist_traveled) > 0)
assert (np.sum(mesh_dist_traveled_squared) > 0)
assert (p_pos_x[0] == -.01)
assert (p_pos_x[5] == 1.1)
assert (p_pos_x[1:4].all() > .75)
"""
def test_StillIn():
num_part = 7
surface_distances = [0,.25,.75,1]
p_pos_x = np.array([-.01, 0, .1544, .2257, .75, 1.1, 1])
p_alive = np.ones(num_part, bool)
[p_alive, tally_left, tally_right] = StillIn(p_pos_x, surface_distances, p_alive, num_part)
assert(p_alive[0] == False)
assert(p_alive[5] == False)
assert(tally_left == 2)
assert(tally_right == 2)
assert(p_alive[2:4].all() == True)
if __name__ == '__main__':
speedTestAdvance()
| 35.516556
| 312
| 0.591274
| 1,737
| 10,726
| 3.290155
| 0.096718
| 0.041295
| 0.027122
| 0.047594
| 0.545932
| 0.44392
| 0.36028
| 0.314611
| 0.245144
| 0.173928
| 0
| 0.021447
| 0.291441
| 10,726
| 301
| 313
| 35.634552
| 0.730526
| 0.067406
| 0
| 0.197605
| 0
| 0
| 0.009725
| 0.003089
| 0
| 0
| 0
| 0
| 0.02994
| 1
| 0.05988
| false
| 0
| 0.017964
| 0
| 0.095808
| 0.041916
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ccc6026c8a78ded0d003eb1ba605983fe1d65590
| 1,123
|
py
|
Python
|
facerec/face_util.py
|
seanbenhur/sih2020
|
f8b5988425185ed3c85872b98b622932895f932b
|
[
"MIT"
] | null | null | null |
facerec/face_util.py
|
seanbenhur/sih2020
|
f8b5988425185ed3c85872b98b622932895f932b
|
[
"MIT"
] | null | null | null |
facerec/face_util.py
|
seanbenhur/sih2020
|
f8b5988425185ed3c85872b98b622932895f932b
|
[
"MIT"
] | null | null | null |
import face_recognition as fr
def compare_faces(file1, file2):
"""
Compare two images and return True / False for matching.
"""
# Load the jpg files into numpy arrays
image1 = fr.load_image_file(file1)
image2 = fr.load_image_file(file2)
# Get the face encodings for each face in each image file
# Assume there is only 1 face in each image, so get 1st face of an image.
image1_encoding = fr.face_encodings(image1)[0]
image2_encoding = fr.face_encodings(image2)[0]
# results is a list of True/False values telling whether the face in image2 matched the face found in image1
results = fr.compare_faces([image1_encoding], image2_encoding)
return results[0]
# Each face is tuple of (Name,sample image)
known_faces = [('Stark','sample_images/stark.jpg'),
('Hannah','sample_images/hannah.jpg'),
]
def face_rec(file):
"""
Return name for a known face, otherwise return 'Unknown'.
"""
for name, known_file in known_faces:
if compare_faces(known_file,file):
return name
return 'Unknown'
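# A minimal usage sketch (kept commented out), assuming the sample images above exist
# on disk and that 'query.jpg' is a hypothetical path to the face you want to identify:
#
# if __name__ == '__main__':
#     print(face_rec('query.jpg'))                              # 'Stark', 'Hannah', or 'Unknown'
#     print(compare_faces('sample_images/stark.jpg', 'query.jpg'))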
| 33.029412
| 107
| 0.664292
| 159
| 1,123
| 4.553459
| 0.383648
| 0.049724
| 0.030387
| 0.041436
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02019
| 0.250223
| 1,123
| 33
| 108
| 34.030303
| 0.839667
| 0.37667
| 0
| 0
| 0
| 0
| 0.098336
| 0.071104
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.0625
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ccc6e968fc2455af2569e67afae509ff5c1e5fc0
| 3,354
|
py
|
Python
|
src/correlation/dataset_correlation.py
|
sakdag/crime-data-analysis
|
9c95238c6aaf1394f68be59e26e8c6d75f669d7e
|
[
"MIT"
] | null | null | null |
src/correlation/dataset_correlation.py
|
sakdag/crime-data-analysis
|
9c95238c6aaf1394f68be59e26e8c6d75f669d7e
|
[
"MIT"
] | null | null | null |
src/correlation/dataset_correlation.py
|
sakdag/crime-data-analysis
|
9c95238c6aaf1394f68be59e26e8c6d75f669d7e
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import pyproj as pyproj
import shapely as shapely
from shapely.geometry import Point
from haversine import haversine
import src.config.column_names as col_names
def correlate_and_save(crime_df: pd.DataFrame,
census_df: pd.DataFrame,
file_name: str,
correlation_mode: str):
# Using geolocation information in census dataset, add zipcode column to the crime dataset
if correlation_mode == 'euclidean':
add_zip_code_column_using_euclidean(crime_df, census_df)
else:
add_zip_code_column_using_haversine(crime_df, census_df)
# Save
crime_df.to_csv(file_name, index=False)
# Finds nearest zipcode geolocation to the crime location, then adds this zipcode to the crime dataset
def add_zip_code_column_using_haversine(crime_df: pd.DataFrame, census_df: pd.DataFrame):
crime_df[col_names.ZIP_CODE] = np.nan
for index, row in crime_df.iterrows():
nearest_zip_code = np.nan
nearest_zip_code_distance = -1
for census_index, census_row in census_df.iterrows():
distance = haversine((row[col_names.LATITUDE], row[col_names.LONGITUDE]),
(census_row[col_names.LATITUDE], census_row[col_names.LONGITUDE]))
if nearest_zip_code_distance == -1 or distance < nearest_zip_code_distance:
nearest_zip_code = census_row[col_names.ZIP_CODE]
nearest_zip_code_distance = distance
crime_df.loc[index, col_names.ZIP_CODE] = nearest_zip_code
def add_zip_code_column_using_euclidean(crime_df: pd.DataFrame, census_df: pd.DataFrame):
crime_df[col_names.ZIP_CODE] = np.nan
wgs84_proj = pyproj.CRS('EPSG:4326')
los_angeles_proj = pyproj.CRS('EPSG:6423')
project_los_angeles = pyproj.Transformer.from_crs(wgs84_proj, los_angeles_proj, always_xy=True).transform
census_list = list()
for census_index, census_row in census_df.iterrows():
point2_transformed = shapely.ops.transform(project_los_angeles,
Point(census_row[col_names.LATITUDE], census_row[col_names.LONGITUDE]))
census_list.append((census_row, point2_transformed))
for index, row in crime_df.iterrows():
nearest_zip_code = np.nan
nearest_zip_code_distance = -1
point1_transformed = shapely.ops.transform(project_los_angeles, Point(row[col_names.LATITUDE], row[col_names.LONGITUDE]))
for census_data in census_list:
distance = point1_transformed.distance(census_data[1])
if nearest_zip_code_distance == -1 or distance < nearest_zip_code_distance:
nearest_zip_code = census_data[0][col_names.ZIP_CODE]
nearest_zip_code_distance = distance
crime_df.loc[index, col_names.ZIP_CODE] = nearest_zip_code
def merge_crime_and_census(crime_df: pd.DataFrame, census_df: pd.DataFrame, file_name: str):
merged_df = crime_df.merge(census_df, on=col_names.ZIP_CODE, how='inner')
redundant_columns = ['Latitude_x', 'Latitude_y', 'Longitude_x', 'Longitude_y',
col_names.TOTAL_HOUSEHOLDS, col_names.AVERAGE_HOUSEHOLD_SIZE]
merged_df.drop(columns=redundant_columns, inplace=True)
# Save
merged_df.to_csv(file_name, index=False)
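# A minimal usage sketch (kept commented out; file names are hypothetical and the
# input CSVs are assumed to already use the column names from src.config.column_names):
#
# if __name__ == '__main__':
#     crime_df = pd.read_csv('crime_data.csv')
#     census_df = pd.read_csv('census_data.csv')
#     correlate_and_save(crime_df, census_df, 'crime_with_zipcodes.csv', 'haversine')
#     merge_crime_and_census(crime_df, census_df, 'crime_census_merged.csv')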
| 46.583333
| 129
| 0.707812
| 461
| 3,354
| 4.793926
| 0.219089
| 0.079186
| 0.088688
| 0.079638
| 0.570588
| 0.567873
| 0.566516
| 0.543891
| 0.400905
| 0.363801
| 0
| 0.00833
| 0.212582
| 3,354
| 71
| 130
| 47.239437
| 0.828474
| 0.059332
| 0
| 0.296296
| 0
| 0
| 0.023492
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.12963
| 0
| 0.203704
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cccd71a65c5bf63a13dfaa2062a6d5203e62b9ef
| 2,386
|
py
|
Python
|
blog/resources/tag.py
|
espstan/fBlog
|
7c63d117a3bbae3da80b3e8d7f731ae89036eb0d
|
[
"MIT"
] | 2
|
2019-06-17T13:55:36.000Z
|
2019-06-19T22:40:06.000Z
|
blog/resources/tag.py
|
espstan/fBlog
|
7c63d117a3bbae3da80b3e8d7f731ae89036eb0d
|
[
"MIT"
] | 35
|
2019-06-17T07:00:49.000Z
|
2020-02-17T09:41:53.000Z
|
blog/resources/tag.py
|
espstan/fBlog
|
7c63d117a3bbae3da80b3e8d7f731ae89036eb0d
|
[
"MIT"
] | null | null | null |
from flask_restful import Resource
from flask_restful import reqparse
from sqlalchemy.exc import SQLAlchemyError
from config import Configuration
from models.tag import TagModel as TM
class Tag(Resource):
parser = reqparse.RequestParser()
parser.add_argument('name',
type=str,
required=True,
help='This field cannot be blank.')
def post(self):
data = Tag.parser.parse_args()
name = data['name']
if len(name) > Configuration.MAX_TAG_NAME_SIZE:
return {'message': 'A name\'s length is more than {}'.format(Configuration.MAX_TAG_NAME_SIZE)}
if TM.query.filter(TM.name == name).first():
return {'message': 'Tag \'{}\' already exists'.format(name)}
tag = TM(name=name)
try:
tag.save_to_db()
except SQLAlchemyError as e:
err = str(e.__class__.__name__)
return {'message': '{}'.format(err)}, 500
return tag.get_json(), 201
def put(self):
data = Tag.parser.parse_args()
name = data['name']
if len(name) > Configuration.MAX_TAG_NAME_SIZE:
return {'message': 'A tag\'s length is more than {}'.format(Configuration.MAX_TAG_NAME_SIZE)}
tag = TM.find_by_name(name)
if not tag:
tag = TM(name=name)
else:
if not TM.query.filter(TM.name == name).first():
tag.name = name
else:
return {'message': 'Tag name \'{}\' already exists'.format(data['name'])}
try:
tag.save_to_db()
except SQLAlchemyError as e:
err = str(e.__class__.__name__)
return {'message': '{}'.format(err)}, 500
return tag.get_json(), 201
def delete(self):
data = Tag.parser.parse_args()
name = data['name']
tag = TM.find_by_name(name)
if tag:
try:
tag.delete_from_db()
except SQLAlchemyError as e:
err = str(e.__class__.__name__)
return {'message': '{}'.format(err)}, 500
return {'message': 'Tag was deleted'}
return {'message': 'Tag with name: \'{}\' was not found'.format(name)}
class TagList(Resource):
def get(self):
return {'tags': [tag.get_json() for tag in TM.query.all()]}
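# A minimal wiring sketch (not part of this module; the URL paths below are assumptions):
#
# from flask import Flask
# from flask_restful import Api
# from blog.resources.tag import Tag, TagList
#
# app = Flask(__name__)
# api = Api(app)
# api.add_resource(Tag, '/tag')
# api.add_resource(TagList, '/tags')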
| 31.394737
| 106
| 0.55658
| 287
| 2,386
| 4.43554
| 0.285714
| 0.091909
| 0.059701
| 0.07227
| 0.545954
| 0.545954
| 0.545954
| 0.468971
| 0.468971
| 0.43912
| 0
| 0.009197
| 0.316429
| 2,386
| 75
| 107
| 31.813333
| 0.771306
| 0
| 0
| 0.508475
| 0
| 0
| 0.093462
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067797
| false
| 0
| 0.084746
| 0.016949
| 0.40678
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ccd0e580fc925afb48c3d9bac3c4c4c1b73ecf0c
| 5,493
|
py
|
Python
|
Agent.py
|
594zyc/CMCC_DialogSystem
|
9d85f4c319677bf5e682a562041b908fc3135a17
|
[
"Apache-2.0"
] | 1
|
2019-11-20T16:36:30.000Z
|
2019-11-20T16:36:30.000Z
|
Agent.py
|
594zyc/CMCC_DialogSystem
|
9d85f4c319677bf5e682a562041b908fc3135a17
|
[
"Apache-2.0"
] | null | null | null |
Agent.py
|
594zyc/CMCC_DialogSystem
|
9d85f4c319677bf5e682a562041b908fc3135a17
|
[
"Apache-2.0"
] | null | null | null |
"""
Combines all of the Managers to provide a text-in, text-out interactive agent interface.
"""
import os
import sys
import time
import argparse
import logging
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_DIR, '../..'))
os.environ["CUDA_VISIBLE_DEVICES"]="1"
from DM.DST.StateTracking import DialogStateTracker
from DM.policy.RuleMapping import RulePolicy
from data.DataManager import DataManager
from NLU.NLUManager import NLUManager
from NLG.NLGManager import rule_based_NLG
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--print', type=bool, default=True, help='print details')
FLAGS= parser.parse_args()
UserPersonal = {
"已购业务": ["180元档幸福流量年包", "18元4G飞享套餐升级版"], # 这里应该是完整的业务的信息dict
"套餐使用情况": "剩余流量 11.10 GB,剩余通话 0 分钟,话费余额 110.20 元,本月已产生话费 247.29 元",
"号码": "18811369685",
"归属地" : "北京",
"品牌": "动感地带",
"是否转品牌过渡期": "否",
"话费查询": "话费余额 110.20 元",
"流量查询": "剩余流量 11.10 GB",
"订购时间": "订购时间 2017-04-04, 生效时间 2017-05-01",
"是否停机": "否",
"话费充值": "请登录网上营业厅、微厅或 APP 充值",
"流量充值": "请登录网上营业厅、微厅或 APP 充值",
"账单查询": "请登录网上营业厅、微厅或 APP 查询"
}
NLU_save_path_dict = {
'domain': os.path.join(BASE_DIR, 'NLU/DomDect/model/ckpt'),
'useract': os.path.join(BASE_DIR, 'NLU/UserAct/model/ckpt'),
'slotfilling': os.path.join(BASE_DIR, 'NLU/SlotFilling/model/ckpt'),
'entity': os.path.join(BASE_DIR, 'NLU/ER/entity_list.txt'),
'sentiment': os.path.join(BASE_DIR, 'NLU/SentiDect')
}
class DialogAgent:
def __init__(self):
self.history_savedir = None
self.detail_savedir = None
self.logger = None
self.user = self.create_user()
self.rule_policy = RulePolicy()
self.dst = DialogStateTracker(UserPersonal, FLAGS.print, self.logger)
self.data_manager = DataManager(os.path.join(BASE_DIR, 'data/tmp'))
self.nlu_manager = NLUManager(NLU_save_path_dict)
# self.nlg_template = NLG_template
self.turn_num = 1
self.dialog_history = []
def create_user(self):
user_name = input("请输入您的用户名:")
user_path = os.path.join(BASE_DIR, 'user', user_name)
log_path = os.path.join(user_path, 'log')
if not os.path.exists(user_path):
os.mkdir(user_path)
os.mkdir(log_path)
self.history_savedir = user_path + '/dialogs.txt'
log_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
self.detail_savedir = log_path +'/' + log_name + '.log'
self.logger = self.create_logger(self.detail_savedir)
return user_name
def create_logger(self, logdir):
fmt = '%(message)s'
# datefmt = "%y-%m-%d %H:%M:%S"
logging.basicConfig(level=logging.INFO,
format=fmt)
# datefmt=datefmt)
logger = logging.getLogger('mylogger')
logger.setLevel(logging.INFO)
fh = logging.FileHandler(logdir)
fh.setLevel(logging.INFO)
logger.addHandler(fh)
return logger
def run(self):
if FLAGS.print:
self.logger.info('对话记录时间:'+time.strftime("%Y-%m-%d %H:%M:%S",
time.localtime()))
try:
while True:
user_utter = input("用户输入:")
if FLAGS.print:
with open(self.detail_savedir, 'a') as f:
f.write('-------------- Turn ' + str(self.turn_num) + '--------------\n')
f.write('用户:' + user_utter + '\n')
self.dialog_history.append('用户:' + user_utter)
if user_utter in ['restart' , '重来' , '重新开始']:
self.dst = DialogStateTracker(UserPersonal, FLAGS.print, self.logger)
self.rule_policy = RulePolicy()
if FLAGS.print:
self.logger.info('对话状态已重置')
else:
print('对话状态已重置')
continue
if '再见' in user_utter or '结束' in user_utter or '谢谢' in user_utter:
self.close()
break
nlu_results = self.nlu_manager.get_NLU_results(user_utter, self.data_manager)
self.dst.update(nlu_results, self.rule_policy, self.data_manager)
reply = rule_based_NLG(self.dst)
if FLAGS.print:
self.logger.info('系统:' + reply + '\n')
else:
print('系统:', reply, '\n')
self.dialog_history.append('系统:' + reply)
self.turn_num += 1
except KeyboardInterrupt:
self.close()
def close(self):
self.nlu_manager.close()
reply = '感谢您的使用,再见!'
if FLAGS.print:
self.logger.info('系统:' + reply + '\n')
else:
print('系统:', reply, '\n')
with open(os.path.join(BASE_DIR, self.history_savedir), 'a') as f:
f.write('对话记录时间:')
f.write(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())+'\n\n')
for dialog in self.dialog_history:
dialog = '\n'.join(dialog.split())
f.write(dialog+'\n\n')
f.write('系统:感谢您的使用,再见!\n')
f.write('————————————————————————————————\n')
if __name__ == '__main__':
agent = DialogAgent()
agent.run()
| 34.54717
| 97
| 0.54178
| 647
| 5,493
| 4.502318
| 0.315301
| 0.026777
| 0.034329
| 0.043254
| 0.214212
| 0.174734
| 0.108823
| 0.108823
| 0.108823
| 0.066941
| 0
| 0.015844
| 0.310577
| 5,493
| 158
| 98
| 34.765823
| 0.744917
| 0.026397
| 0
| 0.145161
| 0
| 0.008065
| 0.143797
| 0.023684
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040323
| false
| 0
| 0.080645
| 0
| 0.145161
| 0.08871
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ae9f5ca026e9179a91eee39f486434f7618b47ec
| 1,698
|
py
|
Python
|
Object Detection and Tracking HSV color space.py
|
shivtejshete/Advanced_Image_Processing
|
b5a7ef94a44ab0b3bd9fa4a70d843099af70079e
|
[
"MIT"
] | null | null | null |
Object Detection and Tracking HSV color space.py
|
shivtejshete/Advanced_Image_Processing
|
b5a7ef94a44ab0b3bd9fa4a70d843099af70079e
|
[
"MIT"
] | null | null | null |
Object Detection and Tracking HSV color space.py
|
shivtejshete/Advanced_Image_Processing
|
b5a7ef94a44ab0b3bd9fa4a70d843099af70079e
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
#sample function - dummy callback function
def nothing(x):
pass
#capture live video
cap = cv2.VideoCapture(0)
#window for trackbars
cv2.namedWindow('Track')
#defining trackbars to control HSV values of given video stream
cv2.createTrackbar('L_HUE', 'Track', 0, 255, nothing)
cv2.createTrackbar('L_Sat', 'Track', 0, 255, nothing)
cv2.createTrackbar('L_Val', 'Track', 0, 255, nothing)
cv2.createTrackbar('H_HUE', 'Track', 255, 255, nothing)
cv2.createTrackbar('H_Sat', 'Track', 255, 255, nothing)
cv2.createTrackbar('H_Val', 'Track', 255, 255, nothing)
while cap.isOpened():
#read the video feed
_, frame = cap.read()
cv2.imshow('Actual_Feed', frame)
#get current trackbar positions for every frame
l_hue = cv2.getTrackbarPos('L_HUE', 'Track')
l_sat = cv2.getTrackbarPos('L_Sat', 'Track')
l_val = cv2.getTrackbarPos('L_Val', 'Track')
h_hue = cv2.getTrackbarPos('H_HUE', 'Track')
h_sat = cv2.getTrackbarPos('H_Sat', 'Track')
h_val = cv2.getTrackbarPos('H_Val', 'Track')
#convert the captured frame into HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) #Hue(0), Saturation(1) and Value(2)
# print(hsv.shape)
#trim video feed HSV to a range
lower_bound = np.array([l_hue, l_sat, l_val])
upper_bound = np.array([h_hue, h_sat, h_val])
mask = cv2.inRange(hsv, lower_bound,upper_bound )
frame = cv2.bitwise_and(frame, frame,mask=mask)
cv2.imshow('frame', frame)
cv2.imshow('mask', mask)
# cv2.imshow('converted', hsv)
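# A hedged sketch of the tracking step (kept commented out so the original behaviour
# is unchanged): find the largest contour in the mask and mark its centroid on the frame.
# cnts = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
# if cnts:
#     c = max(cnts, key=cv2.contourArea)
#     M = cv2.moments(c)
#     if M['m00'] > 0:
#         cx, cy = int(M['m10']/M['m00']), int(M['m01']/M['m00'])
#         cv2.circle(frame, (cx, cy), 5, (0, 0, 255), -1)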
key = cv2.waitKey(1)
if key==27:
break
cap.release()
cv2.destroyAllWindows()
| 29.789474
| 86
| 0.6596
| 241
| 1,698
| 4.514523
| 0.352697
| 0.09375
| 0.059743
| 0.124081
| 0.159926
| 0.159007
| 0.128676
| 0
| 0
| 0
| 0
| 0.046426
| 0.200825
| 1,698
| 57
| 87
| 29.789474
| 0.755343
| 0.206714
| 0
| 0
| 0
| 0
| 0.113193
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030303
| false
| 0.030303
| 0.060606
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ae9fdc83392f9acd375ec6b842e275792831330a
| 15,934
|
py
|
Python
|
tools/run_tests/xds_k8s_test_driver/tests/url_map/fault_injection_test.py
|
echo80313/grpc
|
93cdc8b77e7b3fe4a3afec1c9c7e29b3f02ec3cf
|
[
"Apache-2.0"
] | null | null | null |
tools/run_tests/xds_k8s_test_driver/tests/url_map/fault_injection_test.py
|
echo80313/grpc
|
93cdc8b77e7b3fe4a3afec1c9c7e29b3f02ec3cf
|
[
"Apache-2.0"
] | 4
|
2022-02-27T18:59:37.000Z
|
2022-02-27T18:59:53.000Z
|
tools/run_tests/xds_k8s_test_driver/tests/url_map/fault_injection_test.py
|
echo80313/grpc
|
93cdc8b77e7b3fe4a3afec1c9c7e29b3f02ec3cf
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from typing import Tuple
from absl import flags
from absl.testing import absltest
import grpc
from framework import xds_url_map_testcase
from framework.test_app import client_app
# Type aliases
HostRule = xds_url_map_testcase.HostRule
PathMatcher = xds_url_map_testcase.PathMatcher
GcpResourceManager = xds_url_map_testcase.GcpResourceManager
DumpedXdsConfig = xds_url_map_testcase.DumpedXdsConfig
RpcTypeUnaryCall = xds_url_map_testcase.RpcTypeUnaryCall
RpcTypeEmptyCall = xds_url_map_testcase.RpcTypeEmptyCall
XdsTestClient = client_app.XdsTestClient
ExpectedResult = xds_url_map_testcase.ExpectedResult
logger = logging.getLogger(__name__)
flags.adopt_module_key_flags(xds_url_map_testcase)
# The first batch of RPCs doesn't count towards the result of the test case. It is
# only meant to prove that communication between the driver and the client works.
_NUM_RPCS = 10
_LENGTH_OF_RPC_SENDING_SEC = 16
# We are using sleep to synchronize the test driver and the client... Even though
# the client is sending at the configured QPS rate, we can't assert that exactly
# QPS * SLEEP_DURATION RPCs have finished. The final completed RPC count might be
# slightly higher or lower.
_NON_RANDOM_ERROR_TOLERANCE = 0.01
# For test cases that involve the random generator, we want to be looser about the
# final result. Otherwise, we would need a longer test duration (sleep duration) and
# a more accurate communication mechanism. The accuracy of the random number
# generator is not what this test is intended to check.
_ERROR_TOLERANCE = 0.2
_DELAY_CASE_APPLICATION_TIMEOUT_SEC = 1
_BACKLOG_WAIT_TIME_SEC = 20
def _build_fault_injection_route_rule(abort_percentage: int = 0,
delay_percentage: int = 0):
return {
'priority': 0,
'matchRules': [{
'fullPathMatch': '/grpc.testing.TestService/UnaryCall'
}],
'service': GcpResourceManager().default_backend_service(),
'routeAction': {
'faultInjectionPolicy': {
'abort': {
'httpStatus': 401,
'percentage': abort_percentage,
},
'delay': {
'fixedDelay': {
'seconds': '20'
},
'percentage': delay_percentage,
}
}
},
}
def _wait_until_backlog_cleared(test_client: XdsTestClient,
timeout: int = _BACKLOG_WAIT_TIME_SEC):
""" Wait until the completed RPC is close to started RPC.
For delay injected test cases, there might be a backlog of RPCs due to slow
initialization of the client. E.g., if initialization took 20s and qps is
25, then there will be a backlog of 500 RPCs. In normal test cases, this is
fine, because RPCs will fail immediately. But for delay injected test cases,
the RPC might linger much longer and affect the stability of test results.
"""
logger.info('Waiting for RPC backlog to clear for %d seconds', timeout)
deadline = time.time() + timeout
while time.time() < deadline:
stats = test_client.get_load_balancer_accumulated_stats()
ok = True
for rpc_type in [RpcTypeUnaryCall, RpcTypeEmptyCall]:
started = stats.num_rpcs_started_by_method.get(rpc_type, 0)
completed = stats.num_rpcs_succeeded_by_method.get(
rpc_type, 0) + stats.num_rpcs_failed_by_method.get(rpc_type, 0)
# We consider the backlog healthy if the difference between started
# and completed RPCs is small relative to the configured QPS.
if abs(started - completed) > xds_url_map_testcase.QPS.value * 1.1:
logger.info(
'RPC backlog exist: rpc_type=%s started=%s completed=%s',
rpc_type, started, completed)
time.sleep(_DELAY_CASE_APPLICATION_TIMEOUT_SEC)
ok = False
else:
logger.info(
'RPC backlog clear: rpc_type=%s started=%s completed=%s',
rpc_type, started, completed)
if ok:
# Both backlog of both types of RPCs is clear, success, return.
return
raise RuntimeError('failed to clear RPC backlog in %s seconds' % timeout)
class TestZeroPercentFaultInjection(xds_url_map_testcase.XdsUrlMapTestCase):
@staticmethod
def url_map_change(
host_rule: HostRule,
path_matcher: PathMatcher) -> Tuple[HostRule, PathMatcher]:
path_matcher["routeRules"] = [
_build_fault_injection_route_rule(abort_percentage=0,
delay_percentage=0)
]
return host_rule, path_matcher
def xds_config_validate(self, xds_config: DumpedXdsConfig):
self.assertNumEndpoints(xds_config, 1)
filter_config = xds_config.rds['virtualHosts'][0]['routes'][0][
'typedPerFilterConfig']['envoy.filters.http.fault']
self.assertEqual('20s', filter_config['delay']['fixedDelay'])
self.assertEqual(
0, filter_config['delay']['percentage'].get('numerator', 0))
self.assertEqual('MILLION',
filter_config['delay']['percentage']['denominator'])
self.assertEqual(401, filter_config['abort']['httpStatus'])
self.assertEqual(
0, filter_config['abort']['percentage'].get('numerator', 0))
self.assertEqual('MILLION',
filter_config['abort']['percentage']['denominator'])
def rpc_distribution_validate(self, test_client: XdsTestClient):
self.configure_and_send(test_client,
rpc_types=(RpcTypeUnaryCall,),
num_rpcs=_NUM_RPCS)
self.assertRpcStatusCode(test_client,
expected=(ExpectedResult(
rpc_type=RpcTypeUnaryCall,
status_code=grpc.StatusCode.OK,
ratio=1),),
length=_LENGTH_OF_RPC_SENDING_SEC,
tolerance=_NON_RANDOM_ERROR_TOLERANCE)
class TestNonMatchingFaultInjection(xds_url_map_testcase.XdsUrlMapTestCase):
"""EMPTY_CALL is not fault injected, so it should succeed."""
@staticmethod
def client_init_config(rpc: str, metadata: str):
# The Python interop client will get stuck if the traffic is slow (in this case,
# a 20s delay is injected). The purpose of this test is to examine that the
# un-injected traffic is not impacted, so it's fine to send only un-injected
# traffic.
return 'EmptyCall', metadata
@staticmethod
def url_map_change(
host_rule: HostRule,
path_matcher: PathMatcher) -> Tuple[HostRule, PathMatcher]:
path_matcher["routeRules"] = [
_build_fault_injection_route_rule(abort_percentage=100,
delay_percentage=100)
]
return host_rule, path_matcher
def xds_config_validate(self, xds_config: DumpedXdsConfig):
self.assertNumEndpoints(xds_config, 1)
# The first route rule for UNARY_CALL is fault injected
self.assertEqual(
"/grpc.testing.TestService/UnaryCall",
xds_config.rds['virtualHosts'][0]['routes'][0]['match']['path'])
filter_config = xds_config.rds['virtualHosts'][0]['routes'][0][
'typedPerFilterConfig']['envoy.filters.http.fault']
self.assertEqual('20s', filter_config['delay']['fixedDelay'])
self.assertEqual(1000000,
filter_config['delay']['percentage']['numerator'])
self.assertEqual('MILLION',
filter_config['delay']['percentage']['denominator'])
self.assertEqual(401, filter_config['abort']['httpStatus'])
self.assertEqual(1000000,
filter_config['abort']['percentage']['numerator'])
self.assertEqual('MILLION',
filter_config['abort']['percentage']['denominator'])
# The second route rule for all other RPCs is untouched
self.assertNotIn(
'envoy.filters.http.fault',
xds_config.rds['virtualHosts'][0]['routes'][1].get(
'typedPerFilterConfig', {}))
def rpc_distribution_validate(self, test_client: XdsTestClient):
self.assertRpcStatusCode(test_client,
expected=(ExpectedResult(
rpc_type=RpcTypeEmptyCall,
status_code=grpc.StatusCode.OK,
ratio=1),),
length=_LENGTH_OF_RPC_SENDING_SEC,
tolerance=_NON_RANDOM_ERROR_TOLERANCE)
@absltest.skip('20% RPC might pass immediately, reason unknown')
class TestAlwaysDelay(xds_url_map_testcase.XdsUrlMapTestCase):
@staticmethod
def url_map_change(
host_rule: HostRule,
path_matcher: PathMatcher) -> Tuple[HostRule, PathMatcher]:
path_matcher["routeRules"] = [
_build_fault_injection_route_rule(abort_percentage=0,
delay_percentage=100)
]
return host_rule, path_matcher
def xds_config_validate(self, xds_config: DumpedXdsConfig):
self.assertNumEndpoints(xds_config, 1)
filter_config = xds_config.rds['virtualHosts'][0]['routes'][0][
'typedPerFilterConfig']['envoy.filters.http.fault']
self.assertEqual('20s', filter_config['delay']['fixedDelay'])
self.assertEqual(1000000,
filter_config['delay']['percentage']['numerator'])
self.assertEqual('MILLION',
filter_config['delay']['percentage']['denominator'])
def rpc_distribution_validate(self, test_client: XdsTestClient):
self.configure_and_send(test_client,
rpc_types=(RpcTypeUnaryCall,),
num_rpcs=_NUM_RPCS,
app_timeout=_DELAY_CASE_APPLICATION_TIMEOUT_SEC)
_wait_until_backlog_cleared(test_client)
self.assertRpcStatusCode(
test_client,
expected=(ExpectedResult(
rpc_type=RpcTypeUnaryCall,
status_code=grpc.StatusCode.DEADLINE_EXCEEDED,
ratio=1),),
length=_LENGTH_OF_RPC_SENDING_SEC,
tolerance=_NON_RANDOM_ERROR_TOLERANCE)
class TestAlwaysAbort(xds_url_map_testcase.XdsUrlMapTestCase):
@staticmethod
def url_map_change(
host_rule: HostRule,
path_matcher: PathMatcher) -> Tuple[HostRule, PathMatcher]:
path_matcher["routeRules"] = [
_build_fault_injection_route_rule(abort_percentage=100,
delay_percentage=0)
]
return host_rule, path_matcher
def xds_config_validate(self, xds_config: DumpedXdsConfig):
self.assertNumEndpoints(xds_config, 1)
filter_config = xds_config.rds['virtualHosts'][0]['routes'][0][
'typedPerFilterConfig']['envoy.filters.http.fault']
self.assertEqual(401, filter_config['abort']['httpStatus'])
self.assertEqual(1000000,
filter_config['abort']['percentage']['numerator'])
self.assertEqual('MILLION',
filter_config['abort']['percentage']['denominator'])
def rpc_distribution_validate(self, test_client: XdsTestClient):
self.configure_and_send(test_client,
rpc_types=(RpcTypeUnaryCall,),
num_rpcs=_NUM_RPCS)
self.assertRpcStatusCode(
test_client,
expected=(ExpectedResult(
rpc_type=RpcTypeUnaryCall,
status_code=grpc.StatusCode.UNAUTHENTICATED,
ratio=1),),
length=_LENGTH_OF_RPC_SENDING_SEC,
tolerance=_NON_RANDOM_ERROR_TOLERANCE)
class TestDelayHalf(xds_url_map_testcase.XdsUrlMapTestCase):
@staticmethod
def url_map_change(
host_rule: HostRule,
path_matcher: PathMatcher) -> Tuple[HostRule, PathMatcher]:
path_matcher["routeRules"] = [
_build_fault_injection_route_rule(abort_percentage=0,
delay_percentage=50)
]
return host_rule, path_matcher
def xds_config_validate(self, xds_config: DumpedXdsConfig):
self.assertNumEndpoints(xds_config, 1)
filter_config = xds_config.rds['virtualHosts'][0]['routes'][0][
'typedPerFilterConfig']['envoy.filters.http.fault']
self.assertEqual('20s', filter_config['delay']['fixedDelay'])
self.assertEqual(500000,
filter_config['delay']['percentage']['numerator'])
self.assertEqual('MILLION',
filter_config['delay']['percentage']['denominator'])
def rpc_distribution_validate(self, test_client: XdsTestClient):
self.configure_and_send(test_client,
rpc_types=(RpcTypeUnaryCall,),
num_rpcs=_NUM_RPCS,
app_timeout=_DELAY_CASE_APPLICATION_TIMEOUT_SEC)
_wait_until_backlog_cleared(test_client)
self.assertRpcStatusCode(
test_client,
expected=(ExpectedResult(
rpc_type=RpcTypeUnaryCall,
status_code=grpc.StatusCode.DEADLINE_EXCEEDED,
ratio=0.5),),
length=_LENGTH_OF_RPC_SENDING_SEC,
tolerance=_ERROR_TOLERANCE)
class TestAbortHalf(xds_url_map_testcase.XdsUrlMapTestCase):
@staticmethod
def url_map_change(
host_rule: HostRule,
path_matcher: PathMatcher) -> Tuple[HostRule, PathMatcher]:
path_matcher["routeRules"] = [
_build_fault_injection_route_rule(abort_percentage=50,
delay_percentage=0)
]
return host_rule, path_matcher
def xds_config_validate(self, xds_config: DumpedXdsConfig):
self.assertNumEndpoints(xds_config, 1)
filter_config = xds_config.rds['virtualHosts'][0]['routes'][0][
'typedPerFilterConfig']['envoy.filters.http.fault']
self.assertEqual(401, filter_config['abort']['httpStatus'])
self.assertEqual(500000,
filter_config['abort']['percentage']['numerator'])
self.assertEqual('MILLION',
filter_config['abort']['percentage']['denominator'])
def rpc_distribution_validate(self, test_client: XdsTestClient):
self.configure_and_send(test_client,
rpc_types=(RpcTypeUnaryCall,),
num_rpcs=_NUM_RPCS)
self.assertRpcStatusCode(
test_client,
expected=(ExpectedResult(
rpc_type=RpcTypeUnaryCall,
status_code=grpc.StatusCode.UNAUTHENTICATED,
ratio=0.5),),
length=_LENGTH_OF_RPC_SENDING_SEC,
tolerance=_ERROR_TOLERANCE)
if __name__ == '__main__':
absltest.main()
| 43.654795
| 80
| 0.622756
| 1,649
| 15,934
| 5.754397
| 0.198302
| 0.037939
| 0.015175
| 0.028665
| 0.625777
| 0.604384
| 0.591632
| 0.583729
| 0.577616
| 0.566762
| 0
| 0.01431
| 0.289507
| 15,934
| 364
| 81
| 43.774725
| 0.823867
| 0.138572
| 0
| 0.659498
| 0
| 0
| 0.114666
| 0.017427
| 0
| 0
| 0
| 0
| 0.136201
| 1
| 0.075269
| false
| 0.003584
| 0.028674
| 0.007168
| 0.157706
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aea011bdabd2351b652997696862bc1051c59d81
| 647
|
py
|
Python
|
reports/migrations/0103_auto_20190224_1000.py
|
CMU-TRP/podd-api
|
6eb5c4598f848f75d131287163cd9babf2a0a0fc
|
[
"MIT"
] | 3
|
2020-04-26T06:28:50.000Z
|
2021-04-05T08:02:26.000Z
|
reports/migrations/0103_auto_20190224_1000.py
|
CMU-TRP/podd-api
|
6eb5c4598f848f75d131287163cd9babf2a0a0fc
|
[
"MIT"
] | 10
|
2020-06-05T17:36:10.000Z
|
2022-03-11T23:16:42.000Z
|
reports/migrations/0103_auto_20190224_1000.py
|
CMU-TRP/podd-api
|
6eb5c4598f848f75d131287163cd9babf2a0a0fc
|
[
"MIT"
] | 5
|
2021-04-08T08:43:49.000Z
|
2021-11-27T06:36:46.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('reports', '0102_recordspec_group_key'),
]
operations = [
migrations.AlterField(
model_name='reportimage',
name='image_url',
field=models.URLField(max_length=800),
preserve_default=True,
),
migrations.AlterField(
model_name='reportimage',
name='thumbnail_url',
field=models.URLField(max_length=800),
preserve_default=True,
),
]
| 23.962963
| 50
| 0.591963
| 59
| 647
| 6.220339
| 0.610169
| 0.108992
| 0.13624
| 0.158038
| 0.52861
| 0.52861
| 0.288828
| 0.288828
| 0.288828
| 0.288828
| 0
| 0.024336
| 0.301391
| 647
| 26
| 51
| 24.884615
| 0.787611
| 0.032458
| 0
| 0.5
| 0
| 0
| 0.121795
| 0.040064
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aea133a870e185a7ab120fb1bfa33b1fd3b07e6f
| 5,196
|
py
|
Python
|
AKSDataOpsDemo/scripts/evaluate.py
|
cloudmelon/aks-severless
|
ade3b6a110444c09bc4c0ff44cb232cb09ddebe7
|
[
"MIT"
] | 6
|
2019-12-06T22:55:41.000Z
|
2019-12-10T23:57:40.000Z
|
AKSDataOpsDemo/scripts/evaluate.py
|
cloudmelon/aks-severless
|
ade3b6a110444c09bc4c0ff44cb232cb09ddebe7
|
[
"MIT"
] | null | null | null |
AKSDataOpsDemo/scripts/evaluate.py
|
cloudmelon/aks-severless
|
ade3b6a110444c09bc4c0ff44cb232cb09ddebe7
|
[
"MIT"
] | 2
|
2019-12-26T16:25:44.000Z
|
2020-09-02T22:43:41.000Z
|
import argparse
import os, json, sys
import azureml.core
from azureml.core import Workspace
from azureml.core import Experiment
from azureml.core.model import Model
import azureml.core
from azureml.core import Run
from azureml.core.webservice import AciWebservice, Webservice
from azureml.core.conda_dependencies import CondaDependencies
from azureml.core.image import ContainerImage
from azureml.core import Image
print("In evaluate.py")
parser = argparse.ArgumentParser("evaluate")
parser.add_argument("--model_name", type=str, help="model name", dest="model_name", required=True)
parser.add_argument("--image_name", type=str, help="image name", dest="image_name", required=True)
parser.add_argument("--output", type=str, help="eval output directory", dest="output", required=True)
args = parser.parse_args()
print("Argument 1: %s" % args.model_name)
print("Argument 2: %s" % args.image_name)
print("Argument 3: %s" % args.output)
run = Run.get_context()
ws = run.experiment.workspace
print('Workspace configuration succeeded')
model_list = Model.list(ws, name = args.model_name)
latest_model = sorted(model_list, reverse=True, key = lambda x: x.created_time)[0]
latest_model_id = latest_model.id
latest_model_name = latest_model.name
latest_model_version = latest_model.version
latest_model_path = latest_model.get_model_path(latest_model_name, _workspace=ws)
print('Latest model id: ', latest_model_id)
print('Latest model name: ', latest_model_name)
print('Latest model version: ', latest_model_version)
print('Latest model path: ', latest_model_path)
latest_model_run_id = latest_model.tags.get("run_id")
print('Latest model run id: ', latest_model_run_id)
latest_model_run = Run(run.experiment, run_id = latest_model_run_id)
latest_model_accuracy = latest_model_run.get_metrics().get("acc")
print('Latest model accuracy: ', latest_model_accuracy)
ws_list = Webservice.list(ws, model_name = latest_model_name)
print('webservice list')
print(ws_list)
deploy_model = False
current_model = None
if(len(ws_list) > 0):
webservice = ws_list[0]
try:
image_id = webservice.tags['image_id']
image = Image(ws, id = image_id)
current_model = image.models[0]
print('Found current deployed model!')
except:
deploy_model = True
print('Image id tag not found!')
else:
deploy_model = True
print('No deployed webservice for model: ', latest_model_name)
current_model_accuracy = -1 # undefined
if current_model != None:
current_model_run = Run(run.experiment, run_id = current_model.tags.get("run_id"))
current_model_accuracy = current_model_run.get_metrics().get("acc")
print('accuracies')
print(latest_model_accuracy, current_model_accuracy)
if latest_model_accuracy > current_model_accuracy:
deploy_model = True
print('Current model performs better and will be deployed!')
else:
print('Current model does NOT perform better and thus will NOT be deployed!')
eval_info = {}
eval_info["model_name"] = latest_model_name
eval_info["model_version"] = latest_model_version
eval_info["model_path"] = latest_model_path
eval_info["model_acc"] = latest_model_accuracy
eval_info["deployed_model_acc"] = current_model_accuracy
eval_info["deploy_model"] = deploy_model
eval_info["image_name"] = args.image_name
eval_info["image_id"] = ""
os.makedirs(args.output, exist_ok=True)
eval_filepath = os.path.join(args.output, 'eval_info.json')
if deploy_model == False:
with open(eval_filepath, "w") as f:
json.dump(eval_info, f)
print('eval_info.json saved')
print('Model did not meet the accuracy criteria and will not be deployed!')
print('Exiting')
sys.exit(0)
# Continue to package Model and create image
print('Model accuracy has met the criteria!')
print('Proceeding to package model and create the image...')
print('Updating scoring file with the correct model name')
with open('score.py') as f:
data = f.read()
with open('score_fixed.py', "w") as f:
f.write(data.replace('MODEL-NAME', args.model_name)) #replace the placeholder MODEL-NAME
print('score_fixed.py saved')
# create a Conda dependencies environment file
print("Creating conda dependencies file locally...")
conda_packages = ['numpy']
pip_packages = ['tensorflow==1.12.2', 'keras==2.2.4', 'azureml-sdk', 'azureml-monitoring']
mycondaenv = CondaDependencies.create(conda_packages=conda_packages, pip_packages=pip_packages)
conda_file = 'scoring_dependencies.yml'
with open(conda_file, 'w') as f:
f.write(mycondaenv.serialize_to_string())
# create container image configuration
print("Creating container image configuration...")
image_config = ContainerImage.image_configuration(execution_script = 'score_fixed.py',
runtime = 'python', conda_file = conda_file)
print("Creating image...")
image = Image.create(name=args.image_name, models=[latest_model], image_config=image_config, workspace=ws)
# wait for image creation to finish
image.wait_for_creation(show_output=True)
eval_info["image_id"] = image.id
with open(eval_filepath, "w") as f:
json.dump(eval_info, f)
print('eval_info.json saved')
| 33.960784
| 106
| 0.746151
| 745
| 5,196
| 4.974497
| 0.208054
| 0.109822
| 0.03238
| 0.03238
| 0.281975
| 0.208311
| 0.103346
| 0.051268
| 0.033999
| 0.033999
| 0
| 0.003579
| 0.13953
| 5,196
| 152
| 107
| 34.184211
| 0.825319
| 0.038876
| 0
| 0.117117
| 0
| 0
| 0.240763
| 0.004819
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.108108
| 0
| 0.108108
| 0.279279
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aea32992cb3ebb2d5d93b2c9d53e7b2172d6b5d0
| 2,048
|
py
|
Python
|
tools/instruction.py
|
mzins/MIPS-CPU-SIM
|
0f6723c668266447035c5010c67abdd041324d1a
|
[
"MIT"
] | null | null | null |
tools/instruction.py
|
mzins/MIPS-CPU-SIM
|
0f6723c668266447035c5010c67abdd041324d1a
|
[
"MIT"
] | null | null | null |
tools/instruction.py
|
mzins/MIPS-CPU-SIM
|
0f6723c668266447035c5010c67abdd041324d1a
|
[
"MIT"
] | null | null | null |
from tools import instruction_helpers
from tools.errors import InstructionNotFound
from logs.logconfig import log_config
LOG = log_config()
class Instruction:
binary_instruction = ""
def __init__(self):
inst = raw_input("Please type a single command\n")
self.opcode = None
self.binary_instruction = self.parse_instruction(inst)
def parse_instruction(self, instruction=""):
instruction = instruction.replace(",","")
instruction = instruction.upper()
instruction_parts = instruction.split(" ")
LOG.info('<note> INSTRUCTION UNITS ARE {}'.format(instruction_parts))
self.opcode = instruction_parts[0]
type = instruction_helpers.type_finder(opcode=self.opcode)
if type == "I":
LOG.info('<note> FOUND I TYPE INSTRUCTION')
opcode = instruction_helpers.i_type_look_up.get(instruction_parts[0]).get('OPCODE')
rs = instruction_helpers.to_binary(instruction_parts[2])
rt = instruction_helpers.to_binary(instruction_parts[1])
imm = instruction_helpers.immediate_to_binary(instruction_parts[3])
binary_instruction = "{}{}{}{}".format(opcode, rs, rt, imm)
elif type == "R":
LOG.info('<note> FOUND R TYPE INSTRUCTION')
opcode = instruction_helpers.r_type_look_up.get(instruction_parts[0]).get('OPCODE')
rs = instruction_helpers.to_binary(instruction_parts[2])
rt = instruction_helpers.to_binary(instruction_parts[3])
rd = instruction_helpers.to_binary(instruction_parts[1])
func_code = instruction_helpers.r_type_look_up.get(instruction_parts[0]).get('FUNCTION')
binary_instruction = "{}{}{}{}{}{}".format(opcode,rs, rt, rd, "00000", func_code)
elif type == "J":
LOG.info('<note>FOUND J TYPE INSTRUCTION')
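# NOTE: J-type encoding is not implemented here, so binary_instruction is never
# assigned in this branch and the log/return below will fail for J-type instructions.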
else:
raise InstructionNotFound
LOG.info('<note> BINARY INSTRUCTION {}\n'.format(binary_instruction))
return binary_instruction
| 37.236364
| 100
| 0.657715
| 233
| 2,048
| 5.536481
| 0.274678
| 0.171318
| 0.088372
| 0.111628
| 0.405426
| 0.33876
| 0.287597
| 0.253488
| 0.253488
| 0.253488
| 0
| 0.009464
| 0.226074
| 2,048
| 54
| 101
| 37.925926
| 0.804416
| 0
| 0
| 0.052632
| 0
| 0
| 0.114314
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.078947
| 0
| 0.210526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aea38d32840ca9cfc7b545135134ea487ccba8a9
| 19,377
|
py
|
Python
|
projects/views.py
|
msherman64/portal
|
e5399ef2ed3051d7c9a46c660f028c666ae22ca6
|
[
"Apache-2.0"
] | null | null | null |
projects/views.py
|
msherman64/portal
|
e5399ef2ed3051d7c9a46c660f028c666ae22ca6
|
[
"Apache-2.0"
] | null | null | null |
projects/views.py
|
msherman64/portal
|
e5399ef2ed3051d7c9a46c660f028c666ae22ca6
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from chameleon.decorators import terms_required
from django.contrib import messages
from django.http import (
Http404,
HttpResponseForbidden,
HttpResponse,
HttpResponseRedirect,
HttpResponseNotAllowed,
JsonResponse,
)
from django.core.urlresolvers import reverse
from django.core.exceptions import PermissionDenied
from django import forms
from datetime import datetime
from django.conf import settings
from .models import Project, ProjectExtras
from projects.serializer import ProjectExtrasJSONSerializer
from django.contrib.auth.models import User
from django.views.decorators.http import require_POST
from .forms import (
ProjectCreateForm,
ProjectAddUserForm,
AllocationCreateForm,
EditNicknameForm,
AddBibtexPublicationForm,
)
from django.db import IntegrityError
import re
import logging
import json
from keystoneclient.v3 import client as ks_client
from keystoneauth1 import adapter
import uuid
import sys
from chameleon.keystone_auth import admin_ks_client, sync_projects, get_user
from util.project_allocation_mapper import ProjectAllocationMapper
logger = logging.getLogger("projects")
def project_pi_or_admin_or_superuser(user, project):
if user.is_superuser:
return True
if user.groups.filter(name="Allocation Admin").count() == 1:
return True
if user.username == project.pi.username:
return True
return False
def project_member_or_admin_or_superuser(user, project, project_user):
if project_pi_or_admin_or_superuser(user, project):
return True
for pu in project_user:
if user.username == pu.username:
return True
return False
@login_required
def user_projects(request):
context = {}
username = request.user.username
mapper = ProjectAllocationMapper(request)
user = mapper.get_user(username)
context["is_pi_eligible"] = user["piEligibility"].lower() == "eligible"
context["username"] = username
context["projects"] = mapper.get_user_projects(username, to_pytas_model=True)
return render(request, "projects/user_projects.html", context)
@login_required
def view_project(request, project_id):
mapper = ProjectAllocationMapper(request)
try:
project = mapper.get_project(project_id)
if project.source != "Chameleon":
raise Http404("The requested project does not exist!")
except Exception as e:
logger.error(e)
raise Http404("The requested project does not exist!")
form = ProjectAddUserForm()
nickname_form = EditNicknameForm()
pubs_form = AddBibtexPublicationForm()
if request.POST and project_pi_or_admin_or_superuser(request.user, project):
form = ProjectAddUserForm()
if "add_user" in request.POST:
form = ProjectAddUserForm(request.POST)
if form.is_valid():
try:
add_username = form.cleaned_data["username"]
if mapper.add_user_to_project(project, add_username):
sync_project_memberships(request, add_username)
messages.success(
request, f'User "{add_username}" added to project!'
)
form = ProjectAddUserForm()
except Exception as e:
logger.exception("Failed adding user")
messages.error(
request,
(
"Unable to add user. Confirm that the username is "
"correct and corresponds to a current Chameleon user."
),
)
else:
messages.error(
request,
(
"There were errors processing your request. "
"Please see below for details."
),
)
elif "del_user" in request.POST:
try:
del_username = request.POST["username"]
# Ensure that it's not possible to remove the PI
if del_username == project.pi.username:
raise PermissionDenied(
"Removing the PI from the project is not allowed."
)
if mapper.remove_user_from_project(project, del_username):
sync_project_memberships(request, del_username)
messages.success(
request, 'User "%s" removed from project' % del_username
)
except PermissionDenied as exc:
messages.error(request, exc)
except:
logger.exception("Failed removing user")
messages.error(
request,
"An unexpected error occurred while attempting "
"to remove this user. Please try again",
)
elif "nickname" in request.POST:
nickname_form = edit_nickname(request, project_id)
users = mapper.get_project_members(project)
if not project_member_or_admin_or_superuser(request.user, project, users):
raise PermissionDenied
for a in project.allocations:
if a.start and isinstance(a.start, str):
a.start = datetime.strptime(a.start, "%Y-%m-%dT%H:%M:%SZ")
if a.dateRequested:
if isinstance(a.dateRequested, str):
a.dateRequested = datetime.strptime(
a.dateRequested, "%Y-%m-%dT%H:%M:%SZ"
)
if a.dateReviewed:
if isinstance(a.dateReviewed, str):
a.dateReviewed = datetime.strptime(a.dateReviewed, "%Y-%m-%dT%H:%M:%SZ")
if a.end:
if isinstance(a.end, str):
a.end = datetime.strptime(a.end, "%Y-%m-%dT%H:%M:%SZ")
user_mashup = []
for u in users:
user = {
"username": u.username,
"role": u.role,
}
try:
portal_user = User.objects.get(username=u.username)
user["email"] = portal_user.email
user["first_name"] = portal_user.first_name
user["last_name"] = portal_user.last_name
except User.DoesNotExist:
logger.info("user: " + u.username + " not found")
user_mashup.append(user)
return render(
request,
"projects/view_project.html",
{
"project": project,
"project_nickname": project.nickname,
"users": user_mashup,
"is_pi": request.user.username == project.pi.username,
"form": form,
"nickname_form": nickname_form,
"pubs_form": pubs_form,
},
)
def set_ks_project_nickname(chargeCode, nickname):
for region in list(settings.OPENSTACK_AUTH_REGIONS.keys()):
ks_admin = admin_ks_client(region=region)
project_list = ks_admin.projects.list(domain=ks_admin.user_domain_id)
project = [
this
for this in project_list
if getattr(this, "charge_code", None) == chargeCode
]
logger.info(
"Assigning nickname {0} to project with charge code {1} at {2}".format(
nickname, chargeCode, region
)
)
if project and project[0]:
project = project[0]
ks_admin.projects.update(project, name=nickname)
logger.info(
"Successfully assigned nickname {0} to project with charge code {1} at {2}".format(
nickname, chargeCode, region
)
)
def sync_project_memberships(request, username):
"""Re-sync a user's Keystone project memberships.
This calls utils.auth.keystone_auth.sync_projects under the hood, which
will dynamically create missing projects as well.
Args:
request (Request): the parent request; used for region detection.
username (str): the username to sync memberships for.
Return:
List[keystone.Project]: a list of Keystone projects the user is a
member of.
"""
mapper = ProjectAllocationMapper(request)
try:
ks_admin = admin_ks_client(request=request)
ks_user = get_user(ks_admin, username)
if not ks_user:
logger.error(
(
"Could not fetch Keystone user for {}, skipping membership syncing".format(
username
)
)
)
return
active_projects = mapper.get_user_projects(
username, alloc_status=["Active"], to_pytas_model=True
)
return sync_projects(ks_admin, ks_user, active_projects)
except Exception as e:
logger.error("Could not sync project memberships for %s: %s", username, e)
return []
@login_required
@terms_required("project-terms")
def create_allocation(request, project_id, allocation_id=-1):
mapper = ProjectAllocationMapper(request)
user = mapper.get_user(request.user.username)
if user["piEligibility"].lower() != "eligible":
messages.error(
request,
"Only PI Eligible users can request allocations. If you would "
"like to request PI Eligibility, please "
'<a href="/user/profile/edit/">submit a PI Eligibility '
"request</a>.",
)
return HttpResponseRedirect(reverse("projects:user_projects"))
project = mapper.get_project(project_id)
allocation = None
allocation_id = int(allocation_id)
if allocation_id > 0:
for a in project.allocations:
if a.id == allocation_id:
allocation = a
# goofiness that we should clean up later; requires data cleansing
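# The stored description may embed the justification and funding source using the
# "--- Supplemental details ---" and "--- Funding source(s) ---" separators; split them back out here.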
abstract = project.description
if "--- Supplemental details ---" in abstract:
additional = abstract.split("\n\n--- Supplemental details ---\n\n")
abstract = additional[0]
additional = additional[1].split("\n\n--- Funding source(s) ---\n\n")
justification = additional[0]
if len(additional) > 1:
funding_source = additional[1]
else:
funding_source = ""
elif allocation:
justification = allocation.justification
if "--- Funding source(s) ---" in justification:
parts = justification.split("\n\n--- Funding source(s) ---\n\n")
justification = parts[0]
funding_source = parts[1]
else:
funding_source = ""
else:
justification = ""
funding_source = ""
if request.POST:
form = AllocationCreateForm(
request.POST,
initial={
"description": abstract,
"supplemental_details": justification,
"funding_source": funding_source,
},
)
if form.is_valid():
allocation = form.cleaned_data.copy()
allocation["computeRequested"] = 20000
# Also update the project
project.description = allocation.pop("description", None)
supplemental_details = allocation.pop("supplemental_details", None)
logger.error(supplemental_details)
funding_source = allocation.pop("funding_source", None)
# if supplemental_details == None:
# raise forms.ValidationError("Justifcation is required")
# This is required
if not supplemental_details:
supplemental_details = "(none)"
logger.error(supplemental_details)
if funding_source:
allocation[
"justification"
] = "%s\n\n--- Funding source(s) ---\n\n%s" % (
supplemental_details,
funding_source,
)
else:
allocation["justification"] = supplemental_details
allocation["projectId"] = project_id
allocation["requestorId"] = mapper.get_portal_user_id(request.user.username)
allocation["resourceId"] = "39"
if allocation_id > 0:
allocation["id"] = allocation_id
try:
logger.info(
"Submitting allocation request for project %s: %s"
% (project.id, allocation)
)
updated_project = mapper.save_project(project.as_dict())
mapper.save_allocation(
allocation, project.chargeCode, request.get_host()
)
messages.success(request, "Your allocation request has been submitted!")
return HttpResponseRedirect(
reverse("projects:view_project", args=[updated_project["id"]])
)
except:
logger.exception("Error creating allocation")
form.add_error(
"__all__", "An unexpected error occurred. Please try again"
)
else:
form.add_error(
"__all__",
"There were errors processing your request. "
"Please see below for details.",
)
else:
form = AllocationCreateForm(
initial={
"description": abstract,
"supplemental_details": justification,
"funding_source": funding_source,
}
)
context = {
"form": form,
"project": project,
"alloc_id": allocation_id,
"alloc": allocation,
}
return render(request, "projects/create_allocation.html", context)
@login_required
@terms_required("project-terms")
def create_project(request):
mapper = ProjectAllocationMapper(request)
form_args = {"request": request}
user = mapper.get_user(request.user.username)
if user["piEligibility"].lower() != "eligible":
messages.error(
request,
"Only PI Eligible users can create new projects. "
"If you would like to request PI Eligibility, please "
'<a href="/user/profile/edit/">submit a PI Eligibility '
"request</a>.",
)
return HttpResponseRedirect(reverse("projects:user_projects"))
if request.POST:
form = ProjectCreateForm(request.POST, **form_args)
if form.is_valid():
# title, description, typeId, fieldId
project = form.cleaned_data.copy()
# let's check that any provided nickname is unique
project["nickname"] = project["nickname"].strip()
nickname_valid = (
project["nickname"]
and ProjectExtras.objects.filter(nickname=project["nickname"]).count()
< 1
and Project.objects.filter(nickname=project["nickname"]).count() < 1
)
if not nickname_valid:
form.add_error("__all__", "Project nickname unavailable")
return render(request, "projects/create_project.html", {"form": form})
project.pop("accept_project_terms", None)
# pi
pi_user_id = mapper.get_portal_user_id(request.user.username)
project["piId"] = pi_user_id
# allocations
allocation = {
"resourceId": 39,
"requestorId": pi_user_id,
"computeRequested": 20000,
}
supplemental_details = project.pop("supplemental_details", None)
funding_source = project.pop("funding_source", None)
# if supplemental_details == None:
# raise forms.ValidationError("Justifcation is required")
if not supplemental_details:
supplemental_details = "(none)"
if funding_source:
allocation[
"justification"
] = "%s\n\n--- Funding source(s) ---\n\n%s" % (
supplemental_details,
funding_source,
)
else:
allocation["justification"] = supplemental_details
project["allocations"] = [allocation]
# startup
project["typeId"] = 2
# source
project["source"] = "Chameleon"
try:
created_project = mapper.save_project(project, request.get_host())
logger.info("newly created project: " + json.dumps(created_project))
messages.success(request, "Your project has been created!")
return HttpResponseRedirect(
reverse("projects:view_project", args=[created_project["id"]])
)
except:
logger.exception("Error creating project")
form.add_error(
"__all__", "An unexpected error occurred. Please try again"
)
else:
form.add_error(
"__all__",
"There were errors processing your request. "
"Please see below for details.",
)
else:
form = ProjectCreateForm(**form_args)
return render(request, "projects/create_project.html", {"form": form})
@login_required
def edit_project(request):
context = {}
return render(request, "projects/edit_project.html", context)
@require_POST
def edit_nickname(request, project_id):
mapper = ProjectAllocationMapper(request)
project = mapper.get_project(project_id)
if not project_pi_or_admin_or_superuser(request.user, project):
messages.error(request, "Only the project PI can update nickname.")
return EditNicknameForm()
form = EditNicknameForm(request.POST)
if form.is_valid(request):
# try to update nickname
try:
nickname = form.cleaned_data["nickname"]
ProjectAllocationMapper.update_project_nickname(project_id, nickname)
form = EditNicknameForm()
set_ks_project_nickname(project.chargeCode, nickname)
messages.success(request, "Update Successful")
except:
messages.error(request, "Nickname not available")
else:
messages.error(request, "Nickname not available")
return form
def get_extras(request):
provided_token = request.GET.get("token") if request.GET.get("token") else None
stored_token = getattr(settings, "PROJECT_EXTRAS_API_TOKEN", None)
if not provided_token or not stored_token or provided_token != stored_token:
logger.error("Project Extras json api Access Token validation failed")
return HttpResponseForbidden()
logger.info("Get all project extras json endpoint requested")
response = {"status": "success"}
try:
serializer = ProjectExtrasJSONSerializer()
response["message"] = ""
extras = json.loads(serializer.serialize(ProjectExtras.objects.all()))
response["result"] = extras
except ProjectExtras.DoesNotExist:
response["message"] = "Does not exist."
response["result"] = None
return JsonResponse(response)
| 35.489011
| 95
| 0.588533
| 1,927
| 19,377
| 5.766476
| 0.16191
| 0.025738
| 0.016199
| 0.009719
| 0.351152
| 0.307865
| 0.266559
| 0.212113
| 0.178906
| 0.145608
| 0
| 0.003573
| 0.321206
| 19,377
| 545
| 96
| 35.554128
| 0.841253
| 0.045311
| 0
| 0.344519
| 0
| 0
| 0.172432
| 0.018545
| 0
| 0
| 0
| 0
| 0
| 1
| 0.024609
| false
| 0
| 0.058166
| 0
| 0.136465
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aea75e1eb83214dedb643e0e6cd5c7568f6bf1b4
| 2,193
|
py
|
Python
|
listeners/pipe.py
|
ggilestro/majordomo
|
d111c1dd1a4c4b8d2cdaa9651b51ece60a1b648d
|
[
"MIT"
] | null | null | null |
listeners/pipe.py
|
ggilestro/majordomo
|
d111c1dd1a4c4b8d2cdaa9651b51ece60a1b648d
|
[
"MIT"
] | null | null | null |
listeners/pipe.py
|
ggilestro/majordomo
|
d111c1dd1a4c4b8d2cdaa9651b51ece60a1b648d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# pipe.py
#
# Copyright 2014 Giorgio Gilestro <gg@kozak>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# Listen from pipefile
# e.g.: echo "TEST COMMAND" > /tmp/pipefile
import os, tempfile
import logging
import threading
class pipe():
def __init__(self, pipefile, queue, actions):
"""
Reads from a pipe
"""
self.pipefile = pipefile
self.queue = queue
actions["pipe"] = {}
self.__makefifo()
self.listening_thread = threading.Thread(target=self.listen_from_pipe)
#self.listening_thread.daemon = True
self.isListening = True
self.listening_thread.start()
def transmit(self, received):
"""
"""
cmd = ("pipe", received)
self.queue.put(cmd)
def __makefifo(self):
"""
"""
try:
os.mkfifo(self.pipefile)
logging.debug("Listening to FIFO Pipe at %s" % self.pipefile)
return True
except:
logging.debug("Error creating FIFO Pipe %s. File already existing?" % self.pipefile)
return False
def listen_from_pipe(self):
"""
"""
while self.isListening:
logging.debug("Listening from PIPE %s" % self.pipefile)
with open(self.pipefile) as fifo:
self.transmit(fifo.read().strip())
if __name__ == '__main__':
p = pipe("pipefile", "none")
| 28.480519
| 96
| 0.617419
| 271
| 2,193
| 4.911439
| 0.523985
| 0.06311
| 0.029301
| 0.042825
| 0.061608
| 0.042074
| 0
| 0
| 0
| 0
| 0
| 0.011465
| 0.284086
| 2,193
| 76
| 97
| 28.855263
| 0.836306
| 0.430917
| 0
| 0
| 0
| 0
| 0.109229
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.1
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aea97cde74c5b76516a1287e08c111bed632fb99
| 1,608
|
py
|
Python
|
scripts/android-patch.py
|
rasaha91/react-native-macos
|
5da5441c1d98596683590bc076541560db61ff82
|
[
"CC-BY-4.0",
"MIT"
] | 2,114
|
2020-05-06T10:05:45.000Z
|
2022-03-31T23:19:28.000Z
|
scripts/android-patch.py
|
rasaha91/react-native-macos
|
5da5441c1d98596683590bc076541560db61ff82
|
[
"CC-BY-4.0",
"MIT"
] | 623
|
2020-05-05T21:24:26.000Z
|
2022-03-30T21:00:31.000Z
|
scripts/android-patch.py
|
rasaha91/react-native-macos
|
5da5441c1d98596683590bc076541560db61ff82
|
[
"CC-BY-4.0",
"MIT"
] | 85
|
2020-05-05T23:09:40.000Z
|
2022-03-29T10:12:42.000Z
|
import os
import sys
# A Python script that determines which of the files that require patching
# have been touched between two points in the repo.
def shell(command):
stream = os.popen(command)
result = stream.read()
stream.close()
return result
def get_patches():
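# Builds a mapping of patch name -> list of files covered by that patch, derived from
# the android-patches/patches/<patch-name>/<file> directory layout.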
patches = {}
for file in shell('find android-patches/patches -type f').splitlines():
slash_indices = [i for (i, c) in enumerate(file) if c == '/']
if len(slash_indices) < 3:
continue
patch_name = file[slash_indices[1]+1:slash_indices[2]]
filename = file[slash_indices[2]+1:]
if patch_name not in patches:
patches[patch_name] = []
patches[patch_name].append(filename)
return patches
def get_touched_files(branch_from, branch_to):
files = []
command = 'git diff --name-status {0} {1}'.format(branch_from, branch_to)
for line in shell(command).splitlines():
files.append(line.split('\t')[-1])
return files
if __name__ == '__main__':
if len(sys.argv) != 3:
sys.stderr.write('Usage: android-patch.py <commit> <commit>')
sys.exit(1)
patches = get_patches()
touched_files = set(get_touched_files(sys.argv[1], sys.argv[2]))
for patch_name in sorted(patches.keys()):
patched_and_touched = [file for file in patches[patch_name] \
if file in touched_files]
if len(patched_and_touched) > 0:
print('\033[4m{0}\033[0m'.format(patch_name))
for file in patched_and_touched:
print('* {0}'.format(file))
| 34.212766
| 77
| 0.625622
| 222
| 1,608
| 4.36036
| 0.387387
| 0.065083
| 0.027893
| 0.03719
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019917
| 0.250622
| 1,608
| 46
| 78
| 34.956522
| 0.783402
| 0.080224
| 0
| 0
| 0
| 0
| 0.094851
| 0.015583
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078947
| false
| 0
| 0.052632
| 0
| 0.210526
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aeaabc6f5b0d5b7cf3ded3141d91ff7d3817bb49
| 5,624
|
py
|
Python
|
src/example_publish_pypi_medium/asm-enforce-ready-signatures.py
|
lrhazi/example-publish-pypi
|
135bc75b0e37225b1879cb79d644f709977f1f3d
|
[
"MIT"
] | null | null | null |
src/example_publish_pypi_medium/asm-enforce-ready-signatures.py
|
lrhazi/example-publish-pypi
|
135bc75b0e37225b1879cb79d644f709977f1f3d
|
[
"MIT"
] | null | null | null |
src/example_publish_pypi_medium/asm-enforce-ready-signatures.py
|
lrhazi/example-publish-pypi
|
135bc75b0e37225b1879cb79d644f709977f1f3d
|
[
"MIT"
] | null | null | null |
import json
from docopt import docopt
from bigip_utils.logger import logger
from bigip_utils.bigip import *
#
# This script enforces all attack signatures that are ready to be enforced:
# https://support.f5.com/csp/article/K60640453?utm_source=f5support&utm_medium=RSS
#
__doc__ = """
Usage:
enforce-ready-signatures.py [-hvndsb] [-p POLICY_NAME] -l LIST_FILE
Options:
-h --help Show this screen.
-v --version Show version.
-n --dry-run Show actions. Do not execute them.
-s --sync Sync devices after changes.
-b --backup-config Create and download a UCS file.
-d --dev-devices-only Skip non DEV devices.
-l LIST_FILE --list-file=LIST_FILE CSV file with list of bigips. Format: hostname,ip,username,password
-p POLICY_NAME --policy-name=POLICY_NAME Name of a policy to act on. [default: all]
"""
VERSION = "0.2"
def enforce_ready_signatures(bigip, id):
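# The $filter below selects signatures that have no open suggestions, are past the
# enforcement-readiness period, and are still in staging -- i.e. ready to be enforced.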
params = {
'$select': '',
'$filter': 'hasSuggestions eq false AND wasUpdatedWithinEnforcementReadinessPeriod eq false and performStaging eq true',
}
data = {'performStaging': 'false'}
url_base_asm = f'https://{bigip.ip}/mgmt/tm/asm/policies/{id}/signatures'
json_data = bigip.patch(url_base_asm, params=params, data=json.dumps(data))
count = int(json_data.get('totalItems', 0))
return count
def get_ready_signatures_count(bigip, id):
params = {
'$select': '',
'$filter': 'hasSuggestions eq false AND wasUpdatedWithinEnforcementReadinessPeriod eq false and performStaging eq true',
'$top': '1',
}
url_base_asm = f'https://{bigip.ip}/mgmt/tm/asm/policies/{id}/signatures'
json_data = bigip.get(url_base_asm, params=params)
# for d in json_data['items']:
# results[d['signatureReference']['name']] = d['signatureReference']['signatureId']
count = int(json_data.get('totalPages', 0))
return count
def process_device(bigip, dry_run=True, policy=None, sync_device_group=None):
policies_virtuals = get_virtuals_asm_policies(bigip)
policies = bigip.get_asm_policies()
enforced_signatures_count = 0
ready_signatures = {}
for i in policies:
if(i['type'] == 'parent'):
continue
policy_id = i['id']
policy_name = i['name']
policy_virtuals = policies_virtuals[policy_name]
if not policy == 'all' and not policy == policy_name:
continue
if(i['enforcementMode'] == 'blocking'):
ready_signatures[policy_name] = get_ready_signatures_count(
bigip, policy_id)
if ready_signatures[policy_name] and dry_run:
logger.info(
f"{bigip.hostname}: [DRY-RUN] : {policy_name}: Enforcing {ready_signatures[policy_name]} ready attack signatures. VIPs={len(policy_virtuals)}")
elif ready_signatures[policy_name]:
logger.info(
f"{bigip.hostname}: {policy_name}: Enforcing {ready_signatures[policy_name]} ready attack signatures. VIPs={len(policy_virtuals)}")
count = enforce_ready_signatures(bigip, policy_id)
if count:
r = apply_asm_policy(bigip, policy_id)
if not r:
logger.error(
f"{bigip.hostname}: Applying policy {policy_name} did not complete successfully.")
enforced_signatures_count += count
if enforced_signatures_count and sync_device_group:
logger.info(f"{bigip.hostname}: Syncing device group.")
sync_devices(bigip, device_group=sync_device_group)
return enforced_signatures_count
if __name__ == "__main__":
arguments = docopt(__doc__, version=VERSION)
devices_file = arguments['--list-file']
dry_run = arguments['--dry-run']
dev_only = arguments['--dev-devices-only']
policy_name = arguments['--policy-name']
sync = arguments['--sync']
backup_config = arguments['--backup-config']
for (hostname, ip, username, password) in get_bigips(devices_file, dev_only=dev_only):
b = BigIP(hostname, username, password, ip=ip, verify_ssl=False)
logger.info(
f"{b.hostname}: Started. Policy: {policy_name} Dry-Run: {dry_run}")
proceed = True
check_active(b)
device_group = get_asm_sync_group(b)
if not device_group and not check_standalone(b):
logger.error(
f"{b.hostname}: Could not find ASM device group name. {device_group}")
proceed = False
elif device_group:
logger.info(f"{b.hostname}: Sync Device Group: {device_group}")
if (not b.token):
logger.warning(
f'{b.hostname}: Unable to obtain authentication token')
proceed = False
if not check_active(b):
logger.warning(f'{b.hostname}: Not active, skipping device.')
proceed = False
enforced_signatures_count = 0
get_ucs(b,overwrite=True)
if proceed:
if backup_config and not dry_run:
get_ucs(b,overwrite=True)
enforced_signatures_count = process_device(
b, dry_run=dry_run, policy=policy_name, sync_device_group=device_group)
logger.info(
f"{b.hostname}: Finished. enforced signatures count: {enforced_signatures_count}")
logger.info("Done.")
| 43.9375
| 163
| 0.618243
| 667
| 5,624
| 5.001499
| 0.253373
| 0.056954
| 0.055156
| 0.03747
| 0.290468
| 0.181655
| 0.181655
| 0.16307
| 0.16307
| 0.16307
| 0
| 0.004168
| 0.274716
| 5,624
| 127
| 164
| 44.283465
| 0.81368
| 0.048186
| 0
| 0.229358
| 0
| 0.036697
| 0.380471
| 0.061541
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027523
| false
| 0.027523
| 0.036697
| 0
| 0.091743
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aeabb21d13fb47406449b6804d29f4983877b33d
| 13,358
|
py
|
Python
|
farb/sysinstall.py
|
samskivert/farbot
|
d88f16dcbd23d7ca3b7fdcf341c9346c0ab21bb8
|
[
"BSD-3-Clause"
] | null | null | null |
farb/sysinstall.py
|
samskivert/farbot
|
d88f16dcbd23d7ca3b7fdcf341c9346c0ab21bb8
|
[
"BSD-3-Clause"
] | null | null | null |
farb/sysinstall.py
|
samskivert/farbot
|
d88f16dcbd23d7ca3b7fdcf341c9346c0ab21bb8
|
[
"BSD-3-Clause"
] | null | null | null |
# sysinstall.py vi:ts=4:sw=4:expandtab:
#
# Copyright (c) 2006-2008 Three Rings Design, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright owner nor the names of contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import copy
import os
import string
import farb
class ConfigSection(object):
"""
Abstract class implementing re-usable functions for install.cfg(8)
configuration sections.
"""
def _serializeOptions(self, output):
"""
Serialize all install.cfg options for this section to an output file.
Concrete subclasses MUST provide a sectionOptions list as a class
attribute. This list must contain all valid install.cfg options for
the section, in the order required by sysinstall(8).
Given the sectionOptions list, this implementation will introspect
'self' for attributes with names that match the sectionOptions.
Any available attributes will be used, and any missing attributes
will be ignored.
@param output: Open, writable file handle
"""
for option in self.sectionOptions:
if hasattr(self, option):
output.write('%s=%s\n' % (option, getattr(self, option)))
def _serializeCommands(self, output, commands=None):
"""
Write out all commands listed in the sectionCommands class
attribute.
@param output: Open, writable file handle
@param commands: Commands to output. Defaults to sectionCommands.
"""
if (not commands):
commands = self.sectionCommands
for command in commands:
output.write('%s\n' % (command))
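# Together these helpers emit plain install.cfg lines, e.g. (illustrative values only):
#   hostname=buildhost
#   domainname=example.com
#   mediaSetNFS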
class NetworkConfig(ConfigSection):
"""
install.cfg(8) network configuration section.
"""
# Section option names
sectionOptions = (
'hostname', # New Server's Host Name
'domainname', # New Server's Domain Name
'netDev', # Network Interface
'nfs', # NFS Installation Media
'tryDHCP' # DHCP an address
)
# Default option values
tryDHCP = 'YES'
# Section commands
sectionCommands = (
'mediaSetNFS',
)
def __init__(self, section, config):
"""
Initialize network configuration for a given
installation.
@param section: ZConfig Installation section
@param config: ZConfig Farbot Config
"""
# Install-specific Options
self.hostname = section.hostname
self.domainname = section.domain
self.netDev = section.networkdevice
# FarBot-wide Options
self.nfshost = config.Releases.nfshost
self.nfspath = os.path.join(config.Releases.installroot, section.release.lower())
self.nfs = self.nfshost + ':' + self.nfspath
def serialize(self, output):
self._serializeOptions(output)
self._serializeCommands(output)
class DistSetConfig(ConfigSection):
"""
install.cfg(8) distribution set configuration section.
"""
# Section option names
sectionOptions = (
'dists', # Install these distribution sets
)
# Section commands
sectionCommands = (
'distSetCustom',
)
def __init__(self, release, config):
"""
Initialize distribution set configuration for a given
installation.
@param release: ZConfig Release section
@param config: ZConfig Farbot Config
"""
# Flatten lists of dists, source dists, and kernel dists, inserting the
# sub lists after src or kernels. Not sure if it is really necessary to have
# those sub lists in that exact location, but let's be safe.
self.dists = copy.copy(release.dists)
if self.dists.count('src') > 0:
self.dists.insert(self.dists.index('src') + 1, string.join(release.sourcedists))
if self.dists.count('kernels') > 0:
self.dists.insert(self.dists.index('kernels') + 1, string.join(release.kerneldists))
self.dists = string.join(self.dists)
def serialize(self, output):
self._serializeOptions(output)
self._serializeCommands(output)
class DiskLabelConfig(ConfigSection):
"""
install.cfg(8) FreeBSD labels (partition) configuration section.
"""
# Section option names are generated
# Section commands
sectionCommands = (
'diskLabelEditor',
)
def __init__(self, section, diskDevice):
"""
Initialize a disk label configuration for a given
partition map and device.
@param section: ZConfig PartitionMap section
@param diskDevice: Device to label (eg ad0s1)
"""
# Section option names are generated
self.sectionOptions = []
self.diskDevice = diskDevice
# Grab our partition map
for part in section.Partition:
# Build device + slice + partition number, and append it to
# sectionOptions
slice = self.diskDevice + '-' + part.getSectionName()
self.sectionOptions.append(slice)
# Partition settings
if (part.softupdates):
setattr(self, slice, "%s %d %s 1" % (part.type, part.size, part.mount))
else:
setattr(self, slice, "%s %d %s" % (part.type, part.size, part.mount))
# Ensure that partitions are in order (1 ... 9)
self.sectionOptions.sort()
def serialize(self, output):
self._serializeOptions(output)
self._serializeCommands(output)
class DiskPartitionConfig(ConfigSection):
"""
install.cfg(8) BIOS partition configuration section.
"""
# Section option names
sectionOptions = (
'disk', # Disk to partition
'partition', # Partitioning method
'bootManager', # Boot manage to install
)
# We hardcode the use of the entire disk
partition = 'all'
# Hardcode the use of the boot manager, too
bootManager = 'standard'
# Section commands
sectionCommands = (
'diskPartitionEditor',
)
def __init__(self, section, config):
"""
Initialize a disk partition configuration for a given
disk section.
@param section: ZConfig Disk section
@param config: ZConfig Farbot Config
"""
self.disk = section.getSectionName()
# Grab our partition map
# If it doesn't exist, complain loudly
self.diskLabelConfig = None
for map in config.Partitions.PartitionMap:
if (section.partitionmap.lower() == map.getSectionName()):
# Set up the disk labels. Always s1!
self.diskLabelConfig = DiskLabelConfig(map, self.disk + 's1')
break
def serialize(self, output):
self._serializeOptions(output)
self._serializeCommands(output)
self.diskLabelConfig.serialize(output)
class SystemCommandConfig(ConfigSection):
"""
install.cfg(8) system command configuration section.
"""
# Section option names
sectionOptions = (
'command', # Command name and arguments
)
# Section commands
sectionCommands = (
'system',
)
def __init__(self, cmd):
"""
Initialize system command configuration for a given
installation.
@param section: ZConfig command key value
"""
# Build command + options
self.cmd = cmd
setattr(self, 'command', "%s" % (cmd))
def serialize(self, output):
self._serializeOptions(output)
self._serializeCommands(output)
class PackageConfig(SystemCommandConfig):
"""
install.cfg(8) package install configuration section.
Sysinstall's dependency handling is seriously broken,
relying on an INDEX that doesn't necessarily reflect reality.
We skip the sysinstall package installation code entirely and
use a SystemCommand to call pkg_add(8) ourselves post-install.
"""
installPackageScript = os.path.join('/dist', os.path.basename(farb.INSTALL_PACKAGE_SH))
def __init__(self, section):
"""
Initialize package install configuration for a given
installation.
@param section: ZConfig Package section
"""
# /dist/install_package.sh <package name>
self.package = section.package
cmd = "%s %s" % (self.installPackageScript, self.package)
super(PackageConfig, self).__init__(cmd)
class InstallationConfig(ConfigSection):
"""
InstallationConfig instances represent a
complete install.cfg file for sysinstall(8)
"""
# Section option names
sectionOptions = (
'debug',
'nonInteractive',
'noWarn'
)
# Defaults
debug = 'YES'
nonInteractive = 'YES'
noWarn = 'YES'
# Commands needed to start up the interactive partitioner
interactivePartitionCommands = (
'diskInteractive="YES"', # Partition and label disks interactively
'diskPartitionEditor', # Run disk partition (MBR) editor
'diskLabelEditor' # Run disk label editor
)
# Pre-package commands
prePackageCommands = (
'diskLabelCommit', # Write disk labels to disk
'installCommit' # Write install distribution to disk
)
# Section commands
sectionCommands = (
'shutdown',
)
def __init__(self, section, config):
"""
Initialize a new installation configuration.
@param section: ZConfig Installation section
@param config: ZConfig Farbot Config
"""
self.name = section.getSectionName()
# Network configuration
self.networkConfig = NetworkConfig(section, config)
# Distribution sets
for release in config.Releases.Release:
if release.getSectionName() == section.release.lower():
self.distSetConfig = DistSetConfig(release, config)
break
# Disks (Partitions and Labels)
self.diskPartitionConfigs = []
for disk in section.Disk:
diskPartitionConfig = DiskPartitionConfig(disk, config)
self.diskPartitionConfigs.append(diskPartitionConfig)
# Packages
self.packageConfigs = []
for psetName in section.packageset:
foundPset = False
for pset in config.PackageSets.PackageSet:
if (psetName.lower() == pset.getSectionName()):
foundPset = True
break
for package in pset.Package:
pkgc = PackageConfig(package)
self.packageConfigs.append(pkgc)
# System Commands
self.systemCommandConfigs = []
if (section.PostInstall):
for cmd in section.PostInstall.command:
systemCommandConfig = SystemCommandConfig(cmd)
self.systemCommandConfigs.append(systemCommandConfig)
def serialize(self, output):
# Global configuration options
self._serializeOptions(output)
# Network configuration
self.networkConfig.serialize(output)
# Select distribution sets
self.distSetConfig.serialize(output)
# Disk formatting
for disk in self.diskPartitionConfigs:
disk.serialize(output)
# If we have no diskPartitionConfigs, partition interactively
if len(self.diskPartitionConfigs) == 0:
self._serializeCommands(output, commands=self.interactivePartitionCommands)
# Commit installation to disk
self._serializeCommands(output, commands=self.prePackageCommands)
# Packages
for pkgc in self.packageConfigs:
pkgc.serialize(output)
# System Commands
for scc in self.systemCommandConfigs:
scc.serialize(output)
# Global commands
self._serializeCommands(output)
| 33.562814
| 96
| 0.643584
| 1,382
| 13,358
| 6.183068
| 0.2822
| 0.012873
| 0.025278
| 0.015448
| 0.200819
| 0.176712
| 0.114921
| 0.094441
| 0.078057
| 0.078057
| 0
| 0.003629
| 0.277961
| 13,358
| 397
| 97
| 33.647355
| 0.882322
| 0.423566
| 0
| 0.213873
| 0
| 0
| 0.049159
| 0.002992
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086705
| false
| 0
| 0.023121
| 0
| 0.271676
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aeb2c65fe32f5a74d10e3aa724b39a7ca6e18fa4
| 8,153
|
py
|
Python
|
wpscan_out_parse/parser/_cli_parser.py
|
clivewalkden/wpscan_out_parse
|
dff46aa2e98390afc79b7bb622eb4c01d066fbb5
|
[
"MIT"
] | 1
|
2021-06-24T08:35:15.000Z
|
2021-06-24T08:35:15.000Z
|
wpscan_out_parse/parser/_cli_parser.py
|
clivewalkden/wpscan_out_parse
|
dff46aa2e98390afc79b7bb622eb4c01d066fbb5
|
[
"MIT"
] | null | null | null |
wpscan_out_parse/parser/_cli_parser.py
|
clivewalkden/wpscan_out_parse
|
dff46aa2e98390afc79b7bb622eb4c01d066fbb5
|
[
"MIT"
] | null | null | null |
import re
from typing import Any, Dict, Sequence, List, Optional, Tuple
from .base import Parser
from .components import InterestingFinding
from .results import WPScanResults
#################### CLI PARSER ######################
class WPScanCliParser(Parser):
"""Main interface to parse WPScan CLI output.
- wpscan_output: WPScan output as string.
- false_positives_strings: List of false positive strings.
"""
def __init__(self, wpscan_output:str,
false_positives_strings:Optional[Sequence[str]]=None) -> None:
if not wpscan_output:
wpscan_output = ""
# Parser config: false positives string and verbosity (not available with cli parser)
parser_config = dict(
false_positives_strings=false_positives_strings, show_all_details=False
)
super().__init__({'output':wpscan_output}, **parser_config)
self._infos, self._warnings, self._alerts = self.parse_cli(wpscan_output)
def get_infos(self) -> Sequence[str]:
""" Return all the parsed infos"""
return self._infos
def get_warnings(self) -> Sequence[str]:
""" Return all the parsed warnings"""
return self._warnings
def get_alerts(self)-> Sequence[str]:
""" Return all the parsed alerts"""
return self._alerts
def _parse_cli_toogle(self, line:str, warning_on:bool, alert_on:bool) -> Tuple[bool, bool]:
# Color parsing
if "33m[!]" in line:
warning_on = True
elif "31m[!]" in line:
alert_on = True
# No-color parsing: the warning strings are hard-coded here
elif "[!]" in line and any(
[
m in line
for m in [
"The version is out of date",
"No WPVulnDB API Token given",
"You can get a free API token",
]
]
):
warning_on = True
elif "[!]" in line:
alert_on = True
# Both methods (with and without color) apply supplementary processing:
# warn for insecure/outdated WordPress and for interesting-finding warning strings
if any(
string in line
for string in ["Insecure", "Outdated"]
+ InterestingFinding.INTERESTING_FINDING_WARNING_STRINGS
):
warning_on = True
# Trigger alert based on interesting finding alert strings
if any(
string in line
for string in InterestingFinding.INTERESTING_FINDING_ALERT_STRINGS
):
alert_on = True
# Lower voice of Vulnerabilities found but not plugin version
if "The version could not be determined" in line and alert_on:
alert_on = False
warning_on = True
return (warning_on, alert_on)
def _ignore_false_positives(self, infos:List[str], warnings:List[str], alerts:List[str]) -> Tuple[List[str], List[str], List[str]]:
"""Process false positives"""
for alert in warnings + alerts:
if self.is_false_positive(alert):
try:
alerts.remove(alert)
except ValueError:
warnings.remove(alert)
infos.append("[False positive]\n{}".format(alert))
return infos, warnings, alerts
def parse_cli(self, wpscan_output:str) -> Tuple[List[str], List[str], List[str]]:
"""Parse the ( messages, warnings, alerts ) from WPScan CLI output string.
Return results as tuple( messages, warnings, alerts )."""
# Init scan messages
(messages, warnings, alerts) = ([], [], [])
# Init message toggles
warning_on, alert_on = False, False
message_lines = []
current_message = ""
# Every blank ("") line will be considered as a message separator
for line in wpscan_output.splitlines() + [""]:
# Parse all output lines and build infos, warnings and alerts
line = line.strip()
# Parse line
warning_on, alert_on = self._parse_cli_toogle(line, warning_on, alert_on)
# Remove colorization anyway after parsing
line = re.sub(r"(\x1b|\[[0-9][0-9]?m)", "", line)
# Append line to message. Handle the begin of the message case
message_lines.append(line)
# Build message
current_message = "\n".join(
[m for m in message_lines if m not in ["", "|"]]
).strip()
# A blank line acts as the message separator.
# Only end the current message if it is not empty.
if line.strip() not in [""] or current_message.strip() == "":
continue
# End of the message
# Post process message to separate ALERTS into different messages of same status and add rest of the infos to warnings
if (alert_on or warning_on) and any(
s in current_message
for s in ["vulnerabilities identified", "vulnerability identified"]
):
messages_separated = []
msg: List[str] = []
for l in message_lines + ["|"]:
if l.strip() == "|":
messages_separated.append(
"\n".join([m for m in msg if m not in ["", "|"]])
)
msg = []
msg.append(l)
# Append Vulnerabilities messages to ALERTS and other infos in one message
vulnerabilities = [
m for m in messages_separated if "| [!] Title" in m.splitlines()[0]
]
# Add the plugin infos to warnings or false positive if every vulnerabilities are ignore
plugin_infos = "\n".join(
[
m
for m in messages_separated
if "| [!] Title" not in m.splitlines()[0]
]
)
if (
len([v for v in vulnerabilities if not self.is_false_positive(v)])
> 0
and "The version could not be determined" in plugin_infos
):
warnings.append(
plugin_infos + "\nAll known vulnerabilities are listed"
)
else:
messages.append(plugin_infos)
if alert_on:
alerts.extend(vulnerabilities)
elif warning_on:
warnings.extend(vulnerabilities)
elif warning_on:
warnings.append(current_message)
else:
messages.append(current_message)
message_lines = []
current_message = ""
# Reset warning/alert toggles
warning_on, alert_on = False, False
return self._ignore_false_positives(messages, warnings, alerts)
def get_error(self) -> Optional[str]:
if "Scan Aborted" in self.data.get('output', ''):
return "WPScan failed: {}".format(
"\n".join(
line for line in self.data.get('output', '').splitlines() if "Scan Aborted" in line
)
)
else:
return None
def get_results(self) -> WPScanResults:
"""
Returns a dictionary structure like
::
{
'infos':[],
'warnings':[],
'alerts':[],
'summary':{
'table':None,
'line':'WPScan result summary: alerts={}, warnings={}, infos={}, error={}'
},
'error':None
}
"""
results = WPScanResults()
results["infos"] = self.get_infos()
results["warnings"] = self.get_warnings()
results["alerts"] = self.get_alerts()
results["summary"]["line"] = self.get_summary_line()
results["error"] = self.get_error()
return results
| 36.725225
| 135
| 0.531461
| 855
| 8,153
| 4.933333
| 0.218713
| 0.027738
| 0.012802
| 0.018966
| 0.163585
| 0.122807
| 0.083689
| 0.045045
| 0.016596
| 0
| 0
| 0.002346
| 0.372501
| 8,153
| 221
| 136
| 36.891403
| 0.822127
| 0.214277
| 0
| 0.192857
| 0
| 0
| 0.071858
| 0.003414
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064286
| false
| 0
| 0.035714
| 0
| 0.171429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aeb3a40599bade079c5b02f2bf2cc33038b720fa
| 3,220
|
py
|
Python
|
tests/animation/generator/test_animation.py
|
OrangeUtan/MCMetagen
|
0293ea14bf1c6b1bae58741f9876ba662930b43d
|
[
"MIT"
] | null | null | null |
tests/animation/generator/test_animation.py
|
OrangeUtan/MCMetagen
|
0293ea14bf1c6b1bae58741f9876ba662930b43d
|
[
"MIT"
] | null | null | null |
tests/animation/generator/test_animation.py
|
OrangeUtan/MCMetagen
|
0293ea14bf1c6b1bae58741f9876ba662930b43d
|
[
"MIT"
] | null | null | null |
import pytest
from mcanitexgen.animation.generator import Animation, GeneratorError
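# `frame` below builds the {"index": ..., "time": ...} dicts that Animation stores as frames;
# the shape mirrors Minecraft .mcmeta animation frame entries (an assumption from the project's domain).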
def frame(index: int, time: int):
return {"index": index, "time": time}
class Test_append:
def test(self):
anim1 = Animation(0, 10, [frame(0, 10)])
anim2 = Animation(10, 20, [frame(0, 10)])
anim1.append(anim2)
assert anim1 == Animation(0, 20, [frame(0, 10), frame(0, 10)])
@pytest.mark.parametrize(
"anim1, anim2, result",
[
(
Animation(0, 10, [frame(0, 10)]),
Animation(11, 20, [frame(0, 9)]),
Animation(0, 20, [frame(0, 11), frame(0, 9)]),
),
(
Animation(0, 10, [frame(0, 10)]),
Animation(30, 40, [frame(0, 10)]),
Animation(0, 40, [frame(0, 30), frame(0, 10)]),
),
],
)
def test_fill_time_gap_between_animations(
self, anim1: Animation, anim2: Animation, result: Animation
):
anim1.append(anim2)
assert anim1 == result
def test_time_ranges_overlap(self):
anim1 = Animation(0, 10, [frame(0, 10)])
anim2 = Animation(5, 15, [frame(0, 10)])
with pytest.raises(GeneratorError, match=".*starts before the other.*"):
anim1.append(anim2)
class Test_add_frame:
@pytest.mark.parametrize(
"anim, index, start, end, result",
[
(Animation(0, 0), 0, 0, 10, Animation(0, 10, [frame(0, 10)])),
],
)
def test(self, anim: Animation, index, start, end, result):
anim.add_frame(index, start, end)
assert anim == result
@pytest.mark.parametrize(
"anim, index, start, end, result",
[
(Animation(0, 0), 0, 10, 25, Animation(10, 25, [frame(0, 15)])),
(Animation(10, 10), 0, 20, 30, Animation(20, 30, [frame(0, 10)])),
],
)
def test_add_frame_with_start_to_empty_animation(
self, anim: Animation, index, start, end, result
):
anim.add_frame(index, start, end)
assert anim == result
@pytest.mark.parametrize(
"anim, index, start, end, result",
[
(
Animation(0, 10, [frame(0, 10)]),
0,
20,
30,
Animation(0, 30, [frame(0, 20), frame(0, 10)]),
),
(
Animation(20, 40, [frame(0, 5), frame(0, 5)]),
0,
60,
70,
Animation(20, 70, [frame(0, 5), frame(0, 25), frame(0, 10)]),
),
],
)
def test_fill_time_gap(self, anim: Animation, index, start, end, result):
anim.add_frame(index, start, end)
assert anim == result
@pytest.mark.parametrize(
"start, end",
[
(0, 0),
(10, 10),
(12, 11),
(-4, -5),
(-6, -5),
],
)
def test_invalid_start_and_end(self, start, end):
anim = Animation(0, 0)
with pytest.raises(
GeneratorError, match=f"Illegal start and end for frame: '{start}' '{end}'"
):
anim.add_frame(0, start, end)
| 29.009009
| 87
| 0.491304
| 372
| 3,220
| 4.174731
| 0.166667
| 0.100451
| 0.07727
| 0.040567
| 0.596909
| 0.430779
| 0.405666
| 0.358017
| 0.324533
| 0.324533
| 0
| 0.094823
| 0.358075
| 3,220
| 110
| 88
| 29.272727
| 0.656507
| 0
| 0
| 0.378947
| 0
| 0
| 0.064907
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 1
| 0.084211
| false
| 0
| 0.021053
| 0.010526
| 0.136842
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aeb447aba93643942a7e8b9f82a86a22f927bf04
| 2,214
|
py
|
Python
|
bin/mircx_polsplit.py
|
jdmonnier/mircx_mystic
|
45bf8491117674157b39f49cfe0c92c5ec6da500
|
[
"MIT"
] | 1
|
2022-01-13T19:32:51.000Z
|
2022-01-13T19:32:51.000Z
|
bin/mircx_polsplit.py
|
jdmonnier/mircx_mystic
|
45bf8491117674157b39f49cfe0c92c5ec6da500
|
[
"MIT"
] | null | null | null |
bin/mircx_polsplit.py
|
jdmonnier/mircx_mystic
|
45bf8491117674157b39f49cfe0c92c5ec6da500
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
"""
This is a "quick and dirty" solution to getting polarization data through the pipeline.
This script creates new fits files with independent polarization states.
Make sure you have plenty of diskspace.
"""
from __future__ import print_function
import argparse
import os
from time import sleep
from astropy.io import fits
from tqdm import tqdm
parser = argparse.ArgumentParser(description='Process MIRC-X raw data files')
parser.add_argument("--no-warn", action="store_true")
parser.add_argument("--crop-bad", action="store_true")
parser.add_argument("files", nargs="+", help="File(s) to process")
args = parser.parse_args()
if not args.no_warn:
print("Warning: Make sure you have plenty of disk space; this is going to hurt.")
print("(Hint: ^C while you still can! Sleeping 10 seconds for your benefit.)")
sleep(10)
for dir in ["pol1", "pol2"]:
try:
os.mkdir(dir)
except FileExistsError:
if os.path.isdir(dir):
print("Warning: directory `" + dir + "` already exists")
else:
raise FileExistsError("Looks like you have a file named `" + dir + "`; please remove it.")
def polstate(file, state):
f = fits.open(file)
f[0].header["POLSTATE"] = state
f[0].header["CONF_NA"] = "H_PRISM50" # TEMPORARY FIX
rows = f[0].header["CROPROWS"].split(",")
if len(rows) != 2:
raise ValueError("There must be exactly 2 detector regions. Is this a polarization data file?")
span = 1 - eval(rows[0]) # 50-50 chance it should be rows[1]
if state == 1:
f[0].data = f[0].data[:,:,:span,:]
elif state == 2:
if args.crop_bad:
f[0].data = f[0].data[:,:,span:-2,:]
else:
f[0].data = f[0].data[:,:,span:,:]
else:
raise ValueError("`state` (2nd arg of fcn `polstate`) must have the value either 1 or 2")
path = "pol" + str(state) + "/" + file
f.writeto(path)
f.close()
os.system("fpack " + path)
os.remove(path)
for file in tqdm(args.files):
fz = file[-3:] == ".fz"
if fz:
os.system("funpack " + file)
file = file[:-3]
polstate(file, 1)
polstate(file, 2)
if fz:
os.remove(file)
| 32.558824
| 103
| 0.62421
| 325
| 2,214
| 4.206154
| 0.470769
| 0.013168
| 0.026335
| 0.015362
| 0.115582
| 0.115582
| 0.035113
| 0
| 0
| 0
| 0
| 0.02109
| 0.228997
| 2,214
| 67
| 104
| 33.044776
| 0.779731
| 0.121951
| 0
| 0.092593
| 0
| 0
| 0.274043
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018519
| false
| 0
| 0.111111
| 0
| 0.12963
| 0.074074
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aeb5bac5587511509d5243a09d8d7dc4620f3a3a
| 3,233
|
py
|
Python
|
mars/dataframe/datasource/core.py
|
HarshCasper/mars
|
4c12c968414d666c7a10f497bc22de90376b1932
|
[
"Apache-2.0"
] | 2
|
2019-03-29T04:11:10.000Z
|
2020-07-08T10:19:54.000Z
|
mars/dataframe/datasource/core.py
|
HarshCasper/mars
|
4c12c968414d666c7a10f497bc22de90376b1932
|
[
"Apache-2.0"
] | null | null | null |
mars/dataframe/datasource/core.py
|
HarshCasper/mars
|
4c12c968414d666c7a10f497bc22de90376b1932
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...context import get_context
from ...serialize import Int64Field, KeyField
from ...tiles import TilesError
from ..operands import DataFrameOperand, DataFrameOperandMixin
class HeadOptimizedDataSource(DataFrameOperand, DataFrameOperandMixin):
__slots__ = '_tiled',
# Data source op that is optimized for head().
# On the first pass it tiles normally and raises TilesError so that the first
# chunk gets executed; when iterative tiling triggers a second pass, it checks
# whether that first chunk already provides the requested number of rows.
_nrows = Int64Field('nrows')
# for chunk
_first_chunk = KeyField('first_chunk')
@property
def nrows(self):
return self._nrows
@property
def first_chunk(self):
return getattr(self, '_first_chunk', None)
@classmethod
def _tile(cls, op): # pragma: no cover
raise NotImplementedError
@classmethod
def _tile_head(cls, op: "HeadOptimizedDataSource"):
if op.first_chunk is None:
op._tiled = tiled = cls._tile(op)
chunks = tiled[0].chunks
err = TilesError('HeadOrTailOptimizeDataSource requires '
'some dependencies executed first')
op._first_chunk = chunk = chunks[0]
err.partial_tiled_chunks = [chunk.data]
raise err
else:
tiled = op._tiled
chunks = tiled[0].chunks
del op._tiled
ctx = get_context()
chunk_shape = ctx.get_chunk_metas([op.first_chunk.key])[0].chunk_shape
# reset first chunk
op._first_chunk = None
for c in chunks:
c.op._first_chunk = None
if chunk_shape[0] == op.nrows:
# the first chunk has enough data
tiled[0]._nsplits = tuple((s,) for s in chunk_shape)
chunks[0]._shape = chunk_shape
tiled[0]._chunks = chunks[:1]
tiled[0]._shape = chunk_shape
else:
for chunk in tiled[0].chunks:
chunk.op._nrows = None
# otherwise
tiled = [tiled[0].iloc[:op.nrows]._inplace_tile()]
return tiled
@classmethod
def tile(cls, op: "HeadOptimizedDataSource"):
if op.nrows is not None:
return cls._tile_head(op)
else:
return cls._tile(op)
class ColumnPruneSupportedDataSourceMixin(DataFrameOperandMixin):
__slots__ = ()
def get_columns(self): # pragma: no cover
raise NotImplementedError
def set_pruned_columns(self, columns): # pragma: no cover
raise NotImplementedError
| 33.677083
| 82
| 0.631921
| 381
| 3,233
| 5.207349
| 0.383202
| 0.065524
| 0.030242
| 0.027218
| 0.108871
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012153
| 0.287349
| 3,233
| 95
| 83
| 34.031579
| 0.848958
| 0.274977
| 0
| 0.224138
| 0
| 0
| 0.064683
| 0.03191
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12069
| false
| 0
| 0.068966
| 0.034483
| 0.37931
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aeb5f63f02d815be3691dc63ea53c88a38fdabc3
| 1,759
|
py
|
Python
|
scripts/chengyu.py
|
cfeibiao/chinese-xinhua
|
fc39a885e6bbd6d79576997bea53682af4e8f596
|
[
"MIT"
] | 9,321
|
2018-02-10T09:17:33.000Z
|
2022-03-31T11:39:38.000Z
|
scripts/chengyu.py
|
cfeibiao/chinese-xinhua
|
fc39a885e6bbd6d79576997bea53682af4e8f596
|
[
"MIT"
] | 48
|
2018-07-30T12:35:49.000Z
|
2022-03-11T03:49:24.000Z
|
scripts/chengyu.py
|
cfeibiao/chinese-xinhua
|
fc39a885e6bbd6d79576997bea53682af4e8f596
|
[
"MIT"
] | 2,255
|
2018-03-12T09:54:37.000Z
|
2022-03-31T10:17:47.000Z
|
# -*- coding: utf-8 -*-
"""
author: pwxcoo
date: 2018-02-05
description: crawl, download and save chengyu (Chinese idioms)
"""
import requests, json
from bs4 import BeautifulSoup
def downloader(url):
"""
Download the idioms and save them
"""
response = requests.get(url)
if response.status_code != 200:
print(f'{url} is failed!')
return []  # return an empty list so the caller can keep concatenating results
print(f'{url} is parsing')
html = BeautifulSoup(response.content.decode('gbk', errors='ignore'), "lxml")
table = html.find_all('table')[-2]
prefix = 'http://www.zd9999.com'
words = [prefix + a.get('href') for a in table.find_all('a')]
res = []
for i in range(0, len(words)):
response = requests.get(words[i])
print(f'{[words[i]]} is parsing')
if response.status_code != 200:
print(f'{words[i]} is failed!')
continue
wordhtml = BeautifulSoup(response.content.decode('gbk', errors='ignore'), "lxml")
explanation = wordhtml.find_all('table')[-3].find_all('tr')
res.append({'word':explanation[0].text.strip(),\
'pinyin': explanation[1].find_all('tr')[0].find_all('td')[1].text.strip(),\
'explanation': explanation[1].find_all('tr')[1].find_all('td')[1].text.strip(),\
'derivation': explanation[1].find_all('tr')[2].find_all('td')[1].text.strip(),\
'example': explanation[1].find_all('tr')[3].find_all('td')[1].text.strip()})
return res
if __name__ == '__main__':
res = downloader('http://www.zd9999.com/cy/')
for i in range(2, 199):
res += downloader(f'http://www.zd9999.com/cy/index_{i}.htm')
print(len(res))
with open('chengyu.json', mode='w+', encoding='utf-8') as json_file:
json.dump(res, json_file, ensure_ascii=False)
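For a scraper like the one above, a small retrying fetch helper is often useful; this is a hedged sketch (the retry count, delay and timeout are arbitrary choices, not part of the original script).
import time
import requests

def fetch_with_retry(url, retries=3, delay=1.0):
    """GET a URL with simple retries; return the response, or None if every attempt fails."""
    for _ in range(retries):
        try:
            response = requests.get(url, timeout=10)
            if response.status_code == 200:
                return response
        except requests.RequestException:
            pass  # network error: fall through and retry
        time.sleep(delay)  # back off briefly before the next attempt
    return None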
| 32.574074
| 100
| 0.582149
| 234
| 1,759
| 4.264957
| 0.388889
| 0.084168
| 0.04509
| 0.076152
| 0.382766
| 0.240481
| 0.164329
| 0.106212
| 0
| 0
| 0
| 0.035845
| 0.222854
| 1,759
| 54
| 101
| 32.574074
| 0.694221
| 0.048891
| 0
| 0.058824
| 0
| 0
| 0.172226
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.058824
| 0
| 0.147059
| 0.147059
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aeba289fa36713ab8da25a19a9e51eb95799d065
| 6,125
|
py
|
Python
|
iadmin/filters.py
|
saxix/django-iadmin
|
675317e8f0b4142eaf351595da27c065637a83ba
|
[
"BSD-1-Clause"
] | 1
|
2015-06-23T09:24:12.000Z
|
2015-06-23T09:24:12.000Z
|
iadmin/filters.py
|
saxix/django-iadmin
|
675317e8f0b4142eaf351595da27c065637a83ba
|
[
"BSD-1-Clause"
] | null | null | null |
iadmin/filters.py
|
saxix/django-iadmin
|
675317e8f0b4142eaf351595da27c065637a83ba
|
[
"BSD-1-Clause"
] | null | null | null |
from django.contrib.admin.filters import RelatedFieldListFilter, AllValuesFieldListFilter
from django.db import models
from django.db.models.query_utils import Q
from django.utils.translation import ugettext as _
from django.utils.encoding import smart_unicode
class CellFilter(object):
title = ""
menu_labels = {'lt': _('Less than'),
'gt': _('Greater than'),
'lte': _('Less or equals than'),
'gte': _('Greater or equals than'),
'exact': _('Equals to'),
'not': _('Not equals to'),
'rem': _('Remove filter')}
def __init__(self, field, request, params, model, model_admin, field_path, column=None):
self.column = column or field_path or field.name
self.col_operators = model_admin.cell_filter_operators.get(field.name, ('exact', 'not'))
self.seed = field_path
def __repr__(self):
return "<%s for `%s` as %s>" % (self.__class__.__name__, self.column, id(self))
def is_active(self, cl):
active_filters = cl.params.keys()
for x in self.expected_parameters():
if x in active_filters:
return True
return False
def has_output(self):
return True
def get_menu_item_for_op(self, op):
return CellFilter.menu_labels.get(op), '%s__%s' % (self.seed, op)
def expected_parameters(self):
expected_parameters = []
for op in self.col_operators:
filter = '%s__%s' % (self.seed, op)
expected_parameters.append(filter)
return expected_parameters
class ChoicesCellFilter(CellFilter, AllValuesFieldListFilter):
pass
class BooleanCellFilter(CellFilter, AllValuesFieldListFilter):
def __init__(self, field, request, params, model, model_admin, field_path, column=None):
self.col_operators = model_admin.cell_filter_operators.get(field.name, ('exact', 'not'))
super(BooleanCellFilter, self).__init__(field, request, params, model, model_admin, field_path, column)
def get_menu_item_for_op(self, op):
if op in ('exact', ''):
return _('Yes'), self.seed
else:
return _('No'), '%s__not' % self.seed
def expected_parameters(self):
expected_parameters = []
ops = [op for op in self.col_operators if op != 'exact']
expected_parameters.append(self.seed)
for op in ops:
filter = '%s__%s' % (self.seed, op)
expected_parameters.append(filter)
return expected_parameters
class FieldCellFilter(CellFilter, AllValuesFieldListFilter):
def get_menu_item_for_op(self, op):
if op == 'exact':
return CellFilter.menu_labels.get(op), self.seed
return CellFilter.menu_labels.get(op), '%s__%s' % (self.seed, op)
def expected_parameters(self):
expected_parameters = []
ops = [op for op in self.col_operators if op != 'exact']
expected_parameters.append(self.seed)
for op in ops:
filter = '%s__%s' % (self.seed, op)
expected_parameters.append(filter)
return expected_parameters
class RelatedFieldCellFilter(RelatedFieldListFilter, CellFilter):
def __init__(self, field, request, params, model, model_admin, field_path, column=None):
super(RelatedFieldCellFilter, self).__init__(field, request, params, model, model_admin, field_path)
self.column = column or field_path or field.name
self.col_operators = model_admin.cell_filter_operators.get(field.name, ('exact', 'not'))
self.seed = "__".join(self.lookup_kwarg.split('__')[:-1])
class AllValuesComboFilter(AllValuesFieldListFilter):
template = 'iadmin/filters/combobox.html'
class RelatedFieldComboFilter(RelatedFieldListFilter):
template = 'iadmin/filters/fieldcombobox.html'
class RelatedFieldRadioFilter(RelatedFieldListFilter):
template = 'iadmin/filters/fieldradio.html'
class RelatedFieldCheckBoxFilter(RelatedFieldListFilter):
template = 'iadmin/filters/fieldcheckbox.html'
def __init__(self, field, request, params, model, model_admin, field_path):
super(RelatedFieldCheckBoxFilter, self).__init__(field, request, params, model, model_admin, field_path)
self.lookup_val = request.GET.getlist(self.lookup_kwarg, [])
def queryset(self, request, queryset):
if not len(self.lookup_val):
return queryset
filters = []
for val in self.lookup_val:
filters.append(Q(**{self.lookup_kwarg: val}))
query = filters.pop()
for item in filters:
query |= item
return queryset.filter(query)
def choices(self, cl):
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
yield {
'selected': not len(self.lookup_val) and not self.lookup_val_isnull,
'query_string': cl.get_query_string({},
[self.lookup_kwarg, self.lookup_kwarg_isnull]),
'display': _('All'),
}
yield {
'selected': self.lookup_val_isnull,
'query_string': cl.get_query_string({self.lookup_kwarg_isnull: 1},
[self.lookup_kwarg, self.lookup_kwarg_isnull]),
'display': _('None'),
}
for pk_val, val in self.lookup_choices:
yield {
'selected': smart_unicode(pk_val) in self.lookup_val,
'query_string': cl.get_query_string({
self.lookup_kwarg: pk_val,
}, [self.lookup_kwarg_isnull]),
'display': val,
}
if (isinstance(self.field, models.related.RelatedObject)
and self.field.field.null or hasattr(self.field, 'rel')
and self.field.null):
yield {
'selected': bool(self.lookup_val_isnull),
'query_string': cl.get_query_string({
self.lookup_kwarg_isnull: 'True',
}, [self.lookup_kwarg]),
'display': EMPTY_CHANGELIST_VALUE,
}
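The OR-combining loop in RelatedFieldCheckBoxFilter.queryset can also be written with functools.reduce; a short sketch follows (it assumes Django is installed, and the lookup name is invented).
import operator
from functools import reduce
from django.db.models import Q

def or_filter(lookup, values):
    """Build one Q object that ORs a lookup over several values,
    equivalent to the pop-and-|= loop in RelatedFieldCheckBoxFilter.queryset."""
    return reduce(operator.or_, (Q(**{lookup: value}) for value in values))

# e.g. or_filter('author__id', [1, 2, 3]) behaves like
# Q(author__id=1) | Q(author__id=2) | Q(author__id=3)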
| 37.576687
| 112
| 0.629388
| 692
| 6,125
| 5.302023
| 0.186416
| 0.057236
| 0.04906
| 0.043881
| 0.501499
| 0.473699
| 0.458981
| 0.458981
| 0.432815
| 0.419733
| 0
| 0.000442
| 0.260408
| 6,125
| 162
| 113
| 37.808642
| 0.809492
| 0
| 0
| 0.346457
| 0
| 0
| 0.077224
| 0.020245
| 0
| 0
| 0
| 0
| 0
| 1
| 0.11811
| false
| 0.007874
| 0.047244
| 0.023622
| 0.393701
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aebcb1ae1aa0b22cd2b6d96651c21aa61504ef82
| 504
|
py
|
Python
|
DGF/resolvers/change.py
|
LonguCodes/DGF
|
bd344eff34cbe6438f71631e2cc103f1c4584e09
|
[
"MIT"
] | null | null | null |
DGF/resolvers/change.py
|
LonguCodes/DGF
|
bd344eff34cbe6438f71631e2cc103f1c4584e09
|
[
"MIT"
] | null | null | null |
DGF/resolvers/change.py
|
LonguCodes/DGF
|
bd344eff34cbe6438f71631e2cc103f1c4584e09
|
[
"MIT"
] | null | null | null |
from .utils import get_filters, get_values, get_relations, set_values, set_relations
def default_resolve(schema, model, data, **kwargs):
filters = get_filters(schema, data)
return model.objects.filter(**filters)
def default_execute(schema, data, raw_data, **kwargs):
values = get_values(schema, raw_data)
relations = get_relations(schema, raw_data)
for model in data:
set_values(model, values)
set_relations(model, relations)
model.save()
return data
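A tiny, self-contained illustration of the resolve-then-execute flow above, with plain dicts standing in for DGF schemas and Django model instances (the real helpers from .utils are not reproduced here).
# Hypothetical stand-ins: a list of dicts plays the role of model.objects.
def resolve(records, filters):
    return [r for r in records if all(r.get(k) == v for k, v in filters.items())]

def execute(records, values):
    for record in records:
        record.update(values)  # mutate the selected records, like set_values + model.save()
    return records

people = [{'id': 1, 'name': 'Ada'}, {'id': 2, 'name': 'Bob'}]
print(execute(resolve(people, {'id': 2}), {'name': 'Beatrice'}))
# -> [{'id': 2, 'name': 'Beatrice'}]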
| 28
| 84
| 0.712302
| 66
| 504
| 5.212121
| 0.348485
| 0.061047
| 0.104651
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 504
| 17
| 85
| 29.647059
| 0.843137
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.083333
| 0
| 0.416667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aec0c6a44b056aadc36e40e0073e2808b0d0bb55
| 10,463
|
py
|
Python
|
mvg_distributions/sqrt_gamma_gaussian.py
|
Liang813/tf_mvg
|
01bc681a8b3aac5dcf0837d481b963f4968eb777
|
[
"MIT"
] | 21
|
2019-04-04T07:46:54.000Z
|
2021-12-15T18:06:35.000Z
|
mvg_distributions/sqrt_gamma_gaussian.py
|
Liang813/tf_mvg
|
01bc681a8b3aac5dcf0837d481b963f4968eb777
|
[
"MIT"
] | 8
|
2019-03-01T10:08:30.000Z
|
2021-10-04T13:00:11.000Z
|
mvg_distributions/sqrt_gamma_gaussian.py
|
Liang813/tf_mvg
|
01bc681a8b3aac5dcf0837d481b963f4968eb777
|
[
"MIT"
] | 7
|
2019-12-18T23:41:44.000Z
|
2021-11-21T10:15:48.000Z
|
import numpy as np
import tensorflow as tf
from tensorflow_probability.python.distributions import seed_stream
import tensorflow_probability as tfp
import mvg_distributions.covariance_representations as cov_rep
from mvg_distributions.gamma import SqrtGamma
tfd = tfp.distributions
tfb = tfp.bijectors
class SqrtGammaGaussian(tfd.Distribution):
def __init__(self, df, log_diag_scale, add_mode_correction=False, validate_args=False, allow_nan_stats=True,
name="SqrtGammaGaussian"):
"""
Square root Gamma-Gaussian distribution, this is equivalent to a Cholesky-Wishart distribution with a
diagonal scale matrix. Thus it has the same hyper-parameters as the Cholesky-Wishart distribution.
This distribution expects as input Cholesky Precision matrices. Moreover, it assumes that the diagonal elements
in the matrix are log(values).
Args:
The distribution is defined for batch (b) of M (pxp) matrices, forming a tensor of [b, p, p]
df: degrees of freedom, a tensor of [b], the values in it must be df > p - 1
log_diag_scale: a tensor of [b, p] with the log diagonal values of the matrix S
add_mode_correction: bool, if using the distribution as a prior, setting this to True will add
a correction factor to log_diag_scale, such that the log_prob will have the maximum in S
validate_args:
allow_nan_stats:
name:
"""
parameters = locals()
with tf.name_scope(name=name):
df = tf.convert_to_tensor(df)
log_diag_scale = tf.convert_to_tensor(log_diag_scale)
assert df.shape.ndims == 1
assert log_diag_scale.shape.ndims == 2
self._df = df
self._log_diag_scale = log_diag_scale
graph_parents = [df, log_diag_scale]
self.p = self.log_diag_scale.shape[1].value
if self.p is None:
self.p = tf.shape(self.log_diag_scale)[1]
self._mode_correction_factor(add_mode_correction)
self._sqrt_gamma_dist = None
self._normal_dist = None
super().__init__(dtype=self.df.dtype, reparameterization_type=tf.distributions.FULLY_REPARAMETERIZED,
validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters,
graph_parents=graph_parents, name=name)
@property
def sqrt_gamma_dist(self):
if self._sqrt_gamma_dist is None:
half_df = 0.5 * self.df # [b]
# 0.0 to 0.5 - 0.5 p, then add 0.5 * df to all
a = np.linspace(0.0, 0.5 - 0.5 * self.p, self.p, dtype=np.float32) # [n]
a = a[np.newaxis, :] + half_df[:, tf.newaxis] # [b, n]
b = 0.5 / tf.exp(self.log_diag_scale) # [b, n]
self._sqrt_gamma_dist = SqrtGamma(concentration=a, rate=b)
return self._sqrt_gamma_dist
@property
def normal_dist(self):
if self._normal_dist is None:
sqrt_diag_scale = tf.exp(0.5 * self.log_diag_scale)
sqrt_diag_scale = tf.tile(sqrt_diag_scale[:, :, tf.newaxis], (1, 1, self.p)) # [b, n, n]
self._normal_dist = tfd.Normal(loc=0, scale=sqrt_diag_scale) # [b, n, n]
return self._normal_dist
@property
def log_diag_scale(self):
return self._log_diag_scale
@property
def df(self):
return self._df
def _mode_correction_factor(self, add_mode_correction):
if add_mode_correction:
# corrected_diag_scale = diag_scale * ((p - 1)/(tf.range(p) * (1 - p) + (p - 1) * (df - 1)))
correction_factor = tf.log(self.p - 1.)
p_range = tf.range(self.p, dtype=self._log_diag_scale.dtype)[tf.newaxis, :]
correction_factor -= tf.log(p_range * (1. - self.p) + (self.p - 1.) * (self.df[:, tf.newaxis] - 1.))
self._log_diag_scale += correction_factor
def _log_prob_sqrt_gamma(self, x):
log_diag_prob = self.sqrt_gamma_dist.log_prob(tf.matrix_diag_part(x))
return tf.reduce_sum(log_diag_prob, axis=1)
def _log_prob_normal(self, x):
log_off_diag_prob = self.normal_dist.log_prob(x)
off_diag_mask = tf.ones(shape=tf.shape(x))
off_diag_mask = tf.matrix_band_part(off_diag_mask, -1, 0)
off_diag_mask = tf.matrix_set_diag(off_diag_mask, tf.zeros(shape=tf.shape(x)[:-1]))
log_off_diag_prob *= off_diag_mask
return tf.reduce_sum(log_off_diag_prob, axis=[1, 2])
def _log_prob(self, x):
log_diag_prob = self._log_prob_sqrt_gamma(x)
log_off_diag_prob = self._log_prob_normal(x)
return log_diag_prob + log_off_diag_prob
def _batch_shape_tensor(self):
return tf.shape(self.log_diag_scale)[0]
def _batch_shape(self):
return self.log_diag_scale.shape[0:1]
def _event_shape_tensor(self):
event_dim = tf.shape(self.log_diag_scale)[1]
return tf.stack([event_dim, event_dim])
def _event_shape(self):
event_dim = self.log_diag_scale.shape[1]
return tf.TensorShape([event_dim, event_dim])
def _sample_n(self, n, seed=None):
stream = seed_stream.SeedStream(seed=seed, salt="Wishart")
# Sample a normal full matrix
x = self.normal_dist.sample(sample_shape=n, seed=stream())
# Sample the log diagonal
log_g = self.sqrt_gamma_dist.sample(sample_shape=n, seed=stream())
# Discard the upper triangular part
x = tf.matrix_band_part(x, -1, 0)
# Set the diagonal
x = tf.matrix_set_diag(x, log_g)
return x
class SparseSqrtGammaGaussian(SqrtGammaGaussian):
def __init__(self, df, log_diag_scale, add_mode_correction=False, validate_args=False, allow_nan_stats=True,
name="SparseSqrtGammaGaussian"):
"""
Sparse square root Gamma-Gaussian distribution, this is equivalent to a Cholesky-Wishart distribution with a
diagonal scale matrix and with a sparsity correction factor. Thus it has the same hyper-parameters as the
Cholesky-Wishart distribution.
Args:
The distribution is defined for batch (b) of M (pxp) matrices, forming a tensor of [b, p, p]
df: degrees of freedom, a tensor of [b], the values in it must be df > p - 1
log_diag_scale: a tensor of [b, p] with the log diagonal values of the matrix S
add_mode_correction: bool, if using the distribution as a prior, setting this to True will add
a correction factor to log_diag_scale, such that the log_prob will have the maximum in S
validate_args:
allow_nan_stats:
name:
"""
super().__init__(df, log_diag_scale, add_mode_correction=add_mode_correction, validate_args=validate_args,
allow_nan_stats=allow_nan_stats, name=name)
@staticmethod
def _convert_to_cov_obj(value):
if not isinstance(value, cov_rep.PrecisionConvCholFilters):
value = tf.convert_to_tensor(value, name="value")
log_prob_shape = ()
if value.shape.ndims == 2:
# Add batch dimension
value = tf.expand_dims(value, axis=0)
if value.shape.ndims == 3:
log_prob_shape = tf.shape(value)[0:1]
if value.shape.ndims == 4:
# Collapse batch and sample dimension
shape = tf.shape(value)
log_prob_shape = shape[0:2]
new_shape = [log_prob_shape[0] * log_prob_shape[1]]
new_shape = tf.concat((new_shape, shape[2:]), axis=0)
value = tf.reshape(value, new_shape)
value = cov_rep.PrecisionCholesky(chol_precision=value)
else:
log_prob_shape = value.sample_shape[0:1]
return value, log_prob_shape
@property
def normal_dist(self):
if self._normal_dist is None:
sqrt_diag_scale = tf.exp(0.5 * self.log_diag_scale)
sqrt_diag_scale = sqrt_diag_scale[:, :, tf.newaxis] # [b, n, 1]
self._normal_dist = tfd.Normal(loc=0, scale=sqrt_diag_scale) # [b, n, 1]
return self._normal_dist
def _call_log_prob(self, value, name, **kwargs):
with self._name_scope(name):
value, log_prob_shape = self._convert_to_cov_obj(value)
try:
log_prob = self._log_prob(value)
return tf.reshape(log_prob, log_prob_shape)
except NotImplementedError as original_exception:
try:
log_prob = tf.log(self._prob(value))
return tf.reshape(log_prob, log_prob_shape)
except NotImplementedError:
raise original_exception
def _log_prob_sqrt_gamma(self, x):
log_diag_prob = self.sqrt_gamma_dist.log_prob(x.log_diag_chol_precision)
return tf.reduce_sum(log_diag_prob, axis=1)
def _log_prob_normal(self, x):
if isinstance(x, cov_rep.PrecisionConvCholFilters):
nb = x.recons_filters_precision.shape[2].value
# Get the elements in matrix [b, n, n] after they've been aligned per row, this is a [b, n, nb] tensor
# that if it were reshaped to [b, n_w, n_h, n_b], the vector [b, i, j, :] contain the values of
# the kth row in the matrix, where k corresponds to the i,j pixel.
# For each row, we discard the leading zeros and the diagonal element
off_diag_elements_aligned = x.recons_filters_precision_aligned[:, :, nb // 2 + 1:]
log_off_diag_prob = self.normal_dist.log_prob(off_diag_elements_aligned)
# Some elements in recons_filters_precision get zeroed out due to the zero padding for elements out of the
# image in the convolution operator, thus they are not part of the Cholesky matrix.
# Do not take into account those elements for the log probability computation
off_diag_mask_aligned = x.off_diag_mask_compact_aligned()
# log_off_diag_prob is [b, n, nb // 2 + 1], off_diag_mask is [n, nb]
log_off_diag_prob *= off_diag_mask_aligned[tf.newaxis, :, nb // 2 + 1:]
log_off_diag_prob = tf.reduce_sum(log_off_diag_prob, axis=[1, 2])
else:
log_off_diag_prob = super()._log_prob_normal(x.chol_precision)
return log_off_diag_prob
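A NumPy-only sketch of the matrix structure this distribution describes: a lower-triangular factor whose diagonal is the square root of Gamma draws (with the concentrations and rates used in sqrt_gamma_dist above) and whose strictly lower part is Gaussian. The dimension and scale below are arbitrary illustrations, not the class defaults.
import numpy as np

rng = np.random.default_rng(0)
p, df, scale = 4, 10.0, 1.0

# Diagonal: square roots of Gamma draws; the concentration decreases along the diagonal,
# mirroring a = 0.5*df + linspace(0, 0.5 - 0.5*p, p) and rate b = 0.5/scale above.
concentration = 0.5 * df + np.linspace(0.0, 0.5 - 0.5 * p, p)
diag = np.sqrt(rng.gamma(shape=concentration, scale=2.0 * scale))

# Strictly lower-triangular part: independent zero-mean Gaussians.
chol = np.tril(rng.normal(scale=np.sqrt(scale), size=(p, p)), k=-1)
np.fill_diagonal(chol, diag)
print(chol)  # one sample of a Cholesky factor with a sqrt-Gamma diagonal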
| 43.057613
| 119
| 0.640925
| 1,510
| 10,463
| 4.163576
| 0.156291
| 0.051535
| 0.049626
| 0.033084
| 0.431684
| 0.395419
| 0.367584
| 0.333386
| 0.333386
| 0.322252
| 0
| 0.010617
| 0.270859
| 10,463
| 242
| 120
| 43.235537
| 0.813475
| 0.252604
| 0
| 0.2
| 0
| 0
| 0.006873
| 0.00304
| 0
| 0
| 0
| 0
| 0.013793
| 1
| 0.137931
| false
| 0
| 0.041379
| 0.027586
| 0.317241
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aec3ddbaacba1c772ca5cc048314681c7330681e
| 3,606
|
py
|
Python
|
anymail/webhooks/sendinblue.py
|
alee/django-anymail
|
acca6a46e17143caadb0445d7bdeb2f1eb00b71b
|
[
"BSD-3-Clause"
] | 1,324
|
2016-03-10T04:57:52.000Z
|
2022-03-31T15:14:58.000Z
|
anymail/webhooks/sendinblue.py
|
alee/django-anymail
|
acca6a46e17143caadb0445d7bdeb2f1eb00b71b
|
[
"BSD-3-Clause"
] | 208
|
2016-03-10T03:40:59.000Z
|
2022-03-22T23:16:08.000Z
|
anymail/webhooks/sendinblue.py
|
alee/django-anymail
|
acca6a46e17143caadb0445d7bdeb2f1eb00b71b
|
[
"BSD-3-Clause"
] | 129
|
2016-03-10T09:24:52.000Z
|
2022-02-07T05:37:24.000Z
|
import json
from datetime import datetime
from django.utils.timezone import utc
from .base import AnymailBaseWebhookView
from ..signals import AnymailTrackingEvent, EventType, RejectReason, tracking
class SendinBlueTrackingWebhookView(AnymailBaseWebhookView):
"""Handler for SendinBlue delivery and engagement tracking webhooks"""
esp_name = "SendinBlue"
signal = tracking
def parse_events(self, request):
esp_event = json.loads(request.body.decode('utf-8'))
return [self.esp_to_anymail_event(esp_event)]
# SendinBlue's webhook payload data doesn't seem to be documented anywhere.
# There's a list of webhook events at https://apidocs.sendinblue.com/webhooks/#3.
event_types = {
# Map SendinBlue event type: Anymail normalized (event type, reject reason)
"request": (EventType.QUEUED, None), # received even if message won't be sent (e.g., before "blocked")
"delivered": (EventType.DELIVERED, None),
"hard_bounce": (EventType.BOUNCED, RejectReason.BOUNCED),
"soft_bounce": (EventType.BOUNCED, RejectReason.BOUNCED),
"blocked": (EventType.REJECTED, RejectReason.BLOCKED),
"spam": (EventType.COMPLAINED, RejectReason.SPAM),
"invalid_email": (EventType.BOUNCED, RejectReason.INVALID),
"deferred": (EventType.DEFERRED, None),
"opened": (EventType.OPENED, None), # see also unique_opened below
"click": (EventType.CLICKED, None),
"unsubscribe": (EventType.UNSUBSCRIBED, None),
"list_addition": (EventType.SUBSCRIBED, None), # shouldn't occur for transactional messages
"unique_opened": (EventType.OPENED, None), # you'll *also* receive an "opened"
}
def esp_to_anymail_event(self, esp_event):
esp_type = esp_event.get("event")
event_type, reject_reason = self.event_types.get(esp_type, (EventType.UNKNOWN, None))
recipient = esp_event.get("email")
try:
# SendinBlue supplies "ts", "ts_event" and "date" fields, which seem to be based on the
# timezone set in the account preferences (and possibly with inconsistent DST adjustment).
# "ts_epoch" is the only field that seems to be consistently UTC; it's in milliseconds
timestamp = datetime.fromtimestamp(esp_event["ts_epoch"] / 1000.0, tz=utc)
except (KeyError, ValueError):
timestamp = None
tags = []
try:
# If `tags` param set on send, webhook payload includes 'tags' array field.
tags = esp_event['tags']
except KeyError:
try:
# If `X-Mailin-Tag` header set on send, webhook payload includes single 'tag' string.
# (If header not set, webhook 'tag' will be the template name for template sends.)
tags = [esp_event['tag']]
except KeyError:
pass
try:
metadata = json.loads(esp_event["X-Mailin-custom"])
except (KeyError, TypeError):
metadata = {}
return AnymailTrackingEvent(
description=None,
esp_event=esp_event,
event_id=None, # SendinBlue doesn't provide a unique event id
event_type=event_type,
message_id=esp_event.get("message-id"),
metadata=metadata,
mta_response=esp_event.get("reason"),
recipient=recipient,
reject_reason=reject_reason,
tags=tags,
timestamp=timestamp,
user_agent=None,
click_url=esp_event.get("link"),
)
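The normalization pattern above, reduced to plain Python: look the raw event name up in a table and convert the millisecond ts_epoch field into an aware UTC datetime. The tiny table below is illustrative, not the full mapping.
from datetime import datetime, timezone

EVENT_TYPES = {"delivered": "delivered", "hard_bounce": "bounced", "spam": "complained"}

def normalize(payload):
    event_type = EVENT_TYPES.get(payload.get("event"), "unknown")
    try:
        # "ts_epoch" is in milliseconds, per the comment in the webhook above.
        timestamp = datetime.fromtimestamp(payload["ts_epoch"] / 1000.0, tz=timezone.utc)
    except (KeyError, ValueError):
        timestamp = None
    return {"event": event_type, "timestamp": timestamp, "recipient": payload.get("email")}

print(normalize({"event": "delivered", "ts_epoch": 1609459200000, "email": "a@example.com"}))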
| 42.928571
| 111
| 0.640044
| 408
| 3,606
| 5.544118
| 0.426471
| 0.049514
| 0.024315
| 0.015031
| 0.06366
| 0.027409
| 0
| 0
| 0
| 0
| 0
| 0.002624
| 0.260122
| 3,606
| 83
| 112
| 43.445783
| 0.845202
| 0.278979
| 0
| 0.098361
| 0
| 0
| 0.074806
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032787
| false
| 0.016393
| 0.081967
| 0
| 0.213115
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aeca5f1921632d7c1be82445181945bbbb66f42a
| 901
|
py
|
Python
|
cogs/insult.py
|
nikhilvayeda/Bhendi-Bot-3
|
2268e4310b91d6f3d62fe7bb642c3a57f623e215
|
[
"MIT"
] | 8
|
2020-10-02T04:35:01.000Z
|
2021-11-08T10:38:32.000Z
|
cogs/insult.py
|
nikhilvayeda/Bhendi-Bot-3
|
2268e4310b91d6f3d62fe7bb642c3a57f623e215
|
[
"MIT"
] | 8
|
2020-10-05T07:45:38.000Z
|
2021-03-13T22:02:28.000Z
|
cogs/insult.py
|
nikhilvayeda/Bhendi-Bot-3
|
2268e4310b91d6f3d62fe7bb642c3a57f623e215
|
[
"MIT"
] | 2
|
2020-10-15T05:38:13.000Z
|
2020-10-29T11:41:16.000Z
|
import json
import requests
import discord
from discord.ext import commands
class Fun_insult(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command()
async def insult(self, ctx):
'''insult command'''
_res = requests.get(url='https://evilinsult.com/generate_insult.php?lang=en&type=json')
if _res.status_code == 200:
try:
_data = _res.json()
_insult = _data['insult']
except:
return False
_embed = discord.Embed(color=0x42c42b, title=_insult)
_embed.set_author(name='Bhendi Bot')
_embed.set_thumbnail(url='https://media.giphy.com/media/2pjspMQCi70k/giphy.gif')
await ctx.send(ctx.author.mention, embed=_embed)
def setup(client):
client.add_cog(Fun_insult(client))
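The HTTP part of the command above, as a stand-alone sketch with explicit exception handling instead of a bare except (the URL is the one used by the cog; the timeout is an added assumption).
import requests

def get_insult():
    """Return an insult string from the public API, or None on any failure."""
    url = 'https://evilinsult.com/generate_insult.php?lang=en&type=json'
    try:
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        return response.json().get('insult')
    except (requests.RequestException, ValueError):
        return None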
| 26.5
| 96
| 0.593785
| 103
| 901
| 4.980583
| 0.553398
| 0.035088
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017323
| 0.295228
| 901
| 33
| 97
| 27.30303
| 0.790551
| 0
| 0
| 0
| 0
| 0
| 0.151122
| 0
| 0
| 0
| 0.009445
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.181818
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aecc522dc3defa35037e11a7262e7658ad5e8c35
| 1,873
|
py
|
Python
|
util/monitor.py
|
reservoirlabs/G2-Mininet
|
4e0dd6b5367a1d51ee65310e59fc3b0ba55575b8
|
[
"BSD-3-Clause"
] | 2
|
2021-08-20T08:29:49.000Z
|
2022-02-25T02:08:26.000Z
|
util/monitor.py
|
reservoirlabs/G2-Mininet
|
4e0dd6b5367a1d51ee65310e59fc3b0ba55575b8
|
[
"BSD-3-Clause"
] | 2
|
2019-11-27T09:54:35.000Z
|
2019-12-05T15:52:03.000Z
|
util/monitor.py
|
reservoirlabs/G2-Mininet
|
4e0dd6b5367a1d51ee65310e59fc3b0ba55575b8
|
[
"BSD-3-Clause"
] | 2
|
2020-03-12T14:38:06.000Z
|
2022-03-20T10:15:13.000Z
|
"""
G2_RIGHTS.
This module defines Monitor class to monitor CPU and memory utilization.
Pre-requisite non-standard Python module(s):
psutil
"""
import time
import threading
import psutil
class Monitor():
def __init__(self, interval=1):
""" Monitor constructor.
Args:
interval (int): the polling interval.
Attributes:
interval (int): The polling interval.
readings (list): Observations.
_running (bool): State of monitor.
"""
self.interval = interval
self.readings = []
self._running = False
def start(self):
"""Start the monitor.
"""
self._running = True
def monitor(self):
"""Monitor CPU and memory usage.
"""
if self._running:
# Current time
ctime = time.time()
# A float representing the current system-wide CPU utilization as a percentage.
cpu = psutil.cpu_percent()
# System memory usage percent = (total - available) * 100.0 / total
memData = dict(psutil.virtual_memory()._asdict())
vmem = memData['percent']
self.readings.append((ctime, cpu, vmem))
t = threading.Timer(self.interval, self.monitor)
t.start()
def stop(self):
"""Stop a monitor.
"""
self._running = False
def writeReadings(self, filepath):
"""Write CPU and memory usage to a CSV file; header row is also written..
Args:
filepath (str): Path to output file.
"""
with open(filepath, "w") as fs:
header = "timestamp,cpuPercent,memoryPercent"
fs.write(header + '\n')
for t, c, m in self.readings:
line = str(t)+','+str(c)+','+str(m)
fs.write(line + '\n')
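A short usage sketch of the Monitor class above (assumes psutil is installed; the interval, run time and output path are arbitrary).
import time

monitor = Monitor(interval=1)       # poll once per second
monitor.start()
monitor.monitor()                   # re-schedules itself via threading.Timer
time.sleep(5)                       # let it collect a few readings
monitor.stop()
monitor.writeReadings('usage.csv')  # timestamp,cpuPercent,memoryPercent rows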
| 25.310811
| 91
| 0.549386
| 204
| 1,873
| 4.980392
| 0.460784
| 0.043307
| 0.035433
| 0.037402
| 0.057087
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004858
| 0.34063
| 1,873
| 73
| 92
| 25.657534
| 0.817814
| 0.379605
| 0
| 0.071429
| 0
| 0
| 0.046021
| 0.032598
| 0
| 0
| 0
| 0
| 0
| 1
| 0.178571
| false
| 0
| 0.107143
| 0
| 0.321429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aecfc854993f2f476c1f4760e4745a5af412f1bd
| 15,322
|
py
|
Python
|
spiders/spiders/spiders/qunar.py
|
jiangxuewen16/hq-crawler
|
f03ec1e454513307e335943f224f4d927eaf2bbf
|
[
"MIT"
] | 1
|
2021-02-25T08:33:40.000Z
|
2021-02-25T08:33:40.000Z
|
spiders/spiders/spiders/qunar.py
|
jiangxuewen16/hq-crawler
|
f03ec1e454513307e335943f224f4d927eaf2bbf
|
[
"MIT"
] | null | null | null |
spiders/spiders/spiders/qunar.py
|
jiangxuewen16/hq-crawler
|
f03ec1e454513307e335943f224f4d927eaf2bbf
|
[
"MIT"
] | 2
|
2021-03-08T07:25:16.000Z
|
2021-12-07T15:28:02.000Z
|
# -*- coding: utf-8 -*-
import json
import math
import random
import time
import requests
import scrapy
from scrapy.http import HtmlResponse
from scrapy import Request
from spiders.common import OTA
from spiders.items.spot import spot
from spiders.items.price import price
class QunarSpider(scrapy.Spider):
# tag types: 0 = system tag, 1 = user tag
sys_tags = 0
user_tags_true = 1
user_tags_false = 2
name = 'qunar'
allowed_domains = ['www.qunar.com']
start_urls = ['http://www.qunar.com/']
ota_spot_ids = OTA.OtaSpotIdMap.get_ota_spot_list(OTA.OtaCode.QUNAR)  # list of OTA spot ids
def parse(self, response):
pass
class QunarTagSpider(scrapy.Spider):
name = 'qunar_tag'
allowed_domains = ['www.qunar.com']
total_num = 0  # total number of comments
page_size = 20  # number of comments crawled per page
base_url = r'https://touch.piao.qunar.com/touch/queryCommentsAndTravelTips.json?type=mp&pageSize={page_size}&fromType=SIGHT&pageNum={page_num}&sightId={ota_spot_id}&tagType=44&tagName=%E6%9C%80%E6%96%B0'
start_urls = [
'https://touch.piao.qunar.com/touch/queryCommentsAndTravelTips.json?type=mp&pageSize=1&fromType=SIGHT&pageNum=1&sightId=706176810']
def parse(self, response: HtmlResponse):
# crawl the spot list data
for ota_spot_id in QunarSpider.ota_spot_ids:
# update the comment count for this spot
url = self.base_url.format(ota_spot_id=ota_spot_id, page_num=1, page_size=1)
yield Request(url=url, callback=self.spot_tag, dont_filter=True,
meta={'page_num': 1, 'ota_spot_id': ota_spot_id})
"""获取景区用户点评标签"""
def spot_tag(self, response: HtmlResponse):
response_str = response.body.decode('utf-8')
comment = json.loads(response_str)
if 'data' in comment and 'tagList' in comment['data']:
spot_tag = []
for key, value in enumerate(comment['data']['tagList']):
# print(value['tagName'])
if value['tagType'] in [0, 1, 41, 43, 44]:
tag_type = QunarSpider.sys_tags  # system tag
else:
tag_type = QunarSpider.user_tags_true  # user tag
tag = {'tag_name': value['tagName'], 'tag_num': value['tagNum'], 'tag_score': value['tagScore'],
'tag_type': tag_type}
spot_tag.append(tag)
print(spot_tag, "#" * 20)
print('-' * 20, 'ota_id', OTA.OtaCode.QUNAR.value.id, 'ota_spot_id', response.meta['ota_spot_id'])
spot.Spot.objects(ota_id=OTA.OtaCode.QUNAR.value.id,
ota_spot_id=response.meta['ota_spot_id']).update(
set__tag_list=spot_tag)
pass
class CommentSpider(scrapy.Spider):
name = 'qunar_comment'
allowed_domains = ['www.qunar.com']
total_num = 0  # total number of comments
page_size = 10  # number of comments crawled per page
base_url = r'https://touch.piao.qunar.com/touch/queryCommentsAndTravelTips.json?type=mp&pageSize={page_size}&fromType=SIGHT&pageNum={page_num}&sightId={ota_spot_id}&tagType=44&tagName=%E6%9C%80%E6%96%B0'
start_urls = [
'https://touch.piao.qunar.com/touch/queryCommentsAndTravelTips.json?type=mp&pageSize=1&fromType=SIGHT&pageNum=1&sightId=706176810']
def parse(self, response: HtmlResponse):
headers = {'content-type': 'application/json'}
# crawl the spot list data
for ota_spot_id in QunarSpider.ota_spot_ids:
# update the comment count for this spot
url = self.base_url.format(ota_spot_id=ota_spot_id, page_num=1, page_size=10)
# headers = {'content-type': 'application/json'}
data = requests.get(url, headers=headers)
comment = data.json()
print(ota_spot_id, "共", comment['data']['total'], "条", "*" * 20)
page_size = 10
# total count reported by the site
total_page = comment['data']['total']
# total count already stored in the database
now_total = spot.SpotComment.objects(ota_id=OTA.OtaCode.QUNAR.value.id,
ota_spot_id=ota_spot_id).count()
# number of entries still to save
to_save_total = total_page - now_total
# number of pages still to crawl
total_page = math.ceil(to_save_total / page_size)
for page_num in range(1, total_page + 1):
if page_num == total_page:
page_size = to_save_total - (page_num - 1) * page_size
else:
page_size = page_size
url = self.base_url.format(ota_spot_id=ota_spot_id, page_num=page_num, page_size=page_size)
print("-" * 30)
print(url)
print("+" * 30)
# headers = {'content-type': 'application/json'}
data = requests.get(url, headers=headers)
try:
comment = data.json()
except Exception:
try:
data = requests.get(url, headers=headers)
comment = data.json()
except Exception:
print("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")
print(ota_spot_id, " 第", page_num, "页: ", "共", page_size, "条 ", "*" * 20)
if 'data' in comment and 'commentList' in comment['data']:
for key, value in enumerate(comment['data']['commentList']):
print('正在添加 ', value['author'], ' 的评论', "*" * 20)
spot_comment = spot.SpotComment.objects(ota_id=10004).first()
spot_comment.ota_id = OTA.OtaCode.QUNAR.value.id
spot_comment.ota_spot_id = ota_spot_id
spot_comment.goods_name = value['sightName']
# spot_comment.u_avatar = value['headImg']
spot_comment.u_name = value['author']
spot_comment.c_tag = value['tagList']
spot_comment.c_id = value['commentId']
spot_comment.c_score = value['score']
spot_comment.c_content = value['content']
# spot_comment.c_img = value['imgs']
spot_comment.c_img = [item['small'] for item in value['imgs']]
spot_comment.create_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
yield spot_comment
'''
Attach tags to the review data
'''
class CommentAndTagSpider(scrapy.Spider):
name = 'comment_and_tag'
allowed_domains = ['touch.piao.qunar.com']
def start_requests(self):
for ota_spot_id in QunarSpider.ota_spot_ids:
print(ota_spot_id, 'ota' * 20)
yield scrapy.FormRequest(
'https://touch.piao.qunar.com/touch/queryCommentsAndTravelTips.json?type=mp&pageSize=10&fromType=SIGHT&pageNum=0&sightId=' + str(
ota_spot_id)
, method='GET'
, meta={'ota_spot_id': ota_spot_id}
, callback=self.after_login)
def after_login(self, response):
print('-' * 20)
result = json.loads(response.body)
if 'data' in result and 'tagList' in result['data']:
spot_tag = []
for key, value in enumerate(result['data']['tagList']):
if value['tagType'] in [0, 1, 41, 43, 44]:
tag_type = QunarSpider.sys_tags  # system tag
else:
tag_type = QunarSpider.user_tags_true  # user tag
tag = {'tag_name': value['tagName'], 'tag_num': value['tagNum'], 'tag_score': value['tagScore'],
'tag_type': tag_type}
spot_tag.append(tag)
print(spot_tag, "#" * 20)
print('-' * 20, 'ota_id', OTA.OtaCode.QUNAR.value.id, 'ota_spot_id', response.meta['ota_spot_id'])
spot.Spot.objects(ota_id=OTA.OtaCode.QUNAR.value.id,
ota_spot_id=response.meta['ota_spot_id']).update_one(
set__tag_list=spot_tag, upsert=True)
if 'data' in result and 'total' in result['data']:
print('共', result['data']['total'], '条', '*' * 20)
for pag_num in range(1, math.ceil(result['data']['total'] / 10)):
# for pag_num in range(1, 5):
print('第', pag_num, '页', '+' * 20)
yield scrapy.FormRequest(
'https://touch.piao.qunar.com/touch/queryCommentsAndTravelTips.json?type=mp&pageSize=10&fromType=SIGHT&pageNum=' + str(
pag_num) + '&sightId=' + str(response.meta['ota_spot_id'])
, method='GET'
, meta={'page': pag_num, 'ota_spot_id': response.meta['ota_spot_id']}
, callback=self.each_page)
def each_page(self, response):
print('-' * 20)
result = json.loads(response.body)
if 'data' in result and 'commentList' in result['data']:
for key, value in enumerate(result['data']['commentList']):
print(value['author'], '第', response.meta['page'], '页', '+' * 20)
if 'headImg' in value:
headImg = value['headImg']
else:
headImg = ''
yield spot.SpotComment.objects(c_id=value['commentId']).update_one(
set__ota_id=OTA.OtaCode.QUNAR.value.id,
set__ota_spot_id=response.meta['ota_spot_id'],
set__goods_name=value['sightName'],
set__u_avatar=headImg,
set__u_name=value['author'],
set__c_tag=value['tagList'],
set__c_score=value['score'],
set__c_content=value['content'],
set__c_img=[item['small'] for item in value['imgs']],
set__create_at=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
upsert=True)
class PriceSpider(scrapy.Spider):
ota_map = [{'ota_spot_id': 706176810, 'sightId': '14407', 'sightName': '石燕湖'} # 石燕湖
, {'ota_spot_id': 1915618311, 'sightId': '187730', 'sightName': '石牛寨'} # 石牛寨
, {'ota_spot_id': 2877753081, 'sightId': '469141', 'sightName': '益阳嘉年华'} # 益阳嘉年华
, {'ota_spot_id': 2554926827, 'sightId': '470541', 'sightName': '花田溪谷'} # 花田溪谷
, {'ota_spot_id': 225118749, 'sightId': '461232', 'sightName': '东浒寨'} # 东浒寨
, {'ota_spot_id': 3821817759, 'sightId': '11829', 'sightName': '马仁奇峰'} # 马仁奇峰
, {'ota_spot_id': 420237024, 'sightId': '39499', 'sightName': '大茅山'} # 大茅山
, {'ota_spot_id': 4123349957, 'sightId': '35473', 'sightName': '九龙江'} # 九龙江
, {'ota_spot_id': 2333288470, 'sightId': '196586', 'sightName': '侠天下'} # 侠天下
, {'ota_spot_id': 3333064220, 'sightId': '461903', 'sightName': '三翁花园'} # 三翁花园
]
name = 'qunar_price'
allowed_domains = ['piao.qunar.com']
login_url = 'http://piao.qunar.com/ticket/detail/getTickets.json'
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
def start_requests(self):
price.OPrice.objects(ota_id=10006).delete()
price.OPriceCalendar.objects(ota_id=10006, create_at=time.strftime("%Y-%m-%d", time.localtime())).delete()
print('start_request')
for value in self.ota_map:
# print(value['sightId'], "*" * 20)
yield scrapy.FormRequest(self.login_url
, formdata={'sightId': value['sightId'], 'from': 'detail'}
, meta={'ota_spot_id': value['ota_spot_id'], 'sight_name': value['sightName']}
, callback=self.after_login)
def after_login(self, response):
print('-' * 20)
result = json.loads(response.body)
if 'data' in result and 'groups' in result['data']:
for k1, v1 in enumerate(result['data']['groups']):  # group data for this sightId
ota_product = []
for k2, v2 in enumerate(v1):  # ticket type data
tickets = []
typeId = str(v2['typeId'])
ota_spot_name = response.meta['sight_name']
typeKey = ota_spot_name + v2['ticketZoneName']
ticketZoneName = v2['typeName']
total_count = v2['totalCount']  # total number of tickets
total_price = 0  # running total of ticket prices
normal_price = v2['qunarPrice']
if 'tickets' in v2:
print(v2['qunarPrice'])
for k3, v3 in enumerate(v2['tickets']):
tickets_list = {'price_id': str(v3['priceId'])
, 'title': v3['title']
, 'seller_nick': v3['supplierName']
, 'price': v3['qunarPrice']
, 'cash_back': v3['cashBack']
, 'cut_price': v3['cutPrice']
, 'sale_num': 0
, 'url': 'http://touch.piao.qunar.com/touch/detail_' + str(response.meta[
'ota_spot_id']) + '.html?st=a3clM0QlRTclOUYlQjMlRTclODclOTUlRTYlQjklOTYlMjZpZCUzRDE0NDA3JTI2dHlwZSUzRDAlMjZpZHglM0QxJTI2cXQlM0RuYW1lJTI2YXBrJTNEMiUyNnNjJTNEV1dXJTI2YWJ0cmFjZSUzRGJ3ZCU0MCVFNiU5QyVBQyVFNSU5QyVCMCUyNnVyJTNEJUU5JTk1JUJGJUU2JUIyJTk5JTI2bHIlM0QlRTklOTUlQkYlRTYlQjIlOTklMjZmdCUzRCU3QiU3RA%3D%3D#from=mpl_search_suggest'
}
tickets.append(tickets_list)
total_price = total_price + v3['qunarPrice']
# print(v3['title']) # priceId qunarPrice cashBack cutPrice supplierId supplierName
ota_product_list = {'type_id': typeId, 'type_key': typeKey, 'type_name': ticketZoneName,
'normal_price': normal_price,
'tickets': tickets}
ota_product.append(ota_product_list)
pre_price = round(total_price / total_count, 2)
print(pre_price, "+" * 20)
# print(ota_product)
'''
Save the price calendar
'''
price_calendar = price.OPriceCalendar()
price_calendar.ota_id = OTA.OtaCode.QUNAR.value.id
price_calendar.ota_spot_id = response.meta['ota_spot_id']
price_calendar.ota_spot_name = response.meta['sight_name']
price_calendar.pre_price = pre_price
price_calendar.type_id = typeId
price_calendar.type_key = typeKey
price_calendar.type_name = ticketZoneName
price_calendar.create_at = time.strftime("%Y-%m-%d", time.localtime())
o_price = price.OPrice()
o_price.ota_id = OTA.OtaCode.QUNAR.value.id
o_price.ota_spot_id = response.meta['ota_spot_id']
o_price.ota_spot_name = ota_spot_name
o_price.ota_product = ota_product # typeId typeName qunarPrice
price_calendar.save(force_insert=False, validate=False, clean=True)
yield o_price
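The pagination arithmetic used by CommentSpider, shown in isolation: given the total reported by the site and the count already stored, compute how many pages remain and how large the last page is (the numbers below are made up).
import math

total_on_site, already_saved, page_size = 1234, 1000, 10
to_save = total_on_site - already_saved
total_pages = math.ceil(to_save / page_size)
last_page_size = to_save - (total_pages - 1) * page_size
print(total_pages, last_page_size)  # 24 pages, with 4 comments on the last page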
| 48.487342
| 424
| 0.547513
| 1,674
| 15,322
| 4.783154
| 0.164277
| 0.055077
| 0.059573
| 0.016486
| 0.449482
| 0.408642
| 0.385163
| 0.352941
| 0.329712
| 0.300862
| 0
| 0.034479
| 0.328025
| 15,322
| 315
| 425
| 48.64127
| 0.743201
| 0.041705
| 0
| 0.282258
| 0
| 0.024194
| 0.200343
| 0.023626
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03629
| false
| 0.008065
| 0.048387
| 0
| 0.205645
| 0.084677
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aed49e56a367c6273f6328e65eb9c26caf7bc5da
| 5,941
|
py
|
Python
|
stage_2_semantic/not_used/subsample.py
|
grtzsohalf/Audio-Phonetic-and-Semantic-Embedding
|
1207cb61ec4587f38817b030a1e92cb315ebd178
|
[
"MIT"
] | 2
|
2019-08-09T00:49:25.000Z
|
2019-09-30T06:37:07.000Z
|
stage_2_semantic/not_used/subsample.py
|
grtzsohalf/Audio-Phonetic-and-Semantic-Embedding
|
1207cb61ec4587f38817b030a1e92cb315ebd178
|
[
"MIT"
] | null | null | null |
stage_2_semantic/not_used/subsample.py
|
grtzsohalf/Audio-Phonetic-and-Semantic-Embedding
|
1207cb61ec4587f38817b030a1e92cb315ebd178
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import os
import sys
import random
import math
from tqdm import tqdm
from collections import Counter
import operator
import numpy as np
FLAGS = None
next_random = 1
def subsampling_bool(freq, sampling):
global next_random
next_random = (next_random * 25214903917 + 11) & 0xFFFF
prob = (math.sqrt(sampling/freq)+sampling/freq)
return next_random/65536.0 - (math.sqrt(sampling/freq)+sampling/freq)
# return 1. - (math.sqrt(sampling/freq)+sampling/freq)
def subsample(label_dir, label_list, sampling, min_count):
word_dic = Counter()
count = 0
for l_file in label_list:
with open(os.path.join(label_dir, l_file), 'r') as f_l:
for line in f_l:
word = line.split(',')[0]
word_dic[word] += 1
count += 1
sorted_words = sorted(word_dic.items(), key=operator.itemgetter(1), reverse=True)
with open(os.path.join(label_dir, '../word_count'), 'w') as fout:
for word in sorted_words:
fout.write(word[0] + ', ' + str(word[1]) + '\n')
prob_dic = {}
for word in sorted_words:
if word[1] < min_count:
prob_dic[word[0]] = 1.
else:
prob_dic[word[0]] = subsampling_bool(word[1]/count, sampling)
if prob_dic[word[0]] < 0.:
prob_dic[word[0]] = 0.
return prob_dic
def main():
example_list = os.listdir(FLAGS.example_dir)
label_list = os.listdir(FLAGS.label_dir)
utter_list = os.listdir(FLAGS.utter_dir)
num_file = len(example_list)
subsampling_dic = subsample(FLAGS.label_dir, label_list, FLAGS.sampling, FLAGS.min_count)
subsampled_words = Counter()
for u_file, e_file, l_file in tqdm(zip(utter_list, example_list, label_list)):
count = 0
with open(os.path.join(FLAGS.example_dir, e_file), 'r') as f_e:
with open(os.path.join(FLAGS.label_dir, l_file), 'r') as f_l:
with open(os.path.join(FLAGS.utter_dir, u_file), 'r') as f_u:
with open(os.path.join(FLAGS.subsampled_example_dir, e_file), 'w') as f_out_e:
with open(os.path.join(FLAGS.subsampled_label_dir, l_file), 'w') as f_out_l:
with open(os.path.join(FLAGS.subsampled_utter_dir, u_file), 'w') as f_out_u:
for u, e, l in zip(f_u, f_e, f_l):
count += 1
label = l.split(',')[0]
utter = u.split(',')[0]
spk = utter.split('-')[0]
context_labels = l[:-1].split(',')[1].split()
write_bool = True
for c_l in context_labels:
if subsampling_dic[c_l] == 1.:
write_bool = False
break
if write_bool == False:
continue
prob = subsampling_dic[label]
if np.random.choice([True, False], p=[1-prob, prob]):
try:
f_out_e.write(e[:-1]+'\n')
f_out_l.write(l[:-1]+'\n')
# f_out_u.write(spk+'\n')
f_out_u.write(u[:-1]+'\n')
subsampled_words[label] += 1
except:
print (l)
print (count)
sorted_words = sorted(subsampled_words.items(), key=operator.itemgetter(1), reverse=True)
with open(os.path.join(FLAGS.label_dir, '../subsampled_word_count'), 'w') as fout:
for word in sorted_words:
fout.write(word[0] + ', ' + str(word[1]) + '\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description =
'transform text format features into tfrecords')
parser.add_argument(
'example_dir',
metavar='<example dir>',
type=str,
help='example dir'
)
parser.add_argument(
'label_dir',
metavar='<label dir>',
type=str,
help='label dir'
)
parser.add_argument(
'utter_dir',
metavar='<utter dir>',
type=str,
help='utter dir'
)
parser.add_argument(
'subsampled_example_dir',
metavar='<subsampled example dir>',
type=str,
help='subsampled_example_dir'
)
parser.add_argument(
'subsampled_label_dir',
metavar='<subsampled label dir>',
type=str,
help='subsampled_label_dir'
)
parser.add_argument(
'subsampled_utter_dir',
metavar='<subsampled utter dir>',
type=str,
help='subsampled_utter_dir'
)
parser.add_argument(
'sampling',
metavar='<subsampling factor>',
type=float,
help='subsampling factor'
)
parser.add_argument(
'min_count',
metavar='<min count>',
type=int,
help='min count'
)
parser.add_argument(
'--feats_dim',
metavar='<feats-dim>',
type=int,
default=256,
help='feature dimension'
)
parser.add_argument(
'--norm_var',
metavar='<True|False>',
type=bool,
default=False,
help='Normalize Variance of each sentence'
)
parser.add_argument(
'--norm_mean',
metavar='<True|False>',
type=bool,
default=False,
help='Normalize mean of each sentence'
)
FLAGS = parser.parse_args()
main()
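The keep probability computed by subsampling_bool, restated on its own: with relative frequency freq and threshold sampling, a word is kept with probability sqrt(sampling/freq) + sampling/freq, capped at 1 (a word2vec-style rule); the example values are illustrative.
import math

def keep_probability(freq, sampling):
    """Probability of keeping a word with relative frequency `freq`
    under subsampling threshold `sampling`, capped at 1."""
    return min(1.0, math.sqrt(sampling / freq) + sampling / freq)

print(keep_probability(0.01, 1e-3))  # frequent word: kept ~42% of the time
print(keep_probability(1e-5, 1e-3))  # rare word: always kept (capped at 1.0)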
| 35.57485
| 104
| 0.508669
| 681
| 5,941
| 4.232012
| 0.18649
| 0.038862
| 0.064886
| 0.04372
| 0.386884
| 0.236641
| 0.217557
| 0.138793
| 0.11381
| 0.079806
| 0
| 0.015629
| 0.375358
| 5,941
| 166
| 105
| 35.789157
| 0.760981
| 0.016496
| 0
| 0.220779
| 0
| 0
| 0.107534
| 0.011644
| 0
| 0
| 0.001027
| 0
| 0
| 1
| 0.019481
| false
| 0
| 0.058442
| 0
| 0.090909
| 0.012987
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aed4b5966f761ef3662819b99d5360ad4611d44f
| 819
|
py
|
Python
|
scripts/hofstede_csv_to_dict.py
|
tuejari/yoshi-2
|
2247e2c2820928c0e8ecd1b535a72ca74a5c5281
|
[
"Apache-2.0"
] | 2
|
2021-12-02T14:05:40.000Z
|
2021-12-27T08:49:48.000Z
|
scripts/hofstede_csv_to_dict.py
|
tuejari/yoshi-2
|
2247e2c2820928c0e8ecd1b535a72ca74a5c5281
|
[
"Apache-2.0"
] | null | null | null |
scripts/hofstede_csv_to_dict.py
|
tuejari/yoshi-2
|
2247e2c2820928c0e8ecd1b535a72ca74a5c5281
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
# Read the Hofstede indices into a pandas dataframe
data = pd.read_csv("..\\data\\Hofstede Insights - Manual 2021-05-13.csv", delimiter=",", index_col="country")
# Transform all data in the dataframe to strings
data["pdi"] = data["pdi"].astype(str)
data["idv"] = data["idv"].astype(str)
data["mas"] = data["mas"].astype(str)
data["uai"] = data["uai"].astype(str)
result = ""
for country, row in data.iterrows():
# Generate the C# code to add the Hofstede metrics to a dictionary of the form:
# Dictionary<string, (int Pdi, int Idv, int Mas, int Uai)>
result += "{ \"" + country.lower() + "\", (" + row["pdi"] + ", " + row["idv"] + ", " + row["mas"] + ", " + row["uai"] + ") },\n"
# Print the result so we can copy the generated C# code from the console
print(result)
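An f-string variant of the same row-to-C#-literal transformation, run on a small in-memory frame so it works without the CSV (column names follow the script above; the sample values are invented).
import pandas as pd

sample = pd.DataFrame(
    {"pdi": [38, 68], "idv": [80, 71], "mas": [14, 43], "uai": [53, 86]},
    index=pd.Index(["Netherlands", "France"], name="country"),
)

lines = [
    f'{{ "{country.lower()}", ({row.pdi}, {row.idv}, {row.mas}, {row.uai}) }},'
    for country, row in sample.iterrows()
]
print("\n".join(lines))  # ready to paste into a C# dictionary initializer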
| 40.95
| 132
| 0.62149
| 120
| 819
| 4.225
| 0.475
| 0.071006
| 0.076923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01203
| 0.188034
| 819
| 20
| 133
| 40.95
| 0.750376
| 0.3663
| 0
| 0
| 0
| 0
| 0.261719
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aed8dca665c2fddde4b08f8f8f84ab30b49b9928
| 10,397
|
py
|
Python
|
classes/App.py
|
JeanExtreme002/Aim-Coach
|
25369e036073bc1fe95efdcad34b6648b1498672
|
[
"BSD-3-Clause"
] | 1
|
2019-07-31T18:21:10.000Z
|
2019-07-31T18:21:10.000Z
|
classes/App.py
|
JeanExtreme002/Aim-Training
|
25369e036073bc1fe95efdcad34b6648b1498672
|
[
"BSD-3-Clause"
] | null | null | null |
classes/App.py
|
JeanExtreme002/Aim-Training
|
25369e036073bc1fe95efdcad34b6648b1498672
|
[
"BSD-3-Clause"
] | null | null | null |
from classes.Display import Display
from classes.FinalScoreboard import FinalScoreboard
from classes.Sounds import Sounds
from classes.TargetArea import TargetArea
from classes.Target import Target
from classes.Text import Text
from classes.Timer import Timer
from time import time
import pygame
class App(object):
"""
Main Class
"""
BORDER = 10
DISPLAY_COLOR = (100,100,100)
DISPLAY_GEOMETRY = [700,500]
DISPLAY_TITLE = "Aim Trainer"
FRAMES_PER_SECOND = 60
LIVES = 5
MISSING_SHOTS_DECREASES_LIFE = False
SCOREBOARD_AREA = 50
SCOREBOARD_COLOR = (255,255,255)
SCOREBOARD_FONT = ('Comic Sans MS', 21)
SCOREBOARD_FORMAT = "Hits: %i Accuracy: %.1f%% FPS: %i Targets: %.2f/s Lives: %i"
SCOREBOARD_LOCATION = [BORDER+1,10]
SOUNDS_BUFFER = 64
TARGET_ADD_TIME = 0.2
TARGET_AREA_COLORS = [(128,128,128),(148,148,148)]
TARGET_BORDER = 0
TARGET_AREA_GEOMETRY = [0+BORDER,SCOREBOARD_AREA+BORDER,DISPLAY_GEOMETRY[0]-BORDER,DISPLAY_GEOMETRY[1]-BORDER]
TARGET_COLORS = [(255,0,0),(255,255,255)]
TARGET_LIMIT_PER_SECOND = None
TARGET_RADIUS = 40
TARGETS_PER_SECOND = 1.8
TARGET_SPEED = 0.4
FINAL_SCOREBOARD_BACKGROUND_COLOR = (255,255,255)
FINAL_SCOREBOARD_BORDER = 5
FINAL_SCOREBOARD_BORDER_COLOR = (139,69,19)
FINAL_SCOREBOARD_FONT = ("Arial",40)
FINAL_SCOREBOARD_GEOMETRY = [TARGET_AREA_GEOMETRY[0]+50,TARGET_AREA_GEOMETRY[1]+50,TARGET_AREA_GEOMETRY[2]-50,TARGET_AREA_GEOMETRY[3]-50]
FINAL_SCOREBOARD_TEXT_COLOR = (80,80,80)
def __init__(self):
self.sounds = Sounds(self.SOUNDS_BUFFER)
pygame.init()
self.display = Display(
*self.DISPLAY_GEOMETRY,
self.DISPLAY_TITLE,
self.DISPLAY_COLOR
)
self.__surface = self.display.getSurface()
self.finalScoreboard = FinalScoreboard(
self.__surface,
*self.FINAL_SCOREBOARD_GEOMETRY,
self.FINAL_SCOREBOARD_FONT,
self.FINAL_SCOREBOARD_BORDER,
self.FINAL_SCOREBOARD_BORDER_COLOR,
self.FINAL_SCOREBOARD_TEXT_COLOR,
self.FINAL_SCOREBOARD_BACKGROUND_COLOR,
self.TARGET_COLORS
)
self.scoreboardText = Text(
self.__surface,
*self.SCOREBOARD_LOCATION,
text_font=self.SCOREBOARD_FONT,
text_color=self.SCOREBOARD_COLOR
)
self.targetArea = TargetArea(
self.__surface,
*self.TARGET_AREA_GEOMETRY,
self.TARGET_AREA_COLORS
)
self.__timer = Timer()
self.__clock = pygame.time.Clock()
def captureEvents(self):
"""
Method for capturing events and taking action based on them.
"""
for event in pygame.event.get():
# Check whether there was an event to close the program window.
if event.type == pygame.QUIT:
self.__stop = True
break
# Check whether a key was pressed.
if event.type == pygame.KEYDOWN:
# Se a tecla pressionada foi "Esc", o programa será fechado.
if event.key == pygame.K_ESCAPE:
self.__stop = True
break
# Se a tecla pressionada foi "Enter" ou "Space", será criada uma nova
# sessão caso o usuário esteja na tela de fim de jogo.
elif event.key in [pygame.K_RETURN,pygame.K_SPACE]:
if not self.__start:
self.__start = True
# Se o botão "1" do mouse foi pressionado, será efetuado um disparo.
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
# If a session is running, a gunshot sound is played.
# Otherwise, the sound played is that of a gun without ammunition.
if self.__start:
self.sounds.playSound(self.sounds.shooting_sound)
else:
self.sounds.playSound(self.sounds.without_ammunition_sound)
continue
# Check whether the shot hit any target.
for target in self.__targets.copy():
# Get the (x, y) position of the shot relative to the target
hit = target.checkHit()
# On a hit, the hit count is increased and the target is removed.
if hit:
self.sounds.playSound(self.sounds.metal_hit_sound)
self.__shots.append(hit)
self.__targets.remove(target)
self.__hits += 1
return
# If no target was hit, the failure count is increased and, if the
# option to lose a life on missed shots is enabled,
# the user loses one life in the session.
if self.MISSING_SHOTS_DECREASES_LIFE:
self.__lives -= 1
self.__failures += 1
def createTarget(self):
"""
Method to create a target within the screen.
"""
target = Target(
surface = self.__surface,
area_geometry = self.TARGET_AREA_GEOMETRY,
radius=self.TARGET_RADIUS,
target_colors=self.TARGET_COLORS
)
self.__targets.append(target)
def gameOver(self):
"""
Method for creating an endgame screen.
"""
self.__start = False
# Get the information from the last session to insert the data into the final scoreboard.
hits = self.__hits
accuracy = FinalScoreboard.getAccuracy(self.__hits+self.__failures,self.__hits)
targets_per_second = self.__target_per_second
time = self.__timer.getTime()
shots = self.__shots.copy()
# While the user does not try to close the program or press a key to create
# a new session, the game-over screen keeps being drawn.
while not self.__stop and not self.__start:
self.captureEvents()
self.display.drawDisplay()
self.targetArea.drawArea()
# Put an instruction in the scoreboard area on how to continue by creating a new session.
self.scoreboardText.setText('GAME OVER: Click "Enter" or "Space" to continue.')
self.scoreboardText.drawText()
self.finalScoreboard.drawFinalScoreboard(hits,accuracy,targets_per_second,time,shots)
self.__clock.tick(self.FRAMES_PER_SECOND)
pygame.display.flip()
# If the user presses a button to quit the program, it will close.
# If the user presses a key to continue, a new session will be created.
if self.__stop:
pygame.quit()
else: self.run()
def run(self):
"""
Method to start a new session.
"""
self.__failures = 0
self.__hits = 0
self.__stop = False
self.__targets = []
self.__shots = []
self.__lives = self.LIVES
self.__target_per_second = self.TARGETS_PER_SECOND
self.__start = True
# Set the font for the scoreboard
self.scoreboardText.setFont(self.SCOREBOARD_FONT)
# Start the timer
self.__timer.start()
last_time_to_create_target = time()
last_time_to_add_tps = time()
# While the user does not try to close the program and still has lives,
# the session keeps running.
while not self.__stop and self.__lives > 0:
self.captureEvents()
# Create a new target based on the number of targets per second.
if time() - last_time_to_create_target >= 1/self.__target_per_second:
self.createTarget()
last_time_to_create_target = time()
# Increase the number of targets per second.
if time() - last_time_to_add_tps >= self.TARGET_ADD_TIME:
if not self.TARGET_LIMIT_PER_SECOND or self.TARGET_LIMIT_PER_SECOND > self.__target_per_second:
self.__target_per_second += 1/self.__target_per_second/100
last_time_to_add_tps = time()
self.update()
# If the loop exited the "while" because an event asked to close
# the program, the program is terminated.
# If that was not the case, it means the current session ended and the game
# goes straight to the game-over screen.
if self.__stop:
pygame.quit()
else:
self.gameOver()
def setScore(self):
"""
Method for inserting updated information in the scoreboard.
"""
hits = self.__hits
accuracy = FinalScoreboard.getAccuracy(self.__hits+self.__failures,self.__hits)
fps = self.__clock.get_fps()
targets_per_second = self.__target_per_second
self.scoreboardText.setText(self.SCOREBOARD_FORMAT%(hits,accuracy,fps,targets_per_second,self.__lives))
def targetAnimation(self):
"""
Method for generating target animation.
"""
targets = self.__targets.copy()
targets.reverse()
for target in targets:
try:
# If the target cannot grow any further,
# its size will start to shrink.
if target.increase(self.TARGET_SPEED) == -1:
target.decreases(self.TARGET_SPEED)
target.drawTarget(border=self.TARGET_BORDER)
# If the target has shrunk down to the limit, it is removed
# and a target-lost sound is played.
except ValueError:
self.sounds.playSound(self.sounds.target_loss_sound)
self.__targets.remove(target)
self.__lives -= 1
def update(self):
"""
Method for updating the graphics part of the program.
"""
self.setScore()
self.display.drawDisplay()
self.scoreboardText.drawText()
self.targetArea.drawArea()
self.targetAnimation()
self.__clock.tick(self.FRAMES_PER_SECOND)
pygame.display.flip()
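The frame pattern App is built around (poll events, draw, cap the frame rate, flip), stripped down to a bare-bones pygame loop; the window size and FPS here are arbitrary and this is not part of the game above.
import pygame

pygame.init()
screen = pygame.display.set_mode((400, 300))
clock = pygame.time.Clock()
running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    screen.fill((100, 100, 100))  # background, the same grey as DISPLAY_COLOR above
    clock.tick(60)                # cap at 60 frames per second
    pygame.display.flip()
pygame.quit()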
| 33.756494
| 141
| 0.598346
| 1,205
| 10,397
| 4.922822
| 0.26639
| 0.028827
| 0.019724
| 0.022421
| 0.19791
| 0.109069
| 0.082266
| 0.054956
| 0.04147
| 0.04147
| 0
| 0.019442
| 0.32721
| 10,397
| 307
| 142
| 33.86645
| 0.828592
| 0.214485
| 0
| 0.22905
| 0
| 0.005587
| 0.018474
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044693
| false
| 0
| 0.050279
| 0
| 0.26257
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aeda62809d73ce75af8f774735efd74dd45b137f
| 3,501
|
py
|
Python
|
tests/conftest.py
|
kown7/pymergevcd
|
b4716b5aff49c7c496cae4f70fda0e5d52231e1b
|
[
"MIT"
] | 1
|
2021-02-27T21:22:16.000Z
|
2021-02-27T21:22:16.000Z
|
tests/conftest.py
|
kown7/pymergevcd
|
b4716b5aff49c7c496cae4f70fda0e5d52231e1b
|
[
"MIT"
] | 1
|
2021-10-21T18:44:59.000Z
|
2021-10-21T18:44:59.000Z
|
tests/conftest.py
|
kown7/pymergevcd
|
b4716b5aff49c7c496cae4f70fda0e5d52231e1b
|
[
"MIT"
] | null | null | null |
"""Provide test fixtures"""
import logging
import os
import pytest
import vcd
@pytest.fixture
def dummy_vcd_file(tmpdir):
"""Create vcd file with random data"""
filename = os.path.sep.join([str(tmpdir), 'test.vcd'])
with open(filename, 'w+') as fptr:
with vcd.VCDWriter(fptr, timescale=(10, 'ns'), date='today') as writer:
counter_var = writer.register_var('', 'dummyvar', 'integer',
size=8)
counter3_var = writer.register_var('a', 'dummyvara', 'integer')
counter_var = writer.register_var('a.b.c', 'counter', 'integer',
size=8)
for i in range(1000, 300000, 300):
timestamp = 0
for timestamp, value in enumerate(range(10, 200, 2)):
writer.change(counter_var, i + timestamp, value)
writer.change(counter3_var, i + timestamp, i % 42)
return filename
# pylint: disable=too-many-locals
@pytest.fixture
def src_merge_file(tmpdir):
"""Create two vcd files with random data and the expected, merged result"""
src1 = os.path.sep.join([str(tmpdir), 'src1.vcd'])
src2 = os.path.sep.join([str(tmpdir), 'src2.vcd'])
dest = os.path.sep.join([str(tmpdir), 'test_merged.vcd'])
with open(src1, 'w+') as ptr1, open(
src2, 'w+') as ptr2, open(dest, 'w+') as dest_fp:
with vcd.VCDWriter(
dest_fp, timescale=(10, 'ns'), date='today'
) as dest_wr, vcd.VCDWriter(
ptr1, timescale=(10, 'ns'), date='today'
) as src1_wr, vcd.VCDWriter(ptr2, timescale=(10, 'ns'),
date='today') as src2_wr:
counter_merge = dest_wr.register_var(src1[:-4], 'foobar',
'integer', size=32)
bartwo_merge = dest_wr.register_var(src1[:-4] + '.a', 'bar_two',
'reg', size=16)
counter8_merge = dest_wr.register_var(src1[:-4] + '.a', 'counter',
'integer', size=8)
lcounter_merge = dest_wr.register_var(src2[:-4], 'foobar2',
'integer', size=32)
logging.info('%s.a', src1[:-4])
counter_var1 = src1_wr.register_var('', 'foobar', 'integer',
size=32)
bartwo_var1 = src1_wr.register_var('a', 'bar_two', 'reg', size=16)
counter8_var1 = src1_wr.register_var('a', 'counter', 'integer',
size=8)
lcounter_var2 = src2_wr.register_var('', 'foobar2', 'integer',
size=32)
timestamp = 1
for i in range(1000, 10_000, 100):
src1_wr.change(counter_var1, timestamp, i)
src1_wr.change(bartwo_var1, timestamp, i % 2)
src1_wr.change(counter8_var1, timestamp, i % 256)
dest_wr.change(counter_merge, timestamp, i)
dest_wr.change(bartwo_merge, timestamp, i % 2)
dest_wr.change(counter8_merge, timestamp, i % 256)
if timestamp >= 20:
src2_wr.change(lcounter_var2, timestamp, i - 20)
dest_wr.change(lcounter_merge, timestamp, i - 20)
timestamp += 1
return (src1, src2, dest)
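# A hedged sketch (assumption, not part of conftest.py) of a test module that consumes
# the fixtures above; it would normally live in a sibling file such as
# tests/test_fixtures.py (hypothetical name), with pytest injecting each fixture by
# argument name.
import os

def test_dummy_vcd_file_is_written(dummy_vcd_file):
    """The fixture should return the path of a non-empty VCD file."""
    assert os.path.getsize(dummy_vcd_file) > 0

def test_src_merge_file_returns_three_paths(src_merge_file):
    """The fixture should return both source files plus the expected merge result."""
    src1, src2, dest = src_merge_file
    for path in (src1, src2, dest):
        assert os.path.isfile(path)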
| 46.065789
| 79
| 0.509854
| 399
| 3,501
| 4.320802
| 0.255639
| 0.070186
| 0.060325
| 0.030162
| 0.358469
| 0.243039
| 0.105568
| 0.032483
| 0
| 0
| 0
| 0.054635
| 0.362182
| 3,501
| 75
| 80
| 46.68
| 0.717421
| 0.044844
| 0
| 0.145161
| 0
| 0
| 0.071514
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.064516
| 0
| 0.129032
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aedc557dd8c720698321e37793a35e36a2e095e9
| 1,408
|
py
|
Python
|
bot.py
|
tungr/CoeusBot
|
90bdc869a1f8c077a1f88dcf1335d20a19d49fee
|
[
"MIT"
] | null | null | null |
bot.py
|
tungr/CoeusBot
|
90bdc869a1f8c077a1f88dcf1335d20a19d49fee
|
[
"MIT"
] | null | null | null |
bot.py
|
tungr/CoeusBot
|
90bdc869a1f8c077a1f88dcf1335d20a19d49fee
|
[
"MIT"
] | null | null | null |
import discord, os
from discord.ext import commands
from discord.ext.commands import has_permissions, bot_has_permissions
from dotenv import load_dotenv
client = commands.Bot(command_prefix='-')
client.remove_command('help')
client.remove_command('reload')
# Loads a cog
@client.command()
@has_permissions(administrator=True)
@commands.is_owner()
async def load(ctx, extension):
await ctx.channel.purge(limit=1)
client.load_extension(f'cogs.{extension}')
await ctx.send(f"Loaded {extension} cog", delete_after=5)
# Unloads a cog
@client.command()
@has_permissions(administrator=True)
@commands.is_owner()
async def unload(ctx, extension):
await ctx.channel.purge(limit=1)
client.unload_extension(f'cogs.{extension}')
await ctx.send(f"Unloaded {extension} cog", delete_after=5)
# Reloads a cog
@client.command(aliases=['reload'])
@has_permissions(administrator=True)
@commands.is_owner()
async def _reload(ctx, extension):
await ctx.channel.purge(limit=1)
client.unload_extension(f'cogs.{extension}')
client.load_extension(f'cogs.{extension}')
await ctx.send(f"Reloaded {extension} cog", delete_after=5)
load_dotenv()
# Grabs cogs from cogs directory
for fname in os.listdir(os.getenv('COGS')):
if fname.endswith('.py'):
client.load_extension(f'cogs.{fname[:-3]}')
else:
print(f'Unable to load {fname[:-3]}')
client.run(os.getenv('TOKEN'))
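# A hedged sketch (assumption, not part of this repository) of a minimal cog module that
# the loader above could pick up from the COGS directory, e.g. cogs/ping.py. The cog and
# command names are hypothetical; only the discord.py 1.x extension API is assumed.
from discord.ext import commands

class Ping(commands.Cog):
    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def ping(self, ctx):
        # Reply with the websocket latency in milliseconds.
        await ctx.send(f"Pong! {round(self.bot.latency * 1000)}ms")

def setup(bot):
    # Entry point invoked by client.load_extension('cogs.ping').
    bot.add_cog(Ping(bot))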
| 30.608696
| 69
| 0.736506
| 200
| 1,408
| 5.07
| 0.315
| 0.08284
| 0.100592
| 0.09073
| 0.572978
| 0.478304
| 0.478304
| 0.478304
| 0.465483
| 0.374753
| 0
| 0.006452
| 0.119318
| 1,408
| 46
| 70
| 30.608696
| 0.81129
| 0.049716
| 0
| 0.416667
| 0
| 0
| 0.155172
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aede0775f444057271755bf3520984120506d311
| 2,177
|
py
|
Python
|
setup.py
|
llaurabat91/topic-modelling-tools
|
9b53f52e5671005642faf065e993e19f0b249e5c
|
[
"MIT"
] | null | null | null |
setup.py
|
llaurabat91/topic-modelling-tools
|
9b53f52e5671005642faf065e993e19f0b249e5c
|
[
"MIT"
] | null | null | null |
setup.py
|
llaurabat91/topic-modelling-tools
|
9b53f52e5671005642faf065e993e19f0b249e5c
|
[
"MIT"
] | null | null | null |
import sys
from setuptools import setup
from setuptools.extension import Extension
### unit tests for this package
import topicmodel_tests
### set include dirs for numpy
try:
import numpy
except ImportError:
numpy_already_installed = False
from distutils.sysconfig import get_python_lib
include_numpy_dir = get_python_lib()+"/numpy/core/include"
else:
numpy_already_installed = True
include_numpy_dir = numpy.get_include()
### Cython - rebuild the .c from the .pyx file if there, or if not, just use the .c
try:
from Cython.Distutils import build_ext
## from Cython.Build import cythonize
except ImportError:
use_cython = False
else:
use_cython = True
cmdclass = { }
ext_modules = [ ]
if use_cython:
ext_modules += [
Extension("topicmodels.samplers.samplers_lda",
["topicmodels/samplers/samplers_lda.pyx"],
include_dirs=[
include_numpy_dir,
],
)
]
cmdclass.update({ 'build_ext': build_ext })
else:
ext_modules += [
Extension("topicmodels.samplers.samplers_lda",
["topicmodels/samplers/samplers_lda.c"],
include_dirs=[
include_numpy_dir,
],
)
]
setup(name = "topic-modelling-tools",
version="0.6dev",
author="Stephen Hansen",
url="https://github.com/alan-turing-institute/topic-modelling-tools",
author_email="stephen.hansen@economics.ox.ac.uk",
ext_modules=ext_modules,
packages=['topicmodels', 'topicmodel_tests', 'topicmodels.LDA', 'topicmodels.multimix','topicmodels.samplers'],
package_data={'topicmodels': ['*.txt']},
cmdclass=cmdclass,
license="LICENSE",
description = "Python library that performs Latent Dirichlet Allocation using Gibbs sampling.",
long_description = open("README.md").read(),
install_requires=[
"numpy >= 1.13.3",
"nltk >= 3.2.4",
"pandas >= 0.20.3",
"scipy >= 0.19.1",
"Cython >= 0.20.1"
],
test_suite = 'topicmodel_tests.my_test_suite'
)
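# A hedged usage note (assumption based on the standard setuptools/pip workflow, not
# stated in this file): because of the try/except above, the extension is rebuilt from
# the .pyx source when Cython is importable and compiled from the pre-generated .c file
# otherwise. A typical local build would be either of:
#   pip install .
#   python setup.py build_ext --inplace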
| 28.272727
| 117
| 0.618741
| 242
| 2,177
| 5.384298
| 0.450413
| 0.038373
| 0.046048
| 0.092095
| 0.161167
| 0.121259
| 0.121259
| 0.121259
| 0.121259
| 0.121259
| 0
| 0.013174
| 0.2678
| 2,177
| 76
| 118
| 28.644737
| 0.804266
| 0.079467
| 0
| 0.295082
| 0
| 0
| 0.300703
| 0.111446
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.147541
| 0
| 0.147541
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aedff3138bbb43e485ad12558c8ec12fb523b030
| 3,311
|
py
|
Python
|
game/tests/models/test_ownership.py
|
gafderks/monopoly-vue
|
4d18938508412476b19d205258a6339ff1f5975a
|
[
"MIT"
] | null | null | null |
game/tests/models/test_ownership.py
|
gafderks/monopoly-vue
|
4d18938508412476b19d205258a6339ff1f5975a
|
[
"MIT"
] | 172
|
2020-10-01T18:38:15.000Z
|
2022-03-28T19:20:11.000Z
|
game/tests/models/test_ownership.py
|
gafderks/monopoly-vue
|
4d18938508412476b19d205258a6339ff1f5975a
|
[
"MIT"
] | null | null | null |
import datetime
from unittest import mock
import pytz
from django.core.exceptions import ValidationError
from django.db import IntegrityError
from django.test import TestCase
from game.models import Ownership, Game, Player, RealEstate
from game.tests.factories.ownership import OwnershipFactory
from game.tests.factories.player import PlayerFactory
from game.tests.factories.realestate import RealEstateFactory
class OwnershipTest(TestCase):
def test_create_ownership(self):
estate = RealEstateFactory()
player = PlayerFactory(game=estate.game)
my_ownership = Ownership(real_estate=estate, player=player)
my_ownership.full_clean()
my_ownership.save()
self.assertTrue(my_ownership in Ownership.objects.all())
self.assertEqual(my_ownership.real_estate, estate)
self.assertEqual(my_ownership.player, player)
def test_buy_timestamp(self):
estate = RealEstateFactory()
player = PlayerFactory(game=estate.game)
mocked = datetime.datetime(2021, 3, 3, 0, 2, 3, tzinfo=pytz.utc)
with mock.patch("django.utils.timezone.now", mock.Mock(return_value=mocked)):
my_ownership = Ownership(real_estate=estate, player=player)
my_ownership.save()
self.assertEqual(my_ownership.buy_timestamp, mocked)
def test_cannot_create_ownership_without_player(self):
ownership = Ownership(real_estate=RealEstateFactory())
with self.assertRaises(IntegrityError):
ownership.save()
def test_cannot_create_ownership_without_realestate(self):
ownership = Ownership(player=PlayerFactory())
with self.assertRaises(IntegrityError):
ownership.save()
def test_delete_ownership_does_not_delete_game_player_or_realestate(self):
ownership = OwnershipFactory()
player = ownership.player
real_estate = ownership.real_estate
game = ownership.player.game
self.assertEqual(player.game, real_estate.game)
ownership.delete()
self.assertTrue(ownership not in Ownership.objects.all())
self.assertTrue(game in Game.objects.all())
self.assertTrue(player in Player.objects.all())
self.assertTrue(real_estate in RealEstate.objects.all())
def test_cannot_player_and_realestate_from_different_game(self):
player = PlayerFactory()
estate = RealEstateFactory()
my_ownership = Ownership(player=player, real_estate=estate)
with self.assertRaises(ValidationError):
my_ownership.full_clean()
def test_delete_player_deletes_ownership(self):
ownership = OwnershipFactory()
ownership.save()
player = ownership.player
player.delete()
self.assertTrue(ownership not in Ownership.objects.all())
def test_delete_realestate_deletes_ownership(self):
ownership = OwnershipFactory()
ownership.save()
realestate = ownership.real_estate
realestate.delete()
self.assertTrue(ownership not in Ownership.objects.all())
def test_delete_game_deletes_ownership(self):
ownership = OwnershipFactory()
ownership.save()
game = ownership.player.game
game.delete()
self.assertTrue(ownership not in Ownership.objects.all())
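# A hedged sketch (assumption, not taken from game/tests/factories/ownership.py) of the
# factory_boy-style OwnershipFactory the tests above rely on. The class is named
# OwnershipFactorySketch here only to avoid shadowing the real import above, and the
# SubFactory wiring is inferred from the tests, which expect player.game == real_estate.game.
import factory

class OwnershipFactorySketch(factory.django.DjangoModelFactory):
    class Meta:
        model = Ownership

    real_estate = factory.SubFactory(RealEstateFactory)
    # Reuse the game created for the real estate so both sides belong to the same game.
    player = factory.SubFactory(
        PlayerFactory, game=factory.SelfAttribute("..real_estate.game")
    )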
| 39.416667
| 85
| 0.712776
| 364
| 3,311
| 6.299451
| 0.181319
| 0.052769
| 0.049717
| 0.045792
| 0.370693
| 0.358046
| 0.330571
| 0.254688
| 0.155255
| 0.109027
| 0
| 0.00341
| 0.20296
| 3,311
| 83
| 86
| 39.891566
| 0.865479
| 0
| 0
| 0.422535
| 0
| 0
| 0.007551
| 0.007551
| 0
| 0
| 0
| 0
| 0.211268
| 1
| 0.126761
| false
| 0
| 0.140845
| 0
| 0.28169
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aee3b41530ad1d83d280d311e7b96dac16a5ac08
| 12,327
|
py
|
Python
|
crop_rotator/core/classes.py
|
Bahusson/crop_rotator
|
c1d86d36ce1867a84b927708f92c62c7815250a4
|
[
"MIT"
] | 1
|
2021-05-08T07:04:45.000Z
|
2021-05-08T07:04:45.000Z
|
crop_rotator/core/classes.py
|
Bahusson/crop_rotator
|
c1d86d36ce1867a84b927708f92c62c7815250a4
|
[
"MIT"
] | 80
|
2020-11-18T20:35:12.000Z
|
2021-06-13T08:08:36.000Z
|
crop_rotator/core/classes.py
|
Bahusson/crop_rotator
|
c1d86d36ce1867a84b927708f92c62c7815250a4
|
[
"MIT"
] | null | null | null |
from .snippets import (
remove_repeating,
flare,
list_appending_long,
level_off,
)
import itertools
import copy
from core.models import RotatorAdminPanel
class PageLoad(object):
"""Zwraca tyle języków ile mamy zainstalowane
w ustawieniach w zakładce LANGUAGES w formacie naprzemiennym
pasującym do wzornika z dwoma wyjściowymi
(ID_Języka, Ścieżka_Flagi_Języka), oraz
Ładuje wszystkie podstawowe elementy w widoku strony."""
def __init__(self, *args):
lang_id = []
langsl = []
a = args[0]
b = args[1]
self.langs = []
locations = list(a.objects.all())
self.items = locations[0]
for item in b:
lang_id.append("lang_flag_" + str(item[0]))
x = len(lang_id) - 1
y = 0
while x + 1 > 0:
z = self.items.__dict__[lang_id[y]]
langsl.append(z)
x = x - 1
y = y + 1
self.langs = zip(lang_id, langsl)
# Use this function if you want to use switchable skins.
# Defaults to 0 if you do not choose one.
def page_dress(self, **kwargs):
c = 0
s = kwargs["skins"]
if "choice" in kwargs:
c = int(kwargs["choice"])
self.skins = list(s.objects.all())
self.skin = self.skins[c]
self.skinctx = {
"skin": self.skin,
}
return self.skinctx
# This function builds the basic context for us,
# which is extended with the context passed into the function.
def lazy_context(self, **kwargs):
self.context = {
"items": self.items,
"langs": self.langs,
}
if "skins" in kwargs:
self.page_dress(**kwargs)
self.context.update(self.skinctx)
if "context" in kwargs:
self.context.update(kwargs["context"])
return self.context
# A wrapper over Django to handle the django-modeltranslation add-on.
# Simply because it is easier that way...
class PageElement(object):
def __init__(self, *args, **kwargs):
self.x = args[0]
self.listed = list(self.x.objects.all()) # List of objects
self.allelements = self.x.objects.all() # All objects
self.elements = self.x.objects # The object manager
self.baseattrs = self.listed[0] # First object on the list
# Elements by id.
def by_id(self, **kwargs):
G404 = kwargs["G404"]
x_id = kwargs["id"]
one_by_id = G404(self.x, pk=x_id)
return one_by_id
# This class loads the page once admin-panel-style options have been added.
class PortalLoad(PageLoad):
def __init__(self, *args):
super().__init__(*args)
menus = args[2]
advert = args[3]
self.adverts = advert.objects.all()
self.adverts_listed = list(advert.objects.all())
self.menu = list(menus.objects.all())[0]
if len(self.adverts_listed) == 0:
self.adverts = False
def page_dress(self, **kwargs):
super().page_dress(**kwargs)
def lazy_context(self, **kwargs):
self.context = {
"items": self.items,
"langs": self.langs,
"menu": self.menu,
"adverts": self.adverts,
}
if "skins" in kwargs:
self.page_dress(**kwargs)
self.context.update(self.skinctx)
if "context" in kwargs:
self.context.update(kwargs["context"])
return self.context
# This class counts interactions in the crop planner (slave class).
class PlannerRelationship(object):
def __init__(self, *args, **kwargs):
self.top_tier = kwargs['top_tier']
self.a = kwargs['a']
self.b = kwargs['b']
self.ifdict = {
"crop_to_crop": self.a[4].crop_relationships.filter(
about_crop__id=self.b[4].id),
}
self.seasondict = {
0: None,
1: "Summer",
2: "Winter",
}
def finishing(self, **kwargs):
interactiondict = {
# Interactions by sub-step:
0: [0, 0, True], # Companion planting
1: [0, 1, True], # Allelopathic / companion and successive
2: [1, 1, True], # Successive
# Interactions by step:
3: [2, 2, False], # Throughout the second year
4: [3, 3, False], # Throughout the third year
5: [1, 2, False], # In the first and second year
6: [1, 1, False], # Throughout the whole following year
7: [2, 3, False], # In the second and third year
}
signdict = {1:False, 2:True}
self.given_list = kwargs['given_list']
season = self.seasondict[self.i.season_of_interaction]
if season == "Summer" or season is None:
if interactiondict[self.i.type_of_interaction][2]:
if (
self.a[3][1] == self.b[3][1] - interactiondict[self.i.type_of_interaction][0]
or self.a[3][1] == self.b[3][1] - interactiondict[self.i.type_of_interaction][1]
):
level_off(self.top_tier, self.a, self.b)
if self.i.interaction_sign != 0:
self.given_list.append(self.a + self.b + [signdict[self.i.interaction_sign]])
return self.given_list
else:
if (
self.a[3][0] == self.b[3][0] - interactiondict[self.i.type_of_interaction][0]
or self.a[3][0] == self.b[3][0] - interactiondict[self.i.type_of_interaction][1]
):
if self.i.interaction_sign != 0:
self.given_list.append(self.a + self.b + [signdict[self.i.interaction_sign]])
return self.given_list
def relationship(self, **kwargs):
if self.ifdict[kwargs['relationship']].exists():
for self.i in self.ifdict[kwargs['relationship']]:
self.finishing(given_list=kwargs['given_list'])
return self.given_list
# This class works around errors caused by using the CropPlanner
# template where the content does not need to be analysed.
class DummyCropPlanner(object):
def __init__(self, *args, **kwargs):
plan_id = kwargs['plan_id']
self.pe_rp_id = args[0]
self.pe_rs = args[1].objects.filter(from_plan=plan_id)
self.pe_rss = args[3].objects.filter(from_step__from_plan=plan_id)
err_crop_list = []
tabs = []
self.error_family_crops = {
"e_crops": err_crop_list,
"e_tabs": tabs,
}
listed_pe_rs = list(self.pe_rs)
top_tier_list = []
for item in listed_pe_rs:
top_tier_list.append(item.order)
top_tier_list.sort()
self.top_tier = top_tier_list[-1]
def basic_context(self, **kwargs):
self.context = {
"efcs": self.error_family_crops,
"plan": self.pe_rp_id,
"steps": self.pe_rs,
"substeps": self.pe_rss,
"top_tier": self.top_tier,
}
self.context.update(kwargs['context'])
return self.context
def top_tier(self):
return self.top_tier
# This class analyses the crop rotation for errors and synergies.
class CropPlanner(object):
def __init__(self, *args, **kwargs):
plan_id = kwargs['plan_id']
self.pe_rp_id = args[0]
self.pe_rs = args[1].objects.filter(from_plan=plan_id)
self.pe_rss = args[3].objects.filter(from_step__from_plan=plan_id)
rss_object = args[3]
listed_pe_rs = list(self.pe_rs)
len_listed_pe_rs = len(listed_pe_rs)
cooldown_list = []
fabacae = []
top_tier_list = []
sub_index = 0
sub_index_2 = 0
self.substep_indices = []
for item in listed_pe_rs:
pe_rss_pack = args[3].objects.filter(from_step=item)
rss_list = []
for sub_item in pe_rss_pack:
rss_list.append(sub_item)
sub_index_2 += 1
self.substep_indices.append((sub_item, sub_index_2))
i4 = item.order
top_tier_list.append(i4)
vars = [cooldown_list, item, fabacae, sub_index]
sub_index = list_appending_long(rss_list, vars, rss_object)
cooldown_list.sort()
top_tier_list.sort()
self.clw = False
error_len_crops = []
cooldown_list1 = copy.deepcopy(cooldown_list)
self.top_tier = top_tier_list[-1]
for item in cooldown_list1:
item[3][0] += self.top_tier
cooldown_list2 = cooldown_list + cooldown_list1
err_tab_list = []
err_crop_list = []
crop_interaction_list = []
for item in cooldown_list:
if item[0] > len_listed_pe_rs:
error_len_crops.append(item[1])
self.clw = args[2].objects.filter(id__in=error_len_crops)
if not self.clw:
for a, b in itertools.permutations(cooldown_list2, 2):
if a[2] == b[2] and a[0] < b[0] and a[0]!=0 and b[0]!=0:
a[0]=b[0]
if a[2] == b[2] and a[3][0] - b[3][0] < a[0] and a[3][0] - b[3][0] > 0:
level_off(self.top_tier, a, b)
err_tab_list.append(a[3][0])
err_tab_list.append(b[3][0])
err_crop_list.append(a + b)
err_crop_list.append(b + a)
pr = PlannerRelationship(top_tier=self.top_tier, a=a, b=b)
pr.relationship(
given_list=crop_interaction_list,
relationship="crop_to_crop")
fabs = []
tabs = []
self.interactions = []
remove_repeating(fabs, fabacae)
remove_repeating(tabs, err_tab_list)
remove_repeating(self.interactions, crop_interaction_list)
fabs_percent = float(len(fabs)) / float(self.top_tier * 3)
fabs_rounded = round(fabs_percent, 3)
self.fabs_error = False
if fabs_rounded >= 0.25 and fabs_rounded <= 0.33:
pass
else:
self.fabs_error = int(fabs_rounded * 100)
self.fabs_error = str(self.fabs_error) + "%"
self.error_family_crops = {
"e_crops": err_crop_list,
"e_tabs": tabs,
}
def basic_context(self, **kwargs):
self.context = {
"subs_indices": self.substep_indices,
"interactions": self.interactions,
"f_error": self.fabs_error,
"efcs": self.error_family_crops,
"cr_len_warning": self.clw,
"plan": self.pe_rp_id,
"steps": self.pe_rs,
"substeps": self.pe_rss,
"top_tier": self.top_tier,
}
self.context.update(kwargs['context'])
return self.context
def top_tier(self):
return self.top_tier
def count_sources_pages(main_source):
sourcelist = []
for source in PageElement(main_source).allelements:
sourcelist.append([source.at_data_string, str(source.pages_from), str(source.pages_to)])
sourcelist1 = []
remove_repeating(sourcelist1, sourcelist)
sourcelist2 = copy.deepcopy(sourcelist1)
sourcelist3 = []
# Inefficient - fix this!
for source in sourcelist1:
for source_bis in sourcelist2:
if source[0] == source_bis[0]:
if any(source[0] in sl for sl in sourcelist3):
for sublist in sourcelist3:
if source[0] in sublist:
if source[2] == "None":
sublist[1].append((source[1],))
else:
sublist[1].append((source[1], source[2]))
else:
sourcelist3.append([source[0], [(source[1], source[2])]])
sourcelist4 = []
for source in sourcelist3:
newsource = []
remove_repeating(newsource, source[1])
newsource.sort()
sourcelist4.append([source[0], newsource])
flare(sourcelist4)
# For the first-time upload to the server.
try:
edit_delay_sec = PageElement(RotatorAdminPanel).baseattrs.evaluated_plan_cooldown
lurk_delay_min = PageElement(RotatorAdminPanel).baseattrs.lurk_plan_cooldown
except:
edit_delay_sec = 60
lurk_delay_min = 15
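# A hedged usage sketch (assumption, not from this repository) of how a Django view
# might drive PageLoad.lazy_context. MainPage, Skin and the template path are
# hypothetical names; only the call pattern of the classes defined above is assumed.
#
#   from django.conf import settings
#   from django.shortcuts import render
#
#   def index(request):
#       page = PageLoad(MainPage, settings.LANGUAGES)
#       context = page.lazy_context(skins=Skin, choice=0,
#                                   context={"title": "Home"})
#       return render(request, "core/index.html", context)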
| 35.834302
| 101
| 0.564695
| 1,527
| 12,327
| 4.354289
| 0.199083
| 0.027373
| 0.019853
| 0.013536
| 0.332381
| 0.283802
| 0.258084
| 0.21898
| 0.21898
| 0.21898
| 0
| 0.021523
| 0.321571
| 12,327
| 343
| 102
| 35.938776
| 0.773526
| 0.095319
| 0
| 0.319588
| 0
| 0
| 0.032784
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061856
| false
| 0.003436
| 0.013746
| 0.006873
| 0.134021
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aee4f9bbfdac79e9118eae4558fc269abff34caa
| 11,972
|
py
|
Python
|
ai/RLPlayer.py
|
Unn20/achtung_die_kurve
|
e2dbb1752c070cfc398e415d5a427384c0230f3c
|
[
"MIT"
] | null | null | null |
ai/RLPlayer.py
|
Unn20/achtung_die_kurve
|
e2dbb1752c070cfc398e415d5a427384c0230f3c
|
[
"MIT"
] | null | null | null |
ai/RLPlayer.py
|
Unn20/achtung_die_kurve
|
e2dbb1752c070cfc398e415d5a427384c0230f3c
|
[
"MIT"
] | null | null | null |
import copy
import math
import random
import numpy as np
import torch
from game.Player import Player
class RLPlayer(Player):
def __init__(self, name, game_state, action_space, parameters={}):
super().__init__(name)
print(f"Player {name} parameters: {parameters}")
self.action_space = action_space
self.device = parameters["device"] if "device" in parameters else 'cpu'
self.map_extracter = MapFeatureExtractor(164, 164, 256).to(self.device)
observation = self.game_state_to_observation(game_state)
self.agent = NeuralQLearningAgent(len(observation), action_space, parameters=parameters)
def action(self, game_state, learning=False):
observation = self.game_state_to_observation(game_state)
action = self.agent.get_action(observation, learning)
return self.action_space[action]
def process_transition(self, game_state, action, reward, next_game_state, done):
observation = self.game_state_to_observation(game_state)
next_observation = self.game_state_to_observation(next_game_state)
if action == "left":
action = 0
elif action == "right":
action = 2
else:
action = 1
self.agent.process_transition(observation, action, reward, next_observation, done)
def game_state_to_observation(self, game_state):
# player_features = ["x", "y", "direction", "speed", "turn_speed", "marker_size", "no_clip"]
player_features = ["x", "y", "direction"]
observation = []
my_player = game_state["players"][self.player_index]
observation = observation + [float(my_player[feature]) for feature in player_features]
observation[0] = observation[0] / 500
observation[1] = observation[1] / 500
observation[2] = observation[2] / 360
# min_dist_to_border = np.min([my_player["x"], (500 - my_player["x"]), my_player["y"], (500 - my_player["y"])])
#
# observation.append(min_dist_to_border)
x, y = my_player["x"], my_player["y"]
radius = 60
board = game_state["board"]
features = []
for angle in [math.radians(my_player["direction"] + (a - 135)) for a in np.linspace(0, 270, 13)]:
coords = [round(x + radius * math.sin(angle)), round(y + radius * math.cos(angle))]
interpolated = np.linspace((x, y), coords, 10)
try:
is_obstacle = any([board[round(p[0]), round(p[1])] > 0 for p in interpolated[1:]])
except IndexError:
is_obstacle = True
features.append(float(is_obstacle))
observation += features
# board = game_state["board"].astype(np.float32)
# board[board > 0] = 255.0
# board = resize(board, (164, 164))
# board[0, :] = 255.0
# board[-1, :] = 255.0
# board[:, 0] = 255.0
# board[:, -1] = 255.0
#
# board_tensor = torch.from_numpy(board)
# board_tensor = board_tensor.view(1, 1, board_tensor.shape[0], board_tensor.shape[1]).to(self.device)
# map_features = self.map_extracter.forward(board_tensor)
# observation = observation + map_features.squeeze().tolist()
return np.array(observation, dtype=np.float32)
def save_model_weights(self, output_path):
torch.save(self.agent.q_dash.state_dict(), output_path + "Q")
torch.save(self.map_extracter.state_dict(), output_path + "me")
def load_model_weights(self, path, learning=False):
self.agent.q_dash.load_state_dict(torch.load(path + "Q"))
self.map_extracter.load_state_dict(torch.load(path + "me"))
if not learning:
self.agent.q_dash.eval()
self.map_extracter.eval()
class MapFeatureExtractor(torch.nn.Module):
def __init__(self, map_width, map_height, output_features, hidden_count=128):
super(MapFeatureExtractor, self).__init__()
self.cnn_layers = torch.nn.Sequential(
torch.nn.Conv2d(1, 8, kernel_size=5, stride=1, padding=1),
torch.nn.ReLU(inplace=True),
torch.nn.MaxPool2d(kernel_size=2, stride=2),
torch.nn.Conv2d(8, 16, kernel_size=5, stride=1, padding=1),
torch.nn.ReLU(inplace=True),
torch.nn.MaxPool2d(kernel_size=2, stride=2),
torch.nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=1),
torch.nn.ReLU(inplace=True),
torch.nn.MaxPool2d(kernel_size=2, stride=2),
torch.nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=1),
torch.nn.ReLU(inplace=True),
torch.nn.MaxPool2d(kernel_size=2, stride=2),
)
self.linear_layers = torch.nn.Sequential(
torch.nn.Linear(4096, output_features)
)
# Defining the forward pass
def forward(self, x):
x = self.cnn_layers(x)
x = x.view(x.size(0), -1)
x = self.linear_layers(x)
return x
class ActionApproximation(torch.nn.Module):
def __init__(self, state_observations_count, action_count, hidden_count=512):
super(ActionApproximation, self).__init__()
self.ReLU = torch.nn.ReLU()
self.dense0 = torch.nn.Linear(state_observations_count, hidden_count)
self.dense1 = torch.nn.Linear(hidden_count, hidden_count)
self.dense2 = torch.nn.Linear(hidden_count, action_count)
def forward(self, x):
x = x.float()
x = self.dense0(x)
x = self.ReLU(x)
x = self.dense1(x)
x = self.ReLU(x)
x = self.dense2(x)
return x
class Agent:
def __init__(self):
pass
def process_transition(self, observation, action, reward, next_observation, done):
raise NotImplementedError()
def get_action(self, observation, learning):
raise NotImplementedError()
class NeuralQLearningAgent(Agent):
def __init__(self, observation_len, action_space, parameters={}):
super().__init__()
# torch.manual_seed(42)
learning_episodes = parameters["learning_episodes"] if "learning_episodes" in parameters else 200
self.device = parameters["device"] if "device" in parameters else 'cpu'
self.action_space = action_space
# PARAMETERS
self.network_freezing = parameters["network_freezing"] if "network_freezing" in parameters else True
self.double_q_learning = parameters["double_q_learning"] if "double_q_learning" in parameters else True
self.batch_learning = parameters["batch_learning"] if "batch_learning" in parameters else True
self.initial_epsilon = parameters["epsilon"] if "epsilon" in parameters else 0.7
self.epsilon = self.initial_epsilon
self.gamma = parameters["gamma"] if "gamma" in parameters else 0.99
self.learning_rate = parameters["lr"] if "lr" in parameters else 0.001
self.memory_size = parameters["memory_size"] if "memory_size" in parameters else 10000
self.memory_start_learning = 1000
self.batch_size = 128
self.batch_refresh_interval = 1
self.network_freezing_i = 3000
# ...........
self.q_dash = ActionApproximation(observation_len, len(action_space)).to(self.device)
if self.network_freezing or self.double_q_learning:
self.q_dash2 = copy.deepcopy(self.q_dash).to(self.device)
self.loss_function = torch.nn.MSELoss()
self.optimizer = torch.optim.Adam(self.q_dash.parameters(), lr=self.learning_rate)
self.exploration_weights = [1 / 3, 1 / 3, 1 / 3]
self.memory = []
self.memory_index = 0
self.epsilon_decay_parameter = math.log(
0.02) / learning_episodes # (learning_episodes - (learning_episodes // 4))
self.total_episode_reward = 0
self.total_reward_memory = []
self.max_episode_reward = 0
self.episodes_finished = 0
self.steps = 0
def update_approximator(self, batch):
observation, action, reward, next_observation, done = batch[:, 0], batch[:, 1], batch[:, 2], batch[:, 3], batch[
:, 4]
observation = torch.from_numpy(np.array(observation.tolist())).to(self.device)
next_observation = torch.from_numpy(np.array(next_observation.tolist())).to(self.device)
y_pred = self.q_dash.forward(observation)
action = torch.from_numpy(action[:, np.newaxis].astype(np.int64)).to(self.device)
score = torch.gather(y_pred, 1, action)
score = torch.squeeze(score, 1)
if self.double_q_learning:
y_n = self.q_dash.forward(next_observation).to(self.device)
action_n = torch.argmax(y_n, 1, keepdim=True)
y_next = self.q_dash2.forward(next_observation).to(self.device)
elif self.network_freezing:
y_next = self.q_dash2.forward(next_observation).to(self.device)
else:
y_next = self.q_dash.forward(next_observation).to(self.device)
done = done.astype(np.bool_)
y_next[done] = 0.0
reward = torch.from_numpy(reward.astype(np.float32)).to(self.device)
if self.double_q_learning:
score_next = torch.gather(y_next, 1, action_n)
score_next = torch.squeeze(score_next, 1)
target = reward + (self.gamma * score_next)
else:
target = reward + (self.gamma * torch.max(y_next, 1).values)
target = target.float()
self.optimizer.zero_grad()
loss = self.loss_function(score, target)
loss.backward()
self.optimizer.step()
if (self.network_freezing or self.double_q_learning) and self.steps % self.network_freezing_i == 0:
self.q_dash2.load_state_dict(self.q_dash.state_dict())
def process_transition(self, observation, action, reward, next_observation, done):
self.steps += 1
self.total_episode_reward += reward
if done:
self.episodes_finished += 1
self.max_episode_reward = max(self.total_episode_reward, self.max_episode_reward)
self.total_reward_memory.append(self.total_episode_reward)
self.total_episode_reward = 0
if self.epsilon > 0.05:
self.epsilon = self.initial_epsilon * math.exp(self.episodes_finished * self.epsilon_decay_parameter)
if self.episodes_finished % 50 == 0:
print(f"Episode={self.episodes_finished}, epsilon={round(self.epsilon, 4)}, \
total_steps={self.steps}, max_reward={round(self.max_episode_reward, 4)}, steps_per_episode={round(self.steps / self.episodes_finished, 2)}")
if self.batch_learning:
el = (observation, action, reward, next_observation, done)
if len(self.memory) < self.memory_size:
self.memory.append(el)
if len(self.memory) < self.memory_start_learning:
return
else:
self.memory[self.memory_index] = el
self.memory_index = (self.memory_index + 1) % self.memory_size
if self.steps % self.batch_refresh_interval == 0:
batch = np.array(random.sample(self.memory, self.batch_size), dtype=object)
else:
return
else:
# One element batch
batch = np.array((observation, action, reward, next_observation, done), dtype=object)[np.newaxis, :]
self.update_approximator(batch)
def get_action(self, observation, learning):
if learning and random.random() < self.epsilon:
action = random.choices([0, 1, 2], k=1)[0]
return action
observation = torch.from_numpy(observation).to(self.device)
y_pred = self.q_dash.forward(observation)
action = torch.argmax(y_pred).item()
return action
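# A hedged sketch (assumption, not from this repository) of the outer training loop this
# player plugs into. The Game environment and its get_state/reset/step methods are
# hypothetical, and the "straight" action label is an assumption; process_transition
# only distinguishes "left", "right" and everything else.
#
#   env = Game()
#   player = RLPlayer("rl", env.get_state(), ["left", "straight", "right"],
#                     parameters={"device": "cpu", "learning_episodes": 200})
#   for episode in range(200):
#       state, done = env.reset(), False
#       while not done:
#           action = player.action(state, learning=True)
#           next_state, reward, done = env.step(action)
#           player.process_transition(state, action, reward, next_state, done)
#           state = next_state
#   player.save_model_weights("weights/achtung_")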
| 43.064748
| 157
| 0.628383
| 1,502
| 11,972
| 4.796272
| 0.156458
| 0.021377
| 0.021655
| 0.022488
| 0.339534
| 0.257496
| 0.167407
| 0.162965
| 0.143809
| 0.113964
| 0
| 0.026331
| 0.254511
| 11,972
| 277
| 158
| 43.220217
| 0.78084
| 0.069078
| 0
| 0.214286
| 0
| 0.004762
| 0.026524
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.080952
| false
| 0.004762
| 0.028571
| 0
| 0.171429
| 0.009524
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aee5927bf583c22f6d9d0f89495d9cdad0d60cd0
| 4,761
|
py
|
Python
|
Intervention_Scenarios/helpers/what_if_helpers.py
|
tomtuamnuq/covasim-dds
|
1e3ce8f9dda6908ca20040a3b532495de3bdc4c1
|
[
"Apache-2.0"
] | 2
|
2022-03-11T09:48:19.000Z
|
2022-03-20T09:06:31.000Z
|
Intervention_Scenarios/helpers/what_if_helpers.py
|
AzadehKSH/covasim-dds
|
8bbaf4ffbebb4904ea56142d40043d2259ec7f25
|
[
"Apache-2.0"
] | null | null | null |
Intervention_Scenarios/helpers/what_if_helpers.py
|
AzadehKSH/covasim-dds
|
8bbaf4ffbebb4904ea56142d40043d2259ec7f25
|
[
"Apache-2.0"
] | null | null | null |
from functools import partial
from typing import Tuple
import covasim as cv
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
def get_current_infected_ratio():
# Returns the current ratio of infected people in Germany
number_infected = 651500 # https://www.deutschland.de/de/topic/politik/corona-in-deutschland-zahlen-und-fakten
number_total = 83100000 # https://www.destatis.de/DE/Themen/Gesellschaft-Umwelt/Bevoelkerung/Bevoelkerungsstand/_inhalt.html
infected_ratio = number_infected / number_total
return infected_ratio
delta_variant = cv.variant('delta', days=0) # delta is the dominant variant in germany
# Define baseline parameters
baseline_pars = dict(
start_day='2022-01-01',
n_days=60,
pop_type='hybrid',
pop_size=10_000,
pop_infected=int(get_current_infected_ratio() * 10000),
location='Germany',
use_waning=True, # use dynamically calculated immunity
n_beds_hosp=80, # https://tradingeconomics.com/germany/hospital-beds - 8 per 1000 people
n_beds_icu=62, # https://tradingeconomics.com/germany/icu-beds - 620 per 100.000 people
variants=[delta_variant],
)
def run_simulations(sim: cv.Sim, n_runs: int, confidence_level: float, method: str = "t") -> cv.MultiSim:
msim = cv.MultiSim(sim)
msim.run(n_runs=n_runs)
if method == "t": # use t-distribution
bounds = st.t.interval(alpha=confidence_level, df=n_runs - 1)[1]
else: # use normal distribution
bounds = st.norm.interval(alpha=confidence_level)[1]
bounds = bounds / np.sqrt(n_runs)
msim.mean(bounds=bounds)
return msim
def run_base_and_intervention(base_sim: cv.Sim, intervention_sim: cv.Sim, n_runs: int = 100,
confidence_level: float = 0.9) -> cv.MultiSim:
base_msim = run_simulations(base_sim, n_runs, confidence_level)
intervention_msim = run_simulations(intervention_sim, n_runs, confidence_level)
return cv.MultiSim([base_msim.base_sim, intervention_msim.base_sim])
# calculate by hand for reference
def calculate_mean_and_confidence(msim: cv.MultiSim, result_key: str, method: str = "t",
confidence_level: float = 0.9) -> Tuple[np.array, np.array, np.array]:
data = np.array([s.results[result_key] for s in msim.sims], dtype=float)
data_mean = np.mean(data, axis=0)
data_sem = st.sem(data, axis=0)
if method == "t":
conf_intervals = st.t.interval(alpha=confidence_level, df=data.shape[0] - 1, loc=data_mean, scale=data_sem)
else:
conf_intervals = st.norm.interval(alpha=confidence_level, loc=data_mean, scale=data_sem)
lower_band, upper_band = conf_intervals
return data_mean, lower_band, upper_band
# plot by hand for reference
def plot_with_bands(base_msim: cv.MultiSim, intervention_msim: cv.MultiSim, result_key: str, ax=None,
colors_base=("b", "c"), colors_intervention=("r", "tab:orange"), show_dates=False):
if ax is None:
_, ax = plt.subplots()
ax.set_title(result_key)
if show_dates:
x = base_msim.results['date']
else:
x = base_msim.results['t']
for sim, c in ((base_msim, colors_base), (intervention_msim, colors_intervention)):
data_mean, lower_band, upper_band = calculate_mean_and_confidence(sim, result_key)
ax.fill_between(x, lower_band, upper_band, alpha=.75, linewidth=0, label=f"{sim.label} band", color=c[1])
ax.plot(x, data_mean, label=sim.label, color=c[0])
if show_dates:
cv.date_formatter(sim=base_msim.base_sim, ax=ax)
else:
# show intervention as vertical line
for intervention in intervention_msim.base_sim.get_interventions():
intervention.plot_intervention(intervention_msim.base_sim, ax)
ax.legend()
return ax
def _inf_thresh(self: cv.Intervention, sim: cv.Sim, thresh: int):
''' Dynamically define on and off days with respect to the number of infected people.
See https://docs.idmod.org/projects/covasim/en/latest/tutorials/tut_interventions.html#Dynamic-triggering'''
if sim.people.infectious.sum() > thresh:
if not self.active:
self.active = True
self.t_on = sim.t
self.plot_days.append(self.t_on)
else:
if self.active:
self.active = False
self.t_off = sim.t
self.plot_days.append(self.t_off)
return [self.t_on, self.t_off]
def inf_thresh_callback(thresh: int = 500):
return partial(_inf_thresh, thresh=thresh)
def init_intervention_for_inf_thresh(c: cv.Intervention):
"""Setup attributes for `inf_thresh_callback`"""
c.t_on = np.nan
c.t_off = np.nan
c.active = False
c.plot_days = []
return c
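# A hedged usage sketch (assumption, not part of this module): comparing the baseline
# against a single contact-reduction intervention with the helpers above. The 30% beta
# reduction from day 14 and the run count are illustrative values, not taken from the source.
if __name__ == "__main__":
    base_sim = cv.Sim(pars=baseline_pars, label="Baseline")
    lockdown = cv.change_beta(days=14, changes=0.7)  # cut transmission by 30% from day 14
    intervention_pars = dict(baseline_pars, interventions=[lockdown])
    intervention_sim = cv.Sim(pars=intervention_pars, label="Reduced contacts")
    msim = run_base_and_intervention(base_sim, intervention_sim,
                                     n_runs=10, confidence_level=0.9)
    msim.plot(to_plot=["new_infections", "cum_deaths"])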
| 38.707317
| 129
| 0.692502
| 690
| 4,761
| 4.572464
| 0.301449
| 0.042789
| 0.017433
| 0.035499
| 0.168621
| 0.117274
| 0.038035
| 0.017116
| 0
| 0
| 0
| 0.019639
| 0.197858
| 4,761
| 122
| 130
| 39.02459
| 0.806494
| 0.179164
| 0
| 0.1
| 0
| 0
| 0.01701
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088889
| false
| 0
| 0.066667
| 0.011111
| 0.244444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aee5c643860aef69edc1c551fc556f5c1921368e
| 25,812
|
py
|
Python
|
src/nba_history/player_data.py
|
odonnell31/nba_history
|
bfcaffa265ee193f1faf4e6786ddc7d2cbfc9142
|
[
"MIT"
] | null | null | null |
src/nba_history/player_data.py
|
odonnell31/nba_history
|
bfcaffa265ee193f1faf4e6786ddc7d2cbfc9142
|
[
"MIT"
] | null | null | null |
src/nba_history/player_data.py
|
odonnell31/nba_history
|
bfcaffa265ee193f1faf4e6786ddc7d2cbfc9142
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 16 11:45:09 2021
@author: Michael ODonnell
@purpose: scrape NBA draft picks by year
"""
# import needed libraries
import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
# function to scrape a list of years of NBA Drafts
def scrape_draft_data(start_year = 2017, end_year = 2020, export = True):
# turn inputs into a list of years
if end_year > start_year:
years = list(range(end_year, start_year-1,-1))
elif end_year < start_year:
years = list(range(end_year, start_year+1))
else:
years = [start_year]
# create empty dataframe
final_df = pd.DataFrame(columns = ['Pk', 'Tm', 'Player', 'College', 'Yrs',
'G', 'MP', 'PTS', 'TRB', 'AST','FG%',
'3P%', 'FT%', 'MP', 'PTS', 'TRB', 'AST',
'WS', 'WS/48', 'BPM', 'VORP', 'round',
'year'])
# scape one year at a time
for y in years:
# define URL of draft class
url = f'https://www.basketball-reference.com/draft/NBA_{y}.html'
# create bs4 object using requests and bs4
response = requests.get(url)
# if response code != 200, print and exit
if response.status_code != 200:
print("invalid url response code:", response.status_code)
break
html = response.text
soup = BeautifulSoup(response.content, features = 'lxml')
column_names = [th.getText() for th in soup.findAll('tr', limit=2)[1].findAll('th')]
table_rows = soup.findAll('tr')[0:]
draft_picks = [[td.getText() for td in table_rows[i].findAll('td')]
for i in range(len(table_rows))]
# function to find length of each draft round
def find_draft_rounds(draft_picks:list):
# this will store number of picks in each round
round_cutoffs = []
# find empty lists, they indicate new draft round
for index, value in enumerate(draft_picks[2:]):
if value == []:
round_cutoffs.append(index)
# since there are always 2 empty lists in a row, only use 2nd
round_cutoffs = round_cutoffs[::2]
# print the total number of round in draft class
print(f"total rounds of the {y} draft:", len(round_cutoffs)+1)
print(f"picks per round in {y} draft:", round_cutoffs[0])
return round_cutoffs
# call find_draft_rounds on the data
round_cutoffs = find_draft_rounds(draft_picks)
# remove empty rows from draft_picks
draft_picks = [e for e in draft_picks if len(e) > 10]
# create dataframe for all draft_picks
draft_picks_df = pd.DataFrame(draft_picks, columns = column_names[1:])
print(f"total draft picks in the {y} draft:", len(draft_picks_df["Pk"]))
# create column for draft round and draft year
draft_picks_df["round"] = 1
draft_picks_df["year"] = y
# change column Pk to integer
draft_picks_df["Pk"] = pd.to_numeric(draft_picks_df["Pk"])
# assign correct draft round to each row
for index, picks in enumerate(round_cutoffs):
draft_picks_df.loc[(draft_picks_df.Pk > picks), "round"] = int(index)+2
# add draft picks to final_df (with all draft picks)
try:
final_df = final_df.append(draft_picks_df)
print(f"draft year {y} added to final dataframe")
except:
print(f"error with draft year {y}, data not collected")
# sleep for short duration before moving onto next year
print('='*5, f"end of year {y}", '='*5)
time.sleep(2)
# rename final_df columns
final_df = final_df.rename(columns = {final_df.columns[0]: "Pick",
final_df.columns[1]: "Team",
final_df.columns[4]: "Years",
final_df.columns[5]: "Career_Games",
final_df.columns[8]: "Career_Rb",
final_df.columns[9]: "Career_Ast",
final_df.columns[13]: "MPG",
final_df.columns[14]: "PPG",
final_df.columns[15]: "RbsPG",
final_df.columns[16]: "AstPG",
final_df.columns[7]: "Career_Pts",
final_df.columns[6]: "Career_Minutes"})
# export and return the dataframe
if export == True:
export_name = f"nba_draft_data_{start_year}_to_{end_year}" + ".csv"
final_df.to_csv(export_name, index = False)
return final_df
# function to scrape a list of years of NBA player total stats
def scrape_player_total_stats(start_year = 2017, end_year = 2020,
export = True, sleep_time = 2):
# turn inputs into a list of years
if end_year > start_year:
years = list(range(end_year, start_year-1,-1))
elif end_year < start_year:
years = list(range(end_year, start_year+1))
else:
years = [start_year]
# create empty final dataframe to append to in for loop
player_total_stats = pd.DataFrame(columns = ['Player', 'Pos', 'Age', 'Tm', 'G',
'GS', 'MP', 'FG', 'FGA', 'FG%', '3P',
'3PA', '3P%', '2P', '2PA', '2P%',
'eFG%', 'FT', 'FTA', 'FT%', 'ORB',
'DRB', 'TRB', 'AST', 'STL', 'BLK',
'TOV', 'PF', 'PTS', 'year'])
# loop through each year in the list
for y in years:
# grab URLs for year y
totals_url = f'https://www.basketball-reference.com/leagues/NBA_{y}_totals.html'
# create bs4 object using requests and bs4
totals_response = requests.get(totals_url)
print(f"totals year {y} url response code:", totals_response.status_code)
html = totals_response.text
soup = BeautifulSoup(totals_response.content, features = 'lxml')
# grab table column names and rows
column_names = [th.getText() for th in soup.findAll('tr', limit=2)[0].findAll('th')]
table_rows = soup.findAll('tr')[0:]
player_stats = [[td.getText() for td in table_rows[i].findAll('td')]
for i in range(len(table_rows))]
# drop empty rows
player_stats = [e for e in player_stats if len(e) > 10]
# create dataframe for stats
player_stats_df = pd.DataFrame(player_stats, columns = column_names[1:])
# add year to dataframe
player_stats_df["year"] = y
print(len(player_stats_df['Player']), f"in the {y} season added to dataframe")
non_dup_stats = player_stats_df.drop_duplicates(subset = 'Player',
keep = 'first')
# quick pause before scraping next year
#print(f"pausing for {sleep_time} seconds")
time.sleep(sleep_time)
try:
player_total_stats = player_total_stats.append(non_dup_stats)
print(f"{y} total player stats data added to dataset")
print("length of total dataframe:", len(player_total_stats['Player']))
except:
print(f"error with year {y}, data not collected")
# sleep for short duration before moving onto next year
print('='*5, f"end of year {y}", '='*5)
time.sleep(sleep_time*.5)
# export and return the dataframe
if export == True:
export_name = f"player_totals_{start_year}_to_{end_year}" + ".csv"
player_total_stats.to_csv(export_name, index = False)
return player_total_stats
# function to scrape a list of years of NBA player per-game stats
def scrape_player_per_game_stats(start_year = 2018, end_year = 2021,
export = True, sleep_time = 2):
# turn inputs into a list of years
if end_year > start_year:
years = list(range(end_year, start_year-1,-1))
elif end_year < start_year:
years = list(range(end_year, start_year+1))
else:
years = [start_year]
# create empty final dataframe to append to in for loop
player_per_game_stats = pd.DataFrame(columns = ['Player', 'Pos', 'Age', 'Tm', 'G',
'GS', 'MP', 'FG', 'FGA', 'FG%', '3P',
'3PA', '3P%', '2P', '2PA', '2P%',
'eFG%', 'FT', 'FTA', 'FT%', 'ORB',
'DRB', 'TRB', 'AST', 'STL', 'BLK',
'TOV', 'PF', 'PTS', 'year'])
# loop through each year in the list
for y in years:
# grab URLs for year y
per_game_url = f'https://www.basketball-reference.com/leagues/NBA_{y}_per_game.html'
# create bs4 object using requests and bs4
per_game_response = requests.get(per_game_url)
print(f"per game stats year {y} url response code:", per_game_response.status_code)
html = per_game_response.text
soup = BeautifulSoup(per_game_response.content, features = 'lxml')
# grab table column names and rows
column_names = [th.getText() for th in soup.findAll('tr', limit=2)[0].findAll('th')]
table_rows = soup.findAll('tr')[0:]
player_stats = [[td.getText() for td in table_rows[i].findAll('td')]
for i in range(len(table_rows))]
# drop empty rows
player_stats = [e for e in player_stats if len(e) > 10]
# create dataframe for stats
player_stats_df = pd.DataFrame(player_stats, columns = column_names[1:])
# add year to dataframe
player_stats_df["year"] = y
print(len(player_stats_df['Player']), f"in the {y} season added to dataframe")
non_dup_stats = player_stats_df.drop_duplicates(subset = 'Player',
keep = 'first')
# quick pause before scraping next year
#print(f"pausing for {sleep_time} seconds")
time.sleep(sleep_time)
try:
player_per_game_stats = player_per_game_stats.append(non_dup_stats)
print(f"{y} player per game stats data added to dataset")
print("length of total dataframe:", len(player_per_game_stats['Player']))
except:
print(f"error with year {y}, data not collected")
# sleep for short duration before moving onto next year
print('='*5, f"end of year {y}", '='*5)
time.sleep(sleep_time)
# export and return the dataframe
if export == True:
export_name = f"player_per_game_{start_year}_to_{end_year}" + ".csv"
player_per_game_stats.to_csv(export_name, index = False)
return player_per_game_stats
# function to scrape a list of years of NBA player advanced stats
def scrape_player_advanced_stats(start_year = 2019, end_year = 2021,
export = True, sleep_time = 2):
# turn inputs into a list of years
if end_year > start_year:
years = list(range(end_year, start_year-1,-1))
elif end_year < start_year:
years = list(range(end_year, start_year+1))
else:
years = [start_year]
# create empty final dataframe to append to in for loop
player_advanced_stats = pd.DataFrame(columns = ['Player', 'Pos', 'Age', 'Tm', 'G',
'MP', 'PER', 'TS%', '3PAr', 'FTr',
'ORB%', 'DRB%', 'TRB%', 'AST%', 'STL%',
'BLK%', 'TOV%', 'USG%', 'OWS', 'DWS',
'WS', 'WS/48', 'OBPM', 'DBPM', 'BPM',
'VORP', 'year'])
# loop through each year in the list
for y in years:
# grab URLs for year y
advanced_url = f'https://www.basketball-reference.com/leagues/NBA_{y}_advanced.html'
# create bs4 object using requests and bs4
advanced_url = requests.get(advanced_url)
print(f"per game stats year {y} url response code:", advanced_url.status_code)
html = advanced_url.text
soup = BeautifulSoup(advanced_url.content, features = 'lxml')
# grab table column names and rows
column_names = [th.getText() for th in soup.findAll('tr', limit=2)[0].findAll('th')]
table_rows = soup.findAll('tr')[0:]
player_stats = [[td.getText() for td in table_rows[i].findAll('td')]
for i in range(len(table_rows))]
# drop empty rows
player_stats = [e for e in player_stats if len(e) > 10]
# create dataframe for stats
player_stats_df = pd.DataFrame(player_stats, columns = column_names[1:])
# drop empty columns
player_stats_df = player_stats_df.drop(player_stats_df.columns[18],
axis = 1)
# add year to dataframe
player_stats_df["year"] = y
print(len(player_stats_df['Player']), f"in the {y} season added to dataframe")
non_dup_stats = player_stats_df.drop_duplicates(subset = 'Player',
keep = 'first')
# quick pause before scraping next year
#print(f"pausing for {sleep_time} seconds")
time.sleep(sleep_time)
try:
player_advanced_stats = player_advanced_stats.append(non_dup_stats,
sort=False)
print(f"{y} advanced player stats data added to dataset")
print("length of total dataframe:", len(player_advanced_stats['Player']))
except:
print(f"error with year {y}, data not collected")
# sleep for short duration before moving onto next year
print('='*5, f"end of year {y}", '='*5)
time.sleep(sleep_time)
# export and return the dataframe
if export == True:
export_name = f"player_advanced_{start_year}_to_{end_year}" + ".csv"
player_advanced_stats.to_csv(export_name, index = False)
return player_advanced_stats
# function to scrape a list of years of NBA player shooting stats
def scrape_player_shooting_stats(start_year = 2019, end_year = 2021,
export = True, sleep_time = 2):
# turn inputs into a list of years
if end_year > start_year:
years = list(range(end_year, start_year-1,-1))
elif end_year < start_year:
years = list(range(end_year, start_year+1))
else:
years = [start_year]
# create empty final dataframe to append to in for loop
player_shooting_stats = pd.DataFrame(columns = ['Player', 'Pos', 'Age', 'Tm',
'G', 'MP', 'FG%', 'Avg_Distance',
'3P_FGassisted%', '3-10_FG%',
'10-16_FG%', '16-3P_FG%', '3P_FG%',
'Dunk_attempt%', '3P_FGassisted%',
'3-10_FG%', '10-16_FG%', '16-3P_FG%',
'3P_FG%', 'Dunk_attempt%',
'3P_FGassisted%', 'Dunk_attempt%',
'Dunk_attempts', 'Heave_makes',
'Corner3_3P_attempt%', 'Corner3_FG%',
'Heave_attempts', 'Heave_makes',
'year'])
# loop through each year in the list
for y in years:
# grab URLs for year y
shooting_url = f'https://www.basketball-reference.com/leagues/NBA_{y}_shooting.html'
# create bs4 object using requests and bs4
shooting_url = requests.get(shooting_url)
print(f"per game stats year {y} url response code:", shooting_url.status_code)
html = shooting_url.text
soup = BeautifulSoup(shooting_url.content, features = 'lxml')
# grab table column names and rows
column_names = [th.getText() for th in soup.findAll('tr', limit=2)[1].findAll('th')]
table_rows = soup.findAll('tr')[0:]
player_stats = [[td.getText() for td in table_rows[i].findAll('td')]
for i in range(len(table_rows))]
# drop empty rows
player_stats = [e for e in player_stats if len(e) > 10]
# create dataframe for stats
player_stats_df = pd.DataFrame(player_stats, columns = column_names[1:])
# drop empty columns
player_stats_df = player_stats_df.drop(player_stats_df.columns[8],
axis = 1)
# rename columns
column_mapping = {player_stats_df.columns[7] : 'Avg_Distance',
player_stats_df.columns[8] : '2P_attempt%',
player_stats_df.columns[9] : '0-3_attempt%',
player_stats_df.columns[10] : '3-10_attempt%',
player_stats_df.columns[11] : '10-16_attempt%',
player_stats_df.columns[12] : '16-3P_attempt%',
player_stats_df.columns[13] : '3P_attempt%',
player_stats_df.columns[14] : '2P_FG%',
player_stats_df.columns[15] : '3-10_FG%',
player_stats_df.columns[16] : '10-16_FG%',
player_stats_df.columns[17] : '16-3P_FG%',
player_stats_df.columns[18] : '3P_FG%',
player_stats_df.columns[19] : '2P_FGassisted%',
player_stats_df.columns[20] : '3P_FGassisted%',
player_stats_df.columns[21] : 'Dunk_attempt%',
player_stats_df.columns[22] : 'Dunk_attempts',
player_stats_df.columns[24] : 'Corner3_3P_attempt%',
player_stats_df.columns[25] : 'Corner3_FG%',
player_stats_df.columns[26] : 'Heave_attempts',
player_stats_df.columns[27] : 'Heave_makes'}
player_stats_df = player_stats_df.rename(columns = column_mapping)
# add year to dataframe
player_stats_df["year"] = y
print(len(player_stats_df['Player']), f"in the {y} season added to dataframe")
non_dup_stats = player_stats_df.drop_duplicates(subset = 'Player',
keep = 'first')
# quick pause before scraping next year
#print(f"pausing for {sleep_time} seconds")
time.sleep(sleep_time)
try:
player_shooting_stats = player_shooting_stats.append(non_dup_stats,
sort=False)
print(f"{y} player shooting stats data added to dataset")
print("length of total dataframe:", len(player_shooting_stats['Player']))
except:
print(f"error with year {y}, data not collected")
# sleep for short duration before moving onto next year
print('='*5, f"end of year {y}", '='*5)
time.sleep(sleep_time)
# export and return the dataframe
if export == True:
export_name = f"player_shooting_{start_year}_to_{end_year}" + ".csv"
player_shooting_stats.to_csv(export_name, index = False)
return player_shooting_stats
# function to scrape All Stars by year
def scrape_all_stars(export = True):
# grab wikipedia URL of all-stars
url = 'https://en.wikipedia.org/wiki/List_of_NBA_All-Stars'
# create bs4 object using requests and bs4
response = requests.get(url)
print(f"all-stars url response code:", response.status_code)
soup = BeautifulSoup(response.text, 'html.parser')
# grab full table
nba_table = soup.findAll('table')[1]
# turn table to dataframe
all_stars_df = pd.read_html(str(nba_table))
all_stars_df = pd.DataFrame(all_stars_df[0])
# add hall of fame denomination to dataframe
for idx, row in all_stars_df.iterrows():
if '*' in row["Player"]:
all_stars_df.loc[idx, "hall_of_fame"] = 1
all_stars_df.loc[idx, "active_player"] = 0
all_stars_df.loc[idx, "hof_eligible"] = 1
elif '^' in row["Player"]:
all_stars_df.loc[idx, "hall_of_fame"] = 0
all_stars_df.loc[idx, "active_player"] = 1
all_stars_df.loc[idx, "hof_eligible"] = 0
elif '†' in row["Player"]:
all_stars_df.loc[idx, "hall_of_fame"] = 0
all_stars_df.loc[idx, "active_player"] = 0
all_stars_df.loc[idx, "hof_eligible"] = 0
else:
all_stars_df.loc[idx, "hall_of_fame"] = 0
all_stars_df.loc[idx, "active_player"] = 0
all_stars_df.loc[idx, "hof_eligible"] = 1
# remove extra characters from the Player column
for c in "*^†":
all_stars_df["Player"] = all_stars_df["Player"].str.replace(c, '')
#for a in [r"[a]", r"[b]"]:
# #substring = f"[{a}]"
# all_stars_df["Player"] = all_stars_df["Player"].str.replace(a, '')
# delete extra columns
all_stars_df = all_stars_df.drop('Reference', 1)
# rename all_stars_df columns
all_stars_df = all_stars_df.rename(columns =
{all_stars_df.columns[1]: "Selections",
all_stars_df.columns[2]: "Years"})
# export and return the dataframe
if export == True:
export_name = "nba_all_stars.csv"
all_stars_df.to_csv(export_name, index = False)
return all_stars_df
# function to scrape a list of years of NBA player salary data
def scrape_player_salaries(start_year = 2015, end_year = 2016,
export = True, sleep_time = 2):
# turn inputs into a list of years
if end_year > start_year:
years = list(range(end_year, start_year-1,-1))
elif end_year < start_year:
years = list(range(end_year, start_year+1))
else:
years = [start_year]
# create empty final dataframe to append to in for loop
player_contracts = pd.DataFrame(columns = ['Player', 'Salary', 'Rank', 'Year'])
# loop through each year in the list
for y in years:
# grab URLs for year y
y1 = y+1
contracts_url = f'https://hoopshype.com/salaries/players/{y}-{y1}/'
# create bs4 object using requests and bs4
response = requests.get(contracts_url)
print(f"contracts year {y} url response code:", response.status_code)
html = response.text
soup = BeautifulSoup(html, features = 'html.parser')
# grab table column names and rows
salary_table = soup.find('table')
length=len(salary_table.find_all("td"))
players = [salary_table.find_all("td")[i].text.strip() for i in range(5,length,4)]
salaries = [salary_table.find_all("td")[i].text.strip() for i in range(6,length,4)]
# turn rows into dataframe
salary_df = pd.DataFrame({"Player" : players,
"Salary" : salaries,
"Rank" : [i for i in range(1, len(salaries)+1)]})
salary_df["Year"] = y
# add year to dataframe
print(len(salary_df['Player']), f"in the {y} season added to dataframe")
# quick pause before scraping next year
time.sleep(sleep_time)
try:
player_contracts = player_contracts.append(salary_df, sort=False)
print(f"{y} player contracts added to dataset")
print("length of total dataframe:", len(player_contracts['Player']))
except:
print(f"error with year {y}, data not collected")
# sleep for short duration before moving onto next year
print('='*5, f"end of year {y}", '='*5)
time.sleep(sleep_time)
# export and return the dataframe
if export == True:
export_name = f"player_contracts_{start_year}_to_{end_year}" + ".csv"
player_contracts.to_csv(export_name, index = False)
return player_contracts
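
# Illustrative driver (editor's sketch, not in the original script): the three
# scrapers above are independent, so a small __main__ block is enough to build
# the raw CSVs. The name scrape_player_shooting for the shooting-stats function
# (whose tail appears at the top of this file) and the year range / sleep_time
# values here are assumptions for illustration only.
if __name__ == '__main__':
    shooting = scrape_player_shooting(start_year=2000, end_year=2020,
                                      export=True, sleep_time=2)
    all_stars = scrape_all_stars(export=True)
    salaries = scrape_player_salaries(start_year=2000, end_year=2020,
                                      export=True, sleep_time=2)
    print(len(shooting), len(all_stars), len(salaries))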
| 43.675127
| 200
| 0.535371
| 3,135
| 25,812
| 4.207974
| 0.100797
| 0.051698
| 0.04336
| 0.029109
| 0.698302
| 0.658657
| 0.628563
| 0.605973
| 0.5708
| 0.558065
| 0
| 0.019417
| 0.359523
| 25,812
| 591
| 201
| 43.675127
| 0.77843
| 0.15578
| 0
| 0.461095
| 0
| 0
| 0.156032
| 0.011534
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023055
| false
| 0
| 0.011527
| 0
| 0.057637
| 0.10951
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aee96f06eaeca2b9830d780ade1fc0b516e69f02
| 1,915
|
py
|
Python
|
sqlalchemy/sqlalchemy-0.3.6+codebay/sqlalchemy/ext/assignmapper.py
|
nakedible/vpnease-l2tp
|
0fcda6a757f2bc5c37f4753b3cd8b1c6d282db5c
|
[
"WTFPL"
] | 5
|
2015-04-16T08:36:17.000Z
|
2017-05-12T17:20:12.000Z
|
sqlalchemy/sqlalchemy-0.3.6+codebay/sqlalchemy/ext/assignmapper.py
|
nakedible/vpnease-l2tp
|
0fcda6a757f2bc5c37f4753b3cd8b1c6d282db5c
|
[
"WTFPL"
] | null | null | null |
sqlalchemy/sqlalchemy-0.3.6+codebay/sqlalchemy/ext/assignmapper.py
|
nakedible/vpnease-l2tp
|
0fcda6a757f2bc5c37f4753b3cd8b1c6d282db5c
|
[
"WTFPL"
] | 4
|
2015-03-19T14:39:51.000Z
|
2019-01-23T08:22:55.000Z
|
from sqlalchemy import mapper, util, Query, exceptions
import types

def monkeypatch_query_method(ctx, class_, name):
    def do(self, *args, **kwargs):
        query = Query(class_, session=ctx.current)
        return getattr(query, name)(*args, **kwargs)
    setattr(class_, name, classmethod(do))

def monkeypatch_objectstore_method(ctx, class_, name):
    def do(self, *args, **kwargs):
        session = ctx.current
        if name == "flush":
            # flush expects a list of objects
            self = [self]
        return getattr(session, name)(self, *args, **kwargs)
    setattr(class_, name, do)

def assign_mapper(ctx, class_, *args, **kwargs):
    validate = kwargs.pop('validate', False)
    if not isinstance(getattr(class_, '__init__'), types.MethodType):
        def __init__(self, **kwargs):
            for key, value in kwargs.items():
                if validate:
                    if key not in self.mapper.props:
                        raise exceptions.ArgumentError("Invalid __init__ argument: '%s'" % key)
                setattr(self, key, value)
        class_.__init__ = __init__
    extension = kwargs.pop('extension', None)
    if extension is not None:
        extension = util.to_list(extension)
        extension.append(ctx.mapper_extension)
    else:
        extension = ctx.mapper_extension
    m = mapper(class_, extension=extension, *args, **kwargs)
    class_.mapper = m
    class_.query = classmethod(lambda cls: Query(class_, session=ctx.current))
    for name in ['get', 'select', 'select_by', 'selectfirst', 'selectfirst_by', 'selectone',
                 'get_by', 'join_to', 'join_via', 'count', 'count_by', 'options', 'instances']:
        monkeypatch_query_method(ctx, class_, name)
    for name in ['flush', 'delete', 'expire', 'refresh', 'expunge', 'merge', 'save',
                 'update', 'save_or_update']:
        monkeypatch_objectstore_method(ctx, class_, name)
    return m
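
# Editor's sketch of how this extension was typically wired up against the
# SQLAlchemy 0.3-era API. BoundMetaData, create_session and SessionContext are
# assumed to come from that release (they are gone from modern SQLAlchemy), and
# the users_table / User class below are hypothetical; treat this as an
# illustration of the monkeypatched interface, not a drop-in script.
# from sqlalchemy import BoundMetaData, Table, Column, Integer, String, create_session
# from sqlalchemy.ext.sessioncontext import SessionContext
#
# metadata = BoundMetaData('sqlite://')
# users_table = Table('users', metadata,
#                     Column('id', Integer, primary_key=True),
#                     Column('name', String(40)))
# metadata.create_all()
#
# class User(object):
#     pass
#
# ctx = SessionContext(create_session)    # thread-local session registry
# assign_mapper(ctx, User, users_table)   # attach mapper, query and session helpers
#
# u = User(name='ed')                     # generated __init__ sets attributes
# u.save()                                # objectstore-style method from the patch
# ctx.current.flush()
# print(User.get_by(name='ed'))           # query method exposed as a classmethod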
| 43.522727
| 171
| 0.632898
| 224
| 1,915
| 5.169643
| 0.348214
| 0.046632
| 0.048359
| 0.062176
| 0.252159
| 0.160622
| 0.063903
| 0.063903
| 0.063903
| 0
| 0
| 0
| 0.236031
| 1,915
| 43
| 172
| 44.534884
| 0.791524
| 0.016188
| 0
| 0.052632
| 0
| 0
| 0.118554
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.157895
| false
| 0
| 0.052632
| 0
| 0.289474
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aeed0b8abb2aadc143c55c6677cfe9445bb8a6a9
| 1,316
|
py
|
Python
|
examples/dictation grammar example.py
|
onchiptech/pyjsgf
|
f7ff26323e5e602ea10e7d302610c2fcb46234d6
|
[
"MIT"
] | 40
|
2018-01-24T23:01:27.000Z
|
2022-01-19T03:33:37.000Z
|
examples/dictation grammar example.py
|
onchiptech/pyjsgf
|
f7ff26323e5e602ea10e7d302610c2fcb46234d6
|
[
"MIT"
] | 31
|
2018-03-01T07:58:27.000Z
|
2022-01-13T12:07:45.000Z
|
examples/dictation grammar example.py
|
onchiptech/pyjsgf
|
f7ff26323e5e602ea10e7d302610c2fcb46234d6
|
[
"MIT"
] | 21
|
2017-11-14T09:11:17.000Z
|
2022-02-02T15:32:57.000Z
|
"""
Example showing use of the jsgf.ext DictationGrammar class for matching and
compiling rules that use regular JSGF expansions like Literal and Sequence as
well as Dictation expansions.
"""
from jsgf import PublicRule, Sequence
from jsgf.ext import Dictation, DictationGrammar
def main():
# Create a simple rule using a Dictation expansion.
rule = PublicRule("Hello_X", Sequence("hello", Dictation()))
# Create a new DictationGrammar using the simple rule.
grammar = DictationGrammar([rule])
# Print the compiled grammar
print(grammar.compile())
# Match against some speech strings.
# find_matching_rules has an optional second parameter for advancing to
# the next part of the rule, which is set to False here.
matching = grammar.find_matching_rules("hello", False)
print("Matching rule: %s" % matching[0]) # first part of rule
# Go to the next part of the rule.
matching[0].set_next()
# Match the dictation part. This can be anything.
matching = grammar.find_matching_rules("world")
print("Matching rule: %s" % matching[0])
# The entire match and the original rule's current_match value will both be
'hello world'
print(matching[0].entire_match)
print(rule.expansion.current_match)
if __name__ == '__main__':
main()
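
# Editor's note (not part of the original example): for contrast, a plain
# Grammar built only from Literal/Sequence expansions matches an utterance in a
# single step, with no set_next() bookkeeping. Class and method names below
# follow the ordinary jsgf API as I understand it; treat this as a sketch.
from jsgf import Grammar, Literal

def literal_only_example():
    grammar = Grammar("greetings")
    rule = PublicRule("Hello_World", Sequence("hello", Literal("world")))
    grammar.add_rule(rule)
    print(grammar.compile())
    # The whole utterance is matched at once.
    print(grammar.find_matching_rules("hello world"))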
| 31.333333
| 79
| 0.718085
| 181
| 1,316
| 5.116022
| 0.430939
| 0.038877
| 0.055076
| 0.028078
| 0.174946
| 0.105832
| 0.047516
| 0
| 0
| 0
| 0
| 0.003791
| 0.198328
| 1,316
| 41
| 80
| 32.097561
| 0.873934
| 0.492401
| 0
| 0.125
| 0
| 0
| 0.115207
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.125
| 0
| 0.1875
| 0.3125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aef2d0ce8b0f340445dbcd09415dd30f5aa7f265
| 758
|
py
|
Python
|
material/curso_em_video/ex085.py
|
sergiodealencar/courses
|
c9d86b27b0185cc82624b01ed76653dbc12554a3
|
[
"MIT"
] | null | null | null |
material/curso_em_video/ex085.py
|
sergiodealencar/courses
|
c9d86b27b0185cc82624b01ed76653dbc12554a3
|
[
"MIT"
] | null | null | null |
material/curso_em_video/ex085.py
|
sergiodealencar/courses
|
c9d86b27b0185cc82624b01ed76653dbc12554a3
|
[
"MIT"
] | null | null | null |
# read seven integers and split them into even and odd sub-lists
lista = [[], []]
valor = 0
for c in range(1, 8):
    valor = int(input(f'Digite o {c}o. valor: '))  # "Enter the c-th value"
    if valor % 2 == 0:
        lista[0].append(valor)
    else:
        lista[1].append(valor)
print('-=' * 30)
print(f'Os valores pares digitados foram: {sorted(lista[0])}')    # even values entered
print(f'Os valores ímpares digitados foram: {sorted(lista[1])}')  # odd values entered

# my original attempt (also worked, once the index is c - 1 rather than c):
# lista = [[], []]
# temp = []
# for c in range(1, 8):
#     temp.append(int(input(f'Digite o {c}o valor: ')))
#     if temp[c - 1] % 2 == 0:
#         lista[0].append(temp[c - 1])
#     else:
#         lista[1].append(temp[c - 1])
# print('-=' * 30)
# sorted(lista)  # (no effect; sorted() returns a new list)
# print(f'Os valores pares digitados foram: {sorted(lista[0])}')
# print(f'Os valores ímpares digitados foram: {sorted(lista[1])}')
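
# Editor's sketch (not part of the original exercise): the same even/odd split
# can also be done after collecting all inputs, using list comprehensions.
# valores = [int(input(f'Digite o {c}o. valor: ')) for c in range(1, 8)]
# pares = sorted(v for v in valores if v % 2 == 0)
# impares = sorted(v for v in valores if v % 2 != 0)
# print('-=' * 30)
# print(f'Os valores pares digitados foram: {pares}')
# print(f'Os valores ímpares digitados foram: {impares}')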
| 29.153846
| 65
| 0.575198
| 115
| 758
| 3.791304
| 0.278261
| 0.126147
| 0.073395
| 0.137615
| 0.665138
| 0.600917
| 0.541284
| 0.541284
| 0.541284
| 0.426606
| 0
| 0.035058
| 0.209763
| 758
| 25
| 66
| 30.32
| 0.692821
| 0.51715
| 0
| 0
| 0
| 0
| 0.366477
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.272727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|