Dataset schema (column → dtype), reconstructed from the flattened header row:

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
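Rows with this schema can be loaded and inspected programmatically. A minimal sketch using the Hugging Face `datasets` library — the repo id below is a placeholder assumption, not the actual source of this export:

```python
# Minimal sketch: stream one row of a dataset with the schema above.
# ASSUMPTION: "your-org/your-code-dataset" is a placeholder repo id;
# substitute the dataset this table was actually exported from.
from datasets import load_dataset

ds = load_dataset("your-org/your-code-dataset", split="train", streaming=True)
row = next(iter(ds))
print(row["hexsha"], row["lang"], row["size"])           # basic file metadata
print(row["content"][:200])                              # start of the source file
print(row["qsc_code_frac_lines_assert_quality_signal"])  # one quality signal
```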
**Record 1** · hexsha `4763416997df781bd573e3c234672ddfd3a200da` · size 394 · ext `py` · lang Python

- max_stars: path `examples/spot/sub_account/sub_account_futures_asset_transfer_history.py` · repo `Banging12/binance-connector-python` · head `dc6fbbd0bb64fb08d73ad8b31e0b81d776efa30b` · licenses `["MIT"]` · count 512 · events 2021-06-15T08:52:44.000Z – 2022-03-31T09:49:53.000Z
- max_issues: same path/repo/head · licenses `["MIT"]` · count 75 · events 2021-06-20T13:49:50.000Z – 2022-03-30T02:45:31.000Z
- max_forks: same path/repo/head · licenses `["MIT"]` · count 156 · events 2021-06-18T11:56:36.000Z – 2022-03-29T16:34:22.000Z

content:
```python
#!/usr/bin/env python
import logging
from binance.spot import Spot as Client
from binance.lib.utils import config_logging

config_logging(logging, logging.DEBUG)

key = ""
secret = ""

spot_client = Client(key, secret)

logging.info(
    spot_client.sub_account_futures_asset_transfer_history(
        email="",
        futuresType=1,  # 1: USDT-margined Futures, 2: Coin-margined Futures
    )
)
```
avg_line_length 20.736842 · max_line_length 73 · alphanum_fraction 0.730964
Quality signals (`qsc_code_num_words_quality_signal` through `qsc_codepython_frac_lines_print_quality_signal`, in the schema order above): 53, 394, 5.264151, 0.622642, 0.078853, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.009146, 0.167513, 394, 18, 74, 21.888889, 0.841463, 0.172589, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, false, 0, 0.230769, 0, 0.230769, 0
Raw `qsc_code_*` / `qsc_codepython_*` columns: all 0 (`qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` are null) · effective 1 · hits 0
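The simple per-file statistics can be recomputed directly from `content`. A sketch under the assumption that `avg_line_length` is characters over lines and `alphanum_fraction` is alphanumeric characters over all characters — definitions inferred from the reported numbers (394 characters over 19 lines gives 20.736842), not taken from a spec:

```python
# Sketch: recompute record 1's length statistics from its content.
# ASSUMPTION: the file above has been saved locally under this name.
with open("sub_account_futures_asset_transfer_history.py") as f:
    content = f.read()

lines = content.splitlines()
avg_line_length = len(content) / len(lines)                            # 394 / 19 = 20.736842
max_line_length = max(len(line) for line in lines)                     # 73
alphanum_fraction = sum(c.isalnum() for c in content) / len(content)   # 0.730964
print(avg_line_length, max_line_length, alphanum_fraction)
```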
**Record 2** · hexsha `4765153b7928cbef30b909b4bfadf17b59957999` · size 864 · ext `py` · lang Python

- max_stars: path `jubox/test/notebook/test_get.py` · repo `Miksus/jubox` · head `daaf1e223e0a7c0a3bf9ae03b88d629c0f99d4d5` · licenses `["MIT"]` · count 1 · events 2020-04-26T05:18:45.000Z (single event)
- max_issues: same path/repo/head · licenses `["MIT"]` · count null
- max_forks: same path/repo/head · licenses `["MIT"]` · count null

content:
```python
import pytest
from jubox import JupyterNotebook, RawCell, CodeCell, MarkdownCell


def test_get_tags():
    nb = JupyterNotebook([
        RawCell("first cell"),
        RawCell("second cell", tags=["tagged"]),
        RawCell("third cell", tags=["Not this", "tagged"]),
    ])
    nb_of_tags = nb.get(tags=["tagged"], not_tags=["Not this"])
    assert isinstance(nb_of_tags, JupyterNotebook)
    assert 1 == len(nb_of_tags.node.cells)
    assert "second cell" == nb_of_tags.node.cells[0]["source"]


def test_get_cell_types():
    nb = JupyterNotebook([
        RawCell("first cell"),
        CodeCell("second cell"),
        MarkdownCell("third cell"),
    ])
    nb_of_tags = nb.get(cell_type=["code"])
    assert isinstance(nb_of_tags, JupyterNotebook)
    assert 1 == len(nb_of_tags.node.cells)
    assert "second cell" == nb_of_tags.node.cells[0]["source"]
```
avg_line_length 32 · max_line_length 66 · alphanum_fraction 0.653935
Quality signals (schema order): 113, 864, 4.79646, 0.292035, 0.059041, 0.118081, 0.088561, 0.560886, 0.391144, 0.391144, 0.391144, 0.391144, 0.391144, 0, 0.005755, 0.195602, 864, 27, 67, 32, 0.774101, 0, 0, 0.545455, 0, 0, 0.155093, 0, 0, 0, 0, 0, 0.272727, 1, 0.090909, false, 0, 0.090909, 0, 0.181818, 0
Raw `qsc_code_*` / `qsc_codepython_*` columns: all 0 (`qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` are null) · effective 1 · hits 0
**Record 3** · hexsha `4768a467a8a3d8637a1d9c22d0a7cdad0dc93e1c` · size 4,286 · ext `py` · lang Python

- max_stars / max_issues / max_forks: path `src/main/python/TriggerTextExtractFromS3Image/trigger_text_extract_from_s3_image.py` · repo `aws-samples/social-graph-based-people-recommender-using-amazon-neptune-and-textract` · head `50e54945032d0eb4b47d9072d4c1d66cd169070a` · licenses `["MIT-0"]` · all counts null

content:
```python
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# vim: tabstop=2 shiftwidth=2 softtabstop=2 expandtab

import sys
import json
import os
import urllib.parse
import traceback
import datetime

import boto3

DRY_RUN = (os.getenv('DRY_RUN', 'false') == 'true')
AWS_REGION = os.getenv('REGION_NAME', 'us-east-1')
KINESIS_STREAM_NAME = os.getenv('KINESIS_STREAM_NAME', 'octember-bizcard-img')
DDB_TABLE_NAME = os.getenv('DDB_TABLE_NAME', 'OctemberBizcardImg')


def write_records_to_kinesis(kinesis_client, kinesis_stream_name, records):
    import random
    random.seed(47)

    def gen_records():
        record_list = []
        for rec in records:
            payload = json.dumps(rec, ensure_ascii=False)
            partition_key = 'part-{:05}'.format(random.randint(1, 1024))
            record_list.append({'Data': payload, 'PartitionKey': partition_key})
        return record_list

    MAX_RETRY_COUNT = 3
    record_list = gen_records()
    for _ in range(MAX_RETRY_COUNT):
        try:
            response = kinesis_client.put_records(Records=record_list, StreamName=kinesis_stream_name)
            print("[DEBUG]", response, file=sys.stderr)
            break
        except Exception as ex:
            import time
            traceback.print_exc()
            time.sleep(2)
    else:
        raise RuntimeError('[ERROR] Failed to put_records into kinesis stream: {}'.format(kinesis_stream_name))


def update_process_status(ddb_client, table_name, item):
    def ddb_update_item():
        s3_bucket = item['s3_bucket']
        s3_key = item['s3_key']
        image_id = os.path.basename(s3_key)
        status = item['status']
        modified_time = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')
        response = ddb_client.update_item(
            TableName=table_name,
            Key={
                "image_id": {"S": image_id}
            },
            UpdateExpression="SET s3_bucket = :s3_bucket, s3_key = :s3_key, mts = :mts, #status = :status",
            ExpressionAttributeNames={
                '#status': 'status'
            },
            ExpressionAttributeValues={
                ":s3_bucket": {"S": s3_bucket},
                ":s3_key": {"S": s3_key},
                ":mts": {"N": "{}".format(modified_time)},
                ":status": {"S": status}
            }
        )
        return response

    try:
        print("[DEBUG] try to update_process_status", file=sys.stderr)
        res = ddb_update_item()
        print('[DEBUG]', res, file=sys.stderr)
    except Exception as ex:
        traceback.print_exc()
        raise ex


def lambda_handler(event, context):
    kinesis_client = boto3.client('kinesis', region_name=AWS_REGION)
    ddb_client = boto3.client('dynamodb', region_name=AWS_REGION)

    for record in event['Records']:
        try:
            bucket = record['s3']['bucket']['name']
            key = urllib.parse.unquote_plus(record['s3']['object']['key'], encoding='utf-8')
            record = {'s3_bucket': bucket, 's3_key': key}
            print("[INFO] object created: ", record, file=sys.stderr)
            write_records_to_kinesis(kinesis_client, KINESIS_STREAM_NAME, [record])
            update_process_status(ddb_client, DDB_TABLE_NAME, {'s3_bucket': bucket, 's3_key': key, 'status': 'START'})
        except Exception as ex:
            traceback.print_exc()


if __name__ == '__main__':
    s3_event = '''{
      "Records": [
        {
          "eventVersion": "2.0",
          "eventSource": "aws:s3",
          "awsRegion": "us-east-1",
          "eventTime": "1970-01-01T00:00:00.000Z",
          "eventName": "ObjectCreated:Put",
          "userIdentity": {"principalId": "EXAMPLE"},
          "requestParameters": {"sourceIPAddress": "127.0.0.1"},
          "responseElements": {
            "x-amz-request-id": "EXAMPLE123456789",
            "x-amz-id-2": "EXAMPLE123/5678abcdefghijklambdaisawesome/mnopqrstuvwxyzABCDEFGH"
          },
          "s3": {
            "s3SchemaVersion": "1.0",
            "configurationId": "testConfigRule",
            "bucket": {
              "name": "octember-use1",
              "ownerIdentity": {"principalId": "EXAMPLE"},
              "arn": "arn:aws:s3:::octember-use1"
            },
            "object": {
              "key": "bizcard-raw-img/edy_bizcard.jpg",
              "size": 638,
              "eTag": "0123456789abcdef0123456789abcdef",
              "sequencer": "0A1B2C3D4E5F678901"
            }
          }
        }
      ]
    }'''
    event = json.loads(s3_event)
    lambda_handler(event, {})
```
avg_line_length 27.651613 · max_line_length 112 · alphanum_fraction 0.614792
Quality signals (schema order): 490, 4,286, 5.155102, 0.363265, 0.028504, 0.04038, 0.022565, 0.108472, 0.086302, 0.068884, 0.04038, 0.04038, 0, 0, 0.039132, 0.236818, 4,286, 154, 113, 27.831169, 0.733109, 0.022632, 0, 0.085271, 0, 0.007752, 0.369654, 0.050418, 0, 0, 0, 0, 0, 1, 0.03876, false, 0, 0.069767, 0, 0.124031, 0.054264
Raw `qsc_code_*` / `qsc_codepython_*` columns: all 0 (`qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` are null) · effective 1 · hits 0
**Record 4** · hexsha `476b0de22ff656e62822acafc870dbc852e2d7da` · size 592 · ext `py` · lang Python

- max_stars / max_issues / max_forks: path `dynamic_programming_1/solution/1010.py` · repo `gpgun0/baekjoon_` · head `0a3e87b9eafe1a6af4234ebbd2eebb7f67156414` · licenses `["MIT"]` · all counts null

content:
```python
class Solution:
    def combination(self, m, n):
        if dp[m][n]:
            return dp[m][n]
        if m <= n:
            dp[m][n] = 1
            return dp[m][n]
        if n == 1:
            dp[m][n] = m
            return dp[m][n]
        dp[m][n] = self.combination(m-1, n-1) + self.combination(m-1, n)
        return dp[m][n]

    def main(self):
        n, m = map(int, input().split())
        return self.combination(m, n)


sol = Solution()
t = int(input())
dp = [[0]*201 for _ in range(201)]
for _ in range(t):
    print(sol.main())
```
avg_line_length 22.769231 · max_line_length 73 · alphanum_fraction 0.429054
Quality signals (schema order): 88, 592, 2.863636, 0.272727, 0.087302, 0.126984, 0.15873, 0.329365, 0, 0, 0, 0, 0, 0, 0.033803, 0.400338, 592, 26, 74, 22.769231, 0.676056, 0, 0, 0.210526, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0.105263, false, 0, 0, 0, 0.421053, 0.052632
Raw `qsc_code_*` / `qsc_codepython_*` columns: all 0 (`qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` are null) · effective 1 · hits 0
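Record 4's `combination` is a memoized form of Pascal's rule, C(m, n) = C(m-1, n-1) + C(m-1, n), with base cases C(m, m) = 1 and C(m, 1) = m. A quick standalone check of that recurrence against `math.comb` — my own harness, not part of the dataset:

```python
# Verify the Pascal's-rule recurrence from record 4 against math.comb.
import math

def comb(m, n, dp={}):
    """C(m, n) for 1 <= n <= m, memoized in a shared dict."""
    if (m, n) in dp:
        return dp[(m, n)]
    if m <= n:
        result = 1        # C(m, m) = 1 (the solution assumes n <= m)
    elif n == 1:
        result = m        # C(m, 1) = m
    else:
        result = comb(m - 1, n - 1) + comb(m - 1, n)
    dp[(m, n)] = result
    return result

assert all(comb(m, n) == math.comb(m, n)
           for m in range(1, 30) for n in range(1, m + 1))
```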
**Record 5** · hexsha `476d9258256823afd9857d168e1439ac9c883a29` · size 1,738 · ext `py` · lang Python

- max_stars: path `fireroadApi/fireroad_utils.py` · repo `zhang-lucy/coursehose` · head `21cbda0e7cc12a9d201585dbdd53d2eeacfade96` · licenses `["MIT"]` · count 1 · events 2020-09-20T17:29:24.000Z (single event)
- max_issues / max_forks: same path/repo/head · licenses `["MIT"]` · counts null

content:
```python
import requests
import json
import re


def get_course_requirements(course_id):
    link = "http://fireroad.mit.edu/requirements/get_json/" + course_id
    r = requests.get(link)
    j = r.json()["reqs"]
    return j


def get_all_course_requirements():
    major_reqs = {}
    major_id_link = "https://fireroad.mit.edu/requirements/list_reqs/"
    majors = requests.get(major_id_link).json()
    for major_id in majors:
        major_reqs[major_id] = get_course_requirements(major_id)
    return major_reqs


def get_all_major_titles():
    major_titles = []
    major_id_link = "https://fireroad.mit.edu/requirements/list_reqs/"
    majors = requests.get(major_id_link).json()
    for major_id, major_info in majors.items():
        if 'major' in major_id:
            major_titles.append(major_info['medium-title'].split()[0] + ': ' + major_info['title-no-degree'])
    major_titles.sort(key=major_sort_key)
    return major_titles


def major_sort_key(title):
    if title[0].isnumeric() and not title[1].isnumeric():
        return '0' + title.replace(":", " ")
    return title.replace(":", " ")


def get_all_courses():
    link = "https://fireroad.mit.edu/courses/all?full=true"
    courses = requests.get(link).json()
    courses_dict = {}
    for course_info in courses:
        courses_dict[course_info['subject_id']] = course_info
    return courses_dict


def save(data, file_name):
    with open(file_name, 'w') as f:
        json.dump(data, f)


if __name__ == "__main__":
    # course_id = input("Enter course id: ")
    # print(get_course_requirements(course_id))
    # major_reqs = get_all_course_requirements()
    # save(major_reqs, "../data/course_requirements.json")
    # major_titles = get_all_major_titles()
    # save(major_titles, "../data/major_titles.json")
    all_courses = get_all_courses()
    save(all_courses, "../data/allCourses.json")
```
avg_line_length 28.966667 · max_line_length 100 · alphanum_fraction 0.733026
Quality signals (schema order): 259, 1,738, 4.606178, 0.243243, 0.052808, 0.04694, 0.065381, 0.222129, 0.154233, 0.154233, 0.154233, 0.154233, 0.154233, 0, 0.002609, 0.117952, 1,738, 59, 101, 29.457627, 0.775603, 0.150748, 0, 0.097561, 0, 0, 0.185841, 0.015657, 0, 0, 0, 0, 0, 1, 0.146341, false, 0, 0.073171, 0, 0.365854, 0
Raw `qsc_code_*` / `qsc_codepython_*` columns: all 0 (`qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` are null) · effective 1 · hits 0
**Record 6** · hexsha `476df3fbd08fe0e200196618406d29ff175f3d41` · size 4,423 · ext `py` · lang Python

- max_stars: path `examples/compare_transactions_speed_with_hive.py` · repo `TheCrazyGM/bhive` · head `1494e90a99123ecfc5efbd927258f9ba59443e2e` · licenses `["MIT"]` · count 2 · events 2020-03-21T23:50:22.000Z – 2020-03-25T19:10:48.000Z
- max_issues: same path/repo/head · licenses `["MIT"]` · count null
- max_forks: same path/repo/head · licenses `["MIT"]` · count 1 · events 2020-03-21T23:50:25.000Z (single event)

content:
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import bytes
from builtins import chr
from builtins import range
from builtins import super
import random
from pprint import pprint
from binascii import hexlify
from collections import OrderedDict

from bhivebase import (
    transactions,
    memo,
    operations,
    objects
)
from bhivebase.objects import Operation
from bhivebase.signedtransactions import Signed_Transaction
from bhivegraphenebase.account import PrivateKey
from bhivegraphenebase import account
from bhivebase.operationids import getOperationNameForId
from bhivegraphenebase.py23 import py23_bytes, bytes_types
from bhive.amount import Amount
from bhive.asset import Asset
from bhive.hive import Hive
import time

from hive import Hive as hiveHive
from hivebase.account import PrivateKey as hivePrivateKey
from hivebase.transactions import SignedTransaction as hiveSignedTransaction
from hivebase import operations as hiveOperations

from timeit import default_timer as timer


class BhiveTest(object):

    def setup(self):
        self.prefix = u"HIVE"
        self.default_prefix = u"STM"
        self.wif = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"
        self.ref_block_num = 34294
        self.ref_block_prefix = 3707022213
        self.expiration = "2016-04-06T08:29:27"
        self.hv = Hive(offline=True)

    def doit(self, printWire=False, ops=None):
        ops = [Operation(ops)]
        tx = Signed_Transaction(ref_block_num=self.ref_block_num,
                                ref_block_prefix=self.ref_block_prefix,
                                expiration=self.expiration,
                                operations=ops)
        start = timer()
        tx = tx.sign([self.wif], chain=self.prefix)
        end1 = timer()
        tx.verify([PrivateKey(self.wif, prefix=u"STM").pubkey], self.prefix)
        end2 = timer()
        return end2 - end1, end1 - start


class HiveTest(object):

    def setup(self):
        self.prefix = u"HIVE"
        self.default_prefix = u"STM"
        self.wif = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"
        self.ref_block_num = 34294
        self.ref_block_prefix = 3707022213
        self.expiration = "2016-04-06T08:29:27"

    def doit(self, printWire=False, ops=None):
        ops = [hiveOperations.Operation(ops)]
        tx = hiveSignedTransaction(ref_block_num=self.ref_block_num,
                                   ref_block_prefix=self.ref_block_prefix,
                                   expiration=self.expiration,
                                   operations=ops)
        start = timer()
        tx = tx.sign([self.wif], chain=self.prefix)
        end1 = timer()
        tx.verify([hivePrivateKey(self.wif, prefix=u"STM").pubkey], self.prefix)
        end2 = timer()
        return end2 - end1, end1 - start


if __name__ == "__main__":
    steem_test = HiveTest()
    bsteem_test = BhiveTest()
    steem_test.setup()
    bsteem_test.setup()
    steem_times = []
    bsteem_times = []
    loops = 50
    for i in range(0, loops):
        print(i)
        opHive = hiveOperations.Transfer(**{
            "from": "foo",
            "to": "baar",
            "amount": "111.110 HIVE",
            "memo": "Fooo"
        })
        opBhive = operations.Transfer(**{
            "from": "foo",
            "to": "baar",
            "amount": Amount("111.110 HIVE", hive_instance=Hive(offline=True)),
            "memo": "Fooo"
        })
        t_s, t_v = steem_test.doit(ops=opHive)
        steem_times.append([t_s, t_v])
        t_s, t_v = bsteem_test.doit(ops=opBhive)
        bsteem_times.append([t_s, t_v])

    steem_dt = [0, 0]
    bsteem_dt = [0, 0]
    for i in range(0, loops):
        steem_dt[0] += steem_times[i][0]
        steem_dt[1] += steem_times[i][1]
        bsteem_dt[0] += bsteem_times[i][0]
        bsteem_dt[1] += bsteem_times[i][1]

    print("hive vs bhive:\n")
    print("hive: sign: %.2f s, verification %.2f s" % (steem_dt[0] / loops, steem_dt[1] / loops))
    print("bhive: sign: %.2f s, verification %.2f s" % (bsteem_dt[0] / loops, bsteem_dt[1] / loops))
    print("------------------------------------")
    print("bhive is %.2f %% (sign) and %.2f %% (verify) faster than hive" %
          (steem_dt[0] / bsteem_dt[0] * 100, steem_dt[1] / bsteem_dt[1] * 100))
```
avg_line_length 34.286822 · max_line_length 101 · alphanum_fraction 0.628759
Quality signals (schema order): 536, 4,423, 5.011194, 0.240672, 0.035741, 0.035741, 0.022338, 0.393894, 0.388682, 0.328369, 0.328369, 0.302308, 0.302308, 0, 0.042125, 0.259326, 4,423, 128, 102, 34.554688, 0.777778, 0, 0, 0.371681, 0, 0, 0.099254, 0.031201, 0, 0, 0, 0, 0, 1, 0.035398, false, 0, 0.247788, 0, 0.318584, 0.088496
Raw `qsc_code_*` / `qsc_codepython_*` columns: all 0 (`qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` are null) · effective 1 · hits 0
**Record 7** · hexsha `476f15ab6f480fa347487aaf78b33358b554c7b0` · size 2,509 · ext `py` · lang Python

- max_stars: path `src/main/python/widgets/dialogs/rename_user_dialog.py` · repo `ivov/admin-stock` · head `e2e1d53436878b6db68dcb85d0cca31223066ffb` · licenses `["MIT"]` · count 8 · events 2019-11-02T22:32:30.000Z – 2021-08-16T08:29:39.000Z
- max_issues: same path/repo/head · licenses `["MIT"]` · count null
- max_forks: same path/repo/head · licenses `["MIT"]` · count 3 · events 2019-12-10T16:23:49.000Z – 2021-11-01T20:22:16.000Z

content:
```python
from PyQt5 import QtWidgets, QtCore

from utils.styling import rename_user_dialog_title_style


class RenameUserDialog(QtWidgets.QDialog):
    def __init__(self, parent=None):
        super(RenameUserDialog, self).__init__(parent)
        self.setWindowFlags(
            QtCore.Qt.Dialog
            | QtCore.Qt.CustomizeWindowHint
            | QtCore.Qt.WindowCloseButtonHint
        )
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
        self.setFixedHeight(100)

        self.settings = QtCore.QSettings("solutronic", "admin_stock")
        self.username = self.settings.value("username")

        title = QtWidgets.QLabel("Editar usuario")
        title.setAlignment(QtCore.Qt.AlignCenter)
        title.setStyleSheet(rename_user_dialog_title_style)

        self.name_label = QtWidgets.QLabel("Nombre:")
        self.name_label.setAlignment(QtCore.Qt.AlignCenter)
        self.name_label.setFixedWidth(45)

        self.name_field = QtWidgets.QLineEdit()
        self.name_field.setPlaceholderText("Nombre...")
        self.name_field.setFixedWidth(115)

        horizontal_section = QtWidgets.QHBoxLayout()
        horizontal_section.addWidget(self.name_label)
        horizontal_section.addWidget(self.name_field)

        back_button = QtWidgets.QPushButton("× Cerrar")
        back_button.setShortcut("Alt+c")

        self.save_button = QtWidgets.QPushButton("≡ Guardar")
        self.save_button.setShortcut("Alt+g")
        self.save_button.setEnabled(False)
        self.save_button.setDefault(True)

        bottom_section = QtWidgets.QHBoxLayout()
        bottom_section.addWidget(back_button)
        bottom_section.addWidget(self.save_button)

        layout = QtWidgets.QVBoxLayout()
        layout.addWidget(title)
        layout.addLayout(horizontal_section)
        layout.addLayout(bottom_section)
        self.setLayout(layout)

        self.name_field.textChanged.connect(self.on_name_field_change)
        back_button.clicked.connect(self.close)
        self.save_button.clicked.connect(self.save_name_and_update_statusbar)

    def on_name_field_change(self):
        if self.name_field.text() != "":
            self.save_button.setEnabled(True)
        elif self.name_field.text() == "":
            self.save_button.setEnabled(False)

    def save_name_and_update_statusbar(self):
        self.settings.setValue("username", self.name_field.text())
        main_window = self.parent().parent().parent().parent()
        main_window.set_statusbar()
        self.close()
```
avg_line_length 35.842857 · max_line_length 77 · alphanum_fraction 0.687923
Quality signals (schema order): 274, 2,509, 6.062044, 0.339416, 0.057797, 0.062613, 0.043347, 0.17339, 0.049368, 0.049368, 0.049368, 0, 0, 0, 0.004536, 0.209247, 2,509, 69, 78, 36.362319, 0.831653, 0, 0, 0.037736, 0, 0, 0.037465, 0, 0, 0, 0, 0, 0, 1, 0.056604, false, 0, 0.037736, 0, 0.113208, 0
Raw `qsc_code_*` / `qsc_codepython_*` columns: all 0 (`qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` are null) · effective 1 · hits 0
**Record 8** · hexsha `4771eb8f2f256c1d66ba7f41f070c11396af58fb` · size 6,053 · ext `py` · lang Python

- max_stars: path `bluetooth-audio/btspeaker.py` · repo `b23prodtm/balena-sound` · head `1ee886241485a302f88176c7dd880e986cf768c3` · licenses `["Apache-2.0"]` · count null
- max_issues: same path/repo/head · licenses `["Apache-2.0"]` · count 3 · events 2019-11-20T17:49:37.000Z – 2020-06-01T23:09:10.000Z
- max_forks: same path/repo/head · licenses `["Apache-2.0"]` · count null

content:
```python
#!/usr/bin/python
import bluetooth, sys, os, re, subprocess, time, getopt

BT_BLE = int(os.getenv('BT_BLE', 0))
BT_SCAN_TIMEOUT = int(os.getenv('BT_SCAN_TIMEOUT', 2))
if BT_BLE:
    from gattlib import DiscoveryService
    from ble_client import BleClient


def parse_argv(myenv, argv):
    usage = 'Command line args: \n'\
            ' -d,--duration <seconds> Default: {}\n'\
            ' -s,--uuid <service-name> Default: {}\n'\
            ' --protocol <proto:port> Default: {}\n'\
            ' [bt-address] Default: {}\n'\
            ' -h,--help Show help.\n'.format(myenv["BT_SCAN_TIMEOUT"], myenv["service"], myenv["proto-port"], 'myenv["BTSPEAKER_SINK"]')
    try:
        opts, args = getopt.getopt(argv[1:], "u:d:s:h", ["help", "duration=", "uuid=", "protocol="])
    except getopt.GetoptError:
        print(usage)
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            print(usage)
            sys.exit()
        if opt in ("-d", "--duration"):
            myenv['BT_SCAN_TIMEOUT'] = arg
        elif opt in ("-s", "--uuid"):
            myenv['service'] = arg
        elif opt in ("--protocol"):
            myenv['proto-port'] = arg
        elif re.compile("([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}").match(arg):
            myenv["BTSPEAKER_SINK"] = arg
        else:
            print("Wrong argument %s !" % arg)
            print(usage)


def bt_service(addr, proto_port="", serv=""):
    for services in bluetooth.find_service(address=addr):
        if len(serv) > 0 and (services["name"] == serv or services["service-id"] == serv):
            return bt_connect(services["protocol"], addr, services["port"])
        else:
            print(" UUID: %s (%s)" % (services["name"], services["service-id"]))
            print(" Protocol: %s, %s, %s" % (services["protocol"], addr, services["port"]))
    if proto_port != "" and re.compile("[^:]+:[0-9]+").match(proto_port):
        s = proto_port.find(":")
        proto = proto_port[0:s]
        port = proto_port[s+1:]
        return bt_connect(proto, addr, port)


def bt_connect(proto, addr, port):
    timeout = 0
    while timeout < 5:
        try:
            print(" Attempting %s connection to %s (%s)" % (proto, addr, port))
            s = bluetooth.BluetoothSocket(int(proto))
            s.connect((addr, int(port)))
            print("Success")
            return s
        except bluetooth.btcommon.BluetoothError as err:
            print("%s\n" % (err))
            print(" Fail, probably timeout. Attempting reconnection... (%s)" % (timeout))
            timeout += 1
            time.sleep(1)
    print(" Service or Device not found")
    return None


#------------------------------------------------------------------------------
# Connects to Audio Service (Audio Sink, Audio Source, more in bluetoothctl <<EOF
# info <address>
# EOF
# raise bluetooth.btcommon.BluetoothError
def bt_connect_service(nearby_devices, bt_addr="00:00:00:00:00:00", proto_port="", serv=""):
    sock = None
    for addr, name in nearby_devices:
        if bt_addr == "00:00:00:00:00:00":
            print(" - %s , %s:" % (addr, name))
            sock = bt_service(addr, proto_port, serv)
            if sock:
                sock.close()
        elif bt_addr == addr:
            print(" - found device %s , %s:" % (addr, name))
            sock = bt_service(addr, proto_port, serv)
            break
        else:
            continue
    if sock:
        print(" - service %s available" % (serv))
    else:
        print(" - service %s unavailable at %s" % (serv, bt_addr))
    return sock


#------------------------------------------------------------------------------
# Devices discovery with bluetooth low energy (BT_BLE) support
# return devices list in argument (list append)
def discover_devices(nearby_devices=[]):
    timeout = BT_SCAN_TIMEOUT
    print("looking for nearby devices...")
    try:
        nearby_devices += bluetooth.discover_devices(lookup_names=True, flush_cache=True, duration=timeout)
        print("found %d devices" % len(nearby_devices))
        if BT_BLE:
            service = DiscoveryService()
            try:
                devices = service.discover(timeout)
                for addr, name in devices.items():
                    if not name or name == "":
                        b = BleClient(addr)
                        name = b.request_data().decode('utf-8')
                        b.disconnect()
                    nearby_devices += ((addr, name))
            except RuntimeError as err:
                print("~ BLE ~ Error ", err)
            else:
                print("found %d devices (ble)" % len(devices.items()))
        return nearby_devices
    except bluetooth.btcommon.BluetoothError as err:
        print(" Main thread error : %s" % (err))
        exit(1)


def main(argv):
    myenv = dict()
    main.defaults = dict()
    main.defaults = {
        "file": argv[0],
        "BT_SCAN_TIMEOUT": "5",
        "service": "Audio Sink",
        "BTSPEAKER_SINK": "00:00:00:00:00:00",
        "proto-port": str(bluetooth.L2CAP) + ":25"
    }
    myenv.update(main.defaults)
    myenv.update(os.environ)
    parse_argv(myenv, argv)
    print("looking for nearby devices...")
    try:
        nearby_devices = discover_devices()
        print("found %d devices" % len(nearby_devices))
        print("discovering %s services... %s" % (myenv["BTSPEAKER_SINK"], myenv["service"]))
        sock = bt_connect_service(nearby_devices, myenv["BTSPEAKER_SINK"], myenv["proto-port"], myenv["service"])
        if sock:
            # pair the new device as known device
            print("bluetooth pairing...")
            ps = subprocess.Popen("printf \"pair %s\\nexit\\n\" \"$1\" | bluetoothctl", shell=True, stdout=subprocess.PIPE)
            print(ps.stdout.read())
            ps.stdout.close()
            ps.wait()
            sock.close()
    except bluetooth.btcommon.BluetoothError as err:
        print(" Main thread error : %s" % (err))
        exit(1)


if __name__ == '__main__':
    main(sys.argv)
```
avg_line_length 38.801282 · max_line_length 123 · alphanum_fraction 0.544358
Quality signals (schema order): 714, 6,053, 4.512605, 0.240896, 0.018622, 0.022346, 0.022346, 0.216325, 0.160459, 0.15239, 0.116698, 0.068901, 0.068901, 0, 0.014925, 0.280522, 6,053, 155, 124, 39.051613, 0.724914, 0.075004, 0, 0.244444, 0, 0.007407, 0.219002, 0.010199, 0, 0, 0, 0, 0, 1, 0.044444, false, 0, 0.022222, 0, 0.111111, 0.2
Raw `qsc_code_*` / `qsc_codepython_*` columns: all 0 (`qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` are null) · effective 1 · hits 0
**Record 9** · hexsha `4774eea99e7f6cd1515737dfa6e6653b0d95171b` · size 4,369 · ext `py` · lang Python

- max_stars: path `bedpe_longrange/bedpe2longrange.py` · repo `ChenfuShi/tools_for_HiChIP` · head `0faa8b26a7c53922dd2de977d7df442dd2caeed7` · licenses `["BSD-3-Clause"]` · count 5 · events 2019-05-09T19:31:26.000Z – 2021-12-06T02:57:48.000Z
- max_issues: same path/repo/head · licenses `["BSD-3-Clause"]` · count null
- max_forks: same path/repo/head · licenses `["BSD-3-Clause"]` · count 4 · events 2019-06-01T11:30:41.000Z – 2022-03-11T02:01:52.000Z

content:
```python
#########################################
# Author: Chenfu Shi
# Email: chenfu.shi@postgrad.manchester.ac.uk
# BSD-3-Clause License
# Copyright 2019 Chenfu Shi
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#########################################

# converts bedpe to long range, making sure to print twice each line.
# allows the user to choose which field to copy over and if you want to do -log10 for eg. p-values or q-values

import argparse
import subprocess
import math
import os

parser = argparse.ArgumentParser(description='Tool to convert bedpe files to long_range format. Uses bgzip and tabix to compress and index the file')
parser.add_argument("-i", '--input', dest='inputfile', action='store', required=True,
                    help='input file name')
parser.add_argument("-o", '--output', dest='outputfile', action='store', required=False,
                    help='output file name. Will add .gz automatically')
parser.add_argument("-f", '--field', dest='field', action='store', type=int, default=8, required=False,
                    help='field to store as score. Default 8th field. For MAPS use 9 for FDR')
parser.add_argument('-l', '--log', action='store_true', dest='log', help='do -log10 of score')

args = parser.parse_args()

if args.outputfile:
    outputname = args.outputfile
else:
    outputname = args.inputfile + ".washu.bed"
inputname = args.inputfile
if not os.path.isfile(inputname):
    raise Exception("input file couldn't be opened")

ID_counter = 1
with open(outputname, "w") as outputfile, open(args.inputfile, "r") as inputfile:
    for line in inputfile:
        data = line.split("\t")
        chr1 = data[0].strip()
        if not data[1].strip().isdigit():
            # check that the line contains data instead of header
            continue
        start1 = data[1].strip()
        end1 = data[2].strip()
        chr2 = data[3].strip()
        start2 = data[4].strip()
        end2 = data[5].strip()
        score = data[args.field-1].strip()
        # if chr is a number with no chr add chr, compatibility with washu
        if chr1[0:3] != "chr":
            chr1 = "chr" + chr1
            chr2 = "chr" + chr2
        if args.log == True:
            try:
                score = str(-math.log10(float(score)))
            except ValueError:
                # in case the score is zero
                score = 384
        outputfile.write("{}\t{}\t{}\t{}:{}-{},{}\t{}\t{}\n".format(chr1, start1, end1, chr2, start2, end2, score, str(ID_counter), "."))
        ID_counter = ID_counter + 1
        outputfile.write("{}\t{}\t{}\t{}:{}-{},{}\t{}\t{}\n".format(chr2, start2, end2, chr1, start1, end1, score, str(ID_counter), "."))
        ID_counter = ID_counter + 1

# automatically sort, compress and index the output file
subprocess.run(["sort", "-o", outputname, "-k1,1", "-k2,2n", outputname])
subprocess.run(["bgzip", outputname])
subprocess.run(["tabix", "-p", "bed", outputname+".gz"])
```
avg_line_length 48.010989 · max_line_length 757 · alphanum_fraction 0.676127
Quality signals (schema order): 612, 4,369, 4.802288, 0.428105, 0.005444, 0.006125, 0.005444, 0.105478, 0.089146, 0.089146, 0.089146, 0.089146, 0.046274, 0, 0.017084, 0.196155, 4,369, 91, 758, 48.010989, 0.819761, 0.44152, 0, 0.081633, 0, 0, 0.207088, 0.028181, 0, 0, 0, 0, 0, 1, 0, false, 0, 0.081633, 0, 0.081633, 0
Raw `qsc_code_*` / `qsc_codepython_*` columns: all 0 (`qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` are null) · effective 1 · hits 0
**Record 10** · hexsha `4775e3e337b36b06c6774e19748eb643d6196b5f` · size 11,501 · ext `py` · lang Python

- max_stars / max_issues / max_forks: path `0_get_Fink_features_xmatch.py` · repo `anaismoller/KNTrap` · head `9e1bc85576ab16c4cb6d4d4da74482061029d207` · licenses `["Apache-2.0"]` · all counts null

content:
```python
# Year 2022
# Authors: Anais Möller based on fink-broker.org code

import os
import sys
import glob
import logging
import argparse
import numpy as np
import pandas as pd
from tqdm import tqdm
from pathlib import Path
from functools import partial
from astropy.table import Table
from astropy import units as u
from astropy.coordinates import SkyCoord
import multiprocessing
from concurrent.futures import ProcessPoolExecutor

# my utils
from utils import xmatch
from utils import mag_color
from utils import query_photoz_datalab as photoz


def setup_logging(logpathname):
    logger = None
    # Create logger using python logging module
    logging_handler_out = logging.StreamHandler(sys.stdout)
    logging_handler_out.setLevel(logging.DEBUG)
    logging_handler_err = logging.StreamHandler(sys.stderr)
    logging_handler_err.setLevel(logging.WARNING)
    logger = logging.getLogger("localLogger")
    logger.setLevel(logging.INFO)
    logger.addHandler(logging_handler_out)
    logger.addHandler(logging_handler_err)
    # create file handler which logs even debug messages
    fh = logging.FileHandler(f"{logpathname}", mode="w")
    fh.setLevel(logging.DEBUG)
    logger.addHandler(fh)
    return logger


def process_fn(inputs):
    fn, fil = inputs
    return fn(fil)


def read_file(fname, suffix=None):
    try:
        df_tmp = Table.read(fname, format="ascii").to_pandas()
        if "unforced" in suffix:
            df = pd.read_table(fname, header=None, skiprows=1, delim_whitespace=True)
            if len(df.columns) == 16:
                df.columns = [
                    "MJD",
                    "dateobs",
                    "photcode",
                    "filt",
                    "flux_c",
                    "dflux_c",
                    "type",
                    "chisqr",
                    "ZPTMAG_c",
                    "m",
                    "dm",
                    "ra",
                    "dec",
                    "cmpfile",
                    "tmpl",
                    "ROBOT_score",
                ]
            else:
                df.columns = [
                    "MJD",
                    "dateobs",
                    "photcode",
                    "filt",
                    "flux_c",
                    "dflux_c",
                    "type",
                    "chisqr",
                    "ZPTMAG_c",
                    "m",
                    "dm",
                    "ra",
                    "dec",
                    "cmpfile",
                    "tmpl",
                ]
                df["ROBOT_score"] = np.nan
            df_tmp = df.copy()
        return df_tmp
    except Exception:
        print("File corrupted or empty", fname)
        df_tmp = pd.DataFrame()
        return df_tmp


def process_single_file(fname, suffix=".forced.difflc"):
    # read file and convert to pandas
    df_tmp = read_file(fname, suffix=suffix)
    # process data if available
    if len(df_tmp) > 0 and set(["ra", "dec"]).issubset(df_tmp.keys()):
        # get id
        idx = Path(fname).stem.replace(suffix, "")
        # get ra,dec, idx for xmatch
        ra_tmp, dec_tmp = df_tmp["ra"][0], df_tmp["dec"][0]
        # convert to degrees
        coo = SkyCoord(ra_tmp, dec_tmp, unit=(u.hourangle, u.deg))
        out_ra = coo.ra.degree
        out_dec = coo.dec.degree
        # get color, dmag and rate
        (
            dmag_i,
            dmag_g,
            dmag_rate_i,
            dmag_rate_g,
            color,
            color_avg,
            max_mag_i,
            max_mag_g,
            min_mag_i,
            min_mag_g,
            mean_mag_i,
            mean_mag_g,
            std_mag_i,
            std_mag_g,
            df_tmp,
        ) = mag_color.last_color_rate(df_tmp)
        # other features
        ndet = len(df_tmp)
        tmp_mag = df_tmp["magnitude"].values
        # clean
        del df_tmp

        df_out = pd.DataFrame()
        df_out["id"] = [idx]
        df_out["ra"] = [out_ra]
        df_out["dec"] = [out_dec]
        df_out["max_mag_i"] = [max_mag_i]
        df_out["max_mag_g"] = [max_mag_g]
        df_out["min_mag_i"] = [min_mag_i]
        df_out["min_mag_g"] = [min_mag_g]
        df_out["mean_mag_i"] = [mean_mag_i]
        df_out["mean_mag_g"] = [mean_mag_g]
        df_out["std_mag_i"] = [std_mag_i]
        df_out["std_mag_g"] = [std_mag_g]
        df_out["dmag_i"] = [dmag_i]
        df_out["dmag_g"] = [dmag_g]
        df_out["dmag_rate_i"] = [dmag_rate_i]
        df_out["dmag_rate_g"] = [dmag_rate_g]
        df_out["color"] = [color]
        df_out["color_avg"] = [color_avg]
        df_out["ndet"] = [ndet]
        df_out["two_mags_gt_225"] = [len(np.where(tmp_mag < 22.5)[0]) >= 2]
        df_out["two_mags_gt_235"] = [len(np.where(tmp_mag < 23.5)[0]) >= 2]
        if "unforced" in suffix:
            df_out = df_out.add_suffix("_unforced")
            df_out = df_out.rename(columns={"id_unforced": "id"})
    else:
        df_out = pd.DataFrame()
    return df_out


if __name__ == "__main__":
    """Process light-curves with Fink inspired features & xmatches
    https://github.com/astrolabsoftware/fink-filters
    """
    parser = argparse.ArgumentParser(description="Compute candidate features + xmatch")
    parser.add_argument(
        "--path_field",
        type=str,
        default="data/S82sub8_tmpl",
        help="Path to field",
    )
    parser.add_argument(
        "--path_out",
        type=str,
        default="./Fink_outputs",
        help="Path to outputs",
    )
    parser.add_argument(
        "--path_robot",
        type=str,
        default="../ROBOT_masterlists",
        help="Path to ROBOT outputs",
    )
    parser.add_argument(
        "--debug",
        action="store_true",
        help="Debug: loop processing (slow)",
    )
    parser.add_argument(
        "--test",
        action="store_true",
        help="one file processed only",
    )
    args = parser.parse_args()

    os.makedirs(args.path_out, exist_ok=True)
    os.makedirs("logs/", exist_ok=True)
    cwd = os.getcwd()
    logpathname = f"{cwd}/logs/{Path(args.path_field).stem}_preprocess"
    logger = setup_logging(logpathname)

    # read files
    list_files = glob.glob(f"{args.path_field}/*/*/*.forced.difflc.txt")
    print(f"{len(list_files)} files found in {args.path_field}")

    if args.test:
        print(list_files)
        print("Processing only one file", list_files[0])
        df = process_single_file(list_files[0])
    elif args.debug:
        print(list_files)
        # no parallel
        list_proc = []
        for fil in list_files:
            logger.info(fil)
            list_proc.append(process_single_file(fil))
        df = pd.concat(list_proc)
    else:
        # Read and process files faster with ProcessPoolExecutor
        max_workers = multiprocessing.cpu_count()
        # use parallelization to speed up processing
        # Split list files in chunks of size 10 or less
        # to get a progress bar and alleviate memory constraints
        num_elem = len(list_files)
        num_chunks = num_elem // 10 + 1
        list_chunks = np.array_split(np.arange(num_elem), num_chunks)
        logger.info(f"Dividing processing in {num_chunks} chunks")
        process_fn_file = partial(process_single_file)
        list_fn = []
        for fmt in list_files:
            list_fn.append(process_fn_file)
        list_processed = []
        for chunk_idx in tqdm(list_chunks, desc="Process", ncols=100):
            # Process each file in the chunk in parallel
            with ProcessPoolExecutor(max_workers=max_workers) as executor:
                start, end = chunk_idx[0], chunk_idx[-1] + 1
                # Need to cast to list because executor returns an iterator
                list_pairs = list(zip(list_fn[start:end], list_files[start:end]))
                list_processed += list(executor.map(process_fn, list_pairs))
        df = pd.concat(list_processed)

    print("NOT PARALLEL= UNFORCED PHOTOMETRY")
    list_files_un = glob.glob(f"{args.path_field}/*/*/*.unforced.difflc.txt")
    list_unforced = []
    list_idx = []
    if args.test:
        list_files_un = [list_files_un[0]]
    for fil in list_files_un:
        list_unforced.append(process_single_file(fil, suffix=".unforced.difflc"))
    df_unforced = pd.concat(list_unforced)
    if len(df_unforced) > 0:
        df = pd.merge(df, df_unforced, on="id", how="left")

    logger.info("SIMBAD xmatch")
    z, sptype, typ, ctlg = xmatch.cross_match_simbad(
        df["id"].to_list(), df["ra"].to_list(), df["dec"].to_list()
    )
    logger.info("Finished SIMBAD xmatch")
    # save in df
    df["simbad_type"] = typ
    df["simbad_ctlg"] = ctlg
    df["simbad_sptype"] = sptype
    df["simbad_redshift"] = z

    logger.info("GAIA xmatch")
    source, ragaia, decgaia, plx, plxerr, gmag, angdist = xmatch.cross_match_gaia(
        df["id"].to_list(),
        df["ra"].to_list(),
        df["dec"].to_list(),
        ctlg="vizier:I/345/gaia2",
    )
    (
        source_edr3,
        ragaia_edr3,
        decgaia_edr3,
        plx_edr3,
        plxerr_edr3,
        gmag_edr3,
        angdist_edr3,
    ) = xmatch.cross_match_gaia(
        df["id"].to_list(),
        df["ra"].to_list(),
        df["dec"].to_list(),
        ctlg="vizier:I/350/gaiaedr3",
    )
    logger.info("Finished GAIA xmatch")
    # save in df
    df["gaia_DR2_source"] = source
    df["gaia_DR2_ra"] = ragaia
    df["gaia_DR2_dec"] = decgaia
    df["gaia_DR2_parallax"] = plx
    df["gaia_DR2_parallaxerr"] = plxerr
    df["gaia_DR2_gmag"] = gmag
    df["gaia_DR2_angdist"] = angdist
    df["gaia_eDR3_source"] = source_edr3
    df["gaia_eDR3_ra"] = ragaia_edr3
    df["gaia_eDR3_dec"] = decgaia_edr3
    df["gaia_eDR3_parallax"] = plx_edr3
    df["gaia_eDR3_parallaxerr"] = plxerr_edr3
    df["gaia_eDR3_gmag"] = gmag_edr3
    df["gaia_eDR3_angdist"] = angdist_edr3

    logger.info("USNO-A.20 xmatch")
    (source_usno, angdist_usno,) = xmatch.cross_match_usno(
        df["id"].to_list(),
        df["ra"].to_list(),
        df["dec"].to_list(),
        ctlg="vizier:I/252/out",
    )
    df["USNO_source"] = source_usno
    df["USNO_angdist"] = angdist_usno

    logger.info("Legacy Survey xmatch")
    list_ls_df = []
    for (idx, ra, dec) in df[["id", "ra", "dec"]].values:
        list_ls_df.append(photoz.query_coords_ls(idx, ra, dec, radius_arcsec=10))
    df_ls = pd.concat(list_ls_df)
    logger.info("Finished Legacy Survey xmatch")
    df = pd.merge(df, df_ls, on="id")

    # add ROBOT scores
    # You may need to add the field caldate format as Simon's output
    # TO DO these next lines should give you that
    field = Path(args.path_field).stem.replace("_tmpl", "")
    caldate = Path(args.path_field).parent.parent.stem
    # TO DO just change the name here
    robot_path = f"{args.path_robot}/caldat{caldate}/{field}_{caldate}_masterlist.csv"
    if Path(robot_path).exists():
        df_robot = pd.read_csv(
            robot_path,
            delimiter=";",
        )
        df_robot = df_robot.rename(columns={"Cand_ID": "id"})
        df = pd.merge(df, df_robot, on="id", how="left")
    else:
        print(f"NO ROBOT MASTERLIST FOUND {robot_path}")

    outprefix = str(Path(args.path_field).stem)
    # outname = f"{args.path_out}/{outprefix}.csv"
    # df.to_csv(outname, index=False, sep=";")
    outname = f"{args.path_out}/{outprefix}.pickle"
    df.to_pickle(outname)
    logger.info(f"Saved output {outname}")
```
avg_line_length 31.423497 · max_line_length 87 · alphanum_fraction 0.578558
Quality signals (schema order): 1,484, 11,501, 4.237197, 0.237871, 0.021469, 0.010178, 0.013359, 0.145515, 0.072042, 0.056139, 0.056139, 0.056139, 0.056139, 0, 0.010797, 0.299365, 11,501, 365, 88, 31.509589, 0.769546, 0.078863, 0, 0.221843, 0, 0, 0.165118, 0.026434, 0, 0, 0, 0, 0, 1, 0.013652, false, 0, 0.061433, 0, 0.09215, 0.023891
Raw `qsc_code_*` / `qsc_codepython_*` columns: all 0 (`qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` are null) · effective 1 · hits 0
**Record 11** · hexsha `4777fbaa35309f630f4ae07e90c983a041f15298` · size 1,990 · ext `py` · lang Python

- max_stars: path `2017/day11/src/main.py` · repo `stenbein/AdventOfCode` · head `3e8c24f7140dd9cdc687e176272af6a1302a9ca5` · licenses `["MIT"]` · count 3 · events 2018-04-08T10:40:52.000Z – 2018-12-06T02:37:23.000Z
- max_issues: path `2017/day11/main.py` (no `src/`) · same repo/head · licenses `["MIT"]` · count 2 · events 2018-04-10T11:44:18.000Z – 2022-02-22T21:25:54.000Z
- max_forks: path `2017/day11/src/main.py` · same repo/head · licenses `["MIT"]` · count null

content:
```python
#!/usr/bin/python3
'''Day 11 of the 2017 advent of code'''


class HexCounter():
    '''A hex maze walker object for
    keeping track of our position'''

    def __init__(self):
        self.x = 0
        self.y = 0
        self.z = 0
        self.furthest = 0

    def move(self, direction):
        '''map the direction to a state change'''
        if direction == "n":
            self.y += 1
            self.x -= 1
        elif direction == "s":
            self.y -= 1
            self.x += 1
        elif direction == "ne":
            self.z += 1
            self.x -= 1
        elif direction == "nw":
            self.z -= 1
            self.y += 1
        elif direction == "se":
            self.z += 1
            self.y -= 1
        elif direction == "sw":
            self.z -= 1
            self.x += 1
        else:
            raise ValueError("Undefined direction: ", direction)
        temp = self.max()
        if temp > self.furthest:
            self.furthest = temp

    def max(self):
        '''accounting for negative distance along the grid'''
        total = 0
        maxx = abs(self.x)
        maxy = abs(self.y)
        maxz = abs(self.z)
        total = abs(max(maxx, maxy, maxz))
        return total


def part_one(data):
    """Return the answer to part one of this day"""
    hexer = HexCounter()
    for coord in data:
        hexer.move(coord)
    return hexer.max()


def part_two(data):
    """Return the answer to part two of this day"""
    hexer = HexCounter()
    for coord in data:
        hexer.move(coord)
    return hexer.furthest


if __name__ == "__main__":
    DATA = ""
    with open("input", "r") as f:
        for line in f:
            DATA += line.rstrip()  # hidden newline in file input
    COORDS = DATA.split(",")
    print("Part 1: {}".format(part_one(COORDS)))
    print("Part 2: {}".format(part_two(COORDS)))
```
avg_line_length 20.515464 · max_line_length 65 · alphanum_fraction 0.477387
Quality signals (schema order): 241, 1,990, 3.875519, 0.369295, 0.03212, 0.074946, 0.029979, 0.335118, 0.335118, 0.24197, 0.24197, 0.134904, 0.134904, 0, 0.021812, 0.401005, 1,990, 96, 66, 20.729167, 0.761745, 0.153769, 0, 0.327273, 0, 0, 0.042526, 0, 0, 0, 0, 0, 0, 1, 0.090909, false, 0, 0, 0, 0.163636, 0.036364
Raw `qsc_code_*` / `qsc_codepython_*` columns: all 0 (`qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` are null) · effective 1 · hits 0
**Record 12** · hexsha `477d41ca1c89e9e563388b140b23288438ed0562` · size 7,613 · ext `py` · lang Python

- max_stars: path `Clients/pyClient/GUI.py` · repo `JulianWww/AlphaZero` · head `8eb754659793305eba7b9e636eeab37d9ccd45f7` · licenses `["MIT"]` · count 1 · events 2021-12-05T13:26:17.000Z (single event)
- max_issues / max_forks: same path/repo/head · licenses `["MIT"]` · counts null

content:
```python
import tkinter as tk
from PIL import Image, ImageTk
from game import Game
from threading import Thread
import time
from gameSaver import sendFull
from Client import DummyAgent


class ConsoleAgent:
    """Agent running in the console for testing only"""

    def render(self, state):
        "render the state to the console"
        state.consoleRender()

    def getAction(self, state):
        "get the Action the player wants to perform; called until valid output is found"
        return state.actionModifier(int(input("your Action: ")))

    def winScreen(self, state):
        "dummy for now"
        pass


class GUI(tk.Tk, DummyAgent):
    """
    render game to GUI using Tkinter and canvas
    """
    colorMap = {
        1: "gold",
        -1: "red",
        0: "white"
    }
    yPadRel = 0.1
    _canvasy = 450
    _canvasx = 500
    _dotSize = 0.45
    _lastState = None
    _win = 0
    _winLinesRendered = False
    winLines_kwargs = {
        "fill": "#00FF00",
        "width": 10
    }

    def __init__(self, state, game, replayer):
        super(GUI, self).__init__()
        self.replayer = replayer
        self.title("Connect4 AlphaZero Client")
        self.geometry("500x500")
        self.bind("<Configure> ", self._resize)
        self.yPad = 60
        self.action = -1
        self.canvas = tk.Canvas(self, height=self._canvasy, width=self._canvasx, bg="#FFFFFF")
        self.canvas.bind("<Button-1>", self._writeAction)
        self.canvas.place(x=0, y=self.yPad)
        self.playerLabel = tk.Label(self, text="testText", font=("Arial", self.yPad//2))
        self.playerLabel.place(x=0, y=0)
        self._drawBoard()
        self._drawStones(state)
        self.game = game

    def _resize(self, event):
        """callback for resizing of the window"""
        if event.widget == self:
            self.yPad = int(self.yPadRel * event.width)
            self.canvas.place(x=0, y=self.yPad)
            self.playerLabel.config(font=("Arial", self.yPad//2))
            self._canvasy = event.height - self.yPad
            self._canvasx = event.width
            self.canvas.config(height=self._canvasy, width=self._canvasx)
            self.render(self._lastState)

    def _getDxDy(self):
        "get the dx and dy needed internally to compute field and stone sizes"
        return self._canvasx / 8, self._canvasy / 7

    def render(self, state):
        "render the state"
        self._drawBoard()
        if not state is None:
            self._lastState = state
            self._drawStones(state.board)
            if state.player == 1:
                self.playerLabel.config(text="Yellow's Turn", fg="#808080")
            else:
                self.playerLabel.config(text="Red's Turn", fg="#808080")
            self.renderWinLines(state)
        if not self._lastState is None:
            if self._lastState.isDone:
                self._renderEndMsg()

    def _drawBoard(self):
        "render 7x6 board using lines"
        self.canvas.delete("all")
        dx, dy = self._getDxDy()
        ofset = 0.5
        for x in range(8):
            self.canvas.create_line(dx*(x+ofset), dy*ofset, dx*(x+ofset), self._canvasy - dy*ofset)
        for y in range(7):
            self.canvas.create_line(dx*ofset, dy*(y+ofset), self._canvasx - dx*ofset, dy*(y+ofset))

    def _drawStones(self, state):
        "place stones in board"
        dx, dy = self._getDxDy()
        for x in range(1, 8):
            for y in range(1, 7):
                if state[Game.encodeAction(x-1, y-1)] != 0:
                    Xpos = dx * x
                    Ypos = dy * y
                    Ysize = self._dotSize * dy
                    Xsize = self._dotSize * dx
                    color = self.colorMap[state[Game.encodeAction(x-1, y-1)]]
                    self.canvas.create_oval(
                        Xpos - Xsize, Ypos-Ysize,
                        Xpos+Xsize, Ypos+Ysize,
                        fill=color, width=0
                    )

    def _renderEndMsg(self):
        "render the message at the end of the game"
        args = (self._canvasx//2, self._canvasy//2)
        fontSize = min(self._canvasx//10, self._canvasy//2)
        kwargs = {
            "font": f"Times {fontSize} bold",
            "anchor": "c",
        }
        if self.replayer is None:
            if self._win == 1:
                txt = self.canvas.create_text(*args, **kwargs, fill="green",
                                              text="You Win")
                sendFull(self.game.actions, -1)
            elif self._win == -1:
                txt = self.canvas.create_text(*args, **kwargs, fill="black", text="You Lose")
                sendFull(self.game.actions, 1)
            elif self._win == 0:
                txt = self.canvas.create_text(*args, **kwargs, fill="black", text="Tie")
                sendFull(self.game.actions, 0)

    def _writeAction(self, event):
        """
        callback from canvas mouse left click.
        Converts the position to a grid position and then to an action, which is saved.
        """
        dx, dy = self._getDxDy()
        XPos = (event.x - dx * 0.5) // dx
        YPos = (event.y - dy * 0.5) // dy
        self.action = int(XPos + 7*YPos)

    def getAction(self, state):
        """Make playerLabel black and wait for an action to be written."""
        self.playerLabel.config(fg="#000000")
        self.action = -1
        while self.action == -1:
            time.sleep(0.1)
        if self.replayer is None:
            return self.action
        else:
            return self.replayer.getAction(state)

    def drawLineOverTime(self, x1, y1, x2, y2, steps, dt, args=(), **kwargs):
        "draw a line from (x1, y1) to (x2, y2) over time"
        line = self.canvas.create_line(x1, y1, x1, y1, *args, **kwargs)
        dx = (x2 - x1) / steps
        dy = (y2 - y1) / steps
        for idx in range(steps+1):
            time.sleep(dt)
            self.canvas.delete(line)
            line = self.canvas.create_line(x1, y1, x1+dx*idx, y1+dy*idx, *args, **kwargs)

    def getPos(self, pos):
        "map an action to a canvas position"
        a, b = Game.decodeAction(pos)
        dx, dy = self._getDxDy()
        return (a+1)*dx, (b+1)*dy

    def winScreen(self, game, _win):
        "show win screen"
        self._win = 2
        self.render(game)
        self._winLinesRendered = False
        dx, dy = self._getDxDy()
        threads = []
        if not game is None:
            for a, b in game.ends:
                x1, y1 = self.getPos(a)
                x2, y2 = self.getPos(b)
                currentThread = Thread(
                    target=self.drawLineOverTime,
                    args=(
                        x1, y1,
                        x2, y2,
                        20, 0.01
                    ),
                    kwargs=self.winLines_kwargs
                )
                currentThread.start()
                threads.append(currentThread)
            for thread in threads:
                thread.join()
            del threads
        self._win = _win
        if game.tie:
            self._win = 0
        self._winLinesRendered = True

    def renderWinLines(self, game):
        if self._winLinesRendered:
            if game.isDone:
                for a, b in game.ends:
                    x1, y1 = self.getPos(a)
                    x2, y2 = self.getPos(b)
                    self.canvas.create_line(x1, y1, x2, y2, **self.winLines_kwargs)
```
avg_line_length 31.853556 · max_line_length 104 · alphanum_fraction 0.523053
Quality signals (schema order): 899, 7,613, 4.34594, 0.252503, 0.040952, 0.036857, 0.019196, 0.204761, 0.176094, 0.141797, 0.112618, 0.081136, 0.081136, 0, 0.029108, 0.36372, 7,613, 238, 105, 31.987395, 0.777457, 0.092736, 0, 0.13587, 0, 0, 0.087238, 0, 0, 0, 0, 0, 0, 1, 0.086957, false, 0.005435, 0.038043, 0, 0.211957, 0
Raw `qsc_code_*` / `qsc_codepython_*` columns: all 0 (`qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` are null) · effective 1 · hits 0
**Record 13** · hexsha `47846ea157171c9a8c93b748212ec54fd187b2e7` · size 1,418 · ext `py` · lang Python

- max_stars: path `samples/bot-handoff-es6/emulate-users.py` · repo `microsoftly/botbuilder-js` · head `538cb479b8596cdc209f5d70aa1a9000a0e6b360` · licenses `["MIT"]` · count 1 · events 2021-03-16T05:14:30.000Z (single event)
- max_issues: same path/repo/head · licenses `["MIT"]` · count 1 · events 2018-03-26T05:25:54.000Z – 2018-03-26T05:26:18.000Z
- max_forks: same path/repo/head · licenses `["MIT"]` · count 1 · events 2018-03-26T04:16:42.000Z (single event)

content:
#!/usr/bin/python
import json, subprocess, sys, platform
from os.path import expanduser
if len (sys.argv) < 2 :
print("Usage: python " + sys.argv[0] + " username(s)")
sys.exit (1)
HOME=expanduser("~")
# determine paths
SYSTEM=platform.system()
if SYSTEM == 'Darwin':
SERVERJSON=HOME+'/Library/Application Support/botframework-emulator/botframework-emulator/server.json'
EMULATORPATH=HOME+'/Applications/botframework-emulator.app/'
elif SYSTEM == 'Windows':
SERVERJSON=HOME+'/AppData/Roaming/botframework-emulator/botframework-emulator/server.json'
EMULATORPATH=HOME+'/AppData/Local/botframework/botframework-emulator.exe'
else:
print("System " + SYSTEM + " not yet supported.")
sys.exit(1)
# read the server config file
with open(SERVERJSON, "r") as jsonFile:
data = json.load(jsonFile)
args=sys.argv[1:]
for arg in args:
# add user if not present
if data["users"]["usersById"].get(arg) is None:
data["users"]["usersById"][arg]={"id": arg,"name": arg}
# set current user
data["users"]["currentUserId"]=arg
# write server config file
with open(SERVERJSON, "w") as jsonFile:
json.dump(data, jsonFile, sort_keys=False, indent=2, separators=(',', ': '))
# launch emulator
if SYSTEM == 'Darwin':
subprocess.call(["/usr/bin/open", "-n", EMULATORPATH])
elif SYSTEM == 'Windows':
subprocess.call([EMULATORPATH])
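# Example invocation (hypothetical usernames): registers both users in server.json
# and launches the emulator with 'bob' as the current user, since the last
# argument wins:
#   python emulate-users.py alice bob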
| 33.761905
| 106
| 0.673484
| 176
| 1,418
| 5.420455
| 0.494318
| 0.125786
| 0.016771
| 0.083857
| 0.209644
| 0.209644
| 0.138365
| 0.138365
| 0
| 0
| 0
| 0.005085
| 0.167842
| 1,418
| 42
| 107
| 33.761905
| 0.80339
| 0.100141
| 0
| 0.206897
| 0
| 0
| 0.314961
| 0.179528
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.068966
| 0
| 0.068966
| 0.068966
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4786b42fdf67185bb43692e8ca877d2f9dba7530
| 4,571
|
py
|
Python
|
models/segnet.py
|
AntixK/Neural-Blocks
|
018a44bbb703fc848234b95a3e604576bd9df88f
|
[
"MIT"
] | 3
|
2019-07-23T12:35:50.000Z
|
2021-02-23T04:20:31.000Z
|
models/segnet.py
|
AntixK/Neural-Blocks
|
018a44bbb703fc848234b95a3e604576bd9df88f
|
[
"MIT"
] | null | null | null |
models/segnet.py
|
AntixK/Neural-Blocks
|
018a44bbb703fc848234b95a3e604576bd9df88f
|
[
"MIT"
] | 1
|
2019-07-21T06:07:12.000Z
|
2019-07-21T06:07:12.000Z
|
import torch
import torch.nn as nn
from NeuralBlocks.blocks.convnormrelu import ConvNormRelu
class segnetDown(nn.Module):
def __init__(self, in_channels, out_channels, norm=None, num_conv = 2):
super(segnetDown, self).__init__()
module = []
if num_conv < 2:
raise ValueError("SegNet needs at least 2 conv layers i.e. num_conv >= 2")
"""
For SegNet, the down sampling layers have the form
conv (in_channels, out_channels) + BN + ReLU
conv (out_channels, out_channels) + BN + ReLU
"""
num_filters = [in_channels] + num_conv * [out_channels]
for i in range(num_conv):
module.append(ConvNormRelu(num_filters[i], num_filters[i + 1],
kernel_size=3, stride=1, padding=1, norm=norm))
self.layer = nn.Sequential(*module)
#print(self.layer)
self.maxpool_argmax = nn.MaxPool2d(2,2, return_indices=True)
def forward(self, input):
output = self.layer(input)
unpooled_size = output.size()
output, indices = self.maxpool_argmax(output)
return output, indices, unpooled_size
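# Shape sketch (follows from the 2x2 max-pool above): an input of shape
# (N, in_channels, H, W) yields output (N, out_channels, H/2, W/2), plus the
# pooling indices and the pre-pool size that nn.MaxUnpool2d needs later.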
class segnetUp(nn.Module):
def __init__(self, in_channels, out_channels, is_deconv, num_conv = 2, norm = 'BN'):
super(segnetUp, self).__init__()
if num_conv < 2:
raise ValueError("SegNet needs at least 2 conv layers i.e. num_conv >= 2")
num_filters = [in_channels] * num_conv + [out_channels]
"""
For SegNet, the up sampling layers have the form
conv (in_channels, in_channels) + BN + ReLU
conv (in_channels, out_channels) + BN + ReLU
"""
if is_deconv:
self.up = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=2, stride=2)
else:
self.up = nn.MaxUnpool2d(2,2)
module = []
for i in range(num_conv):
module.append(ConvNormRelu(num_filters[i], num_filters[i + 1],
kernel_size = 3, stride= 1, padding=1, norm=norm))
self.layer = nn.Sequential(*module)
def forward(self, input, indices, output_size):
output = self.up(input = input, indices = indices, output_size=output_size)
output = self.layer(output)
return output
class SegNet(nn.Module):
def __init__(self, in_channels, n_class, norm = 'BN', filters = None, is_deconv = False):
super(SegNet,self).__init__()
self.is_deconv = is_deconv
if filters is None:
filters = [64,128,256,512, 512]
if len(filters) < 3:
raise ValueError('Number of filters must be at least 3.')
filters.insert(0, in_channels) # To account for the initial channels
modules= []
# Downsampling phase
for i in range(1, len(filters)):
if i < 3:
modules.append(segnetDown(filters[i-1], filters[i], norm, num_conv=2))
else:
modules.append(segnetDown(filters[i-1], filters[i], norm, num_conv=3))
self.down_layers = nn.ModuleList(modules)
# Upsampling Phase
filters[0] = n_class # To account for the final number of classes
modules = []
for i in range(len(filters)-1,0,-1):
if i > 2:
modules.append(segnetUp(filters[i], filters[i-1], self.is_deconv,
num_conv = 3, norm = norm))
else:
modules.append(segnetUp(filters[i], filters[i-1], self.is_deconv,
num_conv = 2, norm = norm))
self.up_layers = nn.ModuleList(modules)
# print(self.up_layers)
def forward(self, input):
x = input
unpool_args = []
for i, module in enumerate(self.down_layers):
x, ind, unpool_shape = module(x)
unpool_args.append([ind, unpool_shape])
result = x
N = len(self.up_layers)-1 # Variable to traverse unpool_args from reverse
"""
Note that the parameters for the up layers are the result of the
previous layer and the unpool args from the corresponding up layer.
i.e. the unpool_args must be traversed from reverse.
"""
for i, module in enumerate(self.up_layers):
result = module(result, *unpool_args[N-i])
return result
if __name__ == "__main__":
s = SegNet(3, 10, norm = 'BN')
inp = torch.randn(32, 3, 128, 128)  # M x C x H x W
s.train()
result = s(inp)
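# Sanity check (follows from the architecture: unpooling restores each spatial
# size and the last up-block maps to n_class channels):
#   print(result.shape)  # expected: torch.Size([32, 10, 128, 128])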
| 34.628788
| 93
| 0.584992
| 595
| 4,571
| 4.317647
| 0.206723
| 0.038147
| 0.024912
| 0.040872
| 0.393149
| 0.383418
| 0.354223
| 0.324251
| 0.293889
| 0.231997
| 0
| 0.022173
| 0.309342
| 4,571
| 131
| 94
| 34.89313
| 0.791574
| 0.046598
| 0
| 0.283951
| 0
| 0
| 0.041589
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.037037
| 0
| 0.185185
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
478b01eea05155c0e098a1b65909d95f41833301
| 5,665
|
py
|
Python
|
barbican-8.0.0/barbican/objects/container_consumer_meta.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 177
|
2015-01-02T09:35:53.000Z
|
2022-02-26T01:43:55.000Z
|
barbican/objects/container_consumer_meta.py
|
kkutysllb/barbican
|
7b14d983e0dce6dcffe9781b05c52335b8203fc7
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
barbican/objects/container_consumer_meta.py
|
kkutysllb/barbican
|
7b14d983e0dce6dcffe9781b05c52335b8203fc7
|
[
"Apache-2.0"
] | 87
|
2015-01-13T17:33:40.000Z
|
2021-11-09T05:30:36.000Z
|
# Copyright 2018 Fujitsu.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db import exception as db_exc
from oslo_utils import timeutils
from oslo_versionedobjects import base as object_base
from barbican.common import utils
from barbican.model import models
from barbican.model import repositories as repos
from barbican.objects import base
from barbican.objects import fields
LOG = utils.getLogger(__name__)
@object_base.VersionedObjectRegistry.register
class ContainerConsumerMetadatum(base.BarbicanObject,
base.BarbicanPersistentObject,
object_base.VersionedObjectDictCompat):
fields = {
'container_id': fields.StringField(nullable=False),
'project_id': fields.StringField(nullable=True, default=None),
'name': fields.StringField(nullable=True, default=None),
'URL': fields.StringField(nullable=True, default=None),
'data_hash': fields.StringField(nullable=True, default=None)
}
db_model = models.ContainerConsumerMetadatum
db_repo = repos.get_container_consumer_repository()
@classmethod
def get_by_container_id(cls, container_id, offset_arg=None, limit_arg=None,
suppress_exception=False, session=None):
entities_db, offset, limit, total = \
cls.db_repo.get_by_container_id(
container_id, offset_arg, limit_arg,
suppress_exception, session)
entities = [cls()._from_db_object(entity_db) for entity_db in
entities_db]
return entities, offset, limit, total
@classmethod
def get_by_values(cls, container_id, name, URL, suppress_exception=False,
show_deleted=False, session=None):
consumer_db = cls.db_repo.get_by_values(container_id, name,
URL,
suppress_exception,
show_deleted,
session)
return cls()._from_db_object(consumer_db)
@classmethod
def create_or_update_from_model(cls, new_consumer,
container, session=None):
"""Create or update container
:param new_consumer: a instance of ContainerConsumerMetadatum model
:param container: a instance of Container OVO
:param session: a session to connect with database
:return: None
It is used during converting from model to OVO. It will be removed
after Container resource is implemented OVO.
"""
session = cls.get_session(session=session)
try:
container.updated_at = timeutils.utcnow()
container.save(session=session)
new_consumer.save(session=session)
except db_exc.DBDuplicateEntry:
session.rollback() # We know consumer already exists.
# This operation is idempotent, so log this and move on
LOG.debug("Consumer %s with URL %s already exists for "
"container %s, continuing...", new_consumer.name,
new_consumer.URL, new_consumer.container_id)
# Get the existing entry and reuse it by clearing the deleted flags
existing_consumer = cls.get_by_values(
new_consumer.container_id, new_consumer.name, new_consumer.URL,
show_deleted=True)
existing_consumer.deleted = False
existing_consumer.deleted_at = None
# We are not concerned about timing here -- set only, no reads
existing_consumer.save(session=session)
@classmethod
def create_or_update_from(cls, new_consumer, container, session=None):
"""Create or update container
:param new_consumer: a instance of ContainerConsumerMetadatum OVO
:param container: a instance of Container OVO
:param session: a session to connect with database
:return: None
"""
session = cls.get_session(session=session)
try:
container.updated_at = timeutils.utcnow()
container.consumers.append(new_consumer)
container.save(session=session)
except db_exc.DBDuplicateEntry:
session.rollback() # We know consumer already exists.
# This operation is idempotent, so log this and move on
LOG.debug("Consumer %s with URL %s already exists for "
"container %s, continuing...", new_consumer.name,
new_consumer.URL, new_consumer.container_id)
# Get the existing entry and reuse it by clearing the deleted flags
existing_consumer = cls.get_by_values(
new_consumer.container_id, new_consumer.name, new_consumer.URL,
show_deleted=True)
existing_consumer.deleted = False
existing_consumer.deleted_at = None
# We are not concerned about timing here -- set only, no reads
existing_consumer.save(session=session)
| 44.960317
| 79
| 0.642012
| 647
| 5,665
| 5.454405
| 0.267388
| 0.056107
| 0.039671
| 0.032871
| 0.548031
| 0.540096
| 0.456787
| 0.456787
| 0.456787
| 0.456787
| 0
| 0.001997
| 0.292851
| 5,665
| 125
| 80
| 45.32
| 0.878932
| 0.270079
| 0
| 0.435897
| 0
| 0
| 0.044433
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051282
| false
| 0
| 0.102564
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
478d445e00859e47118663015c1dca1d382e8e84
| 3,841
|
py
|
Python
|
tests/sentry/api/endpoints/test_project_rules_configuration.py
|
pierredup/sentry
|
0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80
|
[
"BSD-3-Clause"
] | null | null | null |
tests/sentry/api/endpoints/test_project_rules_configuration.py
|
pierredup/sentry
|
0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80
|
[
"BSD-3-Clause"
] | null | null | null |
tests/sentry/api/endpoints/test_project_rules_configuration.py
|
pierredup/sentry
|
0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from mock import Mock, patch
from sentry.rules.registry import RuleRegistry
from sentry.testutils import APITestCase
class ProjectRuleConfigurationTest(APITestCase):
def setUp(self):
self.project.flags.has_issue_alerts_targeting = False
self.project.save()
def test_simple(self):
self.login_as(user=self.user)
team = self.create_team()
project1 = self.create_project(teams=[team], name="foo")
self.create_project(teams=[team], name="baz")
url = reverse(
"sentry-api-0-project-rules-configuration",
kwargs={"organization_slug": project1.organization.slug, "project_slug": project1.slug},
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert len(response.data["actions"]) == 4
assert len(response.data["conditions"]) == 9
@property
def rules(self):
rules = RuleRegistry()
rule = Mock()
rule.id = "sentry.mail.actions.NotifyEmailAction"
rule.rule_type = "action/lol"
node = rule.return_value
node.id = "sentry.mail.actions.NotifyEmailAction"
node.label = "hello"
node.prompt = "hello"
node.is_enabled.return_value = True
node.form_fields = {}
rules.add(rule)
return rules
def run_mock_rules_test(self, expected_actions, querystring_params, rules=None):
if not rules:
rules = self.rules
self.login_as(user=self.user)
with patch("sentry.api.endpoints.project_rules_configuration.rules", rules):
url = reverse(
"sentry-api-0-project-rules-configuration",
kwargs={
"organization_slug": self.organization.slug,
"project_slug": self.project.slug,
},
)
response = self.client.get(url, querystring_params, format="json")
assert response.status_code == 200, response.content
assert len(response.data["actions"]) == expected_actions
assert len(response.data["conditions"]) == 0
def test_filter_out_notify_email_action(self):
self.run_mock_rules_test(0, {})
def test_filter_show_notify_email_action_migrated_project(self):
self.project.flags.has_issue_alerts_targeting = True
self.project.save()
self.run_mock_rules_test(1, {})
def test_filter_show_notify_email_action_override(self):
self.run_mock_rules_test(0, {"issue_alerts_targeting": "0"})
self.run_mock_rules_test(1, {"issue_alerts_targeting": "1"})
def test_show_notify_event_service_action(self):
rules = RuleRegistry()
rule = Mock()
rule.id = "sentry.rules.actions.notify_event_service.NotifyEventServiceAction"
rule.rule_type = "action/lol"
node = rule.return_value
node.id = rule.id
node.label = "hello"
node.prompt = "hello"
node.is_enabled.return_value = True
node.form_fields = {}
node.get_services.return_value = [Mock()]
rules.add(rule)
self.run_mock_rules_test(1, {}, rules=rules)
def test_hide_empty_notify_event_service_action(self):
rules = RuleRegistry()
rule = Mock()
rule.id = "sentry.rules.actions.notify_event_service.NotifyEventServiceAction"
rule.rule_type = "action/lol"
node = rule.return_value
node.id = rule.id
node.label = "hello"
node.prompt = "hello"
node.is_enabled.return_value = True
node.form_fields = {}
node.get_services.return_value = []
rules.add(rule)
self.run_mock_rules_test(0, {}, rules=rules)
| 36.235849
| 100
| 0.640198
| 450
| 3,841
| 5.228889
| 0.237778
| 0.037399
| 0.035699
| 0.047599
| 0.674033
| 0.621334
| 0.533787
| 0.483638
| 0.402465
| 0.402465
| 0
| 0.00766
| 0.252278
| 3,841
| 105
| 101
| 36.580952
| 0.81163
| 0
| 0
| 0.460674
| 0
| 0
| 0.143713
| 0.099974
| 0
| 0
| 0
| 0
| 0.067416
| 1
| 0.101124
| false
| 0
| 0.05618
| 0
| 0.179775
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
478df9b66c40c191cbb8cf8a0885ef1863ea295a
| 3,297
|
py
|
Python
|
train-script.py
|
praepunctis/chordtext
|
b58ea6fea7bf417e6e18daad6c4ce6ea878bd27b
|
[
"MIT"
] | 1
|
2020-12-02T10:04:08.000Z
|
2020-12-02T10:04:08.000Z
|
train-script.py
|
praepunctis/chordtext
|
b58ea6fea7bf417e6e18daad6c4ce6ea878bd27b
|
[
"MIT"
] | null | null | null |
train-script.py
|
praepunctis/chordtext
|
b58ea6fea7bf417e6e18daad6c4ce6ea878bd27b
|
[
"MIT"
] | null | null | null |
# train-script.py
# Grab data from movie_data.csv and train a ML model.
# Kelly Fesler (c) Nov 2020
# Modified from Soumya Gupta (c) Jan 2020
# STEP 1: import -------------------------------------------
# Import libraries
import urllib.request
import os
import pandas as pd
import numpy as np
import nltk
import sklearn
import joblib
from nltk.tokenize import RegexpTokenizer
from nltk.stem.porter import PorterStemmer
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
# STEP 2: read ---------------------------------------------
# Read in the large movie review dataset; display the first 3 lines
df = pd.read_csv('movie_data.csv', encoding='utf-8')
print("Loading data...\n")
data_top = df.head(3)
print(data_top)
# STEP 3: clean --------------------------------------------
# prepare tokenizer, stopwords, stemmer objects
tokenizer = RegexpTokenizer(r'\w+')
en_stopwords = set(stopwords.words('english'))
ps = PorterStemmer()
# set up helper function to clean data:
def getStemmedReview(review):
# turn to lowercase
review = review.lower()
review = review.replace("<br /><br />", " ")
# tokenize
tokens = tokenizer.tokenize(review)
new_tokens = [token for token in tokens if token not in en_stopwords]
# stem
stemmed_tokens = [ps.stem(token) for token in new_tokens]
clean_review = ' '.join(stemmed_tokens)
return clean_review
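# Illustrative example (the exact output depends on the NLTK stopword list and
# the Porter stemmer, so treat it as approximate):
#   getStemmedReview("This movie was GREAT!<br /><br />Loved it.")  # -> 'movi great love'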
# tokenize & clean all reviews
print("")
print("Tokenizing & cleaning...")
df['review'] = df['review'].apply(getStemmedReview)  # assign back so the cleaned text is actually kept
# STEP 4: split --------------------------------------------
print("Splitting...")
# split: 35k rows for training
X_train = df.loc[:35000, 'review'].values
Y_train = df.loc[:35000, 'sentiment'].values
# split: 15k rows for testing
X_test = df.loc[35000:, 'review'].values
Y_test = df.loc[35000:, 'sentiment'].values
# STEP 5: transform to feature vectors ---------------------
# set up vectorizer from sklearn
vectorizer = TfidfVectorizer(sublinear_tf=True, encoding='utf-8')
# train on the training data
print("Training...")
vectorizer.fit(X_train)
# after learning from training data, transform the test data
print("Transforming...")
X_train = vectorizer.transform(X_train)
X_test = vectorizer.transform(X_test)
# STEP 6: create the ML model ------------------------------
print("Creating the model...")
model = LogisticRegression(solver='liblinear')
model.fit(X_train,Y_train)
print("ok!")
# print scores
print("")
print("Score on training data is: " + str(model.score(X_train,Y_train)))
print("Score on testing data is:" + str(model.score(X_test,Y_test)))
# STEP 7: test model output --------------------------------
print("")
print("Testing a negative review...")
# Sampling a negative review; let's compare expected & predicted values
print("Expected sentiment: 0")
print("Predicted sentiment: " + str(model.predict(X_test[0])))
print("Expected probabilities: ~0.788, ~0.211")
print("Predicted probabilities: " + str(model.predict_proba(X_test[0])))
# STEP 8: save & export the model --------------------------
print("")
print("Exporting to .pkl files...")
joblib.dump(en_stopwords,'stopwords.pkl')
joblib.dump(model,'model.pkl')
joblib.dump(vectorizer,'vectorizer.pkl')
print("done")
| 28.422414
| 73
| 0.668487
| 434
| 3,297
| 4.997696
| 0.380184
| 0.016598
| 0.018442
| 0.013831
| 0.077916
| 0.03965
| 0
| 0
| 0
| 0
| 0
| 0.01915
| 0.128905
| 3,297
| 115
| 74
| 28.669565
| 0.736072
| 0.330604
| 0
| 0.066667
| 0
| 0
| 0.20514
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016667
| false
| 0
| 0.2
| 0
| 0.233333
| 0.35
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
478eeb89ce278e9a0867db0565808bfedbcc11dc
| 10,947
|
py
|
Python
|
roerich/algorithms.py
|
HSE-LAMBDA/roerich
|
17e178292593d1ea6a821b99705620ba066abd2a
|
[
"BSD-2-Clause"
] | 10
|
2020-12-01T13:58:27.000Z
|
2022-01-17T12:01:31.000Z
|
roerich/algorithms.py
|
HSE-LAMBDA/roerich
|
17e178292593d1ea6a821b99705620ba066abd2a
|
[
"BSD-2-Clause"
] | 3
|
2021-03-07T14:06:22.000Z
|
2022-01-18T14:23:16.000Z
|
roerich/algorithms.py
|
HSE-LAMBDA/roerich
|
17e178292593d1ea6a821b99705620ba066abd2a
|
[
"BSD-2-Clause"
] | 2
|
2020-12-01T14:04:36.000Z
|
2022-03-24T12:52:32.000Z
|
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from copy import deepcopy
from typing import Union, Type, Any, Tuple
import numpy as np
import torch
import torch.nn as nn
from scipy.signal import find_peaks_cwt
from .net import MyNN, MyNNRegressor
from .utils import autoregression_matrix, unified_score
from .metrics import KL_sym, KL, JSD, PE, PE_sym, Wasserstein
from .scaler import SmaScalerCache
from .helper import SMA
class ChangePointDetection(metaclass=ABCMeta):
def __init__(self, scaler: Any = "default", metric: str = "KL", window_size: int = 1, periods: int = 10,
lag_size: int = 0, step: int = 1, n_epochs: int = 100, lr: float = 0.01, lam: float = 0,
optimizer: str = "Adam", debug: int = 0):
"""
Parameters
----------
scaler: A scaler object used to scale the input data. The default is `SmaScalerCache`
metric: The loss metric for the NN optimization step. One of: KL_sym, KL, JSD, PE, PE_sym, Wasserstein
window_size: The size of the window used when splitting the input data into reference and test arrays
periods: The number of previous data-points used when constructing the autoregressive matrix
lag_size: The distance between the reference and test windows
step: Every `step`-th data-point is used when creating the input dataset
n_epochs: The number of epochs used when training the NN
lr: The learning rate for each optimizer step
lam: The regularization (weight-decay) rate
optimizer: One of the Adam, SGD, RMSprop or ASGD optimizers
debug: Debug level; defaults to zero
"""
self.scaler = SmaScalerCache(window_size + lag_size) if scaler == "default" else scaler
self.metric = metric
self.window_size = window_size
self.periods = periods
self.lag_size = lag_size
self.step = step
self.n_epochs = n_epochs
self.lr = lr
self.lam = lam
self.debug = debug
self._time_shift = lag_size + window_size
self.avg_window = lag_size + window_size
self.peak_widths = [0.25 * (lag_size + window_size)]
self.optimizers = defaultdict(lambda: torch.optim.Adam)
self.optimizers["Adam"] = torch.optim.Adam
self.optimizers["SGD"] = torch.optim.SGD
self.optimizers["RMSprop"] = torch.optim.RMSprop
self.optimizers["ASGD"] = torch.optim.ASGD
self.optimizer = self.optimizers[optimizer]
self.metric_func = {"KL_sym": KL_sym,
"KL": KL,
"JSD": JSD,
"PE": PE,
"PE_sym": PE_sym,
"W": Wasserstein
}
@abstractmethod
def init_net(self, n_inputs: int) -> None:
"""
Initialize neural network based on `self.base_net` class
Parameters
----------
n_inputs: Number of inputs of neural network
-------
"""
pass
def predict(self, X: Union[np.ndarray, torch.Tensor]) -> Tuple[Any, Any]:
"""
Determines a CPD score for every data-point.
Parameters
----------
X: The input data
Returns
-------
`avg_unified_score`: An averaged, unified and shifted CPD score for every data-point in X
`peaks`: Locations of the detected change-points along the data
"""
X_auto = autoregression_matrix(X, periods=self.periods, fill_value=0)
self.init_net(X_auto.shape[1])
T, reference, test = self.reference_test(X_auto)
scores = []
for i in range(len(reference)):
X_, y_ = self.preprocess(reference[i], test[i])
score = self.reference_test_predict(X_, y_)
scores.append(score)
T_scores = np.array([T[i] for i in range(len(reference))])
scores = np.array(scores)
# todo optimize memory
T_uni = np.arange(len(X))
T_scores = T_scores - self._time_shift
un_score = unified_score(T_uni, T_scores, scores)
avg_unified_score = SMA(un_score, self.avg_window)
peaks = self.find_peaks_cwt(avg_unified_score, widths=self.peak_widths)
return avg_unified_score, peaks
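# Minimal usage sketch (hypothetical data; hyperparameters as documented in
# __init__ above):
#   cpd = OnlineNNClassifier(periods=10, window_size=1, lag_size=0)
#   scores, peaks = cpd.predict(X)  # X: np.ndarray of shape (T, n_features)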
def reference_test(self, X: Union[torch.Tensor, np.ndarray]) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Creates reference and test datasets based on autoregressive matrix.
Parameters
----------
X: An autoregressive matrix
Returns tuple of numpy arrays: time-steps, reference and test datasets
-------
"""
N = self.lag_size
ws = self.window_size
T = []
reference = []
test = []
for i in range(2 * ws + N - 1, len(X), self.step):
T.append(i)
reference.append(X[i - 2 * ws - N + 1:i - ws - N + 1])
test.append(X[i - ws + 1:i + 1])
return np.array(T), np.array(reference), np.array(test)
def preprocess(self, X_ref: np.ndarray, X_test: np.ndarray) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Creates the X and y datasets for training the NN by stacking the reference and test datasets.
Also applies a scaling transformation to the resulting X dataset.
Labels for reference data-points are 0s.
Labels for test data-points are 1s.
Parameters
----------
X_ref: reference data-points
X_test: test data-points
Returns
-------
Tuple of training data
"""
y_ref = np.zeros(len(X_ref))
y_test = np.ones(len(X_test))
X = np.vstack((X_ref, X_test))
y = np.hstack((y_ref, y_test))
X = self.scaler.fit_transform(X)
X = torch.from_numpy(X).float()
y = torch.from_numpy(y).float()
return X, y
def find_peaks_cwt(self, vector, *args, **kwargs):
"""
Find peaks function based on scipy.signal package
Parameters
----------
vector: CPD scores array
args: see docs for https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks_cwt.html
kwargs
Returns
-------
Array with location of peaks
"""
peaks = find_peaks_cwt(vector, *args, **kwargs)
return peaks
@abstractmethod
def reference_test_predict(self, X: torch.Tensor, y: torch.Tensor):
"""
Runs the training loop of forward, backward and optimize steps.
Parameters
----------
X: train data
y: train labels
Returns
-------
None
"""
pass
class OnlineNNClassifier(ChangePointDetection):
def __init__(self, net: Union[Type[nn.Module], str] = "default", *args, **kwargs):
"""
Parameters
----------
net: Custom torch.nn.Module neural network or "default" one
args: see parent class
kwargs: see parent class
"""
super().__init__(*args, **kwargs)
self.criterion = nn.BCELoss()
self.base_net = MyNN if net == "default" else net
self.net = None
self.opt = None
def init_net(self, n_inputs):
self.net = self.base_net(n_inputs)
self.opt = self.optimizer(
self.net.parameters(),
lr=self.lr,
weight_decay=self.lam
)
def reference_test_predict(self, X, y):
self.net.train(False)
n_last = min(self.window_size, self.step)
ref_preds = self.net(X[y == 0][-n_last:]).detach().numpy()
test_preds = self.net(X[y == 1][-n_last:]).detach().numpy()
self.net.train(True)
for epoch in range(self.n_epochs): # loop over the dataset multiple times
# forward + backward + optimize
outputs = self.net(X)
loss = self.criterion(outputs.squeeze(), y)
# set gradients to zero
self.opt.zero_grad()
loss.backward()
self.opt.step()
score = self.metric_func[self.metric](ref_preds, test_preds)
return score
class OnlineNNRuLSIF(ChangePointDetection):
def __init__(self, alpha, net="default", *args, **kwargs):
"""
Parameters
----------
alpha: The `alpha` parameter in a loss function
net: Custom torch.nn.Module neural network or "default" one
args: see parent class
kwargs: see parent class
"""
super().__init__(*args, **kwargs)
self.alpha = alpha
self.base_net = MyNNRegressor if net == "default" else net
self.net1 = None
self.net2 = None
self.opt1 = None
self.opt2 = None
def init_net(self, n_inputs):
self.net1 = self.base_net(n_inputs)
self.opt1 = self.optimizer(
self.net1.parameters(),
lr=self.lr,
weight_decay=self.lam
)
self.net2 = deepcopy(self.net1)
self.opt2 = deepcopy(self.opt1)
def compute_loss(self, y_pred_batch_ref, y_pred_batch_test):
loss = 0.5 * (1 - self.alpha) * (y_pred_batch_ref ** 2).mean() + \
0.5 * self.alpha * (y_pred_batch_test ** 2).mean() - (y_pred_batch_test).mean()
return loss
def reference_test_predict(self, X, y):
n_last = min(self.window_size, self.step)
self.net1.train(False)
test_preds = self.net1(X[y == 1][-n_last:]).detach().numpy()
self.net2.train(False)
ref_preds = self.net2(X[y == 0][-n_last:]).detach().numpy()
self.net1.train(True)
self.net2.train(True)
for epoch in range(self.n_epochs): # loop over the dataset multiple times
# forward + backward + optimize
y_pred_batch = self.net1(X).squeeze()
y_pred_batch_ref = y_pred_batch[y == 0]
y_pred_batch_test = y_pred_batch[y == 1]
loss1 = self.compute_loss(y_pred_batch_ref, y_pred_batch_test)
# set gradients to zero
self.opt1.zero_grad()
loss1.backward()
self.opt1.step()
# forward + backward + optimize
y_pred_batch = self.net2(X).squeeze()
y_pred_batch_ref = y_pred_batch[y == 1]
y_pred_batch_test = y_pred_batch[y == 0]
loss2 = self.compute_loss(y_pred_batch_ref, y_pred_batch_test)
# set gradients to zero
self.opt2.zero_grad()
loss2.backward()
self.opt2.step()
score = (0.5 * np.mean(test_preds) - 0.5) + (0.5 * np.mean(ref_preds) - 0.5)
return score
| 34.863057
| 128
| 0.567279
| 1,373
| 10,947
| 4.366351
| 0.185725
| 0.015847
| 0.031693
| 0.016347
| 0.258215
| 0.223019
| 0.18382
| 0.159299
| 0.097748
| 0.097748
| 0
| 0.010418
| 0.324838
| 10,947
| 313
| 129
| 34.974441
| 0.800704
| 0.237234
| 0
| 0.121951
| 0
| 0
| 0.011289
| 0
| 0
| 0
| 0
| 0.003195
| 0
| 1
| 0.085366
| false
| 0.012195
| 0.079268
| 0
| 0.22561
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4794b8cc274b020b95e48eb002f538acf5d6b189
| 4,484
|
py
|
Python
|
data/create_train_test_split.py
|
chmo2019/CUB-200-with-TFOD-API
|
8f46f2f91085f3e35829f8c7ce0289771ebb0294
|
[
"MIT"
] | null | null | null |
data/create_train_test_split.py
|
chmo2019/CUB-200-with-TFOD-API
|
8f46f2f91085f3e35829f8c7ce0289771ebb0294
|
[
"MIT"
] | null | null | null |
data/create_train_test_split.py
|
chmo2019/CUB-200-with-TFOD-API
|
8f46f2f91085f3e35829f8c7ce0289771ebb0294
|
[
"MIT"
] | null | null | null |
# import necessary libraries
import csv
from PIL import Image
import argparse
# create argument parser with PATH argument
ap = argparse.ArgumentParser()
ap.add_argument('-p', '--path', required=True,
help='''PATH to the CUB_200_2011 folder, i.e. the folder with the CUB 200 annotation files
(make sure to include the full path name so that other scripts can find the data file path(s))''')
args = ap.parse_args()
def create_train_test_split(PATH):
# open CUB 200 .txt files
images = open(PATH + "/images.txt", "r")
image_class_labels = open(PATH + "/image_class_labels.txt", "r")
bounding_boxes = open(PATH + "/bounding_boxes.txt", "r")
split = open(PATH + "/train_test_split.txt", "r")
classes = open(PATH + "/classes.txt", "r")
# create csv readers for each .txt file
tsv = csv.reader(split, delimiter=" ")
tsv_images = csv.reader(images, delimiter=" ")
tsv_class_labels = csv.reader(image_class_labels, delimiter=" ")
tsv_bbox = csv.reader(bounding_boxes, delimiter=" ")
tsv_classes = csv.reader(classes, delimiter=" ")
# create dictionary to store data
train_test = {"0":
{"filename": [],
"id": [],
"width": [],
"height": [],
"class": [],
"x" : [],
"y": [],
"img_w": [],
"img_h": []},
"1":
{"filename": [],
"id": [],
"width": [],
"height": [],
"class": [],
"x" : [],
"y": [],
"img_w": [],
"img_h": []}} # '0' for test '1' for train
# write ids into the dictionary to build the train/test split
for row in tsv:
train_test["{}".format(row[1])]["id"].append(row[0])
split.close()
classes_list = {}
# append class names to dictionary
for row in tsv_classes:
classes_list["{}".format(row[0])] = row[1]
classes.close()
i = 0
j = 0
# add image sizes, labels, and bounding box coordinates to dictionary
for (image, label, bbox) in zip(tsv_images, tsv_class_labels, tsv_bbox):
if train_test["0"]["id"][i] == image[0]:
train_test["0"]["filename"].append(PATH + "/images/" + image[1])
im = Image.open(PATH + "/images/"+ image[1])
train_test["0"]["img_w"].append(im.size[0])
train_test["0"]["img_h"].append(im.size[1])
train_test["0"]["class"].append(classes_list["{}".format(label[1])])
train_test["0"]["x"].append(bbox[1])
train_test["0"]["y"].append(bbox[2])
train_test["0"]["width"].append(bbox[3])
train_test["0"]["height"].append(bbox[4])
i += 1
else:
train_test["1"]["filename"].append(PATH + "/images/" + image[1])
im = Image.open(PATH + "/images/"+ image[1])
train_test["1"]["img_w"].append(im.size[0])
train_test["1"]["img_h"].append(im.size[1])
train_test["1"]["class"].append(classes_list["{}".format(label[1])])
train_test["1"]["x"].append(bbox[1])
train_test["1"]["y"].append(bbox[2])
train_test["1"]["width"].append(bbox[3])
train_test["1"]["height"].append(bbox[4])
j += 1
images.close()
image_class_labels.close()
bounding_boxes.close()
# open csv files for coco-formatted data
f_train = open("./annotations/train.csv", "w")
f_test = open("./annotations/test.csv", "w")
# create coco csv header
f_test.write("{},{},{},{},{},{},{},{}\n".format("filename","width","height","class",
"xmin", "ymin", "xmax", "ymax"))
# write coco-formatted data into test split csv
for k in range(len(train_test["0"]["filename"])):
f_test.write("{},{},{},{},{},{},{},{}\n".format(train_test["0"]["filename"][k],
train_test["0"]["img_w"][k],
train_test["0"]["img_h"][k],
train_test["0"]["class"][k],
train_test["0"]["x"][k],
train_test["0"]["y"][k],
float(train_test["0"]["x"][k]) +
float(train_test["0"]["width"][k]),
float(train_test["0"]["y"][k]) +
float(train_test["0"]["height"][k])))
f_train.write("{},{},{},{},{},{},{},{}\n".format("filename","width","height","class",
"xmin", "ymin", "xmax", "ymax"))
# write coco-formatted data into train split csv
for k in range(len(train_test["1"]["filename"])):
f_train.write("{},{},{},{},{},{},{},{}\n".format(train_test["1"]["filename"][k],
train_test["1"]["img_w"][k],
train_test["1"]["img_h"][k],
train_test["1"]["class"][k],
train_test["1"]["x"][k],
train_test["1"]["y"][k],
float(train_test["1"]["x"][k]) +
float(train_test["1"]["width"][k]),
float(train_test["1"]["y"][k]) +
float(train_test["1"]["height"][k])))
f_test.close()
f_train.close()
if __name__ == "__main__":
# run with command line arguments
create_train_test_split(args.path)
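# Example invocation (hypothetical path):
#   python data/create_train_test_split.py --path /full/path/to/CUB_200_2011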
| 32.258993
| 94
| 0.594558
| 655
| 4,484
| 3.90687
| 0.181679
| 0.158265
| 0.082063
| 0.046893
| 0.422821
| 0.333724
| 0.28136
| 0.28136
| 0.240719
| 0.14068
| 0
| 0.022033
| 0.159902
| 4,484
| 139
| 95
| 32.258993
| 0.657287
| 0.118644
| 0
| 0.188679
| 0
| 0.009434
| 0.20772
| 0.047994
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009434
| false
| 0
| 0.028302
| 0
| 0.037736
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4795e4597ca4cff70deec87c952fb0febb0256f0
| 3,487
|
py
|
Python
|
homework/1-AE/losses.py
|
Penchekrak/DeepGenerativeModels
|
7ee829682e8ed51bc637e2c6def0b9f810f384bc
|
[
"MIT"
] | null | null | null |
homework/1-AE/losses.py
|
Penchekrak/DeepGenerativeModels
|
7ee829682e8ed51bc637e2c6def0b9f810f384bc
|
[
"MIT"
] | null | null | null |
homework/1-AE/losses.py
|
Penchekrak/DeepGenerativeModels
|
7ee829682e8ed51bc637e2c6def0b9f810f384bc
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn
class Criterion:
def __call__(self, model: nn.Module, batch_x: torch.Tensor, batch_y: torch.Tensor, *args, **kwargs):
pass
class Image2ImageMSELoss(Criterion):
def __init__(self):
self.mse_loss = nn.MSELoss()
def __repr__(self):
return repr(self.mse_loss)
def __call__(self, model: nn.Module, batch_x: torch.Tensor, batch_y: torch.Tensor, *args, **kwargs):
outs = model(batch_x)
loss = self.mse_loss(outs, batch_x)
return loss, outs
class Image2ImageBCELoss(Criterion):
def __init__(self, threshold=0.0):
self.bce_loss = nn.BCELoss()
self.threshold = threshold
def __repr__(self):
return str(repr(self.bce_loss)) + f"with {self.threshold} as threshold"
def __call__(self, model: nn.Module, batch_x: torch.Tensor, batch_y: torch.Tensor, *args, **kwargs):
outs = model(batch_x)
target = (batch_x > self.threshold).float()
loss = self.bce_loss(outs, target)
return loss, outs
class Image2ImageMixedLoss(Criterion):
def __init__(self, mse_weight=0.5, bce_weight=0.5, bce_threshold=0.5):
self.bce_loss = nn.BCELoss()
self.bce_weight = bce_weight
self.threshold = bce_threshold
self.mse_loss = nn.MSELoss()
self.mse_weight = mse_weight
def __call__(self, model: nn.Module, batch_x: torch.Tensor, batch_y: torch.Tensor, *args, **kwargs):
outs = model(batch_x)
mse_loss = self.mse_loss(outs, batch_x)
target = (batch_x > self.threshold).float()
bce_loss = self.bce_loss(outs, target)
return mse_loss * self.mse_weight + bce_loss * self.bce_weight, outs
def l1_loss(x):
return torch.mean(torch.sum(torch.abs(x), dim=1))
class Image2ImageMixedLossWithLasso(Criterion):
def __init__(self, mse_weight=0.5, bce_weight=0.5, bce_threshold=0.5, lasso_weight=0.001):
self.lasso_weight = lasso_weight
self.bce_loss = nn.BCELoss()
self.bce_weight = bce_weight
self.threshold = bce_threshold
self.mse_loss = nn.MSELoss()
self.mse_weight = mse_weight
def calculate_sparse_loss(self, model, image):
loss = 0
x = image
for block in model.encoder[:-1]:
x = block.conv(x)
loss += l1_loss(x)
x = block.act(block.norm(x))
x = model.encoder[-1](x)
loss += l1_loss(x)
for block in model.decoder[:-1]:
x = block.conv(x)
loss += l1_loss(x)
x = block.act(block.norm(x))
x = model.decoder[-1](x)
loss += l1_loss(x)
return loss
def __call__(self, model: nn.Module, batch_x: torch.Tensor, batch_y: torch.Tensor, *args, **kwargs):
outs = model(batch_x)
mse_loss = self.mse_loss(outs, batch_x)
target = (batch_x > self.threshold).float()
bce_loss = self.bce_loss(outs, target)
l1 = self.calculate_sparse_loss(model, batch_x)
return mse_loss * self.mse_weight + bce_loss * self.bce_weight + self.lasso_weight * l1, outs
class ClassificationCELoss(Criterion):
def __init__(self):
self.ce_loss = nn.CrossEntropyLoss()
def __repr__(self):
return repr(self.ce_loss)
def __call__(self, model: nn.Module, batch_x: torch.Tensor, batch_y: torch.Tensor, *args, **kwargs):
outs = model(batch_x)
loss = self.ce_loss(outs, batch_y)
return loss, outs
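# Usage sketch (`model`, `batch_x`, `batch_y` are assumed to exist): every
# Criterion is called the same way and returns (loss, model outputs):
#   criterion = Image2ImageMixedLoss(mse_weight=0.7, bce_weight=0.3)
#   loss, outs = criterion(model, batch_x, batch_y)
#   loss.backward()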
| 32.896226
| 104
| 0.634069
| 486
| 3,487
| 4.263374
| 0.12963
| 0.052124
| 0.037162
| 0.046332
| 0.697876
| 0.666988
| 0.618726
| 0.59556
| 0.581081
| 0.581081
| 0
| 0.013323
| 0.24663
| 3,487
| 105
| 105
| 33.209524
| 0.775409
| 0
| 0
| 0.575
| 0
| 0
| 0.009751
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0.0125
| 0.025
| 0.05
| 0.425
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47981ad00b6fbe330d40b7fd3c56d0ca049b684c
| 6,190
|
py
|
Python
|
core/domain/wipeout_service_test.py
|
davehenton/oppia
|
62a9e9ea8458632e39b8ab4cf15b0489ac1acad9
|
[
"Apache-2.0"
] | 1
|
2021-01-22T03:24:52.000Z
|
2021-01-22T03:24:52.000Z
|
core/domain/wipeout_service_test.py
|
davehenton/oppia
|
62a9e9ea8458632e39b8ab4cf15b0489ac1acad9
|
[
"Apache-2.0"
] | null | null | null |
core/domain/wipeout_service_test.py
|
davehenton/oppia
|
62a9e9ea8458632e39b8ab4cf15b0489ac1acad9
|
[
"Apache-2.0"
] | 1
|
2020-06-25T21:43:01.000Z
|
2020-06-25T21:43:01.000Z
|
# Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for wipeout service."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import rights_manager
from core.domain import topic_domain
from core.domain import topic_services
from core.domain import user_services
from core.domain import wipeout_service
from core.platform import models
from core.tests import test_utils
import feconf
(collection_models, exp_models, user_models,) = (
models.Registry.import_models([
models.NAMES.collection, models.NAMES.exploration, models.NAMES.user]))
class WipeoutServiceTests(test_utils.GenericTestBase):
"""Provides testing of the wipeout service."""
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
def setUp(self):
super(WipeoutServiceTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
def test_pre_delete_user_email_subscriptions(self):
email_preferences = user_services.get_email_preferences(self.user_1_id)
self.assertEqual(
email_preferences.can_receive_email_updates,
feconf.DEFAULT_EMAIL_UPDATES_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_editor_role_email,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_feedback_message_email,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_subscription_email,
feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
wipeout_service.pre_delete_user(self.user_1_id)
email_preferences = user_services.get_email_preferences(self.user_1_id)
self.assertFalse(email_preferences.can_receive_email_updates)
self.assertFalse(email_preferences.can_receive_editor_role_email)
self.assertFalse(email_preferences.can_receive_feedback_message_email)
self.assertFalse(email_preferences.can_receive_subscription_email)
def test_pre_delete_user_without_activities(self):
user_models.UserSubscriptionsModel(
id=self.user_1_id,
activity_ids=[],
collection_ids=[]
).put()
user_settings = user_services.get_user_settings(self.user_1_id)
self.assertFalse(user_settings.to_be_deleted)
wipeout_service.pre_delete_user(self.user_1_id)
user_settings = user_services.get_user_settings(self.user_1_id)
self.assertTrue(user_settings.to_be_deleted)
pending_deletion_model = (
user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id))
self.assertEqual(pending_deletion_model.exploration_ids, [])
self.assertEqual(pending_deletion_model.collection_ids, [])
def test_pre_delete_user_with_activities(self):
self.save_new_valid_exploration('exp_id', self.user_1_id)
self.save_new_valid_collection(
'col_id', self.user_1_id, exploration_id='exp_id')
wipeout_service.pre_delete_user(self.user_1_id)
pending_deletion_model = (
user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id))
self.assertEqual(
pending_deletion_model.exploration_ids, ['exp_id'])
self.assertEqual(pending_deletion_model.collection_ids, ['col_id'])
def test_pre_delete_user_with_activities_multiple_owners(self):
user_services.update_user_role(
self.user_1_id, feconf.ROLE_ID_COLLECTION_EDITOR)
user_1_actions = user_services.UserActionsInfo(self.user_1_id)
self.save_new_valid_exploration('exp_id', self.user_1_id)
rights_manager.assign_role_for_exploration(
user_1_actions, 'exp_id', self.user_2_id, rights_manager.ROLE_OWNER)
self.save_new_valid_collection(
'col_id', self.user_1_id, exploration_id='exp_id')
rights_manager.assign_role_for_collection(
user_1_actions, 'col_id', self.user_2_id, rights_manager.ROLE_OWNER)
wipeout_service.pre_delete_user(self.user_1_id)
pending_deletion_model = (
user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id))
self.assertEqual(
pending_deletion_model.exploration_ids, [])
self.assertEqual(pending_deletion_model.collection_ids, [])
def test_pre_delete_user_collection_is_marked_deleted(self):
self.save_new_valid_collection(
'col_id', self.user_1_id)
collection_model = collection_models.CollectionModel.get_by_id('col_id')
self.assertFalse(collection_model.deleted)
wipeout_service.pre_delete_user(self.user_1_id)
collection_model = collection_models.CollectionModel.get_by_id('col_id')
self.assertTrue(collection_model.deleted)
def test_pre_delete_user_exploration_is_marked_deleted(self):
self.save_new_valid_exploration('exp_id', self.user_1_id)
exp_model = exp_models.ExplorationModel.get_by_id('exp_id')
self.assertFalse(exp_model.deleted)
wipeout_service.pre_delete_user(self.user_1_id)
exp_model = exp_models.ExplorationModel.get_by_id('exp_id')
self.assertTrue(exp_model.deleted)
| 42.108844
| 80
| 0.745073
| 817
| 6,190
| 5.22399
| 0.19339
| 0.06373
| 0.054827
| 0.059278
| 0.628866
| 0.585052
| 0.500469
| 0.424321
| 0.387535
| 0.347235
| 0
| 0.009646
| 0.179321
| 6,190
| 146
| 81
| 42.39726
| 0.830512
| 0.115994
| 0
| 0.356436
| 0
| 0
| 0.027171
| 0
| 0
| 0
| 0
| 0
| 0.19802
| 1
| 0.069307
| false
| 0
| 0.108911
| 0
| 0.227723
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4798901a83af76dc1807e8d5ec48f5223c016346
| 1,109
|
py
|
Python
|
drf_to_s3/middleware.py
|
treyhunner/drf-to-s3
|
2384b7e277da0e795ab9e0241e829bcc4ca4dc77
|
[
"MIT"
] | 28
|
2015-01-15T18:31:24.000Z
|
2018-11-08T07:33:42.000Z
|
drf_to_s3/middleware.py
|
treyhunner/drf-to-s3
|
2384b7e277da0e795ab9e0241e829bcc4ca4dc77
|
[
"MIT"
] | 10
|
2020-01-01T07:26:19.000Z
|
2021-06-25T15:26:53.000Z
|
drf_to_s3/middleware.py
|
treyhunner/drf-to-s3
|
2384b7e277da0e795ab9e0241e829bcc4ca4dc77
|
[
"MIT"
] | 7
|
2015-01-29T20:59:29.000Z
|
2017-04-24T16:05:48.000Z
|
class UploadPrefixMiddleware(object):
'''
Sets a cookie with the upload prefix.
To be agnostic about your method of user authentication, this is
handled using middleware. It can't be in a signal, since the signal
handler doesn't have access to the response.
In most applications, the client already has access to a
normalized username, so you probably don't need this at
all.
To use this, add it to your MIDDLEWARE_CLASSES:
MIDDLEWARE_CLASSES = (
...
'drf_to_s3.middleware.UploadPrefixMiddleware',
...
)
'''
def process_response(self, request, response):
from django.conf import settings
from rest_framework.exceptions import PermissionDenied
from .access_control import upload_prefix_for_request
cookie_name = getattr(settings, 'UPLOAD_PREFIX_COOKIE_NAME', 'upload_prefix')
try:
response.set_cookie(cookie_name, upload_prefix_for_request(request))
except PermissionDenied:
response.delete_cookie(cookie_name)
return response
| 32.617647
| 85
| 0.682597
| 134
| 1,109
| 5.485075
| 0.567164
| 0.081633
| 0.040816
| 0.059864
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001218
| 0.259693
| 1,109
| 33
| 86
| 33.606061
| 0.894032
| 0.448151
| 0
| 0
| 0
| 0
| 0.06947
| 0.045704
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.272727
| 0
| 0.545455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
479926439a3bedce30e4792e3973470d8b31f04e
| 14,157
|
py
|
Python
|
deepcomp/util/env_setup.py
|
CN-UPB/DeepCoMP
|
f9f64873184bb53b5687ae62f8ba2b84da423692
|
[
"MIT"
] | 19
|
2021-03-17T12:59:48.000Z
|
2022-03-24T09:04:32.000Z
|
deepcomp/util/env_setup.py
|
CN-UPB/DeepCoMP
|
f9f64873184bb53b5687ae62f8ba2b84da423692
|
[
"MIT"
] | 1
|
2021-03-08T16:27:49.000Z
|
2021-03-08T16:27:49.000Z
|
deepcomp/util/env_setup.py
|
CN-UPB/DeepCoMP
|
f9f64873184bb53b5687ae62f8ba2b84da423692
|
[
"MIT"
] | 6
|
2021-01-25T19:34:18.000Z
|
2022-03-20T05:56:33.000Z
|
"""Utility module for setting up different envs"""
import numpy as np
import structlog
from shapely.geometry import Point
from ray.rllib.agents.ppo import DEFAULT_CONFIG
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from deepcomp.util.constants import SUPPORTED_ENVS, SUPPORTED_AGENTS, SUPPORTED_SHARING, SUPPORTED_UE_ARRIVAL, \
SUPPORTED_UTILITIES
from deepcomp.env.single_ue.variants import RelNormEnv
from deepcomp.env.multi_ue.central import CentralRelNormEnv
from deepcomp.env.multi_ue.multi_agent import MultiAgentMobileEnv
from deepcomp.env.entities.user import User
from deepcomp.env.entities.station import Basestation
from deepcomp.env.entities.map import Map
from deepcomp.env.util.movement import RandomWaypoint
from deepcomp.util.callbacks import CustomMetricCallbacks
log = structlog.get_logger()
def get_env_class(env_type):
"""Return the env class corresponding to the string type (from CLI)"""
assert env_type in SUPPORTED_AGENTS, f"Environment type was {env_type} but has to be one of {SUPPORTED_AGENTS}."
if env_type == 'single':
# return DatarateMobileEnv
# return NormDrMobileEnv
return RelNormEnv
if env_type == 'central':
# return CentralDrEnv
# return CentralNormDrEnv
return CentralRelNormEnv
# return CentralMaxNormEnv
if env_type == 'multi':
return MultiAgentMobileEnv
def get_sharing_for_bs(sharing, bs_idx):
"""Return the sharing model for the given BS"""
# if it's not mixed, it's the same for all BS
if sharing != 'mixed':
assert sharing in SUPPORTED_SHARING
return sharing
# else loop through the available sharing models
sharing_list = ['resource-fair', 'rate-fair', 'proportional-fair']
return sharing_list[bs_idx % len(sharing_list)]
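# Example (follows directly from sharing_list above): with sharing='mixed',
# get_sharing_for_bs('mixed', 4) returns 'rate-fair', since 4 % 3 == 1.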
def create_small_map(sharing_model):
"""
Create small map and 2 BS
:returns: tuple (map, bs_list)
"""
map = Map(width=150, height=100)
bs1 = Basestation('A', Point(50, 50), get_sharing_for_bs(sharing_model, 0))
bs2 = Basestation('B', Point(100, 50), get_sharing_for_bs(sharing_model, 1))
bs_list = [bs1, bs2]
return map, bs_list
def create_dyn_small_map(sharing_model, bs_dist=100, dist_to_border=10):
"""Small env with 2 BS and dynamic distance in between"""
map = Map(width=2 * dist_to_border + bs_dist, height=2 * dist_to_border)
bs1 = Basestation('A', Point(dist_to_border, dist_to_border), sharing_model)
bs2 = Basestation('B', Point(dist_to_border + bs_dist, dist_to_border), sharing_model)
return map, [bs1, bs2]
def create_medium_map(sharing_model):
"""
Deprecated: Use dynamic medium env instead. Kept this to reproduce earlier results.
Same as large env, but with map restricted to areas with coverage.
Thus, the optimal episode reward should be close to num_ues * eps_length * 10 (i.e., all UEs are always connected)
"""
map = Map(width=205, height=85)
bs1 = Basestation('A', Point(45, 35), sharing_model)
bs2 = Basestation('B', Point(160, 35), sharing_model)
bs3 = Basestation('C', Point(100, 85), sharing_model)
bs_list = [bs1, bs2, bs3]
return map, bs_list
def create_dyn_medium_map(sharing_model, bs_dist=100, dist_to_border=10):
"""
Create map with 3 BS at equal distance. Distance can be varied dynamically. Map is sized automatically.
Keep the same layout as old medium env here: A, B on same horizontal axis. C above in the middle
"""
# calculate vertical distance from A, B to C using Pythagoras
y_dist = np.sqrt(bs_dist ** 2 - (bs_dist / 2) ** 2)
# derive map size from BS distance and distance to border
map_width = 2 * dist_to_border + bs_dist
map_height = 2 * dist_to_border + y_dist
map = Map(width=map_width, height=map_height)
# BS A is located at bottom left corner with specified distance to border
bs1 = Basestation('A', Point(dist_to_border, dist_to_border), get_sharing_for_bs(sharing_model, 0))
# other BS positions are derived accordingly
bs2 = Basestation('B', Point(dist_to_border + bs_dist, dist_to_border), get_sharing_for_bs(sharing_model, 1))
bs3 = Basestation('C', Point(dist_to_border + (bs_dist / 2), dist_to_border + y_dist), get_sharing_for_bs(sharing_model, 2))
return map, [bs1, bs2, bs3]
def create_large_map(sharing_model):
"""
Create larger map with 7 BS that are arranged in a typical hexagonal structure.
:returns: Tuple(map, bs_list)
"""
map = Map(width=230, height=260)
bs_list = [
# center
Basestation('A', Point(115, 130), get_sharing_for_bs(sharing_model, 0)),
# top left, counter-clockwise
Basestation('B', Point(30, 80), get_sharing_for_bs(sharing_model, 1)),
Basestation('C', Point(115, 30), get_sharing_for_bs(sharing_model, 2)),
Basestation('D', Point(200, 80), get_sharing_for_bs(sharing_model, 3)),
Basestation('E', Point(200, 180), get_sharing_for_bs(sharing_model, 4)),
Basestation('F', Point(115, 230), get_sharing_for_bs(sharing_model, 5)),
Basestation('G', Point(30, 180), get_sharing_for_bs(sharing_model, 6)),
]
return map, bs_list
def create_dyn_large_map(sharing_model, num_bs, dist_to_border=10):
assert 1 <= num_bs <= 7, "Only support 1-7 BS in large env"
_, bs_list = create_large_map(sharing_model)
# take only selected BS
bs_list = bs_list[:num_bs]
# create map with size according to BS positions
max_x, max_y = None, None
for bs in bs_list:
if max_x is None or bs.pos.x > max_x:
max_x = bs.pos.x
if max_y is None or bs.pos.y > max_y:
max_y = bs.pos.y
map = Map(width=max_x + dist_to_border, height=max_y + dist_to_border)
return map, bs_list
def create_ues(map, num_static_ues, num_slow_ues, num_fast_ues, util_func):
"""Create custom number of slow/fast UEs on the given map. Return UE list"""
ue_list = []
id = 1
for i in range(num_static_ues):
ue_list.append(User(str(id), map, pos_x='random', pos_y='random', movement=RandomWaypoint(map, velocity=0),
util_func=util_func))
id += 1
for i in range(num_slow_ues):
ue_list.append(User(str(id), map, pos_x='random', pos_y='random', movement=RandomWaypoint(map, velocity='slow'),
util_func=util_func))
id += 1
for i in range(num_fast_ues):
ue_list.append(User(str(id), map, pos_x='random', pos_y='random', movement=RandomWaypoint(map, velocity='fast'),
util_func=util_func))
id += 1
return ue_list
def create_custom_env(sharing_model):
"""Hand-created custom env. For demos or specific experiments."""
# map with 4 BS at distance of 100; distance 10 to border of map
map = Map(width=194, height=120)
bs_list = [
# left
Basestation('A', Point(10, 60), get_sharing_for_bs(sharing_model, 0)),
# counter-clockwise
Basestation('B', Point(97, 10), get_sharing_for_bs(sharing_model, 1)),
Basestation('C', Point(184, 60), get_sharing_for_bs(sharing_model, 2)),
Basestation('D', Point(97, 110), get_sharing_for_bs(sharing_model, 3)),
]
return map, bs_list
def get_env(map_size, bs_dist, num_static_ues, num_slow_ues, num_fast_ues, sharing_model, util_func, num_bs=None):
"""Create and return the environment corresponding to the given map_size"""
assert map_size in SUPPORTED_ENVS, f"Environment {map_size} is not one of {SUPPORTED_ENVS}."
assert util_func in SUPPORTED_UTILITIES, \
f"Utility function {util_func} not supported. Supported: {SUPPORTED_UTILITIES}"
# create map and BS list
map, bs_list = None, None
if map_size == 'small':
map, bs_list = create_small_map(sharing_model)
elif map_size == 'medium':
map, bs_list = create_dyn_medium_map(sharing_model, bs_dist=bs_dist)
elif map_size == 'large':
if num_bs is None:
map, bs_list = create_large_map(sharing_model)
else:
map, bs_list = create_dyn_large_map(sharing_model, num_bs)
elif map_size == 'custom':
map, bs_list = create_custom_env(sharing_model)
# create UEs
ue_list = create_ues(map, num_static_ues, num_slow_ues, num_fast_ues, util_func)
return map, ue_list, bs_list
def get_ue_arrival(ue_arrival_name):
"""Get the dict defining UE arrival over time based on the name provided via CLI"""
assert ue_arrival_name in SUPPORTED_UE_ARRIVAL
if ue_arrival_name is None:
return None
if ue_arrival_name == "oneupdown":
return {10: 1, 30: -1}
if ue_arrival_name == "updownupdown":
return {10: 1, 20: -1, 30: 1, 40: -1}
if ue_arrival_name == "3up2down":
return {10: 3, 30: -2}
if ue_arrival_name == "updown":
return {10: 1, 15: 1, 20: 1, 40: 1, 50: -1, 60: -1}
if ue_arrival_name == "largeupdown":
return {
20: 1, 30: -1, 40: 1,
# large increase up to 12 (starting at 1)
45: 1, 50: 1, 55: 2, 60: 3, 65: 2, 70: 1,
# large decrease down to 1
75: -1, 80: -2, 85: -3, 90: -3, 95: -2
}
raise ValueError(f"Unknown UE arrival name: {ue_arrival_name}")
def create_env_config(cli_args):
"""
Create environment and RLlib config based on passed CLI args. Return config.
:param cli_args: Parsed CLI args
:return: The complete config for an RLlib agent, including the env & env_config
"""
env_class = get_env_class(cli_args.agent)
map, ue_list, bs_list = get_env(cli_args.env, cli_args.bs_dist, cli_args.static_ues, cli_args.slow_ues,
cli_args.fast_ues, cli_args.sharing, cli_args.util, num_bs=cli_args.num_bs)
# this is for DrEnv and step utility
# env_config = {
# 'episode_length': eps_length, 'seed': seed,
# 'map': map, 'bs_list': bs_list, 'ue_list': ue_list, 'dr_cutoff': 'auto', 'sub_req_dr': True,
# 'curr_dr_obs': False, 'ues_at_bs_obs': False, 'dist_obs': False, 'next_dist_obs': False
# }
# this is for the custom NormEnv and log utility
env_config = {
'episode_length': cli_args.eps_length, 'seed': cli_args.seed, 'map': map, 'bs_list': bs_list, 'ue_list': ue_list,
'rand_episodes': cli_args.rand_train, 'new_ue_interval': cli_args.new_ue_interval, 'reward': cli_args.reward,
'max_ues': cli_args.max_ues, 'ue_arrival': get_ue_arrival(cli_args.ue_arrival),
# if log_metrics is enabled: log metrics even during training --> visible on TensorBoard
# if disabled: log only during testing --> probably slightly faster training with less memory
'log_metrics': True,
# custom animation rendering
'dashboard': cli_args.dashboard, 'ue_details': cli_args.ue_details,
}
# convert ue_arrival sequence to str keys as required by RLlib: https://github.com/ray-project/ray/issues/16215
if env_config['ue_arrival'] is not None:
env_config['ue_arrival'] = {str(k): v for k, v in env_config['ue_arrival'].items()}
# create and return the config
config = DEFAULT_CONFIG.copy()
# discount factor (default 0.99)
# config['gamma'] = 0.5
# 0 = no workers/actors at all --> low overhead for short debugging; 2+ workers to accelerate long training
config['num_workers'] = cli_args.workers
config['seed'] = cli_args.seed
# write training stats to file under ~/ray_results (default: False)
config['monitor'] = True
config['train_batch_size'] = cli_args.batch_size # default: 4000; default in stable_baselines: 128
# auto-normalize observations by subtracting mean and dividing by std (default: "NoFilter")
# config['observation_filter'] = "MeanStdFilter"
# NN settings: https://docs.ray.io/en/latest/rllib-models.html#built-in-model-parameters
# configure the size of the neural network's hidden layers; default: [256, 256]
# config['model']['fcnet_hiddens'] = [512, 512, 512]
# LSTM settings
config['model']['use_lstm'] = cli_args.lstm
# config['model']['lstm_use_prev_action_reward'] = True
# config['log_level'] = 'INFO' # ray logging default: warning
# reset the env whenever the horizon/eps_length is reached
config['horizon'] = cli_args.eps_length
config['env'] = env_class
config['env_config'] = env_config
# callback for monitoring custom metrics
config['callbacks'] = CustomMetricCallbacks
config['log_level'] = 'ERROR'
# for multi-agent env: https://docs.ray.io/en/latest/rllib-env.html#multi-agent-and-hierarchical
if MultiAgentEnv in env_class.__mro__:
# instantiate env to access obs and action space and num diff UEs
env = env_class(env_config)
# use separate policies (and NNs) for each agent
if cli_args.separate_agent_nns:
num_diff_ues = env.get_num_diff_ues()
# create policies also for all future UEs
if num_diff_ues > env.num_ue:
log.warning("Varying num. UEs. Creating policy for all (future) UEs.",
curr_num_ue=env.num_ue, num_diff_ues=num_diff_ues, new_ue_interval=env.new_ue_interval,
ue_arrival=env.ue_arrival)
ue_ids = [str(i + 1) for i in range(num_diff_ues)]
else:
ue_ids = [ue.id for ue in ue_list]
config['multiagent'] = {
# attention: ue.id needs to be a string! just casting it to str() here doesn't work;
# needs to be consistent with obs keys --> easier, just use string IDs
'policies': {ue_id: (None, env.observation_space, env.action_space, {}) for ue_id in ue_ids},
'policy_mapping_fn': lambda agent_id: agent_id
}
# or: all UEs use the same policy and NN
else:
config['multiagent'] = {
'policies': {'ue': (None, env.observation_space, env.action_space, {})},
'policy_mapping_fn': lambda agent_id: 'ue'
}
return config
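# Hypothetical usage sketch (not part of the original module): the returned
# dict is a standard RLlib trainer config, so training could look roughly like
# this, assuming cli_args comes from this project's CLI parser:
#
#   import ray
#   from ray import tune
#   ray.init()
#   config = create_env_config(cli_args)
#   tune.run(cli_args.agent, config=config, stop={'training_iteration': 50})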
| 44.37931
| 128
| 0.67034
| 2,101
| 14,157
| 4.28415
| 0.20752
| 0.047995
| 0.025331
| 0.02833
| 0.283191
| 0.228752
| 0.200311
| 0.140096
| 0.113765
| 0.104211
| 0
| 0.027626
| 0.22526
| 14,157
| 318
| 129
| 44.518868
| 0.793034
| 0.282263
| 0
| 0.095745
| 0
| 0
| 0.084346
| 0.002109
| 0
| 0
| 0
| 0
| 0.031915
| 1
| 0.069149
| false
| 0
| 0.074468
| 0
| 0.255319
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
479df66e863ce32ea10d321a13e0395597727f6a
| 1,554
|
py
|
Python
|
shopping.py
|
scharlau/shopping_exercise_p
|
f6b59ba38408dcd9f66f79814ad6a7df167e8fa1
|
[
"Unlicense"
] | 1
|
2021-02-23T15:56:22.000Z
|
2021-02-23T15:56:22.000Z
|
shopping.py
|
scharlau/shopping_exercise_p
|
f6b59ba38408dcd9f66f79814ad6a7df167e8fa1
|
[
"Unlicense"
] | null | null | null |
shopping.py
|
scharlau/shopping_exercise_p
|
f6b59ba38408dcd9f66f79814ad6a7df167e8fa1
|
[
"Unlicense"
] | 3
|
2022-02-23T11:17:12.000Z
|
2022-03-01T10:22:40.000Z
|
import sqlite3
from flask import Flask, render_template
app = Flask(__name__)
# database details - to remove some duplication
db_name = 'shopping_data.db'
@app.route('/')
def index():
return render_template('index.html')
@app.route('/customers')
def customers():
conn = sqlite3.connect(db_name)
conn.row_factory = sqlite3.Row
cur = conn.cursor()
# get results from customers
cur.execute("select * from customers")
rows = cur.fetchall()
conn.close()
return render_template('customers.html', rows=rows)
@app.route('/customer_details/<id>')
def customer_details(id):
conn = sqlite3.connect(db_name)
conn.row_factory = sqlite3.Row
cur = conn.cursor()
# get results from customers
cur.execute("select * from customers WHERE id=?", (id))
customer = cur.fetchall()
conn.close()
return render_template('customer_details.html', customer=customer)
@app.route('/orders')
def orders():
conn = sqlite3.connect(db_name)
conn.row_factory = sqlite3.Row
cur = conn.cursor()
# get results from orders
cur.execute("select * from orders")
rows = cur.fetchall()
conn.close()
return render_template('orders.html', rows=rows)
@app.route('/order_details/<id>')
def order_details(id):
conn = sqlite3.connect(db_name)
conn.row_factory = sqlite3.Row
cur = conn.cursor()
# get results from orders
cur.execute("select * from orders WHERE id=?", (id))
order = cur.fetchall()
conn.close()
return render_template('order_details.html', order=order)
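# Possible refactoring (added sketch, not in the original file): every route
# repeats the same connect/query/close steps, so a small helper could remove
# the duplication mentioned in the comment at the top:
#
# def query_db(query, args=()):
#     conn = sqlite3.connect(db_name)
#     conn.row_factory = sqlite3.Row
#     rows = conn.execute(query, args).fetchall()
#     conn.close()
#     return rows
#
# e.g. rows = query_db("select * from customers") in the /customers route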
| 28.254545
| 70
| 0.684041
| 203
| 1,554
| 5.108374
| 0.20197
| 0.081003
| 0.096432
| 0.077146
| 0.642237
| 0.603664
| 0.603664
| 0.526519
| 0.441659
| 0.441659
| 0
| 0.007075
| 0.181467
| 1,554
| 55
| 71
| 28.254545
| 0.808176
| 0.094595
| 0
| 0.418605
| 0
| 0
| 0.18331
| 0.03067
| 0
| 0
| 0
| 0
| 0
| 1
| 0.116279
| false
| 0
| 0.046512
| 0.023256
| 0.27907
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
479e8f21283f73a7284a6977ee7935c8576954ca
| 3,372
|
py
|
Python
|
Month 01/Week 04/Day 03/a.py
|
KevinKnott/Coding-Review
|
6a83cb798cc317d1e4357ac6b2b1fbf76fa034fb
|
[
"MIT"
] | null | null | null |
Month 01/Week 04/Day 03/a.py
|
KevinKnott/Coding-Review
|
6a83cb798cc317d1e4357ac6b2b1fbf76fa034fb
|
[
"MIT"
] | null | null | null |
Month 01/Week 04/Day 03/a.py
|
KevinKnott/Coding-Review
|
6a83cb798cc317d1e4357ac6b2b1fbf76fa034fb
|
[
"MIT"
] | null | null | null |
# Decode Ways: https://leetcode.com/problems/decode-ways/
# A message containing letters from A-Z can be encoded into numbers using the following mapping:
# 'A' -> "1"
# 'B' -> "2"
# ...
# 'Z' -> "26"
# To decode an encoded message, all the digits must be grouped then mapped back into letters using the reverse of the mapping above (there may be multiple ways). For example, "11106" can be mapped into:
# "AAJF" with the grouping (1 1 10 6)
# "KJF" with the grouping (11 10 6)
# Note that the grouping (1 11 06) is invalid because "06" cannot be mapped into 'F' since "6" is different from "06".
# Given a string s containing only digits, return the number of ways to decode it.
# The answer is guaranteed to fit in a 32-bit integer.
class Solution:
def numDecodings(self, s: str) -> int:
self.memo = {}
def dfs(index=0):
if index in self.memo:
return self.memo[index]
if index == len(s):
return 1
if s[index] == '0':
return 0
if index == len(s) - 1:
return 1
# Go one
count = dfs(index+1)
# Go two
if int(s[index:index+2]) <= 26:
count += dfs(index+2)
# cache
self.memo[index] = count
return count
return dfs()
# The above works and memoization cuts out a lot of the repeated subproblems, however it still runs in O(N) time and O(N) space
# Can we improve on this solution? I think so - this is almost like the Fibonacci sequence, where we can keep track of the last
# two answers and create the new one, thus moving up and using only O(1) space
def numDecodingsImproved(self, s):
if s[0] == '0':
return 0
# If the first number isn't 0 then we have a valid case
# where two back is 1 but we skip over it by starting range at 1
oneBack = 1
twoBack = 1
for i in range(1, len(s)):
# Get a temp variable for combining the two results
current = 0
# a single digit only decodes on its own if it isn't '0'
# ('0' is the only digit with no single-character decoding)
if s[i] != '0':
current = oneBack
twoDigit = int(s[i-1: i+1])
# Make sure that our new two digit is between 10-26 (we don't want 35)
if twoDigit >= 10 and twoDigit <= 26:
current += twoBack
# update the twoback and oneback to new values
twoBack = oneBack
oneBack = current
return oneBack
# So the above works because, like the Fibonacci sequence, we only need the previous two values to create the third
# so you keep the values that you need and discard anything outside of that range, like a sliding window
# Score Card
# Did I need hints? N
# Did you finish within 30 min? Y 25
# Was the solution optimal? I was able to create the optimal solution although I kind of skipped over the bottom up and tabulation that helps with
# creating the optimal solution as I have seen it before with the fib sequence
# Were there any bugs? I accidentally pointed the second algo to current (because it is correct) but really I need to return oneBack because
# python can possibly clean up that val after the loop
# 5 5 5 3 = 4.5
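# Quick sanity check (added sketch, not part of the original review): both
# methods should agree on the examples from the problem statement:
#
#   sol = Solution()
#   assert sol.numDecodings("11106") == 2 and sol.numDecodingsImproved("11106") == 2
#   assert sol.numDecodings("06") == 0 and sol.numDecodingsImproved("06") == 0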
| 34.762887
| 202
| 0.613582
| 543
| 3,372
| 3.810313
| 0.394107
| 0.009667
| 0.0116
| 0.010633
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037005
| 0.318802
| 3,372
| 96
| 203
| 35.125
| 0.863735
| 0.638197
| 0
| 0.121212
| 0
| 0
| 0.002534
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
479ebacd8680e1c9ee8fd4892014756c931c3f71
| 7,503
|
py
|
Python
|
word2vec.py
|
online-behaviour/machine-learning
|
2ff0e83905985ec644699ece44c75dd7422a7426
|
[
"Apache-2.0"
] | 2
|
2017-08-18T13:14:38.000Z
|
2021-09-02T07:45:41.000Z
|
word2vec.py
|
online-behaviour/machine-learning
|
2ff0e83905985ec644699ece44c75dd7422a7426
|
[
"Apache-2.0"
] | null | null | null |
word2vec.py
|
online-behaviour/machine-learning
|
2ff0e83905985ec644699ece44c75dd7422a7426
|
[
"Apache-2.0"
] | 3
|
2020-11-18T11:55:45.000Z
|
2021-04-27T10:02:27.000Z
|
#!/usr/bin/python -W all
"""
word2vec.py: process tweets with word2vec vectors
usage: word2vec.py [-x] [-m model-file [-l word-vector-length]] -w word-vector-file -T train-file -t test-file
notes:
- optional model file is a text file from which the word vector file is built
- option x writes tokenized sentences to stdout
20170504 erikt(at)xs4all.nl
"""
# import modules & set up logging
import gensim
import getopt
import logging
import numpy
import naiveBayes
import os.path
import re
import sys
from scipy.sparse import csr_matrix
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import GaussianNB
from sklearn import svm
# constants
COMMAND = "word2vec.py"
TWEETCOLUMN = 4 # column tweet text in test data file dutch-2012.csv
CLASSCOLUMN = 9 # column tweeting behaviour (T3) in file dutch-2012.csv
IDCOLUMN = 0 # column with the id of the current tweet
PARENTCOLUMN = 5 # column of the id of the parent of the tweet if it is a retweet or reply (otherwise: None)
HASHEADING = True
MINCOUNT = 2
USAGE = "usage: "+COMMAND+" [-m model-file] -w word-vector-file -T train-file -t test-file\n"
# input file names
trainFile = ""
testFile = ""
wordvectorFile = ""
modelFile = ""
# length of word vectors
maxVector = 200
# exporting tokenized sentences
exportTokens = False
selectedTokens = {}
# check for command line options
def checkOptions():
global trainFile
global testFile
global wordvectorFile
global modelFile
global maxVector
global exportTokens
try: options = getopt.getopt(sys.argv[1:],"T:t:w:m:l:x",[]) # skip argv[0], the script name
except: sys.exit(USAGE)
for option in options[0]:
if option[0] == "-T": trainFile = option[1]
elif option[0] == "-t": testFile = option[1]
elif option[0] == "-w": wordvectorFile = option[1]
elif option[0] == "-m": modelFile = option[1]
elif option[0] == "-l": maxVector = int(option[1])
elif option[0] == "-x": exportTokens = True
if trainFile == "" or testFile == "" or wordvectorFile == "":
sys.exit(USAGE)
# create data matrix (no sparse version needed)
def makeVectors(tokenizeResults,wordvecModel,selectedTokens):
tweetVectors = numpy.zeros((len(tokenizeResults),maxVector),dtype=numpy.float64)
# process all tweets
for i in range(0,len(tokenizeResults)):
seen = {} # reset per tweet so each token counts once per tweet, not once globally
# process all tokens in this tweet
for token in tokenizeResults[i]:
# if the token is present in the word vector model
if token in wordvecModel and token in selectedTokens and token not in seen:
# add (+) the word vector of this token to the tweet vector
tweetVectors[i] += wordvecModel[token]
seen[token] = True
# the result: a tweet vector which is the sum of its token vectors
return(tweetVectors)
# change the class vector into a binary vector
def makeBinary(vector):
outVector = []
for e in vector:
if e == naiveBayes.OTHER: outVector.append(0)
else: outVector.append(1)
return(outVector)
# read wordvector file from file in format of fasttext:
# first line: nbrOfVectors vectorLength; rest: token vector
def readFasttextModel(wordvectorFile):
global maxVector
try: inFile = open(wordvectorFile,"r")
except: sys.exit(COMMAND+": cannot read file "+wordvectorFile)
wordvectorModel = {}
lineCounter = 0
expectedLines = -1
for line in inFile:
line = line.rstrip()
fields = line.split()
lineCounter += 1
if lineCounter == 1:
if len(fields) != 2: sys.exit(COMMAND+": unexpected first line of file "+wordvectorFile+": "+line)
expectedLines = int(fields[0])
maxVector = int(fields[1])
else:
if len(fields) != 1+maxVector: sys.exit(COMMAND+": unexpected line in file "+wordvectorFile+": "+line)
token = fields.pop(0)
for i in range(0,len(fields)): fields[i] = float(fields[i])
wordvectorModel[token] = fields
inFile.close()
return(wordvectorModel)
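# Illustrative (hypothetical) input accepted by readFasttextModel, matching the
# format described above (header "nbrOfVectors vectorLength", then one token
# plus its vector per line):
#
#   2 3
#   hello 0.1 0.2 0.3
#   world 0.4 0.5 0.6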
# main function starts here
checkOptions()
# get target classes from training data file
targetClasses = naiveBayes.getTargetClasses(trainFile)
if len(targetClasses) == 0: sys.exit(COMMAND+": cannot find target classes\n")
# if required: train the word vector model and save it to file
if modelFile != "":
# read the model data
readDataResults = naiveBayes.readData(modelFile,targetClasses[0])
# tokenize the model data
tokenizeResults = naiveBayes.tokenize(readDataResults["text"])
# build the word vectors (test sg=1,window=10)
wordvecModel = gensim.models.Word2Vec(tokenizeResults, min_count=MINCOUNT, size=maxVector)
# save the word vectors
wordvecModel.save(wordvectorFile)
# load the word vector model from file
patternNameVec = re.compile(r"\.vec$")
if not patternNameVec.search(wordvectorFile):
print >> sys.stderr,"loading gensim vector model from file: %s" % (wordvectorFile)
# read standard file format from gensim
wordvecModel = gensim.models.Word2Vec.load(wordvectorFile)
else:
print >> sys.stderr,"loading fasttext vector model from file: %s" % (wordvectorFile)
# read file format from fasttext
wordvecModel = readFasttextModel(wordvectorFile)
# read training data, tokenize data, make vector matrix
readDataResults = naiveBayes.readData(trainFile,"")
tokenizeResults = naiveBayes.tokenize(readDataResults["text"])
# check if we need to export tokens
if exportTokens:
for i in range(0,len(tokenizeResults)):
sys.stdout.write("__label__"+readDataResults["classes"][i])
for j in range(0,len(tokenizeResults[i])):
sys.stdout.write(" ")
sys.stdout.write(unicode(tokenizeResults[i][j]).encode('utf8'))
sys.stdout.write("\n")
sys.exit()
# select tokens to be used in model, based on token frequency
selectedTokens = naiveBayes.selectFeatures(tokenizeResults,MINCOUNT)
makeVectorsResultsTrain = makeVectors(tokenizeResults,wordvecModel,selectedTokens)
# the matrix can be saved to file and reloaded in next runs but this does not gain much time
# read test data, tokenize data, make vector matrix
readDataResults = naiveBayes.readData(testFile,"")
tokenizeResults = naiveBayes.tokenize(readDataResults["text"])
makeVectorsResultsTest = makeVectors(tokenizeResults,wordvecModel,selectedTokens)
# run binary svm experiments: one for each target class
for targetClass in targetClasses:
# read the training and test file again to get the right class distribution for this target class
readDataResultsTrain = naiveBayes.readData(trainFile,targetClass)
readDataResultsTest = naiveBayes.readData(testFile,targetClass)
# get binary version of train classes
binTrainClasses = makeBinary(readDataResultsTrain["classes"])
# perform svm experiment: http://scikit-learn.org/stable/modules/svm.html (1.4.1.1)
clf = svm.SVC(decision_function_shape='ovo') # definition
clf.fit(makeVectorsResultsTrain,binTrainClasses) # training
outFile = open(testFile+".out."+targetClass,"w") # output file for test results
scores = clf.decision_function(makeVectorsResultsTest) # process all test items
for i in range(0,len(makeVectorsResultsTest)):
guess = "O"
if scores[i] >= 0: guess = targetClass
print >>outFile, "# %d: %s %s %0.3f" % (i,readDataResultsTest["classes"][i],guess,scores[i])
outFile.close()
| 40.33871
| 114
| 0.702786
| 945
| 7,503
| 5.567196
| 0.303704
| 0.015206
| 0.012355
| 0.016157
| 0.136286
| 0.069188
| 0.063486
| 0.037635
| 0.037635
| 0.012925
| 0
| 0.012446
| 0.196855
| 7,503
| 185
| 115
| 40.556757
| 0.860604
| 0.283487
| 0
| 0.071429
| 0
| 0.007937
| 0.072439
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031746
| false
| 0
| 0.103175
| 0
| 0.134921
| 0.02381
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47a1196cb4630db570dad1f74a29081f22292d56
| 291
|
py
|
Python
|
Day 4/Ex3: Treasure Map.py
|
Nishi-16-K/100DaysCodeChallenge-Python-
|
96df953bbc60c2bf8802cf31ed6c593469521482
|
[
"MIT"
] | 1
|
2021-08-29T12:44:23.000Z
|
2021-08-29T12:44:23.000Z
|
Day 4/Ex3: Treasure Map.py
|
Nishi-16-K/100DaysofCodeChallenge-Python
|
96df953bbc60c2bf8802cf31ed6c593469521482
|
[
"MIT"
] | null | null | null |
Day 4/Ex3: Treasure Map.py
|
Nishi-16-K/100DaysofCodeChallenge-Python
|
96df953bbc60c2bf8802cf31ed6c593469521482
|
[
"MIT"
] | null | null | null |
row1 = ["⬜️","⬜️","⬜️"]
row2 = ["⬜️","⬜️","⬜️"]
row3 = ["⬜️","⬜️","⬜️"]
map = [row1, row2, row3]
print(f"{row1}\n{row2}\n{row3}")
position = input("Where do you want to put the treasure? ")
col = int(position[0])
ro = int(position[1])
map[ro-1][col-1] = "X"
print(f"{row1}\n{row2}\n{row3}")
| 26.454545
| 59
| 0.515464
| 54
| 291
| 3.111111
| 0.444444
| 0.142857
| 0.107143
| 0.130952
| 0.238095
| 0.238095
| 0.238095
| 0
| 0
| 0
| 0
| 0.062745
| 0.123711
| 291
| 10
| 60
| 29.1
| 0.52549
| 0
| 0
| 0.2
| 0
| 0
| 0.350515
| 0.151203
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47a5912698e4015d5f8165df118a9d812fd9c116
| 488
|
py
|
Python
|
wrong_settings.py
|
kutayg/gameQuest
|
1730ea8810a54afff50fbb5eab8fb5290eed6222
|
[
"MIT"
] | null | null | null |
wrong_settings.py
|
kutayg/gameQuest
|
1730ea8810a54afff50fbb5eab8fb5290eed6222
|
[
"MIT"
] | null | null | null |
wrong_settings.py
|
kutayg/gameQuest
|
1730ea8810a54afff50fbb5eab8fb5290eed6222
|
[
"MIT"
] | null | null | null |
# © 2019 KidsCanCode LLC / All rights reserved.
# Game options/settings
TITLE = "Jumpy!"
WIDTH = 480
HEIGHT = 600
FPS = 60
# Environment options
GRAVITY = 9.8
# Player properties
PLAYER_ACC = 0.5
PLAYER_FRICTION = -0.01
PLAYER_JUMPPOWER = 10
# Define colors
# I changed the screen color to aqua, the platform color to orange, and the player color to purple
WHITE = (255, 255, 255)
AQUA = (0, 255, 255)
RED = (255, 0, 0)
ORANGE = (255, 101, 0)
BLUE = (0, 0, 255)
PURPLE = (128, 0, 128)
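# Hypothetical usage sketch (not part of this settings file): in the usual
# KidsCanCode player update, these constants would combine roughly as:
#
#   self.acc = vec(0, GRAVITY)
#   if keys[pg.K_RIGHT]:
#       self.acc.x = PLAYER_ACC
#   self.acc.x += self.vel.x * PLAYER_FRICTION
#   self.vel += self.acc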
| 20.333333
| 98
| 0.684426
| 80
| 488
| 4.15
| 0.625
| 0.063253
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15601
| 0.19877
| 488
| 24
| 99
| 20.333333
| 0.690537
| 0.442623
| 0
| 0
| 0
| 0
| 0.022556
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47ab8c88580963ea081b14afd01ad6eaae957d96
| 1,091
|
py
|
Python
|
Exercicios/Ex69.py
|
angeloridolfi/Python-CEV
|
fd11b7ea0725f83c84336b99304c50f183514245
|
[
"MIT"
] | null | null | null |
Exercicios/Ex69.py
|
angeloridolfi/Python-CEV
|
fd11b7ea0725f83c84336b99304c50f183514245
|
[
"MIT"
] | null | null | null |
Exercicios/Ex69.py
|
angeloridolfi/Python-CEV
|
fd11b7ea0725f83c84336b99304c50f183514245
|
[
"MIT"
] | null | null | null |
# Ex69: register people in a loop, then report how many are over 18 years old,
# how many men were registered, and how many women are under 20 years old
contmaior = 0  # people older than 18
contahomi = 0  # men registered
contamuie = 0  # women under 20
while True:
print('CADASTRE UMA PESSOA')
print('=-' * 19)
idade = int(input('INFORME SUA IDADE: '))
if idade > 18:
contmaior += 1
sexo = str(input('INFORME SEU SEXO <<M/F>>: ')).upper().strip()[0]
if sexo not in 'MF':
while True:
sexo = str(input('OPÇÃO INVÁLIDA! INFORME SEU SEXO <<M/F>>: ')).upper().strip()[0]
if sexo in 'MF':
break
if sexo == 'M':
contahomi += 1
if sexo == 'F' and idade < 20:
contamuie += 1
continuacao = str(input('Quer continuar[S/N]: ')).upper().strip()[0]
print('=-' * 20)
if continuacao not in 'SN':
while True:
continuacao = str(input('OPÇÃO INVÁLIDA! Quer continuar[S/N]: ')).upper().strip()[0]
if continuacao in 'SN':
break
if continuacao == 'N':
break
print('=-' * 20)
print(f' -> {contmaior} pessoas são maiores de 18 anos;')
print(f' -> {contahomi} homens foram cadastrados;')
print(f' -> {contamuie} mulheres são menores de 20 anos.')
| 33.060606
| 96
| 0.542621
| 140
| 1,091
| 4.228571
| 0.357143
| 0.054054
| 0.074324
| 0.065878
| 0.199324
| 0.199324
| 0.199324
| 0.111486
| 0.111486
| 0.111486
| 0
| 0.030928
| 0.288726
| 1,091
| 32
| 97
| 34.09375
| 0.731959
| 0
| 0
| 0.25
| 0
| 0
| 0.290559
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.21875
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47af52c1759d6ca7b81f36810e1191b6fa34e7eb
| 11,920
|
py
|
Python
|
non_local.py
|
yuxia201121/ADCTself-attention
|
77d32034854f64a7aa24d45ae2c4e18f7616cf48
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
non_local.py
|
yuxia201121/ADCTself-attention
|
77d32034854f64a7aa24d45ae2c4e18f7616cf48
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
non_local.py
|
yuxia201121/ADCTself-attention
|
77d32034854f64a7aa24d45ae2c4e18f7616cf48
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import numpy as np
import ops
def conv1x1(input_, output_dim,
init=tf.contrib.layers.xavier_initializer(), name='conv1x1'):
k_h = 1
k_w = 1
d_h = 1
d_w = 1
with tf.variable_scope(name):
w = tf.get_variable(
'w', [k_h, k_w, input_.get_shape()[-1], output_dim],
initializer=init)
conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
return conv
def sn_conv1x1(input_, output_dim, update_collection,
init=tf.contrib.layers.xavier_initializer(), name='sn_conv1x1'):
with tf.variable_scope(name):
k_h = 1
k_w = 1
d_h = 1
d_w = 1
w = tf.get_variable(
'w', [k_h, k_w, input_.get_shape()[-1], output_dim],
initializer=init)
w_bar = ops.spectral_normed_weight(w, num_iters=1, update_collection=update_collection)
conv = tf.nn.conv2d(input_, w_bar, strides=[1, d_h, d_w, 1], padding='SAME')
return conv
def sn_non_local_block_sim(x, update_collection, name, init=tf.contrib.layers.xavier_initializer()):
with tf.variable_scope(name):
batch_size, h, w, num_channels = x.get_shape().as_list()
location_num = h * w
downsampled_num = location_num // 4
print("x=",x)
piexl_data = tf.transpose(x, [0, 3, 1, 2]) # shape=(64, 256, 32, 32)
print("piexl_data=",piexl_data)
piexl_zero_zero = piexl_data[:,:,::2,::2] # shape=(64, 256, 16, 16)
print("piexl_zero_zero=",piexl_zero_zero)
piexl_zero_one = piexl_data[:,:,::2,1::2] # shape=(64, 256, 16, 16)
print("piexl_zero_one=",piexl_zero_one)
piexl_one_zero = piexl_data[:,:,1::2,::2] # shape=(64, 256, 16, 16)
print("piexl_one_zero=",piexl_one_zero)
piexl_one_one = piexl_data[:,:,1::2,1::2] # shape=(64, 256, 16, 16)
print("piexl_one_one=",piexl_one_one)
dct_zero_zero = ((piexl_zero_zero + piexl_one_zero) + (piexl_zero_one + piexl_one_one))*2 # shape=(64, 256, 16, 16)
print("dct_zero_zero=",dct_zero_zero)
dct_zero_one = ((piexl_zero_zero + piexl_one_zero) - (piexl_zero_one + piexl_one_one))*2 # shape=(64, 256, 16, 16)
print("dct_zero_one=",dct_zero_one)
dct_one_zero = ((piexl_zero_zero - piexl_one_zero) + (piexl_zero_one - piexl_one_one))*2 # shape=(64, 256, 16, 16)
print("dct_one_zero=",dct_one_zero)
dct_one_one = ((piexl_zero_zero - piexl_one_zero) - (piexl_zero_one - piexl_one_one))*2 # shape=(64, 256, 16, 16)
print("dct_one_one=",dct_one_one)
#b00********************************************************************
x_zero_zero = tf.transpose(dct_zero_zero, [0, 2, 3, 1]) # shape=(64, 16, 16, 256)
print("x_zero_zero=",x_zero_zero)
# theta path
print("x=",x)
theta_00 = sn_conv1x1(x_zero_zero, num_channels // 8, update_collection, init, 'sn_conv_theta_00') # unique scope name; reusing 'sn_conv_theta' for each band would raise a duplicate-variable error
print('theta_00_sn_conv', theta_00)
#print(x.get_shape())
# theta = tf.reshape( # shape=(64, 256, 32)
# theta, [batch_size, location_num //4, num_channels // 8])
# print("theta_rehape=",theta)
# phi path
# phi_00 = sn_conv1x1(x_zero_zero, num_channels //8 , update_collection, init, 'sn_conv_phi') # shape=(64, 16, 16, 256)
# print("phi_00_sn_conv=",phi_00)
# phi = tf.layers.max_pooling2d(inputs=phi, pool_size=[2, 2], strides=2)
# print("phi_max_pool=",phi)
# phi = tf.reshape(
# phi, [batch_size, downsampled_num, num_channels])
# print("phi_rehape=",phi)
# attn_00 = tf.matmul(theta_00, phi_00, transpose_b=True)
# print("attn_00_matmul=",attn_00)
# attn_00 = tf.nn.softmax(attn_00) # shape=(64, 16, 16, 16)
# print(tf.reduce_sum(attn_00, axis=-1))
# print("attn_00_softmax=",attn_00)
############################################################################
#b01********************************************************************
x_zero_one = tf.transpose(dct_zero_one, [0, 2, 3, 1]) # shape=(64, 16, 16, 256)
print("x_zero_one=",x_zero_one)
# theta path
print("x=",x)
theta_01 = sn_conv1x1(x_zero_one, num_channels // 8, update_collection, init, 'sn_conv_theta_01') # unique scope name (see note above)
print('theta_01_sn_conv', theta_01)
#print(x.get_shape())
# theta = tf.reshape( # shape=(64, 256, 32)
# theta, [batch_size, location_num //4, num_channels // 8])
# print("theta_rehape=",theta)
# phi path
# phi_01 = sn_conv1x1(x_zero_one, num_channels //8 , update_collection, init, 'sn_conv_phi') # shape=(64, 16, 16, 256)
# print("phi_01_sn_conv=",phi_01)
# phi = tf.layers.max_pooling2d(inputs=phi, pool_size=[2, 2], strides=2)
# print("phi_max_pool=",phi)
# phi = tf.reshape(
# phi, [batch_size, downsampled_num, num_channels])
# print("phi_rehape=",phi)
attn_01 = tf.matmul(theta_00, theta_01, transpose_b=True)
print("attn_01_matmul=",attn_01)
attn_01 = tf.nn.softmax(attn_01) # shape=(64, 16, 16, 16)
# print(tf.reduce_sum(attn_01, axis=-1))
# print("attn_01_softmax=",attn_01)
#b10********************************************************************
x_one_zero = tf.transpose(dct_one_zero, [0, 2, 3, 1]) # shape=(64, 16, 16, 256)
print("x_one_zero=",x_one_zero)
# theta path
print("x=",x)
theta_10 = sn_conv1x1(x_one_zero, num_channels // 8, update_collection, init, 'sn_conv_theta_10') # unique scope name
print('theta_10_sn_conv', theta_10)
#print(x.get_shape())
# theta = tf.reshape( # shape=(64, 256, 32)
# theta, [batch_size, location_num //4, num_channels // 8])
# print("theta_rehape=",theta)
# phi path
# phi_10 = sn_conv1x1(x_one_zero, num_channels //8 , update_collection, init, 'sn_conv_phi') # shape=(64, 16, 16, 256)
# print("phi_10_sn_conv=",phi_10)
# phi = tf.layers.max_pooling2d(inputs=phi, pool_size=[2, 2], strides=2)
# print("phi_max_pool=",phi)
# phi = tf.reshape(
# phi, [batch_size, downsampled_num, num_channels])
# print("phi_rehape=",phi)
# attn_10 = tf.matmul(theta_10, phi_10, transpose_b=True)
# print("attn_10_matmul=",attn_10)
# attn_10 = tf.nn.softmax(attn_10) # shape=(64, 16, 16, 16)
# print(tf.reduce_sum(attn_10, axis=-1))
# print("attn_10_softmax=",attn_10)
#b11********************************************************************
x_one_one = tf.transpose(dct_one_one, [0, 2, 3, 1]) # shape=(64, 16, 16, 256)
print("x_one_one=",x_one_one)
# theta path
print("x=",x)
theta_11 = sn_conv1x1(x_one_one, num_channels // 8, update_collection, init, 'sn_conv_theta_11') # unique scope name
print('theta_11_sn_conv', theta_11)
#print(x.get_shape())
# theta = tf.reshape( # shape=(64, 256, 32)
# theta, [batch_size, location_num //4, num_channels // 8])
# print("theta_rehape=",theta)
# phi path
# phi_11 = sn_conv1x1(x_one_one, num_channels //8 , update_collection, init, 'sn_conv_phi') # shape=(64, 16, 16, 256)
# print("phi_11_sn_conv=",phi_11)
# phi = tf.layers.max_pooling2d(inputs=phi, pool_size=[2, 2], strides=2)
# print("phi_max_pool=",phi)
# phi = tf.reshape(
# phi, [batch_size, downsampled_num, num_channels])
# print("phi_rehape=",phi)
attn_11 = tf.matmul(theta_10, theta_11, transpose_b=True)
print("attn_11_matmul=",attn_11)
attn_11 = tf.nn.softmax(attn_11) # shape=(64, 16, 16, 16)
# print(tf.reduce_sum(attn_11, axis=-1))
# print("attn_11_softmax=",attn_11)
##################################
# attn1=tf.matmul(attn_00, attn_01, transpose_b=True)
# attn2=tf.matmul(attn_10, attn_11, transpose_b=True)
# attn_dct=tf.matmul(attn1, attn2, transpose_b=True)
attn_dct=attn_01+attn_11
# attn_dct = tf.nn.softmax(attn_dct)
print("attn_dct=",attn_dct)
##################################
# pixel attention
# theta path
print("x=",x)
theta = sn_conv1x1(x, num_channels //8 , update_collection, init, 'sn_conv_theta')
print('theta_sn_conv', theta)
#print(x.get_shape())
# theta = tf.reshape( # shape=(64, 256, 32)
# theta, [batch_size, location_num //4, num_channels // 8])
# print("theta_rehape=",theta)
# phi path
phi = sn_conv1x1(x, num_channels //8 , update_collection, init, 'sn_conv_phi') # shape=(64, 16, 16, 256)
print("phi_sn_conv=",phi)
# phi = tf.layers.max_pooling2d(inputs=phi, pool_size=[2, 2], strides=2)
# print("phi_max_pool=",phi)
# phi = tf.reshape(
# phi, [batch_size, downsampled_num, num_channels])
# print("phi_rehape=",phi)
attn_pixel = tf.matmul(theta, phi, transpose_b=True)
print("attn_pixel_matmul=",attn_pixel)
attn_pixel = tf.nn.softmax(attn_pixel) # shape=(64, 32, 32, 32)
print(tf.reduce_sum(attn_pixel, axis=-1))
print("attn_pixel=",attn_pixel)
attn_pixel = tf.layers.max_pooling2d(inputs=attn_pixel, pool_size=[2, 2], strides=2)
print("attn_pixel_max_pool=",attn_pixel)
##################################
attn = tf.matmul(attn_dct, attn_pixel) # shape=(64, 16, 16, 32)
print("attn_matmul=",attn)
##################################
# g path
channels=attn.get_shape().as_list()[-1]
g = sn_conv1x1(x, channels, update_collection, init, 'sn_conv_g') # shape=(64, 32, 32, 128)
print("g_sn_conv=",g)
g = tf.layers.max_pooling2d(inputs=g, pool_size=[2, 2], strides=2) # shape=(64, 16, 16, 128)
print("g_max_pool=",g)
# g = tf.reshape(
# g, [batch_size, downsampled_num, num_channels // 2])
# print("g_reshape=",g)
attn_g = tf.matmul(attn, g, transpose_b=True)
print("attn_g_matmul=",attn_g)
# attn_g = tf.reshape(attn_g, [batch_size, h//2, w//2, num_channels // 2])
# print("attn_g_reshape=",attn_g)
attn_g = sn_conv1x1(attn_g, num_channels, update_collection, init, 'sn_conv_attn')
print("attn_g_sn_conv1x1",attn_g)
attn_g = ops.deconv2d(attn_g, [batch_size, h, w, num_channels], # num_channels
k_h=2, k_w=2, d_h=2, d_w=2, stddev=0.02,
name='attn_g_deconv2d', init_bias=0.)
print("attn_g_deconv2d=",attn_g)
print("x=",x)
sigma = tf.get_variable(
'sigma_ratio', [], initializer=tf.constant_initializer(0.0))
print("sigma=",sigma)
return x + sigma * attn_g
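# Hypothetical usage sketch (not part of the original file); the shape comments
# above assume NHWC input of (64, 32, 32, 256):
#
#   x = tf.placeholder(tf.float32, [64, 32, 32, 256])
#   y = sn_non_local_block_sim(x, update_collection=None, name='self_attn')
#   # y keeps the input shape: x + sigma * attention output (residual)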
| 40.27027
| 127
| 0.560906
| 1,651
| 11,920
| 3.745003
| 0.101757
| 0.035096
| 0.021834
| 0.026686
| 0.631732
| 0.552644
| 0.500081
| 0.467572
| 0.467572
| 0.456898
| 0
| 0.065696
| 0.256795
| 11,920
| 295
| 128
| 40.40678
| 0.632238
| 0.450168
| 0
| 0.232143
| 0
| 0
| 0.09576
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026786
| false
| 0
| 0.026786
| 0
| 0.080357
| 0.357143
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47b3262789a56b500e24bd5503fa34b4ab5f6bca
| 1,031
|
py
|
Python
|
evaluation/eval_frames.py
|
suleymanaslan/generative-rainbow
|
9f8daac5e06565ef099c0913186f5a1d801ca52c
|
[
"MIT"
] | null | null | null |
evaluation/eval_frames.py
|
suleymanaslan/generative-rainbow
|
9f8daac5e06565ef099c0913186f5a1d801ca52c
|
[
"MIT"
] | null | null | null |
evaluation/eval_frames.py
|
suleymanaslan/generative-rainbow
|
9f8daac5e06565ef099c0913186f5a1d801ca52c
|
[
"MIT"
] | null | null | null |
import numpy as np
import imageio
from evaluation.eval_utils import to_img_padded, format_img, init_evaluation
def save_frames(env, agent, pad, folder):
observation, ep_reward, done = env.reset(), 0, False
count = 0
while not done:
action, generated_next_observation = agent.act(observation, get_generated=True)
next_observation, reward, done, info = env.step(action)
imgs = [to_img_padded(observation, pad),
to_img_padded(generated_next_observation, pad),
to_img_padded(next_observation, pad)]
plot_img = np.concatenate(format_img(imgs), axis=1)
count += 1
imageio.imwrite(f"{folder}/{count:04d}.png", plot_img)
observation = next_observation
def main():
pad = [(5, 5), (5, 5), (0, 0)]
train_env, test_env, agent, agent_folder = init_evaluation(use_backgrounds=False)
save_frames(train_env, agent, pad, f"{agent_folder}/frames/train")
save_frames(test_env, agent, pad, f"{agent_folder}/frames/test")
if __name__ == "__main__":
main()
| 34.366667
| 87
| 0.681862
| 142
| 1,031
| 4.704225
| 0.380282
| 0.112275
| 0.065868
| 0.056886
| 0.161677
| 0.086826
| 0.086826
| 0
| 0
| 0
| 0
| 0.014599
| 0.202716
| 1,031
| 29
| 88
| 35.551724
| 0.798054
| 0
| 0
| 0
| 0
| 0
| 0.074685
| 0.074685
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.136364
| 0
| 0.227273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47b4d5911e43771cc3188d71f2b90fe1b6287fdc
| 22,950
|
py
|
Python
|
acme/HttpServer.py
|
ankraft/ACME-oneM2M-CSE
|
03c23ea19b35dd6e0aec752d9631e2a76778c61c
|
[
"BSD-3-Clause"
] | 10
|
2020-09-25T08:49:19.000Z
|
2022-03-30T01:29:22.000Z
|
acme/HttpServer.py
|
ankraft/ACME-oneM2M-CSE
|
03c23ea19b35dd6e0aec752d9631e2a76778c61c
|
[
"BSD-3-Clause"
] | 14
|
2020-05-22T08:00:32.000Z
|
2020-12-24T23:38:05.000Z
|
acme/HttpServer.py
|
ankraft/ACME-oneM2M-CSE
|
03c23ea19b35dd6e0aec752d9631e2a76778c61c
|
[
"BSD-3-Clause"
] | 5
|
2020-05-22T03:43:20.000Z
|
2021-05-25T06:54:59.000Z
|
#
# HttpServer.py
#
# (c) 2020 by Andreas Kraft
# License: BSD 3-Clause License. See the LICENSE file for further details.
#
# Server to implement the http part of the oneM2M Mcx communication interface.
#
from __future__ import annotations
import logging, sys, traceback, urllib3
from copy import deepcopy
from typing import Any, Callable, Tuple, cast
import flask
from flask import Flask, Request, make_response, request
from urllib3.exceptions import RequestError
from Configuration import Configuration
from Constants import Constants as C
from Types import ReqResp, ResourceTypes as T, Result, ResponseCode as RC, JSON, Conditions
from Types import Operation, CSERequest, RequestHeaders, ContentSerializationType, RequestHandler, Parameters, RequestArguments, FilterUsage, FilterOperation, DesiredIdentifierResultType, ResultContentType, ResponseType
import CSE, Utils
from Logging import Logging as L, LogLevel
from resources.Resource import Resource
from werkzeug.wrappers import Response
from werkzeug.serving import WSGIRequestHandler
from werkzeug.datastructures import MultiDict
from webUI import WebUI
from helpers.BackgroundWorker import *
#
# Types definitions for the http server
#
FlaskHandler = Callable[[str], Response]
""" Type definition for flask handler. """
class HttpServer(object):
def __init__(self) -> None:
# Initialize the http server.
# Configuration defaults are provided automatically.
self.flaskApp = Flask(CSE.cseCsi)
self.rootPath = Configuration.get('http.root')
self.serverAddress = Configuration.get('http.address')
self.listenIF = Configuration.get('http.listenIF')
self.port = Configuration.get('http.port')
self.webuiRoot = Configuration.get('cse.webui.root')
self.webuiDirectory = f'{CSE.rootDirectory}/webui'
self.hfvRVI = Configuration.get('cse.releaseVersion')
self.isStopped = False
self.backgroundActor:BackgroundWorker = None
self.serverID = f'ACME {C.version}' # The server's ID for http response headers
self._responseHeaders = {'Server' : self.serverID} # Additional headers for other requests
L.isInfo and L.log(f'Registering http server root at: {self.rootPath}')
if CSE.security.useTLS:
L.isInfo and L.log('TLS enabled. HTTP server serves via https.')
# Add endpoints
# self.addEndpoint(self.rootPath + '/', handler=self.handleGET, methods=['GET'])
self.addEndpoint(self.rootPath + '/<path:path>', handler=self.handleGET, methods=['GET'])
# self.addEndpoint(self.rootPath + '/', handler=self.handlePOST, methods=['POST'])
self.addEndpoint(self.rootPath + '/<path:path>', handler=self.handlePOST, methods=['POST'])
# self.addEndpoint(self.rootPath + '/', handler=self.handlePUT, methods=['PUT'])
self.addEndpoint(self.rootPath + '/<path:path>', handler=self.handlePUT, methods=['PUT'])
# self.addEndpoint(self.rootPath + '/', handler=self.handleDELETE, methods=['DELETE'])
self.addEndpoint(self.rootPath + '/<path:path>', handler=self.handleDELETE, methods=['DELETE'])
# Register the endpoint for the web UI
# This is done by instancing the otherwise "external" web UI
self.webui = WebUI(self.flaskApp,
defaultRI=CSE.cseRi,
defaultOriginator=CSE.cseOriginator,
root=self.webuiRoot,
webuiDirectory=self.webuiDirectory,
version=C.version)
# Enable the config endpoint
if Configuration.get('http.enableRemoteConfiguration'):
configEndpoint = f'{self.rootPath}/__config__'
L.isInfo and L.log(f'Registering configuration endpoint at: {configEndpoint}')
self.addEndpoint(configEndpoint, handler=self.handleConfig, methods=['GET'], strictSlashes=False)
self.addEndpoint(f'{configEndpoint}/<path:path>', handler=self.handleConfig, methods=['GET', 'PUT'])
# Enable the config endpoint
if Configuration.get('http.enableStructureEndpoint'):
structureEndpoint = f'{self.rootPath}/__structure__'
L.isInfo and L.log(f'Registering structure endpoint at: {structureEndpoint}')
self.addEndpoint(structureEndpoint, handler=self.handleStructure, methods=['GET'], strictSlashes=False)
self.addEndpoint(f'{structureEndpoint}/<path:path>', handler=self.handleStructure, methods=['GET', 'PUT'])
# Enable the reset endpoint
if Configuration.get('http.enableResetEndpoint'):
resetEndPoint = f'{self.rootPath}/__reset__'
L.isInfo and L.log(f'Registering reset endpoint at: {resetEndPoint}')
self.addEndpoint(resetEndPoint, handler=self.handleReset, methods=['GET'], strictSlashes=False)
# Add mapping / macro endpoints
self.mappings = {}
if (mappings := Configuration.get('server.http.mappings')) is not None:
# mappings is a list of tuples
for (k, v) in mappings:
L.isInfo and L.log(f'Registering mapping: {self.rootPath}{k} -> {self.rootPath}{v}')
self.addEndpoint(self.rootPath + k, handler=self.requestRedirect, methods=['GET', 'POST', 'PUT', 'DELETE'])
self.mappings = dict(mappings)
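# Illustrative (hypothetical) mapping entry: ('/version', '/cse/version') in
# 'server.http.mappings' would register {rootPath}/version and answer requests
# to it with a 307 redirect to the mapped target (see requestRedirect() below)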
# Disable most logs from requests and urllib3 library
logging.getLogger("requests").setLevel(LogLevel.WARNING)
logging.getLogger("urllib3").setLevel(LogLevel.WARNING)
if not CSE.security.verifyCertificate: # only when we also verify certificates
urllib3.disable_warnings()
L.isInfo and L.log('HTTP Server initialized')
def run(self) -> None:
""" Run the http server in a separate thread.
"""
self.httpActor = BackgroundWorkerPool.newActor(self._run, name='HTTPServer')
self.httpActor.start()
def shutdown(self) -> bool:
""" Shutting down the http server.
"""
L.isInfo and L.log('HttpServer shut down')
self.isStopped = True
return True
def _run(self) -> None:
WSGIRequestHandler.protocol_version = "HTTP/1.1"
# Run the http server. This runs forever.
# The server can run single-threadedly since some of the underlying
# components (e.g. TinyDB) may run into problems otherwise.
if self.flaskApp is not None:
# Disable the flask banner messages
cli = sys.modules['flask.cli']
cli.show_server_banner = lambda *x: None # type: ignore
# Start the server
try:
self.flaskApp.run(host=self.listenIF,
port=self.port,
threaded=Configuration.get('http.multiThread'),
request_handler=ACMERequestHandler,
ssl_context=CSE.security.getSSLContext(),
debug=False)
except Exception as e:
# No logging for headless, nevertheless print the reason what happened
if CSE.isHeadless:
L.console(str(e), isError=True)
L.logErr(str(e))
CSE.shutdown() # exit the CSE. Cleanup happens in the CSE atexit() handler
def addEndpoint(self, endpoint:str=None, endpoint_name:str=None, handler:FlaskHandler=None, methods:list[str]=None, strictSlashes:bool=True) -> None:
self.flaskApp.add_url_rule(endpoint, endpoint_name, handler, methods=methods, strict_slashes=strictSlashes)
def _handleRequest(self, path:str, operation:Operation) -> Response:
""" Get and check all the necessary information from the request and
build the internal structures. Then, depending on the operation,
call the associated request handler.
"""
L.isDebug and L.logDebug(f'==> HTTP-{operation.name}: /{path}') # path = request.path w/o the root
L.isDebug and L.logDebug(f'Headers: \n{str(request.headers)}')
dissectResult = self._dissectHttpRequest(request, operation, path)
if self.isStopped:
responseResult = Result(rsc=RC.internalServerError, dbg='http server not running', status=False)
else:
try:
if dissectResult.status:
if operation in [ Operation.CREATE, Operation.UPDATE ]:
if dissectResult.request.ct == ContentSerializationType.CBOR:
L.isDebug and L.logDebug(f'Body: \n{Utils.toHex(cast(bytes, dissectResult.request.data))}\n=>\n{dissectResult.request.dict}')
else:
L.isDebug and L.logDebug(f'Body: \n{str(dissectResult.request.data)}')
responseResult = CSE.request.handleRequest(dissectResult.request)
else:
responseResult = dissectResult
except Exception as e:
responseResult = Utils.exceptionToResult(e)
responseResult.request = dissectResult.request
return self._prepareResponse(responseResult)
def handleGET(self, path:str=None) -> Response:
Utils.renameCurrentThread()
CSE.event.httpRetrieve() # type: ignore
return self._handleRequest(path, Operation.RETRIEVE)
def handlePOST(self, path:str=None) -> Response:
Utils.renameCurrentThread()
CSE.event.httpCreate() # type: ignore
return self._handleRequest(path, Operation.CREATE)
def handlePUT(self, path:str=None) -> Response:
Utils.renameCurrentThread()
CSE.event.httpUpdate() # type: ignore
return self._handleRequest(path, Operation.UPDATE)
def handleDELETE(self, path:str=None) -> Response:
Utils.renameCurrentThread()
CSE.event.httpDelete() # type: ignore
return self._handleRequest(path, Operation.DELETE)
#########################################################################
# Handle requests to mapped paths
def requestRedirect(self, path:str=None) -> Response:
path = request.path[len(self.rootPath):] if request.path.startswith(self.rootPath) else request.path
if path in self.mappings:
L.isDebug and L.logDebug(f'==> Redirecting to: /{path}')
CSE.event.httpRedirect() # type: ignore
return flask.redirect(self.mappings[path], code=307)
return Response('', status=404)
#########################################################################
#
# Various handlers
#
# Redirect request to / to webui
def redirectRoot(self) -> Response:
""" Redirect a request to the webroot to the web UI.
"""
return flask.redirect(self.webuiRoot, code=302)
def getVersion(self) -> Response:
""" Handle a GET request to return the CSE version.
"""
return Response(C.version, headers=self._responseHeaders)
def handleConfig(self, path:str=None) -> Response:
""" Handle a configuration request. This can either be e GET request to query a
configuration value, or a PUT request to set a new value to a configuration setting.
Note, that only a few of configuration settings are supported.
"""
def _r(r:str) -> Response: # just construct a response. Trying to reduce the clutter here
return Response(r, headers=self._responseHeaders)
if request.method == 'GET':
if path is None or len(path) == 0:
return _r(Configuration.print())
if Configuration.has(path):
return _r(str(Configuration.get(path)))
return _r('')
elif request.method == 'PUT':
data = request.data.decode('utf-8').rstrip()
try:
L.isDebug and L.logDebug(f'New remote configuration: {path} = {data}')
if path == 'cse.checkExpirationsInterval':
if (d := int(data)) < 1:
return _r('nak')
Configuration.set(path, d)
CSE.registration.stopExpirationMonitor()
CSE.registration.startExpirationMonitor()
return _r('ack')
elif path in [ 'cse.req.minet', 'cse.req.maxnet' ]:
if (d := int(data)) < 1:
return _r('nak')
Configuration.set(path, d)
return _r('ack')
except:
return _r('nak')
return _r('nak')
return _r('unsupported')
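# Illustrative (hypothetical) interaction with the configuration endpoint above:
#   GET {rootPath}/__config__/cse.checkExpirationsInterval  -> current value
#   PUT {rootPath}/__config__/cse.checkExpirationsInterval  with body '10' -> 'ack'
# Unsupported settings or invalid values are answered with 'nak'.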
def handleStructure(self, path:str='puml') -> Response:
""" Handle a structure request. Return a description of the CSE's current resource
and registrar / registree deployment.
An optional parameter 'lvl=<int>' can limit the generated resource tree's depth.
"""
lvl = request.args.get('lvl', default=0, type=int)
if path == 'puml':
return Response(response=CSE.statistics.getStructurePuml(lvl), headers=self._responseHeaders)
if path == 'text':
return Response(response=CSE.console.getResourceTreeText(lvl), headers=self._responseHeaders)
return Response(response='unsupported', status=422, headers=self._responseHeaders)
def handleReset(self, path:str=None) -> Response:
""" Handle a CSE reset request.
"""
CSE.resetCSE()
return Response(response='', status=200)
#########################################################################
#
# Send HTTP requests
#
def _prepContent(self, content:bytes|str|Any, ct:ContentSerializationType) -> str:
if content is None: return ''
if isinstance(content, str): return content
return content.decode('utf-8') if ct == ContentSerializationType.JSON else Utils.toHex(content)
def sendHttpRequest(self, method:Callable, url:str, originator:str, ty:T=None, data:Any=None, parameters:Parameters=None, ct:ContentSerializationType=None, targetResource:Resource=None) -> Result: # type: ignore[type-arg]
ct = CSE.defaultSerialization if ct is None else ct
# Set basic headers
hty = f';ty={int(ty):d}' if ty is not None else ''
hds = { 'User-Agent' : self.serverID,
'Content-Type' : f'{ct.toHeader()}{hty}',
'Accept' : ct.toHeader(),
C.hfOrigin : originator,
C.hfRI : Utils.uniqueRI(),
C.hfRVI : self.hfvRVI, # TODO this actually depends on the originator
}
# Add additional headers
if parameters is not None:
if C.hfcEC in parameters: # Event Category
hds[C.hfEC] = parameters[C.hfcEC]
# serialize data (only if dictionary, pass on non-dict data)
content = Utils.serializeData(data, ct) if isinstance(data, dict) else data
# ! Don't forget: requests are done through the request library, not flask.
# ! The attribute names are different
try:
L.isDebug and L.logDebug(f'Sending request: {method.__name__.upper()} {url}')
if ct == ContentSerializationType.CBOR:
L.isDebug and L.logDebug(f'HTTP-Request ==>:\nHeaders: {hds}\nBody: \n{self._prepContent(content, ct)}\n=>\n{str(data) if data is not None else ""}\n')
else:
L.isDebug and L.logDebug(f'HTTP-Request ==>:\nHeaders: {hds}\nBody: \n{self._prepContent(content, ct)}\n')
# Actual sending the request
r = method(url, data=content, headers=hds, verify=CSE.security.verifyCertificate)
responseCt = ContentSerializationType.getType(r.headers['Content-Type']) if 'Content-Type' in r.headers else ct
rc = RC(int(r.headers['X-M2M-RSC'])) if 'X-M2M-RSC' in r.headers else RC.internalServerError
L.isDebug and L.logDebug(f'HTTP-Response <== ({str(r.status_code)}):\nHeaders: {str(r.headers)}\nBody: \n{self._prepContent(r.content, responseCt)}\n')
except Exception as e:
L.isDebug and L.logWarn(f'Failed to send request: {str(e)}')
return Result(rsc=RC.targetNotReachable, dbg='target not reachable')
return Result(dict=Utils.deserializeData(r.content, responseCt), rsc=rc)
#########################################################################
def _prepareResponse(self, result:Result) -> Response:
content:str|bytes|JSON = ''
# Build the headers
headers = {}
headers['Server'] = self.serverID # set server field
headers['X-M2M-RSC'] = f'{result.rsc}' # set the response status code
if result.request.headers.requestIdentifier is not None:
headers['X-M2M-RI'] = result.request.headers.requestIdentifier
if result.request.headers.releaseVersionIndicator is not None:
headers['X-M2M-RVI'] = result.request.headers.releaseVersionIndicator
if result.request.headers.vendorInformation is not None:
headers['X-M2M-VSI'] = result.request.headers.vendorInformation
# HTTP status code
statusCode = result.rsc.httpStatusCode()
#
# Determine the accept type and encode the content accordingly
#
# Look whether there is an accept header in the original request
if result.request.headers.accept is not None and len(result.request.headers.accept) > 0:
ct = ContentSerializationType.getType(result.request.headers.accept[0])
# No accept, check originator
elif len(csz := CSE.request.getSerializationFromOriginator(result.request.headers.originator)) > 0:
ct = csz[0]
# Default: configured CSE's default
else:
ct = CSE.defaultSerialization
# Assign and encode content accordingly
headers['Content-Type'] = (cts := ct.toHeader())
content = result.toData(ct)
# Build and return the response
if isinstance(content, bytes):
L.isDebug and L.logDebug(f'<== HTTP-Response (RSC: {result.rsc:d}):\nHeaders: {str(headers)}\nBody: \n{Utils.toHex(content)}\n=>\n{str(result.toData())}')
else:
L.isDebug and L.logDebug(f'<== HTTP-Response (RSC: {result.rsc:d}):\nHeaders: {str(headers)}\nBody: {str(content)}\n')
return Response(response=content, status=statusCode, content_type=cts, headers=headers)
# def _prepareException(self, e:Exception) -> Result:
# tb = traceback.format_exc()
# L.logErr(tb, exc=e)
# tbs = tb.replace('"', '\\"').replace('\n', '\\n')
# return Result(rsc=RC.internalServerError, dbg=f'encountered exception: {tbs}')
#########################################################################
#
# HTTP request helper functions
#
#def _dissectHttpRequest(self, request:Request, operation:Operation, _id:Tuple[str, str, str]) -> Result:
def _dissectHttpRequest(self, request:Request, operation:Operation, to:str) -> Result:
""" Dissect an HTTP request. Combine headers and contents into a single structure. Result is returned in Result.request.
"""
# def extractMultipleArgs(args:MultiDict, argName:str, validate:bool=True) -> Tuple[bool, str]:
# """ Get multi-arguments. Remove the found arguments from the original list, but add the new list again with the argument name.
# """
# lst = []
# for e in args.getlist(argName):
# for es in (t := e.split()): # check for number
# if validate:
# if not CSE.validator.validateRequestArgument(argName, es).status:
# return False, f'error validating "{argName}" argument(s)'
# lst.extend(t)
# args.poplist(argName) # type: ignore [no-untyped-call] # perhaps even multiple times
# if len(lst) > 0:
# args[argName] = lst
# return True, None
def extractMultipleArgs(args:MultiDict, argName:str) -> None:
""" Get multi-arguments. Remove the found arguments from the original list, but add the new list again with the argument name.
"""
lst = [ t for sublist in args.getlist(argName) for t in sublist.split() ]
args.poplist(argName) # type: ignore [no-untyped-call] # perhaps even multiple times
if len(lst) > 0:
args[argName] = lst
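# Example (added note): a query such as '?ty=1+2&ty=3' yields
# args.getlist('ty') == ['1 2', '3'], which is flattened to ['1', '2', '3']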
def requestHeaderField(request:Request, field:str) -> str:
""" Return the value of a specific Request header, or `None` if not found.
"""
if not request.headers.has_key(field):
return None
return request.headers.get(field)
cseRequest = CSERequest()
req:ReqResp = {}
cseRequest.data = request.data # get the data first. This marks the request as consumed, just in case that we have to return early
cseRequest.op = operation
req['op'] = operation.value # Needed later for validation
req['to'] = to
# Copy and parse the original request headers
if (f := requestHeaderField(request, C.hfOrigin)) is not None:
req['fr'] = f
if (f := requestHeaderField(request, C.hfRI)) is not None:
req['rqi'] = f
if (f := requestHeaderField(request, C.hfRET)) is not None:
req['rqet'] = f
if (f := requestHeaderField(request, C.hfRST)) is not None:
req['rset'] = f
if (f := requestHeaderField(request, C.hfOET)) is not None:
req['oet'] = f
if (f := requestHeaderField(request, C.hfRVI)) is not None:
req['rvi'] = f
if (rtu := requestHeaderField(request, C.hfRTU)) is not None: # handle rtu as a list
req['rtu'] = rtu.split('&')
if (f := requestHeaderField(request, C.hfVSI)) is not None:
req['vsi'] = f
# parse and extract content-type header
# cseRequest.headers.contentType = request.content_type
if (ct := request.content_type) is not None:
if not ct.startswith(tuple(C.supportedContentHeaderFormat)):
ct = None
else:
p = ct.partition(';') # always returns a 3-tuple
ct = p[0] # only the content-type without the resource type
t = p[2].partition('=')[2]
if len(t) > 0:
req['ty'] = t # Here we found the type for CREATE requests
cseRequest.headers.contentType = ct
# parse accept header
cseRequest.headers.accept = request.headers.getlist('accept')
cseRequest.headers.accept = [ a for a in cseRequest.headers.accept if a != '*/*' ]
cseRequest.originalArgs = deepcopy(request.args) # Keep the original args
# copy request arguments for greedy attributes checking
args = request.args.copy() # type: ignore [no-untyped-call]
# Do some special handling for those arguments that could occur multiple
# times in the args MultiDict. They are collected together in a single list
# and added again to args.
extractMultipleArgs(args, 'ty') # conversion to int happens later in fillAndValidateCSERequest()
extractMultipleArgs(args, 'cty')
extractMultipleArgs(args, 'lbl')
# Handle rcn differently.
# rcn is not a filter criteria like all the other attributes, but an own request attribute
if (rcn := args.get('rcn')) is not None:
req['rcn'] = rcn
del args['rcn']
# Extract further request arguments from the http request
# add all the args to the filterCriteria
filterCriteria:ReqResp = {}
for k,v in args.items():
filterCriteria[k] = v
req['fc'] = filterCriteria
# De-Serialize the content
if not (contentResult := CSE.request.deserializeContent(cseRequest.data, cseRequest.headers.contentType)).status:
return Result(rsc=contentResult.rsc, request=cseRequest, dbg=contentResult.dbg, status=False)
# Remove 'None' fields *before* adding the pc, because the pc may contain 'None' fields that need to be preserved
req = Utils.removeNoneValuesFromDict(req)
# Add the primitive content and
req['pc'] = contentResult.data[0] # The actual content
cseRequest.ct = contentResult.data[1] # The content serialization type
cseRequest.req = req # finally store the oneM2M request object in the cseRequest
# do validation and copying of attributes of the whole request
try:
# L.logWarn(str(cseRequest))
if not (res := CSE.request.fillAndValidateCSERequest(cseRequest)).status:
return res
except Exception as e:
return Result(rsc=RC.badRequest, request=cseRequest, dbg=f'invalid arguments/attributes ({str(e)})', status=False)
# Here, if everything went okay so far, we have a request to the CSE
return Result(request=cseRequest, status=True)
##########################################################################
#
# Own request handler.
# Actually only to redirect some logging of the http server.
# This handler does NOT handle requests.
#
class ACMERequestHandler(WSGIRequestHandler):
# Just like WSGIRequestHandler, but without "- -"
def log(self, type, message, *args): # type: ignore
L.isDebug and L.logDebug(message % args)
return
# L.isDebug and L.log(f'{self.address_string()} {message % args}\n')
# Just like WSGIRequestHandler, but without "code"
def log_request(self, code='-', size='-'): # type: ignore
L.isDebug and L.logDebug(f'"{self.requestline}" {size} {code}')
return
def log_message(self, format, *args): # type: ignore
L.isDebug and L.logDebug(format % args)
return
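# Hedged wiring sketch (not from the original file): a WSGIRequestHandler
# subclass like the one above is typically handed to werkzeug's development
# server through the request_handler parameter; 'app' is a placeholder WSGI app.
from werkzeug.serving import run_simple

def runDevServer(app, host='127.0.0.1', port=8080):
    run_simple(host, port, app, request_handler=ACMERequestHandler)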
| 39.43299
| 223
| 0.699695
| 2,931
| 22,950
| 5.451382
| 0.211873
| 0.006259
| 0.010702
| 0.012768
| 0.214107
| 0.1728
| 0.148955
| 0.106209
| 0.087433
| 0.04694
| 0
| 0.002804
| 0.160959
| 22,950
| 581
| 224
| 39.500861
| 0.826963
| 0.276122
| 0
| 0.103125
| 0
| 0.01875
| 0.139558
| 0.047435
| 0
| 0
| 0
| 0.001721
| 0
| 1
| 0.08125
| false
| 0
| 0.059375
| 0.003125
| 0.265625
| 0.003125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47b6367947784f5f8c60ac4a630ae41b0271c546
| 2,855
|
py
|
Python
|
tests.py
|
dave-shawley/sprockets.mixins.statsd
|
98dcce37d275a3ab96ef618b4756d7c4618a550a
|
[
"BSD-3-Clause"
] | 1
|
2016-04-18T14:43:28.000Z
|
2016-04-18T14:43:28.000Z
|
tests.py
|
dave-shawley/sprockets.mixins.statsd
|
98dcce37d275a3ab96ef618b4756d7c4618a550a
|
[
"BSD-3-Clause"
] | 1
|
2015-03-19T20:09:31.000Z
|
2015-03-19T20:56:13.000Z
|
tests.py
|
dave-shawley/sprockets.mixins.statsd
|
98dcce37d275a3ab96ef618b4756d7c4618a550a
|
[
"BSD-3-Clause"
] | 1
|
2021-07-21T16:45:20.000Z
|
2021-07-21T16:45:20.000Z
|
"""
Tests for the sprockets.mixins.statsd package
"""
import mock
import socket
try:
import unittest2 as unittest
except ImportError:
import unittest
from tornado import httputil
from tornado import web
from sprockets.mixins import statsd as statsd
class StatsdRequestHandler(statsd.RequestMetricsMixin,
web.RequestHandler):
pass
class Context(object):
remote_ip = '127.0.0.1'
protocol = 'http'
class Connection(object):
context = Context()
def set_close_callback(self, callback):
pass
class MixinTests(unittest.TestCase):
def setUp(self):
self.application = web.Application()
self.request = httputil.HTTPServerRequest('GET',
uri='http://test/foo',
connection=Connection(),
host='127.0.0.1')
self.handler = StatsdRequestHandler(self.application, self.request)
self.handler._status_code = 200
def test_on_finish_calls_statsd_add_timing(self):
self.handler.statsd_use_hostname = True
self.request._finish_time = self.request._start_time + 1
self.duration = self.request._finish_time - self.request._start_time
with mock.patch('sprockets.clients.statsd.add_timing') as add_timing:
self.handler.on_finish()
add_timing.assert_called_once_with(
'timers.' + socket.gethostname(), 'tests',
'StatsdRequestHandler', 'GET', '200',
value=self.duration * 1000)
def test_on_finish_calls_statsd_incr(self):
self.handler.statsd_use_hostname = True
with mock.patch('sprockets.clients.statsd.incr') as incr:
self.handler.on_finish()
incr.assert_called_once_with(
'counters.' + socket.gethostname(), 'tests',
'StatsdRequestHandler', 'GET', '200')
def test_on_finish_calls_statsd_add_timing_without_hostname(self):
self.handler.statsd_use_hostname = False
self.request._finish_time = self.request._start_time + 1
self.duration = self.request._finish_time - self.request._start_time
with mock.patch('sprockets.clients.statsd.add_timing') as add_timing:
self.handler.on_finish()
add_timing.assert_called_once_with(
'timers', 'tests', 'StatsdRequestHandler', 'GET', '200',
value=self.duration * 1000)
def test_on_finish_calls_statsd_incr_without_hostname(self):
self.handler.statsd_use_hostname = False
with mock.patch('sprockets.clients.statsd.incr') as incr:
self.handler.on_finish()
incr.assert_called_once_with(
'counters', 'tests', 'StatsdRequestHandler', 'GET', '200')
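# A generic, self-contained illustration (stdlib unittest.mock; names are
# examples only) of the patch-and-assert pattern the tests above rely on:
# patch the collaborator at its import path, trigger the code under test,
# then assert on the recorded call.
from unittest import mock
import math

def report():
    return math.sqrt(4)

with mock.patch('math.sqrt', return_value=3) as fake_sqrt:
    assert report() == 3
    fake_sqrt.assert_called_once_with(4)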
| 34.817073
| 77
| 0.633275
| 314
| 2,855
| 5.512739
| 0.254777
| 0.063547
| 0.020797
| 0.034662
| 0.622761
| 0.622761
| 0.586944
| 0.54535
| 0.54535
| 0.441363
| 0
| 0.018243
| 0.270403
| 2,855
| 81
| 78
| 35.246914
| 0.81277
| 0.015762
| 0
| 0.4
| 0
| 0
| 0.114959
| 0.045698
| 0
| 0
| 0
| 0
| 0.066667
| 1
| 0.1
| false
| 0.033333
| 0.133333
| 0
| 0.35
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47bbc3f593c6dfe99cc6291d9534d485f7b0f42d
| 3,462
|
py
|
Python
|
nussl/transformers/transformer_deep_clustering.py
|
KingStorm/nussl
|
78edfdaad16845fc705cefb336a7e6e5923fbcd4
|
[
"MIT"
] | 1
|
2018-10-22T19:30:45.000Z
|
2018-10-22T19:30:45.000Z
|
dataHelper/nussl/transformers/transformer_deep_clustering.py
|
AleXander-Tsui/Audio-Localization-and-Seperation
|
17d40e72b406d62ca5cb695938b50c6412f9524a
|
[
"MIT"
] | null | null | null |
dataHelper/nussl/transformers/transformer_deep_clustering.py
|
AleXander-Tsui/Audio-Localization-and-Seperation
|
17d40e72b406d62ca5cb695938b50c6412f9524a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Deep Clustering modeller class
"""
from .. import torch_imported
if torch_imported:
import torch
import torch.nn as nn
import numpy as np
class TransformerDeepClustering(nn.Module):
"""
Transformer Class for deep clustering
"""
def __init__(self, hidden_size=300, input_size=150, num_layers=2, embedding_size=20):
if not torch_imported:
raise ImportError('Cannot import pytorch! Install pytorch to continue.')
super(TransformerDeepClustering, self).__init__()
self.hidden_size = hidden_size
self.input_size = input_size
self.embedding_size = embedding_size
self.num_layers = num_layers
rnn = nn.LSTM(self.input_size, self.hidden_size, self.num_layers, bidirectional=True,
batch_first=True, dropout=0.5)
linear = nn.Linear(self.hidden_size*2, self.input_size*self.embedding_size)
self.add_module('rnn', rnn)
self.add_module('linear', linear)
def forward(self, input_data):
"""
Forward training
Args:
input_data:
Returns:
"""
sequence_length = input_data.size(1)
num_frequencies = input_data.size(2)
output, hidden = self.rnn(input_data)
output = output.contiguous()
output = output.view(-1, sequence_length, 2*self.hidden_size)
embedding = self.linear(output)
embedding = embedding.view(-1, sequence_length*num_frequencies, self.embedding_size)
embedding = nn.functional.normalize(embedding, p=2, dim=-1)
return embedding
@staticmethod
def affinity_cost(embedding, assignments):
"""
Function defining the affinity cost for deep clustering
Args:
embedding:
assignments:
Returns:
"""
embedding = embedding.view(-1, embedding.size()[-1])
assignments = assignments.view(-1, assignments.size()[-1])
silence_mask = torch.sum(assignments, dim=-1, keepdim=True)
embedding = silence_mask * embedding
embedding_transpose = embedding.transpose(1, 0)
assignments_transpose = assignments.transpose(1, 0)
class_weights = nn.functional.normalize(torch.sum(assignments, dim=-2),
p=1, dim=-1).unsqueeze(0)
class_weights = 1.0 / (torch.sqrt(class_weights) + 1e-7)
weights = torch.mm(assignments, class_weights.transpose(1, 0))
assignments = assignments * weights.repeat(1, assignments.size()[-1])
embedding = embedding * weights.repeat(1, embedding.size()[-1])
loss_est = torch.norm(torch.mm(embedding_transpose, embedding), p=2)
loss_est_true = 2*torch.norm(torch.mm(embedding_transpose, assignments), p=2)
loss_true = torch.norm(torch.mm(assignments_transpose, assignments), p=2)
loss = loss_est - loss_est_true + loss_true
loss = loss / (loss_est + loss_true)
return loss
@staticmethod
def show_model(model):
"""
Prints a message to the console with model info
Args:
model:
Returns:
"""
print(model)
num_parameters = 0
for p in model.parameters():
if p.requires_grad:
num_parameters += np.cumprod(p.size())[-1]
print('Number of parameters: {}'.format(num_parameters))
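# Hedged usage sketch (assumes pytorch is installed; shapes chosen to match
# the defaults above): run one forward pass on a random spectrogram batch.
import torch

model = TransformerDeepClustering(hidden_size=300, input_size=150,
                                  num_layers=2, embedding_size=20)
batch = torch.randn(4, 100, 150)   # (batch, sequence_length, num_frequencies)
embedding = model(batch)           # -> (4, 100*150, 20), rows L2-normalized
print(embedding.shape)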
| 33.941176
| 94
| 0.625361
| 405
| 3,462
| 5.17037
| 0.279012
| 0.043458
| 0.033429
| 0.022923
| 0.077841
| 0.032474
| 0
| 0
| 0
| 0
| 0
| 0.019329
| 0.267764
| 3,462
| 101
| 95
| 34.277228
| 0.806706
| 0.09792
| 0
| 0.035088
| 0
| 0
| 0.028504
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070175
| false
| 0
| 0.122807
| 0
| 0.245614
| 0.035088
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47be1f989acf928be71983840ea1023cdafbcb67
| 1,569
|
py
|
Python
|
Gallery/views.py
|
munganyendesandrine/GalleryApp
|
cb17eca8b814f212c1b78925d957b40380830f9b
|
[
"Unlicense",
"MIT"
] | null | null | null |
Gallery/views.py
|
munganyendesandrine/GalleryApp
|
cb17eca8b814f212c1b78925d957b40380830f9b
|
[
"Unlicense",
"MIT"
] | null | null | null |
Gallery/views.py
|
munganyendesandrine/GalleryApp
|
cb17eca8b814f212c1b78925d957b40380830f9b
|
[
"Unlicense",
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
from .models import Image,Category,Location
def gallery_today(request):
gallery = Image.objects.all()
return render(request, 'all-galleries/today-gallery.html', {"gallery": gallery})
def search_results(request):
if 'category' in request.GET and request.GET["category"]:
search_term = request.GET.get("category")
searched_images = Image.search_by_category(search_term)
message = f"{search_term}"
return render(request, 'all-galleries/search.html',{"message":message,"images": searched_images})
else:
message = "You haven't searched for any term"
return render(request, 'all-galleries/search.html',{"message":message})
def filter_results(request):
if 'location' in request.GET and request.GET["location"]:
filter_term = request.GET.get("location")
filtered_images = Image.filter_by_location(filter_term)
message = f"{filter_term}"
return render(request, 'all-galleries/filter.html',{"message":message,"images": filtered_images})
else:
message = "You haven't filtered for any term"
return render(request, 'all-galleries/filter.html',{"message":message})
# def delete_image(request, pk):
# gallery = get_object_or_404(Cat, pk=pk)
# if request.method == 'POST':
# gallery.delete()
# return redirect('/')
# return render(request, 'all-galleries/today-gallery.html', {"gallery": gallery})
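# Hypothetical urls.py wiring for the views above (route names and module
# layout are assumptions, not taken from the original project).
from django.urls import path
from . import views

urlpatterns = [
    path('', views.gallery_today, name='gallery_today'),
    path('search/', views.search_results, name='search_results'),
    path('filter/', views.filter_results, name='filter_results'),
]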
| 34.108696
| 105
| 0.662843
| 187
| 1,569
| 5.449198
| 0.262032
| 0.070658
| 0.111874
| 0.129539
| 0.463199
| 0.463199
| 0.363101
| 0.363101
| 0.351325
| 0.351325
| 0
| 0.002415
| 0.208413
| 1,569
| 46
| 106
| 34.108696
| 0.818035
| 0.185468
| 0
| 0.083333
| 0
| 0
| 0.250786
| 0.103774
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.458333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47c5b5de088c43f83c5e3e066561ed05afd513fb
| 1,666
|
py
|
Python
|
select_language.py
|
zhangenter/tetris
|
300c668d9732cd037bfc6f47c289bd5ee4a009b2
|
[
"Apache-2.0"
] | 3
|
2019-05-08T14:49:10.000Z
|
2021-01-20T13:22:45.000Z
|
select_language.py
|
zhangenter/tetris
|
300c668d9732cd037bfc6f47c289bd5ee4a009b2
|
[
"Apache-2.0"
] | null | null | null |
select_language.py
|
zhangenter/tetris
|
300c668d9732cd037bfc6f47c289bd5ee4a009b2
|
[
"Apache-2.0"
] | 2
|
2020-01-28T14:37:06.000Z
|
2020-04-03T13:37:14.000Z
|
# -*- coding=utf-8 -*-
import pygame
from bf_form import BFForm
from bf_button import BFButton
from globals import LanguageConfigParser, LanguageLib
class SelectLanguageForm(BFForm):
def __init__(self, screen, after_close):
super(SelectLanguageForm, self).__init__(screen, after_close)
def select_language(self, btn):
lang_conf_parser = LanguageConfigParser()
lang_conf_parser.set_cut_language(btn.tag)
lang_conf_parser.save()
LanguageLib.instance().reload_language()
self.result = 1
if self.after_close: self.after_close(self)
def prepare(self):
lang_conf_parser = LanguageConfigParser()
supports = lang_conf_parser.get_support_names()
num = len(supports)
parent_width, parent_height = self.screen.get_size()
self.desc = LanguageLib.instance().get_text('please select language')
self.width = 400
btn_width = self.width * 0.6
btn_height = 40
btn_top = 20
btn_space = 20
self.height = btn_top + 30 + num * btn_height + (num - 1) * btn_space + 30 + self.footer_height
btn_left = (self.width - btn_width) / 2 + (parent_width-self.width) / 2
btn_y = btn_top + 30 + (parent_height - self.height)/2
for k in supports:
label = lang_conf_parser.get_support_label(k)
btn = BFButton(self.screen, (btn_left, btn_y, btn_width, btn_height), text=label.decode('utf-8'), click=self.select_language)
btn.tag = k
self.btn_group.add_button(btn)
btn_y += btn_height + btn_space
self.add_cancel_btn(parent_width, parent_height)
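# Worked example (not part of the original class) of the height formula above,
# assuming three supported languages and a footer_height of 40 (the real value
# comes from BFForm):
btn_top, btn_height, btn_space, footer_height = 20, 40, 20, 40
num = 3
height = btn_top + 30 + num * btn_height + (num - 1) * btn_space + 30 + footer_height
print(height)  # 50 + 120 + 40 + 30 + 40 = 280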
| 35.446809
| 137
| 0.660264
| 218
| 1,666
| 4.743119
| 0.334862
| 0.046422
| 0.081238
| 0.065764
| 0.046422
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019017
| 0.242497
| 1,666
| 46
| 138
| 36.217391
| 0.800317
| 0.012005
| 0
| 0.057143
| 0
| 0
| 0.016453
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085714
| false
| 0
| 0.114286
| 0
| 0.228571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47c64ebe5bf8c8e7b695f55fd8ecece7fcce4585
| 3,084
|
py
|
Python
|
SpellingCorrection/SpellingCorrection.py
|
kxu776/Natural-Langauge-Processing
|
61c863e6cccf6d745b7bfc630a803dcec89214a1
|
[
"MIT"
] | null | null | null |
SpellingCorrection/SpellingCorrection.py
|
kxu776/Natural-Langauge-Processing
|
61c863e6cccf6d745b7bfc630a803dcec89214a1
|
[
"MIT"
] | null | null | null |
SpellingCorrection/SpellingCorrection.py
|
kxu776/Natural-Langauge-Processing
|
61c863e6cccf6d745b7bfc630a803dcec89214a1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 4 14:09:44 2018
@author: VeNoMzZxHD
"""
import tkinter
from tkinter.filedialog import askopenfilename
from collections import Counter
import re
import string
#Returns string of text file
def readFile():
'''
tkinter.Tk().withdraw()
inputfilename = askopenfilename()
'''
inputfilename = 'big.txt'
with open(inputfilename) as inputfile:
return inputfile.read()
#Returns a Counter dictionary mapping words to their number of occurrences within the input file
def countWords(text):
words = re.findall(r'\w+', text.lower())
return Counter(words)
def P(word):
total = sum(countDict.values())
probability = (countDict[word]/total)
#print("The probability of '" + word + "' occurring: " + str(probability))
return probability
#Returns list of possible permutations of removing a single char from input word.
def removeLetter(word):
permList = []
for i in range(len(word)):
permList.append(word[:i]+word[i+1:])
return permList
def insertLetter(word):
permList = []
for i in range(len(word)+1):
for letter in string.ascii_lowercase:
permList.append(word[:i] + letter + word[i:])
return permList
def replaceLetter(word):
permList = []
for i in range(len(word)):
for letter in string.ascii_lowercase:
permList.append(word[:i] + letter + word[i+1:])
return permList
def swapLetters(word):
permList = []
for i in range(len(word)):
for x in range(i,len(word)):
modWord = bytearray(word, 'utf8')
tempChar = modWord[i]
modWord[i] = modWord[x]
modWord[x] = tempChar
permList.append(modWord.decode('utf8'))
return list(set(permList))
def oneCharEdits(word):
permutations = []
permutations.extend(removeLetter(word))
permutations.extend(insertLetter(word))
permutations.extend(replaceLetter(word))
permutations.extend(swapLetters(word))
return list(set(permutations))
def twoCharEdits(word):
validWords = []
oneCharPerms = oneCharEdits(word)
twoCharPerms = oneCharPerms.copy()
for permWord in oneCharPerms:
twoCharPerms.extend(oneCharEdits(permWord))
twoCharPerms = list(set(twoCharPerms))
for permWord in twoCharPerms:
if isAWord(permWord):
validWords.append(permWord)
return validWords
def isAWord(word):
return word in countDict
def findCorrection(word):
if isAWord(word):
return word
candidates = twoCharEdits(word)
candidates = {key: P(key) for key in candidates}
try:
return max(candidates, key=candidates.get)
except ValueError:
return ""
countDict = countWords(readFile())
while True:
inword = input("Type quit to exit, or input word: \n")
if inword.lower() == 'quit':
break
correction = findCorrection(inword)
if correction=="":
print("No correction found")
else:
print(correction)
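# Performance sketch (not part of the original): P() above recomputes
# sum(countDict.values()) for every candidate, which is O(vocabulary) per call.
# Hoisting the total makes each probability lookup O(1):
total = sum(countDict.values())

def P_fast(word):
    return countDict[word] / total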
| 28.293578
| 97
| 0.648508
| 354
| 3,084
| 5.644068
| 0.358757
| 0.015015
| 0.03003
| 0.032032
| 0.143143
| 0.143143
| 0.122623
| 0.122623
| 0.092593
| 0.061061
| 0
| 0.007259
| 0.240597
| 3,084
| 109
| 98
| 28.293578
| 0.845858
| 0.133593
| 0
| 0.146341
| 0
| 0
| 0.029145
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.134146
| false
| 0
| 0.060976
| 0.012195
| 0.353659
| 0.02439
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47c7ce2b3e6297aeb01c2a6dd339609f2dbc4c40
| 9,826
|
py
|
Python
|
src/derivation/FactNode.py
|
KDahlgren/orik
|
4e66107cf2dc2cd1a30ba4bfbe15c1ad1c176c0f
|
[
"MIT"
] | 2
|
2018-01-23T22:08:32.000Z
|
2018-03-11T18:32:53.000Z
|
src/derivation/FactNode.py
|
KDahlgren/orik
|
4e66107cf2dc2cd1a30ba4bfbe15c1ad1c176c0f
|
[
"MIT"
] | 4
|
2017-10-24T19:13:40.000Z
|
2018-06-05T22:16:45.000Z
|
src/derivation/FactNode.py
|
KDahlgren/orik
|
4e66107cf2dc2cd1a30ba4bfbe15c1ad1c176c0f
|
[
"MIT"
] | 2
|
2017-10-24T18:55:45.000Z
|
2018-01-26T05:11:38.000Z
|
#!/usr/bin/env python
# **************************************** #
#############
# IMPORTS #
#############
# standard python packages
import ConfigParser, copy, inspect, logging, os, sys
from Node import Node
if not os.path.abspath( __file__ + "/../../../lib/iapyx/src" ) in sys.path :
sys.path.append( os.path.abspath( __file__ + "/../../../lib/iapyx/src" ) )
from utils import tools
# **************************************** #
class FactNode( Node ) :
#####################
# SPECIAL ATTRIBS #
#####################
treeType = "fact"
#################
# CONSTRUCTOR #
#################
def __init__( self, name="DEFAULT", isNeg=None, record=[], parsedResults={}, cursor=None, argDict = {} ) :
logging.debug( ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>" )
logging.debug( "in FactNode.FactNode : " + name )
logging.debug( " name = " + name )
logging.debug( " isNeg = " + str( isNeg ) )
logging.debug( " record = " + str( record ) )
logging.debug( ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>" )
self.argDict = {}
self.argDict = argDict
self.name = name
self.isNeg = isNeg
self.record = record
self.parsedResults = parsedResults
self.cursor = cursor
self.interesting = False
# -------------------------------- #
# grab settings configs
self.num_filtering_configs = 0
# +++++++++++++++ #
# TREE SIMPLIFY #
# +++++++++++++++ #
try :
self.TREE_SIMPLIFY = tools.getConfig( self.argDict[ "settings" ], \
"DEFAULT", \
"TREE_SIMPLIFY", \
bool )
except ConfigParser.NoOptionError :
self.TREE_SIMPLIFY = False
logging.warning( "WARNING : no 'TREE_SIMPLIFY' defined in 'DEFAULT' section of " + \
self.argDict[ "settings" ] + "...running with TREE_SIMPLIFY==False." )
logging.debug( " FACT NODE : using TREE_SIMPLIFY = " + str( self.TREE_SIMPLIFY ) )
# +++++++++++++ #
# CLOCKS_ONLY #
# +++++++++++++ #
try :
self.CLOCKS_ONLY = tools.getConfig( self.argDict[ "settings" ], \
"DEFAULT", \
"CLOCKS_ONLY", \
bool )
if self.CLOCKS_ONLY :
self.num_filtering_configs += 1
except ConfigParser.NoOptionError :
self.CLOCKS_ONLY = False
logging.warning( "WARNING : no 'CLOCKS_ONLY' defined in 'DEFAULT' section of " + \
self.argDict[ "settings" ] + "...running with CLOCKS_ONLY==False." )
# ++++++++++++++++ #
# POS_FACTS_ONLY #
# ++++++++++++++++ #
try :
self.POS_FACTS_ONLY = tools.getConfig( self.argDict[ "settings" ], \
"DEFAULT", \
"POS_FACTS_ONLY", \
bool )
if self.POS_FACTS_ONLY :
self.num_filtering_configs += 1
except ConfigParser.NoOptionError :
self.POS_FACTS_ONLY = False
logging.warning( "WARNING : no 'POS_FACTS_ONLY' defined in 'DEFAULT' section of " + \
self.argDict[ "settings" ] + "...running with POS_FACTS_ONLY==False." )
# ++++++++++++++++++++ #
# EXCLUDE_SELF_COMMS #
# ++++++++++++++++++++ #
try :
self.EXCLUDE_SELF_COMMS = tools.getConfig( self.argDict[ "settings" ], \
"DEFAULT", \
"EXCLUDE_SELF_COMMS", \
bool )
if self.EXCLUDE_SELF_COMMS :
self.num_filtering_configs += 1
except ConfigParser.NoOptionError :
self.EXCLUDE_SELF_COMMS = False
logging.warning( "WARNING : no 'EXCLUDE_SELF_COMMS' defined in 'DEFAULT' section of " + \
self.argDict[ "settings" ] + "...running with EXCLUDE_SELF_COMMS==False." )
# ++++++++++++++++++++++ #
# EXCLUDE_NODE_CRASHES #
# ++++++++++++++++++++++ #
try :
self.EXCLUDE_NODE_CRASHES = tools.getConfig( self.argDict[ "settings" ], \
"DEFAULT", \
"EXCLUDE_NODE_CRASHES", \
bool )
if self.EXCLUDE_NODE_CRASHES :
self.num_filtering_configs += 1
except ConfigParser.NoOptionError :
self.EXCLUDE_NODE_CRASHES = False
logging.warning( "WARNING : no 'EXCLUDE_NODE_CRASHES' defined in 'DEFAULT' section of " + \
self.argDict[ "settings" ] + "...running with EXCLUDE_NODE_CRASHES==False." )
# -------------------------------- #
# make sure this is actually a
# fact.
if not self.is_fact() :
tools.bp( __name__, inspect.stack()[0][3], " FATAL ERROR : relation '" + self.name + "' does not reference a fact. aborting." )
# -------------------------------- #
# determine whether this fact is interesting
self.am_i_interesting()
# -------------------------------- #
# initialize node object
Node.__init__( self, self.treeType, \
self.name, \
self.isNeg, \
self.record, \
self.parsedResults, \
self.cursor )
#############
# __STR__ #
#############
# the string representation of a FactNode
def __str__( self ) :
if self.isNeg :
negStr = "_NOT_"
return "fact->" + negStr + self.name + "(" + str(self.record) + ")"
else :
return "fact->" + self.name + "(" + str(self.record) + ")"
######################
# AM I INTERESTING #
######################
# check if this fact is interesting
# using heuristics
def am_i_interesting( self ) :
flag = 0
if self.CLOCKS_ONLY and self.name.startswith( "clock" ) :
flag += 1
if self.POS_FACTS_ONLY and not self.isNeg :
flag += 1
if self.EXCLUDE_SELF_COMMS and not self.is_self_comm() :
flag += 1
if self.EXCLUDE_NODE_CRASHES and not self.is_node_crash() :
flag += 1
logging.debug( " AM I INTERESTING : flag = " + str( flag ) )
logging.debug( " AM I INTERESTING : self.num_filtering_configs = " + str( self.num_filtering_configs ) )
logging.debug( " AM I INTERESTING : flag == self.num_filtering_configs = " + str( flag == self.num_filtering_configs ) )
if flag >= self.num_filtering_configs :
self.interesting = True
logging.debug( " AM I INTERESTING : self.name = " + self.name )
logging.debug( " AM I INTERESTING : conclusion : " + str( self.interesting ) )
##################
# IS SELF COMM #
##################
def is_self_comm( self ) :
if not self.name == "clock" :
return False
else :
if self.record[ 0 ] == self.record[ 1 ] :
return True
else :
return False
###################
# IS NODE CRASH #
###################
def is_node_crash( self ) :
if not self.name == "clock" :
return False
else :
if self.record[ 1 ] == "_" :
return True
else :
return False
#############
# IS FACT #
#############
# make sure this is actually a fact in the database.
def is_fact( self ) :
if self.name == "clock" or self.name == "next_clock" or self.name == "crash" :
return True
self.cursor.execute( "SELECT fid \
FROM Fact \
WHERE name=='" + self.name + "'" )
fid_list = self.cursor.fetchall()
fid_list = tools.toAscii_list( fid_list )
logging.debug( " IS FACT : fid_list = " + str( fid_list ) )
# if this is a negative fact, just make sure the relation exists
if self.isNeg :
if len( fid_list ) > 0 :
return True
else :
return False
else :
for fid in fid_list :
self.cursor.execute( "SELECT dataID,data,dataType \
FROM FactData \
WHERE fid=='" + fid + "'" )
data_list = self.cursor.fetchall()
data_list = tools.toAscii_multiList( data_list )
fact = []
for d in data_list :
data = d[1]
dataType = d[2]
if dataType == "int" :
fact.append( data )
else :
data = data.replace( "'", "" )
data = data.replace( '"', '' )
fact.append( data )
logging.debug( "fact = " + str( fact ) )
logging.debug( "self.record = " + str( self.record ) )
logging.debug( "fact == self.record is " + str( fact == self.record ) )
#if fact == self.record : # does not handle wildcards
if self.is_match( fact ) :
return True
return False # otherwise, return false
##############
# IS MATCH #
##############
# check if the input fact 'matches' the record for this fact node.
def is_match( self, fact ) :
for i in range( 0, len( self.record ) ) :
fact_datum = fact[ i ]
record_datum = self.record[ i ]
# remove any quotes
if record_datum.startswith( "'" ) and record_datum.endswith( "'" ) :
record_datum = record_datum.replace( "'", "" )
elif record_datum.startswith( '"' ) and record_datum.endswith( '"' ) :
record_datum = record_datum.replace( '"', "" )
if record_datum == "_" :
pass
else :
if not fact_datum == record_datum :
return False
return True
#########
# EOF #
#########
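# A refactoring sketch (an assumption, not in the original file): the five
# near-identical try/except blocks in the constructor could be folded into one
# helper; tools.getConfig and the argDict layout are as used above.
def readBoolConfig( argDict, option ) :
  try :
    return tools.getConfig( argDict[ "settings" ], "DEFAULT", option, bool )
  except ConfigParser.NoOptionError :
    logging.warning( "WARNING : no '" + option + "' defined in 'DEFAULT' section of " + \
                     argDict[ "settings" ] + "...running with " + option + "==False." )
    return False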
| 32.006515
| 134
| 0.479849
| 935
| 9,826
| 4.873797
| 0.165775
| 0.042133
| 0.035111
| 0.050472
| 0.396313
| 0.317972
| 0.253456
| 0.189379
| 0.189379
| 0.174896
| 0
| 0.002934
| 0.340932
| 9,826
| 306
| 135
| 32.111111
| 0.700741
| 0.119886
| 0
| 0.329545
| 0
| 0
| 0.166808
| 0.034065
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039773
| false
| 0.005682
| 0.017045
| 0
| 0.153409
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47ca2154dad4d9f3a8ceb261cf0f46981b5b61af
| 2,656
|
py
|
Python
|
sector/models.py
|
uktrade/invest
|
15b84c511839b46e81608fca9762d2df3f6df16c
|
[
"MIT"
] | 1
|
2019-01-18T03:50:46.000Z
|
2019-01-18T03:50:46.000Z
|
sector/models.py
|
uktrade/invest
|
15b84c511839b46e81608fca9762d2df3f6df16c
|
[
"MIT"
] | 50
|
2018-01-24T18:04:08.000Z
|
2019-01-03T03:30:30.000Z
|
sector/models.py
|
uktrade/invest
|
15b84c511839b46e81608fca9762d2df3f6df16c
|
[
"MIT"
] | 2
|
2018-02-12T15:20:52.000Z
|
2019-01-18T03:51:52.000Z
|
from django.db import models
from wagtail.admin.edit_handlers import FieldPanel, StreamFieldPanel
from wagtail.core.blocks import StructBlock, CharBlock
from wagtail.core.fields import StreamField
from wagtail.core.models import Page
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtailmarkdown.blocks import MarkdownBlock
from invest.blocks.location import LocationAccordionItemBlock
from invest.blocks.markdown import MarkdownAccordionItemBlock
class SectorLandingPage(Page):
subpage_types = ['sector.sectorPage']
# page fields
heading = models.CharField(max_length=255)
hero_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
content_panels = Page.content_panels + [
FieldPanel('heading'),
ImageChooserPanel('hero_image'),
]
def get_context(self, request):
context = super().get_context(request)
sector_cards = self.get_descendants().type(SectorPage) \
.live() \
.order_by('sectorpage__heading')
context['sector_cards'] = sector_cards
return context
class SectorPage(Page):
# Related sector are implemented as subpages
subpage_types = ['sector.sectorPage']
featured = models.BooleanField(default=False)
description = models.TextField() # appears in card on external pages
# page fields
heading = models.CharField(max_length=255)
hero_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
pullout = StreamField([
('content', StructBlock([
('text', MarkdownBlock()),
('stat', CharBlock()),
('stat_text', CharBlock()),
], max_num=1, min_num=0))
], blank=True)
# accordion
subsections = StreamField([
('markdown', MarkdownAccordionItemBlock()),
('location', LocationAccordionItemBlock()),
])
content_panels = Page.content_panels + [
FieldPanel('description'),
FieldPanel('featured'),
ImageChooserPanel('hero_image'),
FieldPanel('heading'),
StreamFieldPanel('pullout'),
StreamFieldPanel('subsections')
]
def get_context(self, request):
context = super().get_context(request)
context['sector_cards'] = self.get_children().type(SectorPage) \
.live() \
.order_by('sectorpage__heading')
# pages will return as Page type, use .specific to get sectorPage
return context
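# Sketch (an assumption, not in the original app): the near-duplicate
# sector-card query in both get_context methods could live in one helper that
# takes the base queryset (descendants vs. children) as its only variable part.
class SectorCardsMixin:
    @staticmethod
    def sector_cards(base):
        return base.type(SectorPage).live().order_by('sectorpage__heading')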
| 29.511111
| 73
| 0.657756
| 259
| 2,656
| 6.594595
| 0.362934
| 0.032201
| 0.026347
| 0.032787
| 0.31733
| 0.31733
| 0.270492
| 0.221311
| 0.221311
| 0.221311
| 0
| 0.003953
| 0.237952
| 2,656
| 89
| 74
| 29.842697
| 0.839921
| 0.065512
| 0
| 0.477612
| 0
| 0
| 0.099798
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029851
| false
| 0
| 0.134328
| 0
| 0.402985
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47ca87bbbe5378196163b9f006e09077555d7b34
| 985
|
py
|
Python
|
output/models/nist_data/atomic/id/schema_instance/nistschema_sv_iv_atomic_id_enumeration_5_xsd/nistschema_sv_iv_atomic_id_enumeration_5.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/nist_data/atomic/id/schema_instance/nistschema_sv_iv_atomic_id_enumeration_5_xsd/nistschema_sv_iv_atomic_id_enumeration_5.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/nist_data/atomic/id/schema_instance/nistschema_sv_iv_atomic_id_enumeration_5_xsd/nistschema_sv_iv_atomic_id_enumeration_5.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass, field
from enum import Enum
from typing import Optional
__NAMESPACE__ = "NISTSchema-SV-IV-atomic-ID-enumeration-5-NS"
class NistschemaSvIvAtomicIdEnumeration5Type(Enum):
BA = "ba"
CA = "ca"
EFOR = "efor"
HREGISTRY_AS_ON_WORK_U = "hregistry.as.on-work.u"
ITS_INCLUD = "_its-includ"
@dataclass
class Out:
class Meta:
name = "out"
namespace = "NISTSchema-SV-IV-atomic-ID-enumeration-5-NS"
any_element: Optional[object] = field(
default=None,
metadata={
"type": "Wildcard",
"namespace": "##any",
}
)
@dataclass
class NistschemaSvIvAtomicIdEnumeration5:
class Meta:
name = "NISTSchema-SV-IV-atomic-ID-enumeration-5"
namespace = "NISTSchema-SV-IV-atomic-ID-enumeration-5-NS"
value: Optional[NistschemaSvIvAtomicIdEnumeration5Type] = field(
default=None,
metadata={
"required": True,
}
)
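# Hedged usage sketch (assumes the xsdata package is installed): deserialize a
# document into the dataclass above with xsdata's runtime parser.
from xsdata.formats.dataclass.parsers import XmlParser

xml = ('<NISTSchema-SV-IV-atomic-ID-enumeration-5 '
       'xmlns="NISTSchema-SV-IV-atomic-ID-enumeration-5-NS">ba'
       '</NISTSchema-SV-IV-atomic-ID-enumeration-5>')
obj = XmlParser().from_string(xml, NistschemaSvIvAtomicIdEnumeration5)
print(obj.value)  # NistschemaSvIvAtomicIdEnumeration5Type.BA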
| 22.906977
| 68
| 0.636548
| 104
| 985
| 5.923077
| 0.423077
| 0.077922
| 0.090909
| 0.12987
| 0.332792
| 0.274351
| 0.274351
| 0.219156
| 0.219156
| 0
| 0
| 0.009472
| 0.249746
| 985
| 42
| 69
| 23.452381
| 0.824087
| 0
| 0
| 0.30303
| 0
| 0
| 0.250761
| 0.193909
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47cc63cf4b5393de155d0003d5754fcb3e06068b
| 889
|
py
|
Python
|
tests/test_http_requests.py
|
andreygrechin/umbr_api
|
e9efd734a7395d25a1bab87c861b2cfee61e6a05
|
[
"MIT"
] | 4
|
2021-01-11T02:14:59.000Z
|
2022-02-15T09:20:25.000Z
|
tests/test_http_requests.py
|
andreygrechin/umbr_api
|
e9efd734a7395d25a1bab87c861b2cfee61e6a05
|
[
"MIT"
] | null | null | null |
tests/test_http_requests.py
|
andreygrechin/umbr_api
|
e9efd734a7395d25a1bab87c861b2cfee61e6a05
|
[
"MIT"
] | 2
|
2021-12-14T10:20:00.000Z
|
2022-02-20T01:05:18.000Z
|
#!/usr/bin/env python3
# pylint: disable=no-self-use
"""Test unit."""
import unittest
class TestCase(unittest.TestCase):
"""Main class."""
def test_send_post(self):
"""Call incorrect send_post, get None.""" # import requests
from umbr_api._http_requests import send_post
response = send_post(" ")
self.assertEqual(response, None)
def test_send_get(self):
"""Call incorrect send_get, get None.""" # import requests
from umbr_api._http_requests import send_get
response = send_get(" ")
self.assertEqual(response, None)
def test_send_delete(self):
"""Call incorrect send_delete, get None.""" # import requests
from umbr_api._http_requests import send_delete
response = send_delete(" ")
self.assertEqual(response, None)
if __name__ == "__main__":
unittest.main()
| 25.4
| 70
| 0.651294
| 108
| 889
| 5.064815
| 0.305556
| 0.058501
| 0.060329
| 0.115174
| 0.435101
| 0.435101
| 0.435101
| 0.296161
| 0.296161
| 0.296161
| 0
| 0.001468
| 0.233971
| 889
| 34
| 71
| 26.147059
| 0.801762
| 0.257593
| 0
| 0.1875
| 0
| 0
| 0.01746
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 1
| 0.1875
| false
| 0
| 0.25
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47cd0ed87b30c0eeeb6aca7161bf214f8970893c
| 4,802
|
py
|
Python
|
sharpenCommander/dlgFind.py
|
cjng96/sharpenCommander
|
0d3a95dccc617481d9976789feffc115520243e6
|
[
"Apache-2.0"
] | null | null | null |
sharpenCommander/dlgFind.py
|
cjng96/sharpenCommander
|
0d3a95dccc617481d9976789feffc115520243e6
|
[
"Apache-2.0"
] | null | null | null |
sharpenCommander/dlgFind.py
|
cjng96/sharpenCommander
|
0d3a95dccc617481d9976789feffc115520243e6
|
[
"Apache-2.0"
] | null | null | null |
import os
import urwid
from .globalBase import *
from .urwidHelper import *
from .tool import *
#import dc
from .myutil import *
class DlgFind(cDialog):
def __init__(self, onExit=None):
super().__init__()
self.onExit = onExit
self.widgetFileList = mListBox(urwid.SimpleFocusListWalker(btnListMakeTerminal([], None)))
self.widgetFileList.setFocusCb(lambda newFocus: self.onFileFocusChanged(newFocus))
self.widgetContent = mListBox(urwid.SimpleListWalker(textListMakeTerminal(["< Nothing to display >"])))
self.widgetContent.isViewContent = True
self.header = ">> dc find - q/F4(Quit) </>,h/l(Prev/Next file) Enter(goto) E(edit)..."
self.headerText = urwid.Text(self.header)
self.widgetFrame = urwid.Pile(
[(15, urwid.AttrMap(self.widgetFileList, 'std')), ('pack', urwid.Divider('-')), self.widgetContent])
self.mainWidget = urwid.Frame(self.widgetFrame, header=self.headerText)
self.cbFileSelect = lambda btn: self.onFileSelected(btn)
self.content = ""
self.selectFileName = ""
self.lstFile = []
def onFileFocusChanged(self, newFocus):
# old widget
# widget = self.widgetFileList.focus
# markup = ("std", widget.base_widget.origTxt)
# widget.base_widget.set_label(markup)
# widget = self.widgetFileList.body[newFocus]
# markup = ("std_f", widget.base_widget.origTxt)
# widget.base_widget.set_label(markup)
widget = self.widgetFileList.focus
widget.original_widget.set_label(widget.base_widget.markup[0])
widget = self.widgetFileList.body[newFocus]
widget.base_widget.set_label(widget.base_widget.markup[1])
self.widgetFileList.set_focus_valign("middle")
self.selectFileName = fileBtnName(widget)
try:
with open(self.selectFileName, "r", encoding="UTF-8") as fp:
ss = fp.read()
except UnicodeDecodeError:
ss = "No utf8 file[size:%d]" % os.path.getsize(self.selectFileName)
ss = ss.replace("\t", " ")
del self.widgetContent.body[:]
self.widgetContent.body += textListMakeTerminal(ss.splitlines())
self.widgetFrame.set_focus(self.widgetContent)
return True
def onFileSelected(self, btn):
if btn.original_widget.attr is None:
self.close()
return
self.selectFileName = gitFileBtnName(btn)
itemPath = os.path.join(os.getcwd(), self.selectFileName)
pp = os.path.dirname(itemPath)
os.chdir(pp)
g.savePath(pp)
g.targetFile = os.path.basename(itemPath)
#raise urwid.ExitMainLoop()
self.close()
def inputFilter(self, keys, raw):
if filterKey(keys, "down"):
self.widgetContent.scrollDown()
if filterKey(keys, "up"):
self.widgetContent.scrollUp()
if filterKey(keys, "enter"):
self.onFileSelected(self.widgetFileList.focus)
return keys
def recvData(self, data):
if data is None:
self.headerText.set_text(self.header + "!!!")
if len(self.widgetFileList.body) == 0:
self.widgetFileList.body += btnListMakeTerminal(["< No result >"], None)
return
ss = data.decode("UTF-8")
self.content += ss
pt = self.content.rfind("\n")
if pt == -1:
return True
ss = self.content[:pt]
self.content = self.content[pt:]
for line in ss.splitlines():
line = line.strip()
if line == "":
continue
self.lstFile.append(line)
self.fileShow()
return True
def fileShow(self):
del self.widgetFileList.body[:]
for line in self.lstFile:
# TODO: filter
# markup = terminal2markup(line, 0)
# markupF = terminal2markup(line, 1)
markup = ("std", line)
markupF = ('std_f', line)
btn = btnGen(markup, markupF, self.cbFileSelect, len(self.widgetFileList.body) == 0)
self.widgetFileList.body.append(btn)
if len(self.widgetFileList.body) == 1:
self.onFileFocusChanged(0)
def unhandled(self, key):
if key == 'f4' or key == "q":
#raise urwid.ExitMainLoop()
self.close()
elif key == 'left' or key == "[" or key == "h":
self.widgetFileList.focusPrevious()
elif key == 'right' or key == "]" or key == "l":
self.widgetFileList.focusNext()
elif key == "H":
for i in range(10):
self.widgetFileList.focusPrevious()
elif key == "L":
for i in range(10):
self.widgetFileList.focusNext()
elif key == "k":
self.widgetContent.scrollUp()
elif key == "j":
self.widgetContent.scrollDown()
elif key == "K":
for i in range(15):
self.widgetContent.scrollUp()
elif key == "J":
for i in range(15):
self.widgetContent.scrollDown()
elif key == "e" or key == "E":
btn = self.widgetFileList.focus
fname = gitFileBtnName(btn)
g.loop.stop()
systemRet("%s %s" % (g.editApp, fname))
g.loop.start()
elif key == "H":
popupMsg("Dc help", "Felix Felix Felix Felix\nFelix Felix")
| 27.44
| 106
| 0.659933
| 582
| 4,802
| 5.398625
| 0.300687
| 0.114577
| 0.056015
| 0.014004
| 0.254933
| 0.156906
| 0.141311
| 0.079567
| 0.049013
| 0.049013
| 0
| 0.006734
| 0.19596
| 4,802
| 174
| 107
| 27.597701
| 0.807045
| 0.082882
| 0
| 0.201681
| 0
| 0.008403
| 0.060247
| 0
| 0
| 0
| 0
| 0.005747
| 0
| 1
| 0.058824
| false
| 0
| 0.05042
| 0
| 0.168067
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47d18703506147df7e77ebf700589e58f57e4508
| 350
|
py
|
Python
|
running_sum.py
|
erjan/coding_exercises
|
53ba035be85f1e7a12b4d4dbf546863324740467
|
[
"Apache-2.0"
] | null | null | null |
running_sum.py
|
erjan/coding_exercises
|
53ba035be85f1e7a12b4d4dbf546863324740467
|
[
"Apache-2.0"
] | null | null | null |
running_sum.py
|
erjan/coding_exercises
|
53ba035be85f1e7a12b4d4dbf546863324740467
|
[
"Apache-2.0"
] | null | null | null |
'''
Given an array nums. We define a running sum of an array as runningSum[i] = sum(nums[0]…nums[i]).
Return the running sum of nums.
'''
from typing import List

class Solution:
def runningSum(self, nums: List[int]) -> List[int]:
res = list()
for i in range(len(nums)):
res.append(sum(nums[:(i+1)]))
print(res)
return res
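# The slice-and-sum above is O(n^2) because each prefix is re-summed from
# scratch; itertools.accumulate (stdlib) produces the same running sum in O(n):
from itertools import accumulate
from typing import List

def running_sum(nums: List[int]) -> List[int]:
    return list(accumulate(nums))

# running_sum([1, 2, 3, 4]) -> [1, 3, 6, 10]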
| 21.875
| 97
| 0.58
| 55
| 350
| 3.745455
| 0.563636
| 0.067961
| 0.116505
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007843
| 0.271429
| 350
| 15
| 98
| 23.333333
| 0.788235
| 0.371429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.428571
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47d3c6d2f3f9ad6b0e3ffc64b6de5590845ebff4
| 30,949
|
py
|
Python
|
MagiskPatcher.py
|
affggh/Magisk_patcher
|
77b7a90c821d45e0b090ee1905dfbca7028e9ac2
|
[
"Apache-2.0"
] | 19
|
2022-01-27T11:12:43.000Z
|
2022-03-06T00:09:47.000Z
|
MagiskPatcher.py
|
affggh/Magisk_patcher
|
77b7a90c821d45e0b090ee1905dfbca7028e9ac2
|
[
"Apache-2.0"
] | null | null | null |
MagiskPatcher.py
|
affggh/Magisk_patcher
|
77b7a90c821d45e0b090ee1905dfbca7028e9ac2
|
[
"Apache-2.0"
] | 6
|
2022-01-28T15:51:19.000Z
|
2022-02-20T17:39:46.000Z
|
#!/usr/bin/env python3
# Script by affggh
# Apache 2.0
import os
import sys
import shutil
import zipfile
import subprocess
import platform
import requests
if os.name == 'nt':
import tkinter as tk
if os.name == 'posix':
from mttkinter import mtTkinter as tk
# Loading some thread-based features with the regular tkinter fails on Linux,
# so use mttkinter as a drop-in replacement there
from tkinter.filedialog import *
from tkinter import ttk
from tkinter import *
#import ttkbootstrap as ttk
import time
import webbrowser
import threading
# Hide console , need ```pip install pywin32```
# import win32gui, win32con
# the_program_to_hide = win32gui.GetForegroundWindow()
# win32gui.ShowWindow(the_program_to_hide, win32con.SW_HIDE)
def main():
VERSION = "20220611"
LOCALDIR = os.path.abspath(os.path.dirname(sys.argv[0]))
# Read config from GUIcfg.txt
configPath = LOCALDIR + os.sep + "bin" + os.sep + "GUIcfg.txt"
with open(configPath, "r") as file:
for line in file.readlines():
if((line.split('=', 1)[0]) == "THEME"):
THEME = line.split('=', 1)[1]
THEME = THEME.replace('\n', '')
if(THEME!="dark"): # 防止手贱改成别的导致主题爆炸
THEME="light"
elif((line.split('=', 1)[0]) == "DONATE_BUTTON"):
SHOW_DONATE_BUTTON = line.split('=', 1)[1]
SHOW_DONATE_BUTTON = SHOW_DONATE_BUTTON.replace('\n', '') # whether to show the donate button
elif((line.split('=', 1)[0]) == "GIT_USE_MIRROR"):
if (line.split('=', 1)[1].strip("\n").lower()) == "true":
GIT_USE_MIRROR = True
else:
GIT_USE_MIRROR = False
elif((line.split('=', 1)[0]) == "GIT_MIRROR"):
GIT_MIRROR = line.split('=', 1)[1]
# Detect machine and ostype
ostype = platform.system().lower()
machine = platform.machine().lower()
if machine == 'aarch64_be' \
or machine == 'armv8b' \
or machine == 'armv8l':
machine = 'aarch64'
if machine == 'i386' or machine == 'i686':
machine = 'x86'
if machine == "amd64":
machine = 'x86_64'
if ostype == 'windows':
if not machine == 'x86_64':
print("Error : Program on windows only support 64bit machine")
sys.exit(1)
if ostype == 'linux':
if not (machine == 'aarch64' or \
machine == 'arm' or \
machine == 'x86_64'):
print("Error : Machine not support your device [%s]" %machine)
sys.exit(1)
root = tk.Tk()
root.geometry("820x480")
# Set the initial theme
root.tk.call("source", LOCALDIR+os.sep+"sun-valley.tcl")
root.tk.call("set_theme", THEME)
def change_theme():
# NOTE: The theme's real name is sun-valley-<mode>
if root.tk.call("ttk::style", "theme", "use") == "sun-valley-dark":
# Set light theme
root.tk.call("set_theme", "light")
else:
# Set dark theme
root.tk.call("set_theme", "dark")
root.resizable(0,0) # disable window resizing/maximizing
root.title("Magisk Patcher by 酷安 affggh " + "版本号 : %s" %(VERSION))
def logo():
os.chdir(os.path.abspath(os.path.dirname(sys.argv[0])))
root.iconbitmap(os.path.abspath(LOCALDIR+os.sep+'bin' + os.sep+ 'logo.ico'))
if os.name == 'nt':
logo()
# Frame: these all shell out to external commands, which can stall the UI; run them in subprocesses to ease the stutter
frame2_3 = Frame(root, relief=FLAT)
frame2 = ttk.LabelFrame(frame2_3, text="功能页面", labelanchor="n", relief=SUNKEN, borderwidth=1)
frame3 = ttk.LabelFrame(frame2_3, text="信息反馈", labelanchor="nw", relief=SUNKEN, borderwidth=1)
textfont = "Consolas"
text = Text(frame3,width=70,height=15,font=textfont) # message/feedback display
filename = tk.StringVar()
arch = tk.StringVar()
keepverity = tk.StringVar()
keepforceencrypt = tk.StringVar()
patchvbmetaflag = tk.StringVar()
mutiseletion = tk.StringVar()
recoverymodeflag = tk.BooleanVar()
recoverymode = tk.StringVar()
recoverymode.set('false')
# For logo
photo = tk.PhotoImage(file=LOCALDIR+os.sep+"bin"+os.sep+"logo.png") # image file path
# For aboutme
photo2 = tk.PhotoImage(file=LOCALDIR+os.sep+"bin"+os.sep+"logo.png") # image file path
# For donate QR code
photo3 = tk.PhotoImage(file=LOCALDIR+os.sep+"bin"+os.sep+"alipay.png") # image file path
photo4 = tk.PhotoImage(file=LOCALDIR+os.sep+"bin"+os.sep+"wechat.png") # image file path
photo5 = tk.PhotoImage(file=LOCALDIR+os.sep+"bin"+os.sep+"zfbhb.png") # image file path
global Thanks
Thanks = 0 # the bottom-left sticker that says thanks
os.chdir(LOCALDIR)
def get_time():
'''Show the current time.'''
global time1
time1 = ''
time2 = time.strftime('%H:%M:%S')
# dynamically reflects the current system time
if time2 != time1:
time1 = time2
text.insert(END, "[%s] : " %(time1))
def selectFile():
global filepath
filepath = askopenfilename() # ask which file to open; returns the file name
filename.set(os.path.abspath(filepath))
showinfo("选择文件为:\n%s" %(filename.get()))
def showinfo(textmsg):
textstr = textmsg
get_time() # prepend a timestamp
text.insert(END,"%s" %(textstr) + "\n")
text.update() # flush the message immediately
text.yview('end')
def affgghsay(word):
line = ''
for i in range(len(word.encode("gb2312"))):
line += '─' # GB2312 Chinese characters are two bytes each; exploit that to pad full-/half-width text
text.insert(END,
'''
(\︵/) ┌%s┐
>(—﹏—)< < %s│
/ ﹌ \╯ └%s┘
affggh 提醒您
'''%(line, word, line))
text.yview('end')
def runcmd(cmd):
if os.name == 'nt':
sFlag = False
else:
sFlag = True # fix file not found on linux
try:
ret = subprocess.Popen(cmd, shell=sFlag, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for i in iter(ret.stdout.readline, b''):
text.insert(END, i.strip().decode("UTF-8") + "\n")
text.update()
text.yview(END)
except subprocess.CalledProcessError as e:
for i in iter(e.stdout.readline,b''):
text.insert(END, i.strip().decode("UTF-8") + "\n")
text.update()
text.yview(END)
def get_releases(url):
data = requests.get(url).json()
return data
def ret_dlink(url):
data = get_releases(url)
dlink = {}
for i in data:
for j in i['assets']:
if j['name'].startswith("Magisk-v") and j['name'].endswith(".apk"):
if GIT_USE_MIRROR:
dlink.update({j['name'] : j['browser_download_url'].replace("https://github.com/", GIT_MIRROR)})
else:
dlink.update({j['name'] : j['browser_download_url']})
return dlink
def download(url, fileToSave):
def p(now, total):
return int((now/total)*100)
file = fileToSave
chunk_size = 1024
affgghsay("Starting download file...")
r = requests.get(url, stream=True)
total_size = int(r.headers['content-length'])
now = 0
progressbar['maximum'] = 100
with open(file, 'wb') as f:
for chunk in r.iter_content(chunk_size=chunk_size):
if chunk:
before = now
f.write(chunk)
now += chunk_size
if now > before:
# print("download progress [%s/100]" %progress(now, total_size), end='\r')
progress.set(p(now, total_size))
progress.set(0)
affgghsay("文件下载完成"+file)
def thrun(fun): # run the job on a worker thread so the UI does not freeze
# showinfo("Test threading...")
th=threading.Thread(target=fun)
th.daemon = True
th.start()
def cleaninfo():
text.delete(1.0, END) # clear the text widget
text.image_create(END,image=photo)
text.insert(END," Copyright(R) affggh Apache2.0\n" \
"\n 此脚本为免费工具,如果你花钱买了你就是大傻逼\n")
def test():
affgghsay("Testing...")
def showConfig():
affgghsay("确认配置信息")
text.insert(END , "\n" + \
" 镜像架构 = " + "%s\n" %(arch.get()) + \
" 保持验证 = " + "%s\n" %(keepverity.get()) + \
" 保持强制加密 = " + "%s\n" %(keepforceencrypt.get()) + \
" 修补vbmeta标志 = "+ "%s\n" %(patchvbmetaflag.get()) +\
" Recovery Mode = " + "%s\n" %(recoverymode.get()))
tabControl.select(tab2)
def selectConfig():
configpath = askopenfilename() # ask which file to open; returns the file name
showinfo("从配置文件中读取:\n%s" %(configpath))
if os.path.isfile(configpath):
with open(configpath, 'r') as f:
PatchConfig = {}
for i in f.readlines():
if not i[0:1] == '#':
l = i.strip('\n').split('=')
if not i.find('=') == -1:
PatchConfig[l[0]] = l[1]
arch.set(PatchConfig['arch'])
keepverity.set(PatchConfig['keepverity'])
keepforceencrypt.set(PatchConfig['keepforceencrypt'])
patchvbmetaflag.set(PatchConfig['patchvbmetaflag'])
recoverymode.set(PatchConfig['recoverymode'])
if recoverymode.get() == 'true':
recoverymodeflag.set(True)
else:
recoverymodeflag.set(False)
# showConfig()
else:
affgghsay("取消选择config文件")
def confirmConfig():
showConfig()
def __select(*args):
affgghsay("选择Magisk版本为 : %s" %(mutiseletion.get()))
if not os.access("." + os.sep + "prebuilt" + os.sep + mutiseletion.get() + ".apk", os.F_OK):
affgghsay("你选择的版本文件不存在,正在下载...")
try:
download(dlink[mutiseletion.get()+".apk"], "."+os.sep+"prebuilt"+os.sep+mutiseletion.get()+".apk")
except:
affgghsay("出现错误,请关掉代理重试")
def select(*args):
th = threading.Thread(target=__select, args=args)
th.daemon = True
th.start()
def recModeStatus():
if recoverymodeflag.get()== True:
affgghsay("开启recovery模式修补")
recoverymode.set("true")
else:
affgghsay("关闭recovery模式修补")
recoverymode.set("false")
def parseZip(filename):
def returnMagiskVersion(buf):
v = "Unknow"
l = buf.decode('utf_8').split("\n")
for i in l:
if not i.find("MAGISK_VER=") == -1:
v = i.split("=")[1].strip("'")
break
return v
def rename(n):
if n.startswith("lib") and n.endswith(".so"):
n = n.replace("lib", "").replace(".so", "")
return n
if not os.access(filename, os.F_OK):
return False
else:
f = zipfile.ZipFile(filename, 'r')
l = f.namelist() # l equals list
tl = [] # tl equals total get list
for i in l:
if not i.find("assets/") == -1 or \
not i.find("lib/") == -1:
tl.append(i)
buf = f.read("assets/util_functions.sh")
mVersion = returnMagiskVersion(buf)
showinfo("Parse Magisk Version : " + mVersion)
for i in tl:
if arch.get() == "arm64":
if i.startswith("lib/arm64-v8a/") and i.endswith(".so"):
if not (i.endswith("busybox.so") or i.endswith("magiskboot.so")):
f.extract(i, "tmp")
elif arch.get() == "arm":
if i.startswith("lib/armeabi-v7a/") and i.endswith(".so"):
if not (i.endswith("busybox.so") or i.endswith("magiskboot.so")):
f.extract(i, "tmp")
elif arch.get() == "x86_64":
if i.startswith("lib/x86_64/") and i.endswith(".so"):
if not (i.endswith("busybox.so") or i.endswith("magiskboot.so")):
f.extract(i, "tmp")
elif arch.get() == "x86":
if i.startswith("lib/x86/") and i.endswith(".so"):
if not (i.endswith("busybox.so") or i.endswith("magiskboot.so")):
f.extract(i, "tmp")
for i in tl:
if arch.get() == "arm64" and not os.access("libmagisk32.so", os.F_OK):
if i == "lib/armeabi-v7a/libmagisk32.so":
f.extract("lib/armeabi-v7a/libmagisk32.so", "tmp")
elif arch.get() == "x86_64" and not os.access("libmagisk32.so", os.F_OK):
if i == "lib/x86/libmagisk32.so":
f.extract("lib/armeabi-v7a/libmagisk32.so", "tmp")
for root, dirs, files in os.walk("tmp"):
for file in files:
if file.endswith(".so"):
shutil.move(root+os.sep+file, rename(os.path.basename(file)))
shutil.rmtree("tmp")
return True
def PatchonWindows():
affgghsay(" ---->> 修补开始")
progressbar['maximum'] = 3
start_time = time.time()
if not os.access(filename.get(), os.F_OK):
affgghsay("待修补文件不存在")
affgghsay(" <<---- 修补失败")
return False
# cmd = [LOCALDIR+os.sep+'magisk_patcher.bat','patch','-i','%s' %(filename.get()),'-a','%s' %(arch.get()),'-kv','%s' %(keepverity.get()),'-ke','%s' %(keepforceencrypt.get()),'-pv','%s' %(patchvbmetaflag.get()),'-m','.\\prebuilt\\%s.apk' %(mutiseletion.get())]
f = "." + os.sep + "prebuilt" + os.sep + mutiseletion.get() + ".apk"
if not parseZip(f):
affgghsay("apk文件解析失败")
affgghsay(" <<---- 修补失败")
return False
progress.set(1)
if os.name == 'nt':
cmd = "." + os.sep + "bin" + os.sep + ostype + os.sep + machine + os.sep + "busybox ash "
elif os.name == 'posix':
cmd = "." + os.sep + "bin" + os.sep + ostype + os.sep + machine + os.sep + "busybox ash "
else:
showinfo("not support")
progress.set(0)
return False
if not os.access("./bin/boot_patch.sh", os.F_OK):
affgghsay("Error : 关键脚本丢失")
progress.set(0)
return False
cmd += "." + os.sep + "bin" + os.sep + "boot_patch.sh \"%s\"" %(filename.get())
cmd += " %s" %keepverity.get()
cmd += " %s" %keepforceencrypt.get()
cmd += " %s" %patchvbmetaflag.get()
cmd += " %s" %recoverymode.get()
try:
progress.set(2)
thrun(lambda: runcmd(cmd)) # run on a worker thread to reduce stutter (wrapped in a lambda so runcmd is not called eagerly)
except:
progress.set(0)
affgghsay("Error : 出现问题,修补失败")
progress.set(3)
cleanUp()
end_time = time.time()
use_time = end_time - start_time
affgghsay(" 总共用时 [%.2f] s" %use_time)
affgghsay(" <<--- 修补结束")
progress.set(0)
def GenDefaultConfig():
affgghsay(" ---->> 生成选中配置")
if os.path.isfile('.' + os.sep + 'config.txt'):
os.remove('.' + os.sep + 'config.txt')
with open("." + os.sep + "config.txt", 'w') as f:
f.write("# VAR TYPE\n")
f.write("arch=%s\n" %(arch.get()) + \
"keepverity=%s\n" %(keepverity.get()) + \
"keepforceencrypt=%s\n" %(keepforceencrypt.get()) + \
"patchvbmetaflag=%s\n" %(patchvbmetaflag.get()) + \
"recoverymode=%s\n" %(recoverymode.get()) + \
"magisk=%s\n" %("." + os.sep + "prebuilt" + os.sep + mutiseletion.get() + ".apk") )
# magisk=%s is not used by the Python program; it only works in the batch version
if os.path.isfile('.' + os.sep + 'config.txt'):
affgghsay("确认配置信息:")
text.insert(END, "\n" + \
" 镜像架构 = " + "%s\n" %(arch.get()) + \
" 保持验证 = " + "%s\n" %(keepverity.get()) + \
" 保持强制加密 = " + "%s\n" %(keepforceencrypt.get()) + \
" 修补vbmeta标志 = "+ "%s\n" %(patchvbmetaflag.get()) +\
" Recovery Mode = " + "%s\n" %(recoverymode.get()))
affgghsay("成功生成配置")
else:
affgghsay("选中配置生成失败")
affgghsay(" <<---- 生成选中配置")
def GetDeviceConfig():
affgghsay(" ---->> 读取设备配置")
affgghsay(" 根据设备不同,生成速度也不同...请稍等...")
if os.name == 'nt':
cmd = "." + os.sep + "bin" + os.sep + ostype + os.sep + machine + os.sep + "adb get-state"
elif os.name == 'posix':
cmd = "adb get-state"
else:
affgghsay("系统不支持")
return False
deviceState = subprocess.getstatusoutput(cmd)
if deviceState[0] == 1:
affgghsay("设备未连接,或驱动未安装")
return False
elif deviceState[0] == 0:
if os.name == 'nt':
cmd = "." + os.sep + "bin" + os.sep + ostype + os.sep + machine + os.sep + "adb "
elif os.name == 'posix':
cmd = "adb "
if deviceState[1].strip(" ").strip("\n") == 'device':
tmppath = "/data/local/tmp"
elif deviceState[1].strip(" ").strip("\n") == 'recovery':
tmppath = "/tmp"
else:
affgghsay("不支持的设备状态")
return False
subprocess.getoutput(cmd + "push " + "." + os.sep + "bin" + os.sep + "get_config.sh %s/get_config.sh" %tmppath)
subprocess.getoutput(cmd + "shell chmod a+x %s/get_config.sh" %tmppath)
out = subprocess.getoutput(cmd + "shell sh %s/get_config.sh" %tmppath)
for i in out.splitlines():
if len(i.split("=")) > 1:
var = i.split("=")[0].strip(" ").lower()
t = i.split("=")[1].strip(" ").lower()
if var == 'arch':
arch.set(t)
elif var == 'keepverity':
keepverity.set(t)
elif var == 'keepforceencrypt':
keepforceencrypt.set(t)
elif var == 'patchvbmetaflag':
patchvbmetaflag.set(t)
affgghsay("自动修改配置%s为%s" %(var, t))
else:
affgghsay("设备未知状态")
return False
affgghsay(" <<---- 读取设备配置")
def opensource():
webbrowser.open("https://github.com/affggh/Magisk_Patcher")
def About():
root2 = tk.Toplevel()
curWidth = 300
curHight = 180
# get the screen width and height
scn_w, scn_h = root.maxsize()
# print(scn_w, scn_h)
# compute centered coordinates
cen_x = (scn_w - curWidth) / 2
cen_y = (scn_h - curHight) / 2
# print(cen_x, cen_y)
# set the window's initial size and position
size_xy = '%dx%d+%d+%d' % (curWidth, curHight, cen_x, cen_y)
root2.geometry(size_xy)
#root2.geometry("300x180")
root2.resizable(0,0) # disable window resizing/maximizing
root2.title("关于脚本和作者信息")
aframe1 = Frame(root2, relief=FLAT, borderwidth=1)
aframe2 = Frame(root2, relief=FLAT, borderwidth=1)
aframe1.pack(side=BOTTOM, expand=YES, pady=3)
aframe2.pack(side=BOTTOM, expand=YES, pady=3)
ttk.Button(aframe1, text='访问项目', command=opensource).pack(side=LEFT, expand=YES, padx=5)
ttk.Button(aframe1, text='获取最新', command=lambda u="https://hub.fastgit.xyz/affggh/Magisk_patcher/archive/refs/heads/main.zip":webbrowser.open(u)).pack(side=LEFT, expand=YES, padx=5)
ttk.Label(aframe2, text='脚本编写自affggh\nshell脚本提取修改自Magisk-v24.1安装包\n项目开源地址:github.com/affggh/Magisk_Patcher\n').pack(side=BOTTOM, expand=NO, pady=3)
imgLabe2 = ttk.Label(aframe2,image=photo2) # embed the image in the label
imgLabe2.pack(side=TOP, expand=YES, pady=3)
root2.mainloop()
def donateme():
cleaninfo()
text.image_create(END,image=photo3)
text.image_create(END,image=photo4)
text.image_create(END,image=photo5)
global Thanks
if Thanks==0:
Label(frame4,text=' ----------------------------\n' \
' < 谢谢老板!老板发大财!|\n' \
' ----------------------------').pack(side=LEFT, expand=NO, pady=3)
Thanks = 1
def color(value):
digit = list(map(str, range(10))) + list("ABCDEF")
if isinstance(value, tuple):
string = '#'
for i in value:
a1 = i // 16
a2 = i % 16
string += digit[a1] + digit[a2]
return string
elif isinstance(value, str):
a1 = digit.index(value[1]) * 16 + digit.index(value[2])
a2 = digit.index(value[3]) * 16 + digit.index(value[4])
a3 = digit.index(value[5]) * 16 + digit.index(value[6])
return (a1, a2, a3)
def colorfuldonate():
button = tk.Button(frame41, text='给我捐钱', width=12, height=1, command=donateme, bg="red", fg="white", font=('黑体', '14'))
button.grid(row=0, column=1, padx=3, pady=0)
while(True):
r = 255
g = 0
b = 0
for c in range(255):
r = r-1
g = g+1
button.configure(bg=color((r,g,b)))
time.sleep(0.000001)
for c in range(255):
g = g-1
b = b+1
button.configure(bg=color((r,g,b)))
time.sleep(0.000001)
for c in range(255):
b = b-1
r = r+1
button.configure(bg=color((r,g,b)))
time.sleep(0.000001)
def pointdonate():
lab = tk.Label(frame41, text='<<点我', font=('黑体', '14'))
lab.grid(row=0, column=2, padx=2, pady=0)
while(True):
lab.configure(bg='#FFFF00',fg='#000000')
time.sleep(0.1)
lab.configure(bg='#9400D3',fg='#FFFFFF')
time.sleep(0.1)
def pointdonate2():
lab = tk.Label(frame41, text='点我>>', font=('黑体', '14'))
lab.grid(row=0, column=0, padx=2, pady=0)
while(True):
lab.configure(bg='#FFFF00',fg='#000000')
time.sleep(0.1)
lab.configure(bg='#9400D3',fg='#FFFFFF')
time.sleep(0.1)
def pdp():
th2=threading.Thread(target=pointdonate, daemon=True) # daemon thread: dies with the main window
th2.start()
th=threading.Thread(target=colorfuldonate, daemon=True) # daemon thread
th.start()
th3=threading.Thread(target=pointdonate2, daemon=True) # daemon thread
th3.start()
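# pdp() spawns the two blinking "click me" labels and the rainbow donate
# button on daemon threads, so they animate in the background and are torn
# down automatically when the main window closes.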
def listdir(path):
L=[]
for root, dirs, files in os.walk(path):
for file in files:
if os.path.splitext(file)[1] == '.apk':
tmp = os.path.splitext(file)[0] # splitext, not strip(".apk"): strip() removes a character set, not a suffix
L.append(tmp)
return L
def cleanUp():
def rm(p):
if os.access(p, os.F_OK):
if os.path.isdir(p):
shutil.rmtree(p)
elif os.path.isfile(p):
os.remove(p)
else:
os.remove(p)
l = ["busybox", "magisk32", "magisk64", "magiskinit", "magiskboot"]
d = ["tmp"]
for i in l:
rm(i)
for i in d:
rm(i)
cmd = "." + os.sep + "bin" + os.sep + ostype + os.sep + machine + os.sep + "magiskboot cleanup"
thrun(runcmd(cmd))
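# cleanUp() removes the helper binaries (busybox, magisk32/64, magiskinit,
# magiskboot) and the tmp directory from the working directory, then
# delegates to the bundled magiskboot's own "cleanup" subcommand for
# anything it unpacked itself.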
def get_comboxlist():
url = "https://api.github.com/repos/topjohnwu/Magisk/releases"
l = []
try:
global dlink
dlink = ret_dlink(url)
for i in dlink.keys():
l.append(i.replace(".apk", ""))
except Exception:
affgghsay(" Failed to fetch from the network, loading the local directory only")
for i in os.listdir("." + os.sep + "prebuilt"):
if i.endswith(".apk"):
l.append(os.path.basename(i).replace(".apk", ""))
l2=list(set(l))
l2.sort(key=l.index)
comboxlist["values"] = l2
if len(l) > 0:
comboxlist.current(0)
select()
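# The set()/sort(key=l.index) pair above deduplicates the version list while
# preserving the order entries first appeared in l, which a bare set() alone
# would scramble.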
# button and text
# Frame 1: file selection
frame1 = LabelFrame(root, text="File selection", labelanchor="w", relief=FLAT, borderwidth=1)
frame1.pack(side=TOP, fill=BOTH, padx=6, pady=8, expand=NO)
# tk.Label(frame1, text='Select file').pack(side=LEFT)
ttk.Entry(frame1, width=70,textvariable=filename).pack(side=LEFT, expand=YES, fill=X, padx=10)
ttk.Button(frame1, text='Select file', command=selectFile).pack(side=LEFT)
#
# Frame 2: feature tabs
frame2.pack(side=LEFT, fill=BOTH, padx=2, pady=3, expand=NO)
tabControl = ttk.Notebook(frame2)
tab1 = ttk.Frame(tabControl) # add a new tab
tab11 = ttk.Frame(tab1)
tab111 = ttk.LabelFrame(tab11, text="Image architecture", labelanchor="n", relief=SUNKEN, borderwidth=1)
tab111.pack(side=TOP, expand=NO, fill=BOTH)
arch.set("arm64")
ttk.Radiobutton(tab111, text='arm',variable=arch, value='arm').grid(row=0, column=0, padx=0, pady=0)
ttk.Radiobutton(tab111, text='arm64',variable=arch, value='arm64').grid(row=0, column=1, padx=0, pady=0)
ttk.Radiobutton(tab111, text='x86',variable=arch, value='x86').grid(row=1, column=0, padx=0, pady=0)
ttk.Radiobutton(tab111, text='x86_64',variable=arch, value='x86_64').grid(row=1, column=1, padx=0, pady=0)
tab112 = ttk.LabelFrame(tab11, text="Keep verity", labelanchor="n", relief=SUNKEN, borderwidth=1)
tab112.pack(side=TOP, expand=YES, fill=BOTH)
keepverity.set("true")
ttk.Radiobutton(tab112, text='Yes',variable=keepverity, value='true').grid(row=0, column=0, padx=0, pady=0)
ttk.Radiobutton(tab112, text='No',variable=keepverity, value='false').grid(row=0, column=1, padx=10, pady=0)
tab113 = ttk.LabelFrame(tab11, text="Keep force-encrypt", labelanchor="n", relief=SUNKEN, borderwidth=1)
tab113.pack(side=TOP, expand=YES, fill=BOTH)
keepforceencrypt.set("true")
ttk.Radiobutton(tab113, text='Yes',variable=keepforceencrypt, value='true').grid(row=0, column=0, padx=0, pady=0)
ttk.Radiobutton(tab113, text='No',variable=keepforceencrypt, value='false').grid(row=0, column=1, padx=10, pady=0)
tab114 = ttk.LabelFrame(tab11, text="Patch vbmeta flag", labelanchor="n", relief=SUNKEN, borderwidth=1)
tab114.pack(side=TOP, expand=YES, fill=BOTH)
patchvbmetaflag.set("false")
ttk.Radiobutton(tab114, text='Yes',variable=patchvbmetaflag, value='true').grid(row=0, column=0, padx=0, pady=0)
ttk.Radiobutton(tab114, text='No',variable=patchvbmetaflag, value='false').grid(row=0, column=1, padx=10, pady=0)
tab12 = ttk.Frame(tab1)
tab11.pack(side=TOP, expand=YES, fill=BOTH)
ttk.Button(tab12, text='Confirm config', command=confirmConfig).pack(side=TOP, expand=YES, pady=3)
ttk.Button(tab12, text='Specify config.txt', command=selectConfig).pack(side=TOP, expand=YES, pady=2)
tabControl.add(tab1, text='Config') # add the tab to the Notebook
tab2 = ttk.Frame(tabControl) # add a new tab
ttk.Button(tab2, text='Patch with\ncurrent config', command=PatchonWindows).pack(side=TOP, expand=NO, pady=3)
# ttk.Button(tab2, text='Patch in device\nenvironment', command=PatchonDevice).pack(side=TOP, expand=NO, pady=3)
ttk.Label(tab2, text='Patching in the device environment\nneeds none of these parameters;\nthe config comes from the device').pack(side=BOTTOM, expand=NO, pady=3)
ttk.Label(tab2, text='Select Magisk version').pack(side=TOP, expand=NO, pady=3)
ttk.Checkbutton(tab2, variable=recoverymodeflag, text="patch in recovery mode", command=recModeStatus).pack(side=TOP, expand=NO, pady=3)
comboxlist = ttk.Combobox(tab2, textvariable=mutiseletion, width=14)
'''
filelist = listdir("./prebuilt")
filelist.reverse() # newer versions first
comboxlist["values"]=(filelist)
if len(filelist)>0:
comboxlist.current(0) # select the first entry
else:
showinfo("Error : no Magisk package found, make sure apk files exist in the prebuilt directory")
'''
# thrun(get_comboxlist())
comboxlist.bind("<<ComboboxSelected>>",select)
comboxlist.pack(side=TOP, expand=NO, pady=3)
tabControl.add(tab2, text='Patch') # add the tab to the Notebook
ttk.Button(tab2, text='Fetch Magisk list', command=get_comboxlist).pack(side=TOP, expand=NO, pady=3)
tab3 = tk.Frame(tabControl) # add a new tab
ttk.Button(tab3, text='Generate config.txt\nfrom selection', command=lambda:thrun(GenDefaultConfig)).pack(side=TOP, expand=NO, pady=3)
ttk.Button(tab3, text='Read config.txt\nfrom device', command=lambda:thrun(GetDeviceConfig)).pack(side=TOP, expand=NO, pady=3)
# ttk.Button(tab3, text='test', command=lambda:thrun(test)).pack(side=TOP, expand=NO, pady=3)
tabControl.add(tab3, text='Read') # add the tab to the Notebook
tab12.pack(side=TOP, expand=NO, fill=BOTH)
tabControl.pack(side=TOP, expand=YES, fill="both")
# Frame 3: information display
frame3.pack(side=RIGHT, fill=BOTH, padx=2, pady=3, expand=YES)
scroll = ttk.Scrollbar(frame3)
scroll.pack(side=RIGHT,fill=Y, padx=1, pady=5)
text.pack(side=RIGHT, expand=YES, fill=BOTH, padx=5 ,pady=1)
scroll.config(command=text.yview)
text.config(yscrollcommand=scroll.set)
frame2_3.pack(side=TOP, expand=NO, pady=2, fill=BOTH)
# Frame 4: about and clear-info buttons
frame4 = Frame(root, relief=FLAT)
progress = tk.DoubleVar(value=0)
progressbar = ttk.Progressbar(frame4, length=200, variable=progress, mode='determinate')
ttk.Button(frame4, text='Clear info', command=cleaninfo).pack(side=RIGHT, expand=NO, pady=3)
ttk.Button(frame4, text='About', command=About).pack(side=RIGHT, expand=NO, pady=3)
ttk.Button(frame4, text='Switch theme', command=change_theme).pack(side=RIGHT, expand=NO, pady=3)
if(SHOW_DONATE_BUTTON!="False"):
# flashy donate button
frame41 = Frame(frame4, relief=FLAT)
pdp()
frame41.pack(side=RIGHT, expand=NO, pady=3)
else:
ttk.Button(frame4, text='Donate', command=donateme).pack(side=RIGHT, expand=NO, pady=3)
progressbar.pack(side=RIGHT, expand=NO, padx=(0, 10))
ttk.Label(frame4, text="Progress:").pack(side=RIGHT, expand=NO, padx=(10, 0))
frame4.pack(side=TOP, expand=NO, padx=10, ipady=5, fill=X)
imgLabel = ttk.Label(frame4,image=photo) # embed the image in a label widget
imgLabel.pack(side=LEFT, expand=NO, pady=3)
text.image_create(END,image=photo)
text.insert(END," Copyright(R) affggh Apache2.0\n" \
" Current script runtime environment:\n" \
" [%s] [%s]\n" \
"This script is a free tool; if you paid money for it, you got ripped off\n" \
"Normal flow:\n" \
"edit config-->confirm config-->patch\n" \
"Or the simple way:\n" \
"just pick a Magisk version-->plug in the phone-->patch on the phone\n (but then only the device's own config is used)\n" \
" Note: recovery mode patching is only supported on Windows\n" %(ostype, machine))
affgghsay("This script is a free tool; if you paid money for it, you got ripped off")
# root.update()
root.mainloop()
if __name__=='__main__':
main()
| 40.509162
| 267
| 0.534751
| 3,671
| 30,949
| 4.478071
| 0.178698
| 0.016424
| 0.014721
| 0.021717
| 0.334753
| 0.287365
| 0.246609
| 0.212665
| 0.179451
| 0.15962
| 0
| 0.030167
| 0.30379
| 30,949
| 763
| 268
| 40.562254
| 0.732213
| 0.060034
| 0
| 0.205371
| 0
| 0.00158
| 0.127353
| 0.016451
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061611
| false
| 0
| 0.023697
| 0.00158
| 0.113744
| 0.00316
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47d719fa6ddaa13236b1671d0f097880df05054a
| 3,010
|
py
|
Python
|
solvers/shortest_path.py
|
Psychofun/Snake-Gym
|
59646ef2213e4cc2a68e238d010f5e9f25826951
|
[
"MIT"
] | null | null | null |
solvers/shortest_path.py
|
Psychofun/Snake-Gym
|
59646ef2213e4cc2a68e238d010f5e9f25826951
|
[
"MIT"
] | null | null | null |
solvers/shortest_path.py
|
Psychofun/Snake-Gym
|
59646ef2213e4cc2a68e238d010f5e9f25826951
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append("..")
from gym_snake.envs.node import Node
from gym_snake.envs.snake_env import action_to_vector
from gym_snake.envs.snake_env import SnakeAction
from gym_snake.envs.snake_env import SnakeCellState
from gym_snake.envs.snake_env import rotate_action_clockwise
from gym_snake.envs.snake_env import rotate_action_counter_clockwise
from gym_snake.envs.snake_env import invert_action
from gym_snake.queue import Queue
class ShortestPathBFSSolver():
def __init__(self):
pass
def move(self, environment):
self.environment = environment.copy()
self.environment.move()
shortest_path_move_from_transposition_table = self.environment._path_move_from_transposition_table(self.environment.starting_node, self.environment.fruit_node)
if shortest_path_move_from_transposition_table:
#print(" shortest_path_move_from_transposition_table: ", shortest_path_move_from_transposition_table)
return shortest_path_move_from_transposition_table
shortest_path = self.shortest_path(self.environment, self.environment.starting_node, self.environment.fruit_node)
if shortest_path:
#print("Shortest path: ", [x.action for x in shortest_path])
self.environment.transposition_table[self.environment.fruit_node] = shortest_path
first_point = shortest_path[-2]
return first_point.action
#print("prev action: ", self.environment.prev_action)
return self.environment.prev_action
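# move() tries three sources in order: a cached path for the current fruit
# (the transposition table), a fresh BFS, and finally the previous action as
# a fallback. The path is assumed to come back ordered end -> start, so
# index -2 is the first node after the starting one; that assumption follows
# from _recreate_path_for_node() walking previous_node links backwards.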
def shortest_path(self, environment, start, end):
queue = Queue([start])
visited_nodes = set([start])
shortest_path = []
while queue.queue:
current_node = queue.dequeue()
if current_node == end:
shortest_path = current_node._recreate_path_for_node()
break
for action in environment.possible_actions_for_current_action(current_node.action):
# Convert action (int) to tuple
a_vector = action_to_vector(action)
# Apply action to point
neighbor = (current_node.point[0] + a_vector[0], current_node.point[1] + a_vector[1])
neighbor_state = environment.cell_state(neighbor)
if (neighbor_state == SnakeCellState.EMPTY or
neighbor_state == SnakeCellState.DOT
):
child_node = Node(neighbor)
child_node.action = action
child_node.previous_node = current_node
if child_node not in visited_nodes and child_node not in queue.queue:
visited_nodes.add(child_node)  # mark the child when it is enqueued so it cannot be enqueued twice
queue.enqueue(child_node)
if shortest_path:
return shortest_path
else:
return []
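# This is a standard breadth-first search over grid cells: neighbors are
# generated from the actions legal for the current heading, and only EMPTY
# or DOT cells are expanded, so the first time the fruit node is dequeued
# the reconstructed path is guaranteed to be a shortest one (all edges have
# equal cost).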
| 38.589744
| 168
| 0.639535
| 337
| 3,010
| 5.379822
| 0.225519
| 0.112521
| 0.052951
| 0.061776
| 0.323773
| 0.323773
| 0.290678
| 0.199669
| 0.118036
| 0.071704
| 0
| 0.002356
| 0.295017
| 3,010
| 77
| 169
| 39.090909
| 0.852026
| 0.08804
| 0
| 0.039216
| 0
| 0
| 0.000751
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0.019608
| 0.176471
| 0
| 0.352941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47d7a401f53299346b73e5c7c5fe542392290c13
| 22,070
|
py
|
Python
|
progressbot.py
|
tchapley/ProgressBot
|
60837055999cbddcad637a514dc8af2e748374a8
|
[
"MIT"
] | null | null | null |
progressbot.py
|
tchapley/ProgressBot
|
60837055999cbddcad637a514dc8af2e748374a8
|
[
"MIT"
] | 2
|
2021-03-31T18:38:57.000Z
|
2021-12-13T19:46:50.000Z
|
progressbot.py
|
tchapley/ProgressBot
|
60837055999cbddcad637a514dc8af2e748374a8
|
[
"MIT"
] | null | null | null |
import discord
from discord.ext import commands
import asyncio
import logging
import os  # os.environ is used by the rank command below
import sys
import requests
import datetime
from bs4 import BeautifulSoup
from util import *
from wowapi import WowApi, WowApiException, WowApiConfigException
from killpoints import KillPoints
from math import ceil
base_wow_progress = "http://www.wowprogress.com"
base_wow_armory = "http://us.battle.net/wow/en/character/{0}/{1}/advanced"
base_wc_logs = "https://www.warcraftlogs.com:443/v1"
class_array = [ "Warrior", "Paladin", "Hunter", "Rogue", "Priest", "Death Knight",
"Shaman", "Mage", "Warlock", "Monk", "Druid", "Demon Hunter" ]
race_map = {
1: "Human", 2: "Orc", 3: "Dwarf", 4: "Night Elf", 5: "Undead", 6: "Tauren", 7: "Gnome",
8: "Troll", 9: "Goblin", 10: "Blood Elf", 11: "Draenei", 22: "Worgen",
24:"Pandaren", 25:"Pandaren", 26:"Pandaren"
}
artifactLevelCost = {
1: { "cost": 100, "total": 100 },
2: { "cost": 300, "total": 400 },
3: { "cost": 325, "total": 725 },
4: { "cost": 350, "total": 1075 },
5: { "cost": 375, "total": 1450 },
6: { "cost": 400, "total": 1850 },
7: { "cost": 425, "total": 2275 },
8: { "cost": 450, "total": 3250 },
9: { "cost": 525, "total": 3875 },
10: { "cost": 625, "total": 4625 },
11: { "cost": 750, "total": 4625 },
12: { "cost": 875, "total": 5500 },
13: { "cost": 1000, "total": 6500 },
14: { "cost": 6840, "total": 13340 },
15: { "cost": 8830, "total": 22170 },
16: { "cost": 11280, "total": 33450 },
17: { "cost": 14400, "total": 47850 },
18: { "cost": 18620, "total": 66470 },
19: { "cost": 24000, "total": 90470 },
20: { "cost": 30600, "total": 121070 },
21: { "cost": 39520, "total": 160590 },
22: { "cost": 50880, "total": 211470 },
23: { "cost": 64800, "total": 276270 },
24: { "cost": 82500, "total": 358770 },
25: { "cost": 105280, "total": 464050 },
26: { "cost": 138650, "total": 602700 },
27: { "cost": 182780, "total": 785480 },
28: { "cost": 240870, "total": 1026350 },
29: { "cost": 315520, "total": 1341870 },
30: { "cost": 417560, "total": 1759430 },
31: { "cost": 546000, "total": 2305430 },
32: { "cost": 718200, "total": 3023630 },
33: { "cost": 946660, "total": 3970290 },
34: { "cost": 1245840, "total": 5216130 },
35: { "cost": 1635200, "total": 6851330 },
36: { "cost": 1915000, "total": 8766330 },
37: { "cost": 2010000, "total": 10776330 },
38: { "cost": 2110000, "total": 12886330 },
39: { "cost": 2215000, "total": 15101330 },
40: { "cost": 2325000, "total": 17426330 },
41: { "cost": 2440000, "total": 19866330 },
42: { "cost": 2560000, "total": 22426330 },
43: { "cost": 2690000, "total": 25116330 },
44: { "cost": 2825000, "total": 27941330 },
45: { "cost": 2965000, "total": 30906330 },
46: { "cost": 3115000, "total": 34021330 },
47: { "cost": 3270000, "total": 37291330 },
48: { "cost": 3435000, "total": 40726330 },
49: { "cost": 3605000, "total": 44331330 },
50: { "cost": 3785000, "total": 48116330 },
51: { "cost": 3975000, "total": 52091330 },
52: { "cost": 4175000, "total": 56266330 },
53: { "cost": 4385000, "total": 60651330 },
54: { "cost": 4605000, "total": 65256330 }
}
artifactKnowledge = {
0: 1,
1: 1.25,
2: 1.5,
3: 1.9,
4: 2.4,
5: 3,
6: 3.75,
7: 4.75,
8: 6,
9: 7.5,
10: 9.5,
11: 12,
12: 15,
13: 18.75,
14: 23.5,
15: 29.5,
16: 37,
17: 46.5,
18: 58,
19: 73,
20: 91,
21: 114,
22: 143,
23: 179,
24: 224,
25: 250
}
apRewards = {
"+2-3": 500,
"+4-6": 800,
"+7-9": 1000,
"10+": 1200,
}
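# Rough model used by the calc command below: a mythic+ chest's base AP
# reward (apRewards) is scaled by the character's artifact knowledge
# multiplier (artifactKnowledge), and run counts are then
# ceil(ap_needed / scaled_reward). The tables above are Legion-era game
# data carried over from the original source as-is.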
set_wow_api_key()
set_wclogs_api_key()
# Logger info
discord_logger = logging.getLogger('discord')
discord_logger.setLevel(logging.CRITICAL)
bot = commands.Bot(command_prefix='!', description ='Progress Bot')
"""
Events Region
"""
@bot.event
async def on_ready():
print("Logged in as {0} with ID {1}".format(bot.user.name, bot.user.id));
@bot.command()
async def exit():
print('Exiting')
# await bot.say('This conversation can serve no purpose anymore. Goodbye.')
await bot.logout()
"""
Commands Region
"""
@bot.command()
async def ap(classes="", realm="connected-boulderfist", region="us"):
print("\n%s***COMMAND***: artifact power command with arguments class=%s realm=%s region=%s"%(get_current_time(),classes, realm, region))
url_class = ""
if classes:
if classes == "death_knight":
classes = classes.replace("_", "")  # str method; the Python 2 string.replace() helper no longer exists
url_class = "class." + classes
url = base_wow_progress + "/artifact_power/{0}/{1}/{2}".format(region, realm, url_class)
page = requests.get(url)
print("URL: {0} Status: {1}".format(url, page.status_code))
try:
soup = BeautifulSoup(page.content, "lxml", from_encoding="UTF")
table = soup.find("table").find_all("td")
values = []
for i in table:
values.append(i.get_text().encode("UTF"))
characters = []
for rank, name, guild, ap in zip(values[0::4], values[1::4], values[2::4], values[3::4]):
characters.append(ArtifactPower(rank.decode("unicode_escape"), name.decode("unicode_escape"), guild.decode("unicode_escape"), ap.decode("unicode_escape")))
headers = ['rank', 'name', 'guild', 'ap']
item_lens = [[getattr(character, x) for x in headers] for character in characters]
max_lens = [len(str(max(i, key=lambda x: len(str(x))))) for i in zip(*[headers] + item_lens)]
message = "```css\nArtifact power rankings for {0}-{1}".format(region, realm)
if classes:
message += " for " + classes + "s"
message += "\n"
for i in characters:
message += '\t'.join('{0:{width}}'.format(x, width=y) for x, y in zip([getattr(i, x) for x in headers], max_lens)) + "\n"
await bot.say("{0}\n```<{1}>".format(message, url))
except Exception as ex:
print(ex)
await bot.say("{0}\n<{1}>".format(str(ex), url))
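# Table-formatting idiom reused throughout this bot: item_lens collects the
# cells of every row, zip(*[headers] + item_lens) transposes them into
# columns (headers included), max_lens takes the widest cell per column, and
# '{0:{width}}' pads each cell so the columns line up inside the ```css
# block Discord renders as monospace.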
@bot.command()
async def character(name="bresp", realm="boulderfist", region="us"):
print("\n%s***COMMAND***: character command with arguments name=%s realm=%s region=%s"%(get_current_time(), name, realm, region))
payload = ""
try:
payload = WowApi.get_character_profile(region, realm, name, locale="en_US", fields="achievements,items,statistics")
except WowApiException as ex:
print(ex)
await bot.say(str(ex))
return
playerName = payload['name']
level = payload['level']
race = race_map[payload['race']]
playerClass = class_array[payload['class']-1]
playerRealm = payload['realm']
battlegroup = payload['battlegroup']
itemLevel = payload['items']['averageItemLevelEquipped']
achievementPoints = payload['achievementPoints']
artifactPoints = payload['achievements']['criteriaQuantity'][payload['achievements']['criteria'].index(30103)]
mainLevel = payload['achievements']['criteriaQuantity'][payload['achievements']['criteria'].index(29395)]
knowledge = payload['achievements']['criteriaQuantity'][payload['achievements']['criteria'].index(31466)]
lastModified = get_time(payload['lastModified'] / 1000)
fifteen = 0
ten = 0
five = 0
two = 0
if 32028 in payload['achievements']['criteria']:
fifteen = payload['achievements']['criteriaQuantity'][payload['achievements']['criteria'].index(32028)]
if 33098 in payload['achievements']['criteria']:
ten += payload['achievements']['criteriaQuantity'][payload['achievements']['criteria'].index(33098)]
if 33097 in payload['achievements']['criteria']:
five += payload['achievements']['criteriaQuantity'][payload['achievements']['criteria'].index(33097)]
if 33096 in payload['achievements']['criteria']:
two += payload['achievements']['criteriaQuantity'][payload['achievements']['criteria'].index(33096)]
mythics = "Mythics: #fifteen: {0} #ten: {1} #five: {2} #two: {3}".format(fifteen, ten, five, two)
EN = []
TOV = []
NH = []
for x in payload['statistics']['subCategories']:
if x['name'] == "Dungeons & Raids":
for y in x['subCategories']:
if y['name'] == "Legion":
populate_raids(y, EN, 7, 33)
populate_raids(y, TOV, 3, 61)
populate_raids(y, NH, 10, 73)
en = get_difficulty(EN, 7)
tov = get_difficulty(TOV, 3)
nh = get_difficulty(NH, 10)
print("Looking for {0} on {1}-{2}".format(name, region, realm))
message = "**{0}** *{1} {2} {3}*\n".format(playerName, level, race, playerClass)
message += "```css\n"
message += "Realm: {0}\n".format(playerRealm)
message += "Battlegroup: {0}\n".format(battlegroup)
message += "Item Level: {0}\n".format(itemLevel)
message += "Achievement Points: {0}\n".format(achievementPoints)
message += "Artifact Power: {0}\n".format(artifactPoints)
message += "Artifact Knowledge: {0}\n".format(knowledge)
message += "Artifact Level: {0}\n".format(mainLevel)
message += "{0}\n".format(mythics)
message += "Raids:\n\tEmerald Nightmare: {0}\n\tTrial of Valor: {1}\n\tNighthold: {2}\n".format(en, tov, nh)
await bot.say("{0}```\nLast Updated: {1}\n<{2}>".format(message, lastModified, base_wow_armory.format(realm, playerName)))
@bot.command()
async def calc(name="bresp", realm="boulderfist", apInLevel=0, region="us"):
print("\n%s***COMMAND***: calc command with arguments name=%s realm=%s apInLevel=%s region=%s"%(get_current_time(), name, realm, apInLevel, region))
payload = ""
try:
payload = WowApi.get_character_profile(region, realm, name, locale="en_US", fields="achievements")
except WowApiException as ex:
print(ex)
await bot.say(str(ex))
return
playerName = payload['name']
mainLevel = payload['achievements']['criteriaQuantity'][payload['achievements']['criteria'].index(29395)]
knowledge = payload['achievements']['criteriaQuantity'][payload['achievements']['criteria'].index(31466)]
multiplier = artifactKnowledge[knowledge]
artifactPoints = (artifactLevelCost[mainLevel]['total']+apInLevel)
apToLevel = 0
apToMax = 0
if mainLevel < 54:
apToLevel = artifactLevelCost[mainLevel+1]['cost'] - apInLevel
apToMax = artifactLevelCost[54]['total'] - artifactPoints
apTo35 = 0
if mainLevel < 35:
apTo35 = artifactLevelCost[35]['total'] - artifactPoints
rows = []
for reward in apRewards:
scaledReward = apRewards[reward] * multiplier
toLevel = ceil(apToLevel / scaledReward)
to35 = ceil(apTo35 / scaledReward)
toMax = ceil(apToMax / scaledReward)
rows.append(Calc(reward, toLevel, to35, toMax))
# print("Looking for {0} on {1}-{2}".format(name, region, realm))
message = "```css\n"
message += "Total AP: {0}\n".format(artifactPoints)
message += "Artifact Level: {0}\n".format(mainLevel)
message += "Artifact Knowledge: {0}\n".format(knowledge)
message += "AP in level: {0}\n".format(apInLevel)
message += "AP to next level: {0}\n".format(apToLevel)
message += "AP to 54: {0}\n\n".format(apToMax)
headers = ['mythic', 'toNextLevel', 'to35', 'toMax']
item_lens = [[getattr(row, x) for x in headers] for row in rows]
max_lens = [len(str(max(i, key=lambda x: len(str(x))))) for i in zip(*[headers] + item_lens)]
message += "\t".join('{0:{width}}'.format(x, width=y) for x, y in zip(headers, max_lens)) + '\n'
for i in rows:
message += '\t'.join('{0:{width}}'.format(x, width=y) for x, y in zip([getattr(i, x) for x in headers], max_lens)) + "\n"
await bot.say("{0}\n```".format(message))
@bot.command()
async def guild(guild="dragon+knight", realm="boulderfist", region="us"):
print("\n%s***COMMAND***: guild command with arguments guild=%s realm=%s region=%s"%(get_current_time(), guild, realm, region))
guild = guild.replace("_", "+")
url = base_wow_progress + "/guild/{0}/{1}/{2}".format(region, realm, guild)
page = requests.get(url)
print("URL: {0} Status: {1}".format(url, page.status_code))
try:
soup = BeautifulSoup(page.content, "lxml", from_encoding="UTF")
progress = soup.find_all("span", class_="innerLink")
if not progress: raise ValueError("No progress found\n<{0}>".format(url))
print("Looking for %s on %s-%s"%(guild, region, realm))
message = "**{0}** *{1}*".format(guild.replace("+", " "), realm)
message += "```css\n"
for b in progress:
message += b.get_text()
await bot.say("{0}\n```<{1}>".format(message, url))
except Exception as ex:
print(str(ex))
await bot.say(str(ex))
@bot.command()
async def legendary(name="bresp", realm="boulderfist", region="us"):
print("\n%s***COMMAND***: legendary command with arguments name=%s realm=%s region=%s"%(get_current_time(),name, realm, region))
payload = ""
try:
payload = WowApi.get_character_profile(region, realm, name, locale="en_US", fields="achievements,progression")
except WowApiException as ex:
print(ex)
await bot.say(str(ex))
return
kp = KillPoints(payload)
killpoints = kp.get_total_points()
legendaries = kp.get_legendary_count(killpoints)
till_next = kp.get_points_till_next(killpoints)
percent_till_next = kp.get_percent_till_next()
message = "**{0}** has **{1}** kill points.\n".format(payload['name'], killpoints)
message += "They should have **{0} legendaries**\n".format(legendaries)
message += "They have **{0} points** until their next legendary\n".format(till_next)
message += "They have completed **{0}%** of the progress towards their next legendary".format(percent_till_next)
await bot.say(message)
@bot.command()
async def mounts(name="bresp", realm="boulderfist", mount="", region="us"):
print("\n%s***COMMAND***: mount command with arguments name=%s mount=%s realm=%s region=%s"%(get_current_time(), name, mount, realm, region))
payload = ""
try:
payload = WowApi.get_character_profile(region, realm, name, locale="en_US", fields="mounts")
except WowApiException as ex:
print(ex)
await bot.say(str(ex))
return
playerName = payload['name']
if not mount:
collected = payload['mounts']['numCollected']
await bot.say("**{0}** has collected **{1} mounts**".format(playerName, collected))
else:
mount = mount.replace("\"", "")  # replace() returns a new string, so the result must be assigned
for m in payload['mounts']['collected']:
if m['name'].lower() == mount.lower() :
await bot.say("**{0}** has collected **{1}**".format(playerName, m['name']))
return
else:
await bot.say("**{0}** has *not* collected **{1}**".format(playerName, mount))
@bot.command()
async def mp(classes="", realm="connected-boulderfist", region="us"):
print("\n%s***COMMAND***: mythic plus command with arguments class=%s realm=%s region=%s"%(get_current_time(),classes, realm, region))
url_class = ""
if classes:
if classes == "death_knight":
classes = classes.replace("_", "")
url_class = "class." + classes
url = base_wow_progress + "/mythic_plus_score/{0}/{1}/{2}".format(region, realm, url_class)
page = requests.get(url)
print("URL: {0} Status: {1}".format(url, page.status_code))
try:
soup = BeautifulSoup(page.content, "lxml", from_encoding="UTF")
table = soup.find("table").find_all("td")
values = []
for i in table:
values.append(i.get_text().encode("UTF"))
characters = []
for rank, name, guild, score in zip(values[0::4], values[1::4], values[2::4], values[3::4]):
characters.append(MythicPlus(rank.decode("unicode_escape"), name.decode("unicode_escape"), guild.decode("unicode_escape"), score.decode("unicode_escape")))
headers = ['rank', 'name', 'guild', 'score']
item_lens = [[getattr(character, x) for x in headers] for character in characters]
max_lens = [len(str(max(i, key=lambda x: len(str(x))))) for i in zip(*[headers] + item_lens)]
message = "```css\nMythic plus rankings for {0}-{1}".format(region, realm)
if classes:
message += " for " + classes + "s"
message += "\n"
for i in characters:
message += "\t".join('{0:{width}}'.format(x, width=y) for x, y in zip([getattr(i, x) for x in headers], max_lens)) + "\n"
await bot.say("{0}\n```<{1}>".format(message, url))
except Exception as ex:
print(ex)
await bot.say("{0}\n<{1}>".format(str(ex), url))
@bot.command()
async def pvp(name="", realm="boulderfist", region="us"):
print("\n%s***COMMAND***: pvp command with arguments name=%s realm=%s region=%s"%(get_current_time(), name, realm, region))
payload = ""
try:
payload = WowApi.get_character_profile(region, realm, name, locale="en_US", fields="pvp")
except WowApiException as ex:
print(ex)
await bot.say(str(ex))
return
playerName = payload['name']
level = payload['level']
race = race_map[payload['race']]
playerClass = class_array[payload['class']-1]
lastModified = get_time(payload['lastModified'] / 1000)
playerRealm = payload['realm']
battlegroup = payload['battlegroup']
honorableKills = payload['totalHonorableKills']
rbgRating = payload['pvp']['brackets']['ARENA_BRACKET_RBG']['rating']
twosRating = payload['pvp']['brackets']['ARENA_BRACKET_2v2']['rating']
threesRating = payload['pvp']['brackets']['ARENA_BRACKET_3v3']['rating']
message = "**{0}** *{1} {2} {3}*\n".format(playerName, level, race, playerClass)
message += "```css\n"
message += "Realm: {0}\n".format(playerRealm)
message += "Battlegroup: {0}\n".format(battlegroup)
message += "Honorable Kills: {0}\n".format(honorableKills)
message += "Rated BG Rating: {0}\n".format(rbgRating)
message += "Twos Rating: {0}\n".format(twosRating)
message += "Threes Rating: {0}\n".format(threesRating)
await bot.say("{0}```\nLast Updated: {1}\n<{2}>".format(message, lastModified, base_wow_armory.format(realm, playerName)))
@bot.command()
async def rank(name="", spec="", role="dps", realm="boulderfist", region="us"):
print("\n%s***COMMAND***: rank command with arguments name=%s spec=%s role=%s realm=%s region=%s"%(get_current_time(), name, spec, role, realm, region))
if not spec:
await bot.say("Please provide a spec to check ranks for")
return
if role not in [ 'dps', 'hps', 'krsi' ]:
await bot.say("Please provide a valid role. Your options are hps, dps, or krsi")
return
stats = {
5: { 'kills': 0, 'best': 0, 'average': 0, 'allstar_points': 0, 'size': 0},
4: { 'kills': 0, 'best': 0, 'average': 0, 'allstar_points': 0, 'size': 0},
3: { 'kills': 0, 'best': 0, 'average': 0, 'allstar_points': 0, 'size': 0}
}
character_id = ""
url = base_wc_logs + "/parses/character/{0}/{1}/{2}".format(name, realm, region)
page = requests.get(url, { 'metric': role, 'api_key': os.environ['WCLOG_APIKEY'] })
print("URL: {0} Status: {1}".format(url, page.status_code))
if page.status_code != 200:
await bot.say("No rankings found\n<{0}>".format(url))
return
else:
payload = page.json()
for i in payload:
difficulty = i['difficulty']
stats[difficulty]['size'] += 1
for j in range(0, len(i['specs'])):
character_id = i['specs'][j]['data'][0]['character_id']
if i['specs'][j]['spec'].lower() == spec.lower():
stats[difficulty]['kills'] += len(i['specs'][j]['data'])
historical_percent = i['specs'][j]['best_historical_percent']
if historical_percent > stats[difficulty]['best']:
stats[difficulty]['best'] = historical_percent
stats[difficulty]['average'] += historical_percent
stats[difficulty]['allstar_points'] += i['specs'][j]['best_allstar_points']
items = []
for key in stats:
difficulty = ""
if key == 5: difficulty = "Mythic"
elif key == 4: difficulty = "Heroic"
elif key == 3: difficulty = "Normal"
kills = stats[key]['kills']
best = stats[key]['best']
average = stats[key]['average']
size = stats[key]['size']
if size != 0:
average = round(average / size)
allstar_points = round(stats[key]['allstar_points'])
items.append(Rankings(difficulty, kills, best, average, allstar_points))
headers = ['difficulty', 'kills', 'best', 'average', 'allstar_points']
item_lens = [[getattr(item, x) for x in headers] for item in items]
max_lens = [len(str(max(i, key=lambda x: len(str(x))))) for i in zip(*[headers] + item_lens)]
message = "```css\nLatest rankings for {0} (spec={1} role={2}) on {3}-{4}\n".format(name, spec, role, region, realm)
message += "\t".join('{0:{width}}'.format(x, width=y) for x, y in zip(headers, max_lens)) + '\n'
for i in items:
message += "\t".join('{0:{width}}'.format(x, width=y) for x, y in zip([getattr(i, x) for x in headers], max_lens)) + "\n"
url = "https://www.warcraftlogs.com/rankings/character/{0}/latest".format(character_id)
await bot.say("{0}\n```<{1}>".format(message, url))
@bot.command()
async def realm(realm="connected-boulderfist", region="us"):
print("\n%s***COMMAND***: realm command with arguments realm=%s region=%s"%(get_current_time(), realm, region))
url = base_wow_progress + "/pve/{0}/{1}".format(region, realm)
page = requests.get(url)
print("URL: {0} Status: {1}".format(url, page.status_code))
try:
soup = BeautifulSoup(page.content, "lxml", from_encoding="UTF")
guilds = soup.find_all("a", class_="guild")
ranks = soup.find_all("span", class_="rank")
progress = soup.find_all("span", class_="ratingProgress")
items = []
for i in range(0, len(guilds)):
items.append(GuildProgress(i+1, guilds[i].get_text(), ranks[i].get_text(), progress[i].get_text()))
headers = ['rank', 'name', 'world', 'progress']
item_lens = [[getattr(guild, x) for x in headers] for guild in items]
max_lens = [len(str(max(i, key=lambda x: len(str(x))))) for i in zip(*[headers] + item_lens)]
message = "```css\nGuild progress rankings for {0}-{1}\n".format(region, realm)
for i in items:
message += '\t'.join('{0:{width}}'.format(x, width=y) for x, y in zip([getattr(i, x) for x in headers], max_lens)) + "\n"
await bot.say("{0}\n```<{1}>".format(message, url))
except Exception as ex:
print(ex)
await bot.say("{0}\n<{1}>".format(str(ex), url))
@bot.command()
async def whoisyourmaster():
await bot.reply("you are")
bot.run('MjczNTgyNTAwNTk1MzY3OTM2.C2lq3A.imEczu1BMAqrOYJfZEBTPJavOvc')
| 37.343486
| 161
| 0.631899
| 2,964
| 22,070
| 4.640014
| 0.180162
| 0.004363
| 0.019996
| 0.012216
| 0.50629
| 0.483458
| 0.44354
| 0.405657
| 0.375191
| 0.364939
| 0
| 0.064321
| 0.168056
| 22,070
| 590
| 162
| 37.40678
| 0.684712
| 0.006751
| 0
| 0.340381
| 0
| 0.038055
| 0.249634
| 0.014085
| 0.002114
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.02537
| 0
| 0.044397
| 0.059197
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47d9292775bb73955a326acc7b317a3683aeeec2
| 10,974
|
py
|
Python
|
python_poc/adapters/fingrid_api_adapter.py
|
pervcomp/Procem
|
6cefbf6c81b51af948feb9510d39820f8e6f113e
|
[
"MIT"
] | 1
|
2019-01-09T14:38:44.000Z
|
2019-01-09T14:38:44.000Z
|
python_poc/adapters/fingrid_api_adapter.py
|
pervcomp/Procem
|
6cefbf6c81b51af948feb9510d39820f8e6f113e
|
[
"MIT"
] | 4
|
2021-03-09T00:03:21.000Z
|
2022-02-12T05:33:21.000Z
|
python_poc/adapters/fingrid_api_adapter.py
|
pervcomp/Procem
|
6cefbf6c81b51af948feb9510d39820f8e6f113e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Module for reading and parsing values from Fingrid APIs."""
# Copyright (c) TUT Tampere University of Technology 2015-2018.
# This software has been developed in Procem-project funded by Business Finland.
# This code is licensed under the MIT license.
# See the LICENSE.txt in the project root for the license terms.
#
# Main author(s): Ville Heikkila, Otto Hylli, Pekka Itavuo,
# Teemu Laukkarinen ja Ulla-Talvikki Virta
import copy
import csv
import datetime
import json
import requests
import time
try:
import adapters.common_utils as common_utils
import adapters.rest_utils as rest_utils
except ImportError:
# used when running the module directly
import common_utils
import rest_utils
class FingridCollection:
"""Class for holding a collection of Fingrid reader/handler objects."""
def __init__(self, params, data_queue):
# always wait at least this long before making a new query
self.__min_waiting_time = params.get("min_waiting_time_s", 10)
self.__fingrids = [] # the Fingrid objects
self.__times = [] # the calculated waiting times for each Fingrid objects until next read should be done
self.__last_check = time.time() # the time in which the last API check was done
self.__data_queue = data_queue # the queue which is used to send the received data to Procem RTL handler
self.createFingrids(params)
def createFingrids(self, params):
"""Create the Fingrid objects for the collection according to the given parameters."""
csv_filename = params.get("csv_filename", "")
config = params.get("config", {})
csv_header = config.get("csv_header", {})
rtl_id_field = csv_header.get("rtl_id", "rtl_id")
variable_id_field = csv_header.get("variable_id", "variable_id")
datatype_field = csv_header.get("datatype", "datatype")
unit_field = csv_header.get("unit", "unit")
query_interval_field = csv_header.get("query_interval", "query_interval")
query_interval_min_field = csv_header.get("query_interval_min", "query_interval_min")
store_interval_field = csv_header.get("store_interval", "store_interval")
is_prediction_field = csv_header.get("is_prediction", "is_prediction")
prediction_length_field = csv_header.get("prediction_length", "prediction_length")
name_field = csv_header.get("name", "name")
path_field = csv_header.get("path", "path")
confidential_field = csv_header.get("confidential", "confidential")
try:
with open(csv_filename, mode="r") as csv_file:
reader = csv.DictReader(csv_file, delimiter=";")
for row in reader:
new_params = copy.deepcopy(params)
new_params["rtl_id"] = int(row.get(rtl_id_field, 0))
new_params["id"] = int(row.get(variable_id_field, 0))
new_params["datatype"] = row.get(datatype_field, "float")
new_params["unit"] = row.get(unit_field, "")
new_params["time_interval_s"] = int(row.get(query_interval_field, 3600))
new_params["time_interval_min_s"] = int(row.get(query_interval_min_field, 60))
new_params["iot_ticket_name"] = row.get(name_field, "")
new_params["iot_ticket_path"] = row.get(path_field, "/Fingrid")
new_params["confidential"] = row.get(confidential_field, "") != ""
store_interval = row.get(store_interval_field, "")
if store_interval != "":
new_params["store_interval"] = int(store_interval)
is_prediction = row.get(is_prediction_field, "") != ""
if is_prediction:
new_params["is_prediction"] = is_prediction
new_params["prediction_length_s"] = int(row.get(prediction_length_field, 0))
self.__fingrids.append(Fingrid(new_params, self.__data_queue))
self.__times.append(None)
except Exception:
pass  # a missing or malformed CSV simply leaves the collection empty
def getData(self):
"""Tries to get new data from the Fingrid APIs. If new data is found, it is send to the Procem RTL handler and
the function returns True. Otherwise, the function returns False."""
time_diff = time.time() - self.__last_check
success = []
for index, (fingrid, waiting_time) in enumerate(zip(self.__fingrids, self.__times)):
if waiting_time is None:
self.__times[index] = fingrid.getWaitingTime() + time_diff
continue
elif waiting_time <= time_diff:
success.append(fingrid.getData())
self.__times[index] = None
if success.count(True) > 0:
# put empty item to the queue as a mark that the buffer should be emptied
self.__data_queue.put(bytes())
return True
else:
return False
def getWaitingTime(self):
"""Returns the time in seconds that should be waited before making the next data query."""
current_time = time.time()
time_diff = current_time - self.__last_check
for index, (fingrid, waiting_time) in enumerate(zip(self.__fingrids, self.__times)):
if waiting_time is None:
self.__times[index] = fingrid.getWaitingTime()
else:
self.__times[index] = max(waiting_time - time_diff, 0.0)
min_waiting_time = min(self.__times)
self.__last_check = current_time
return max(min_waiting_time, self.__min_waiting_time)
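# Scheduling scheme: the collection keeps one countdown per reader.
# getData() fires only the readers whose countdown has elapsed (and resets
# their slot), getWaitingTime() decays the remaining countdowns by the
# elapsed wall time, and min_waiting_time_s puts a floor under the sleep so
# the APIs are never polled more often than that.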
class Fingrid:
"""Class for holding a single Fingrid API reader/handler."""
def __init__(self, params, data_queue):
self.__config = params.get("config", {})
self.__variable_id = int(params.get("id", 0))
self.__rtl_id = int(params.get("rtl_id", 0))
self.__unit = params.get("unit", "")
self.__datatype = params.get("datatype", "float")
self.__path = params.get("iot_ticket_path", "/Fingrid")
self.__name = params.get("iot_ticket_name", "")
self.__confidential = params.get("confidential", False)
self.__last_update = None # the timestamp for the latest query time
self.__last_value_dt = None # the datetime for the latest received value
self.__time_interval = params.get("time_interval_s", 3600)
self.__time_interval_min = params.get("time_interval_min_s", 60)
self.__store_interval = params.get("store_interval", 0)
self.__is_prediction = params.get("is_prediction", False)
self.__prediction_length = params.get("prediction_length_s", 0)
self.__data_queue = data_queue
def getStartTime(self):
"""Calculates and returns the start time as a timestamp for the next API query."""
if self.__store_interval > 0:
if self.__last_value_dt is not None and not self.__is_prediction:
return (self.__last_value_dt + datetime.timedelta(seconds=self.__store_interval)).timestamp()
dt_now = datetime.datetime.now().replace(microsecond=0)
if self.__last_update is None:
dt_now -= datetime.timedelta(seconds=self.__time_interval)
day_start = dt_now.replace(hour=0, minute=0, second=0)
seconds_back = int((dt_now - day_start).total_seconds()) % self.__store_interval
dt_start = dt_now - datetime.timedelta(seconds=seconds_back)
return dt_start.timestamp()
else:
if self.__last_update is None:
return time.time() - self.__time_interval
elif self.__is_prediction:
return time.time()
else:
return self.__last_update + 1
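# Example of the store_interval alignment above: with store_interval=900
# (15 min), a reference time of 12:37:40 is 45460 s past midnight, and
# 45460 % 900 = 460, so the start time is snapped back 460 s to 12:30:00.
# Stored samples therefore stay on a regular 15-minute grid regardless of
# when the adapter happens to run.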
def getData(self):
"""Tries to get new data from the Fingrid API. If new data is found, it is send to the Procem RTL handler and
the function returns True. Otherwise, the function returns False."""
try:
starttime = self.getStartTime()
if self.__is_prediction:
endtime = time.time() + self.__prediction_length
else:
endtime = time.time()
# get the response from the API
kwargs = {
"config": self.__config,
"variable_id": self.__variable_id,
"start_time": starttime,
"end_time": endtime
}
req = rest_utils.runAPIQuery(**kwargs)
if req.status_code != rest_utils.STATUS_OK:
print(common_utils.getTimeString(), "Fingrid, received status code:", req.status_code,
"for variable", self.__variable_id)
return False
result_datetime_format = self.__config["result_datetime_format"]
data = json.loads(req.text)
values = []
first_dt = None
if self.__is_prediction:
self.__last_value_dt = None
for item in data:
v = item["value"]
time_str = item["start_time"]
dt = datetime.datetime.strptime(time_str, result_datetime_format)
if (self.__last_value_dt is not None and
(dt - self.__last_value_dt).total_seconds() < self.__store_interval):
continue
else:
self.__last_value_dt = dt
if first_dt is None:
first_dt = dt
ts = int(dt.timestamp() * 1000)
values.append({"v": v, "ts": ts})
if len(values) == 0:
return False
self.sendDataToProcem(values)
self.__last_update = time.time()
return True
except Exception as error:
print(common_utils.getTimeString(), "Fingrid,", error)
return False
def getWaitingTime(self):
"""Returns the time in seconds that should be waited before making the next data query."""
if self.__last_update is None:
return self.__time_interval_min / 2
else:
return max(
self.__time_interval_min,
self.__time_interval - (time.time() - self.__last_update))
def sendDataToProcem(self, values):
"""Sends the data to Procem RTL handler."""
rtl_id = self.__rtl_id
unit = self.__unit
datatype = self.__datatype
name = self.__name
path = self.__path
confidential = self.__confidential
for item in values:
v = item["v"]
ts = item["ts"]
pkt_str = common_utils.getProcemRTLpkt(name, path, v, ts, unit, datatype, rtl_id, confidential)
packet = bytes(pkt_str, "utf-8")
self.__data_queue.put(packet)
| 43.896
| 118
| 0.609714
| 1,323
| 10,974
| 4.745276
| 0.185941
| 0.022937
| 0.02676
| 0.032494
| 0.229213
| 0.162472
| 0.133801
| 0.124881
| 0.124881
| 0.115642
| 0
| 0.005949
| 0.295334
| 10,974
| 249
| 119
| 44.072289
| 0.805897
| 0.162202
| 0
| 0.207447
| 0
| 0
| 0.082052
| 0.002417
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047872
| false
| 0.005319
| 0.053191
| 0
| 0.18617
| 0.010638
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47e1291a9d383474886f3b6cb416cfcb840ff9bb
| 1,514
|
py
|
Python
|
containers/ice_block.py
|
craigtmoore/freezer_escape_room
|
813144641c079db9ab73c873e354ffc57200a3dd
|
[
"MIT"
] | null | null | null |
containers/ice_block.py
|
craigtmoore/freezer_escape_room
|
813144641c079db9ab73c873e354ffc57200a3dd
|
[
"MIT"
] | null | null | null |
containers/ice_block.py
|
craigtmoore/freezer_escape_room
|
813144641c079db9ab73c873e354ffc57200a3dd
|
[
"MIT"
] | null | null | null |
from typing import Set, List
from inspectable import Inspectable
from interactable import Interactable
from items import Batteries, Hammer
from usable import Usable
class IceBlock(Inspectable, Interactable):
def __init__(self):
super().__init__()
self.name = 'ice block'
self.description = 'A large block of ice'
self.is_broken = False
def inspect(self) -> Set:
if self.is_broken:
print('Pieces of ice litter the floor where you shattered it earlier.')
else:
print('You look closely at the ice and see a pair of batteries are frozen inside.')
print('You attempt to smash the ice on the floor to get them out, but it is too ')
print('solid to break that way, you\'ll need something to smash it')
return set()
def interact(self, usable: Usable) -> List:
found_items = []
if self.is_broken:
print(f'You attempt to use the {usable.name} on pieces of ice and start to wonder if you\'re going crazy')
elif isinstance(usable, Hammer):
print('You smash the ice with the hammer until it shatters and the batteries fall to the floor.')
print('You collect the batteries and put them in your pocket.')
found_items.append(Batteries())
else:
print(f'You attempt to use the {usable.name} on the block of ice, but it does not look like it will work '
f'so you stop.')
return found_items
| 36.926829
| 118
| 0.640687
| 218
| 1,514
| 4.385321
| 0.431193
| 0.020921
| 0.037657
| 0.029289
| 0.109833
| 0.075314
| 0.075314
| 0.075314
| 0.075314
| 0.075314
| 0
| 0
| 0.287979
| 1,514
| 40
| 119
| 37.85
| 0.886827
| 0
| 0
| 0.129032
| 0
| 0.032258
| 0.395641
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096774
| false
| 0
| 0.16129
| 0
| 0.354839
| 0.258065
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47e169f6fbed0c98822c2408dc1e36d39f35b41d
| 463
|
py
|
Python
|
scripts/make_json_dataset.py
|
sethah/deeptennis
|
a689c5f1d6f5ff1d665aec99b8db6262d3442c3a
|
[
"MIT"
] | 27
|
2018-11-23T21:37:14.000Z
|
2021-11-22T08:44:35.000Z
|
scripts/make_json_dataset.py
|
sethah/deeptennis
|
a689c5f1d6f5ff1d665aec99b8db6262d3442c3a
|
[
"MIT"
] | 6
|
2019-07-09T16:26:56.000Z
|
2021-05-17T17:29:42.000Z
|
scripts/make_json_dataset.py
|
sethah/deeptennis
|
a689c5f1d6f5ff1d665aec99b8db6262d3442c3a
|
[
"MIT"
] | 4
|
2019-06-11T06:44:30.000Z
|
2021-02-27T14:49:02.000Z
|
import argparse
from pathlib import Path
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--frames-path", type=str)
parser.add_argument("--output-path", type=str)
args = parser.parse_args()
frame_paths = sorted(Path(args.frames_path).iterdir())
with open(args.output_path, "w") as f:
for p in frame_paths:
f.write('{"image_path": "%s"}\n' % str(p))
| 33.071429
| 63
| 0.652268
| 66
| 463
| 4.333333
| 0.515152
| 0.104895
| 0.118881
| 0.125874
| 0.174825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185745
| 463
| 13
| 64
| 35.615385
| 0.758621
| 0
| 0
| 0
| 0
| 0
| 0.12311
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.181818
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47e20a2e721763c69d92b54d367291736f3e69c7
| 26,979
|
py
|
Python
|
app/document/routes.py
|
DCGM/pero_ocr_web
|
e901027712827278f9ace914f6ccba16d3ac280f
|
[
"BSD-2-Clause"
] | 2
|
2020-05-07T13:58:31.000Z
|
2021-01-27T09:33:07.000Z
|
app/document/routes.py
|
DCGM/pero_ocr_web
|
e901027712827278f9ace914f6ccba16d3ac280f
|
[
"BSD-2-Clause"
] | 47
|
2019-09-17T19:20:07.000Z
|
2022-03-20T12:33:28.000Z
|
app/document/routes.py
|
DCGM/pero_ocr_web
|
e901027712827278f9ace914f6ccba16d3ac280f
|
[
"BSD-2-Clause"
] | 1
|
2019-10-02T10:42:35.000Z
|
2019-10-02T10:42:35.000Z
|
import _thread
import sqlalchemy
from app.document import bp
from flask_login import login_required, current_user
from flask import render_template, redirect, url_for, request, send_file, flash, jsonify
from flask import current_app
from app.document.general import create_document, check_and_remove_document, save_image, \
get_collaborators_select_data, save_collaborators, is_document_owner, is_user_owner_or_collaborator,\
remove_image, get_document_images, get_page_layout, get_page_layout_text, update_confidences, is_user_trusted,\
is_granted_acces_for_page, is_granted_acces_for_document, get_line_image_by_id, get_sucpect_lines_ids, \
compute_scores_of_doc, skip_textline, get_line, is_granted_acces_for_line, create_string_response, \
update_baselines, make_image_preview, find_textlines, get_documents_with_granted_acces, \
check_and_change_public_document, is_document_public
from werkzeug.exceptions import NotFound
from app.db.general import get_requests
from app.db.general import get_user_documents, get_document_by_id, get_user_by_email, get_all_documents,\
get_previews_for_documents, get_image_by_id, get_public_documents
from app.document.forms import CreateDocumentForm
from app.document.annotation_statistics import get_document_annotation_statistics, get_user_annotation_statistics, get_document_annotation_statistics_by_day
from io import BytesIO
import dateutil.parser
import zipfile
import time
import os
import json
import re
from natsort import natsorted
@bp.route('/documents')
@login_required
def documents():
if is_user_trusted(current_user):
user_documents = get_all_documents()
else:
user_documents = get_user_documents(current_user)
user_documents = sorted(user_documents, key=lambda x: x.created_date)[::-1]
document_ids = [d.id for d in user_documents]
previews = dict([(im.document_id, im) for im in get_previews_for_documents(document_ids)])
for d in user_documents:
if d.id not in previews:
previews[d.id] = ""
return render_template('document/documents.html', documents=user_documents, previews=previews)
@bp.route('/public_documents')
def public_documents():
db_documents = get_public_documents()
document_ids = [d.id for d in db_documents]
previews = dict([(im.document_id, im) for im in get_previews_for_documents(document_ids)])
for d in db_documents:
if d.id not in previews:
previews[d.id] = ""
return render_template('document/public_documents.html', documents=db_documents, previews=previews)
@bp.route('/annotation_statistics/<string:document_id>')
@login_required
def annotation_statistics(document_id):
if not (is_user_owner_or_collaborator(document_id, current_user) or is_user_trusted(current_user)):
flash(u'You do not have sufficient rights to view statistics for this document!', 'danger')
return redirect(url_for('main.index'))
document = get_document_by_id(document_id)
statistics = get_document_annotation_statistics(document)
return render_template('document/annotation_statistics.html', statistics=statistics, header_name=document.name)
@bp.route('/annotation_statistics')
@login_required
def annotation_statistics_global():
if not is_user_trusted(current_user):
flash(u'You do not have sufficient rights to view global statistics!', 'danger')
return redirect(url_for('main.index'))
statistics = get_document_annotation_statistics()
return render_template('document/annotation_statistics.html', statistics=statistics, header_name='All documents')
@bp.route('/user_annotation_statistics/<string:user_email>')
@login_required
def user_annotation_statistics(user_email):
if not is_user_trusted(current_user):
flash(u'You do not have sufficient rights to view statistics for other users!', 'danger')
return redirect(url_for('main.index'))
user = get_user_by_email(user_email)
statistics = get_user_annotation_statistics(user)
return render_template('document/annotation_statistics.html',
statistics=statistics, header_name=f'{user.first_name} {user.last_name}')
@bp.route('/user_annotation_statistics')
@login_required
def user_annotation_statistics_current_user():
statistics = get_user_annotation_statistics(current_user)
return render_template('document/annotation_statistics.html',
statistics=statistics, header_name=f'{current_user.first_name} {current_user.last_name}')
@bp.route('/user_annotation_statistics_global')
@login_required
def user_annotation_statistics_global():
if not is_user_trusted(current_user):
flash(u'You do not have sufficient rights to view statistics for other users!', 'danger')
return redirect(url_for('main.index'))
statistics = get_user_annotation_statistics()
return render_template('document/annotation_statistics.html', statistics=statistics, header_name='All users')
@bp.route('/requests')
@login_required
def requests():
if is_user_trusted(current_user):
user_documents = get_all_documents()
else:
user_documents = get_user_documents(current_user)
document_ids = [d.id for d in user_documents]
requests = get_requests(document_ids)
return render_template('requests/request_list.html', requests=requests)
@bp.route('/document_history/<string:document_id>')
@login_required
def document_history(document_id):
if not (is_user_owner_or_collaborator(document_id, current_user) or is_user_trusted(current_user)):
flash(u'You do not have sufficient rights to view statistics for this document!', 'danger')
return redirect(url_for('main.index'))
db_requests = get_requests(document_ids=[document_id])
db_document = get_document_by_id(document_id)
ann_stats = get_document_annotation_statistics_by_day(db_document.id)
import altair as alt
data = [{'x': str(date), 'y': count, 'u': f'{user1} {user2}'} for date, user1, user2, count in ann_stats]
data = alt.Data(values=data)
chart = alt.Chart(data).mark_bar().encode(
x='x:T', # temporal (date) axis
y='sum(y):Q', # quantitative data, summed per day
color='u:N'
).properties(width='container', height=300)
return render_template('document/document_history.html',
requests=db_requests, document=db_document, graph_json=chart.to_json(indent=0))
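# The Altair chart above is serialized to Vega-Lite JSON and handed to the
# template, which presumably embeds it client-side; summing y per day with a
# color split per user pair yields a stacked bar of annotations per day.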
@bp.route('/new_document', methods=['GET', 'POST'])
@login_required
def new_document():
form = CreateDocumentForm()
if form.validate_on_submit():
document = create_document(form.document_name.data, current_user)
flash(u'Document successfully created!', 'success')
return redirect(url_for('document.upload_images_to_document', document_id=document.id))
else:
return render_template('document/new_document.html', form=form)
@bp.route('/make_public/<string:document_id>')
@login_required
def make_public(document_id):
document_name = check_and_change_public_document(document_id, current_user, True)
if document_name:
flash(f'Document "{document_name}" is now public!', 'success')
return document_id
else:
flash(u'You do not have sufficient rights to make this document public!', 'danger')
return None
@bp.route('/make_private/<string:document_id>')
@login_required
def make_private(document_id):
document_name = check_and_change_public_document(document_id, current_user, False)
if document_name:
flash(f'Document "{document_name}" is now private!', 'success')
return document_id
else:
flash(u'You do not have sufficient rights to make this document private!', 'danger')
return None
@bp.route('/delete_document/<string:document_id>')
@login_required
def delete_document(document_id):
if check_and_remove_document(document_id, current_user):
flash(u'Document successfully deleted!', 'success')
return document_id
else:
flash(u'You do not have sufficient rights to remove this document!', 'danger')
return None
@bp.route('/upload_images_to_document/<string:document_id>', methods=['GET'])
@login_required
def upload_images_to_document(document_id):
if not is_user_owner_or_collaborator(document_id, current_user):
flash(u'You do not have sufficient rights to upload images!', 'danger')
return redirect(url_for('main.index'))
document = get_document_by_id(document_id)
images = get_document_images(document)
return render_template('document/upload_images_to_document.html', document=document, images=images)
@bp.route('/upload_image_to_document/<string:document_id>', methods=['POST'])
@login_required
def upload_image_to_document(document_id):
if not is_user_owner_or_collaborator(document_id, current_user):
flash(u'You do not have sufficient rights to upload images!', 'danger')
return '', 404
if request.method == 'POST':
f = request.files.get('file')
status = save_image(f, document_id)
if status == '':
return '', 200
return status, 409
def image_preview(image_id=None, public_access=False):
if image_id is None:
return send_file('static/img/missing_page.png', cache_timeout=10000000)
try:
db_image = get_image_by_id(image_id)
except (sqlalchemy.exc.StatementError, sqlalchemy.orm.exc.NoResultFound):
return "Image does not exist.", 404
document_id = db_image.document_id
if public_access:
db_document = get_document_by_id(db_image.document_id)
if not db_document.is_public:
return send_file('static/img/missing_page.png', cache_timeout=10000000)
else:
if not is_granted_acces_for_document(document_id, current_user):
return send_file('static/img/missing_page.png', cache_timeout=10000000)
image_preview_path = os.path.join(current_app.config['PREVIEW_IMAGES_FOLDER'], str(document_id), str(image_id) + '.jpg')
if not os.path.isfile(image_preview_path):
make_image_preview(db_image)
return send_file(image_preview_path, cache_timeout=0)
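# Previews are generated lazily: the JPEG is rendered on first request via
# make_image_preview() and then served straight from disk, with
# cache_timeout=0 so a regenerated preview is not masked by browser caching.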
@bp.route('/get_image_preview/<string:image_id>')
@bp.route('/get_image_preview/')
@login_required
def get_image_preview(image_id=None):
return image_preview(image_id=image_id, public_access=False)
@bp.route('/get_public_image_preview/<string:image_id>')
@bp.route('/get_public_image_preview/')
def get_public_image_preview(image_id=None):
return image_preview(image_id=image_id, public_access=True)
@bp.route('/get_document_image_ids/<string:document_id>')
@login_required
def get_document_image_ids(document_id):
if not is_granted_acces_for_document(document_id, current_user):
flash(u'You do not have sufficient rights to document!', 'danger')
return redirect(url_for('main.index'))
document = get_document_by_id(document_id)
images = natsorted(get_document_images(document).all(), key=lambda x: x.filename)
return jsonify([str(x.id) for x in images])
@bp.route('/get_page_xml_regions/<string:image_id>')
@login_required
def get_page_xml_regions(image_id):
try:
db_image = get_image_by_id(image_id)
except sqlalchemy.exc.StatementError:
return "Image does not exist.", 404
if not is_granted_acces_for_page(image_id, current_user):
flash(u'You do not have sufficient rights to download regions!', 'danger')
return redirect(url_for('main.index'))
page_layout = get_page_layout(db_image, only_regions=True)
filename = "{}.xml".format(os.path.splitext(page_layout.id)[0])
return create_string_response(filename, page_layout.to_pagexml_string(), minetype='text/xml')
@bp.route('/get_page_xml_lines/<string:image_id>')
@login_required
def get_page_xml_lines(image_id):
try:
db_image = get_image_by_id(image_id)
except sqlalchemy.exc.StatementError:
return "Image does not exist.", 404
if not is_granted_acces_for_page(image_id, current_user):
flash(u'You do not have sufficient rights to download xml!', 'danger')
return redirect(url_for('main.index'))
page_layout = get_page_layout(db_image, only_regions=False)
filename = "{}.xml".format(os.path.splitext(page_layout.id)[0])
return create_string_response(filename, page_layout.to_pagexml_string(), minetype='text/xml')
@bp.route('/get_annotated_page_xml_lines/<string:image_id>')
@bp.route('/get_annotated_page_xml_lines/<string:image_id>/<string:from_time>/')
@login_required
def get_annotated_page_xml_lines(image_id, from_time=None):
try:
db_image = get_image_by_id(image_id)
except sqlalchemy.exc.StatementError:
return "Image does not exist.", 404
if not is_granted_acces_for_page(image_id, current_user):
flash(u'You do not have sufficient rights to download xml!', 'danger')
return redirect(url_for('main.index'))
    if from_time:
        try:
            from_time = dateutil.parser.parse(from_time)
        except (ValueError, OverflowError):
            return 'ERROR: Could not parse from_time argument.', 400
page_layout = get_page_layout(db_image, only_regions=False, only_annotated=True, from_time=from_time,
active_ignoring=True)
filename = "{}.xml".format(os.path.splitext(page_layout.id)[0])
return create_string_response(filename, page_layout.to_pagexml_string(), minetype='text/xml')
@bp.route('/get_alto_xml/<string:image_id>')
@login_required
def get_alto_xml(image_id):
try:
db_image = get_image_by_id(image_id)
except sqlalchemy.exc.StatementError:
return "Image does not exist.", 404
if not is_granted_acces_for_page(image_id, current_user):
flash(u'You do not have sufficient rights to download alto!', 'danger')
return redirect(url_for('main.index'))
page_layout = get_page_layout(db_image, only_regions=False, only_annotated=False, alto=True)
filename = "{}.xml".format(os.path.splitext(page_layout.id)[0])
return create_string_response(filename, page_layout.to_altoxml_string(page_uuid=image_id), minetype='text/xml')
@bp.route('/get_text/<string:image_id>')
@login_required
def get_text(image_id):
try:
db_image = get_image_by_id(image_id)
except sqlalchemy.exc.StatementError:
return "Image does not exist.", 404
if not is_granted_acces_for_page(image_id, current_user):
flash(u'You do not have sufficient rights to download text!', 'danger')
return redirect(url_for('main.index'))
page_layout = get_page_layout(db_image, only_regions=False, only_annotated=False)
file_name = "{}.txt".format(os.path.splitext(page_layout.id)[0])
return create_string_response(file_name, get_page_layout_text(page_layout), minetype='text/plain')
def get_image_common(image_id, public=False):
try:
image_db = get_image_by_id(image_id)
except sqlalchemy.exc.StatementError:
return "Image does not exist.", 404
if public:
if not image_db.document.is_public:
return "Image is not public.", 403
elif not is_granted_acces_for_page(image_id, current_user):
return "You do not have access to the requested images.", 403
image_path = os.path.join(current_app.config['UPLOADED_IMAGES_FOLDER'], image_db.path)
if not os.path.isfile(image_path):
print("ERROR: Could not find image on disk. image id: {}, image path: {}.".format(image_id, image_path))
raise NotFound()
return send_file(image_path, as_attachment=True, attachment_filename=image_db.filename, cache_timeout=10000000)
@bp.route('/get_image/<string:image_id>')
@login_required
def get_image(image_id):
return get_image_common(image_id, False)
@bp.route('/get_public_image/<string:image_id>')
def get_public_image(image_id):
return get_image_common(image_id, True)
@bp.route('/download_document_pages/<string:document_id>')
@login_required
def get_document_pages(document_id):
if not is_granted_acces_for_document(document_id, current_user):
flash(u'You do not have sufficient rights to this document!', 'danger')
return redirect(url_for('main.index'))
memory_file = BytesIO()
with zipfile.ZipFile(memory_file, 'w') as zf:
document = get_document_by_id(document_id)
for image in document.images:
page_layout = get_page_layout(image, only_regions=False, only_annotated=False)
page_string = page_layout.to_pagexml_string()
text_string = get_page_layout_text(page_layout)
d_page = zipfile.ZipInfo("{}.xml".format(os.path.splitext(page_layout.id)[0]))
d_page.date_time = time.localtime(time.time())[:6]
d_page.compress_type = zipfile.ZIP_DEFLATED
zf.writestr(d_page, page_string)
d_text = zipfile.ZipInfo("{}.txt".format(os.path.splitext(page_layout.id)[0]))
d_text.date_time = time.localtime(time.time())[:6]
d_text.compress_type = zipfile.ZIP_DEFLATED
zf.writestr(d_text, text_string)
memory_file.seek(0)
return send_file(memory_file, attachment_filename='pages.zip', as_attachment=True)
@bp.route('/get_document_annotated_pages/<string:document_id>')
@bp.route('/download_document_annotated_pages/<string:document_id>')
@login_required
def get_document_annotated_pages(document_id):
if not is_granted_acces_for_document(document_id, current_user):
flash(u'You do not have sufficient rights to this document!', 'danger')
return redirect(url_for('main.index'))
memory_file = BytesIO()
with zipfile.ZipFile(memory_file, 'w') as zf:
document = get_document_by_id(document_id)
for image in document.images:
page_layout = get_page_layout(image, only_regions=False, only_annotated=True)
xml_string = page_layout.to_pagexml_string()
d_XML = zipfile.ZipInfo("{}.xml".format(os.path.splitext(page_layout.id)[0]))
d_XML.date_time = time.localtime(time.time())[:6]
d_XML.compress_type = zipfile.ZIP_DEFLATED
zf.writestr(d_XML, xml_string)
memory_file.seek(0)
return send_file(memory_file, attachment_filename='pages.zip', as_attachment=True)
@bp.route('/remove_image/<string:document_id>/<string:image_id>')
@login_required
def remove_image_get(document_id, image_id):
try:
db_image = get_image_by_id(image_id)
except sqlalchemy.exc.StatementError:
return "Image does not exist.", 404
if not is_user_owner_or_collaborator(document_id, current_user):
        flash(u'You do not have sufficient rights to remove this image!', 'danger')
return redirect(url_for('main.index'))
if remove_image(document_id, image_id):
flash(u'Image successfully removed!', 'success')
return redirect(url_for('document.upload_images_to_document', document_id=document_id))
@bp.route('/collaborators/<string:document_id>', methods=['GET'])
@login_required
def collaborators_get(document_id):
if not is_document_owner(document_id, current_user) and not is_user_trusted(current_user):
flash(u'You do not have sufficient rights to edit collaborators!', 'danger')
return redirect(url_for('main.index'))
else:
document = get_document_by_id(document_id)
collaborators = get_collaborators_select_data(document)
reg = re.compile('@.*')
for collaborator in collaborators:
collaborator.email_an = re.sub(reg, '@...', collaborator.user.email)
return render_template('document/edit_collaborators.html', document=document, collaborators=collaborators)
@bp.route('/collaborators/<string:document_id>', methods=['POST'])
@login_required
def collaborators_post(document_id):
collaborators_ids = request.form.getlist('collaborators')
if not is_document_owner(document_id, current_user) and not is_user_trusted(current_user):
flash(u'You do not have sufficient rights to edit collaborators!', 'danger')
return redirect(url_for('main.index'))
else:
save_collaborators(document_id, collaborators_ids)
flash(u'Collaborators saved successfully.', 'success')
return redirect(url_for('document.collaborators_get', document_id=document_id))
@bp.route('/get_keyboard', methods=['GET'])
@login_required
def get_keyboard():
keyboard_dict = {}
for keyboard_layout in os.listdir(current_app.config['KEYBOARD_FOLDER']):
keyboard_layout_name = os.path.splitext(keyboard_layout)[0]
keyboard_layout_path = os.path.join(current_app.config['KEYBOARD_FOLDER'], keyboard_layout)
with open(keyboard_layout_path) as f:
keyboard_dict[keyboard_layout_name] = json.load(f)
return jsonify(keyboard_dict)
@bp.route('/update_confidences', methods=['POST'])
@login_required
def update_all_confidences():
if not is_user_trusted(current_user):
flash(u'You do not have sufficient rights to this document!', 'danger')
return redirect(url_for('main.index'))
file = request.files['data']
content = file.read()
changes = json.loads(content)
update_confidences(changes)
return redirect(url_for('document.documents'))
@bp.route('/update_baselines', methods=['POST'])
@login_required
def update_all_baselines():
if not is_user_trusted(current_user):
flash(u'You do not have sufficient rights to this document!', 'danger')
return redirect(url_for('main.index'))
file = request.files['data']
content = file.read()
changes = json.loads(content)
update_baselines(changes)
return redirect(url_for('document.documents'))
@bp.route('/lines_check', methods=['GET', 'POST'])
@bp.route('/lines_check/<string:document_id>', methods=['GET', 'POST'])
@login_required
def lines_check(document_id=None):
if is_user_trusted(current_user):
user_documents = get_all_documents()
else:
user_documents = get_user_documents(current_user)
selected = [False for _ in user_documents]
if document_id is not None:
for i, document in enumerate(user_documents):
if document_id == str(document.id):
selected[i] = True
if request.method == 'POST':
selected = [False for _ in user_documents]
document_ids = request.form.getlist('documents')
for i, document in enumerate(user_documents):
if document_ids != []:
if str(document.id) in document_ids:
selected[i] = True
return render_template('document/lines_check.html', documents=enumerate(user_documents), selected=selected)
@bp.route('/get_all_lines', methods=['GET'])
@login_required
def get_all_lines():
show_ignored_lines = request.headers.get('show-ignored-lines')
document_ids = json.loads(request.headers.get('documents'))
document_ids = get_documents_with_granted_acces(document_ids, current_user)
if show_ignored_lines == 'true':
show_ignored_lines = True
elif show_ignored_lines == 'false':
show_ignored_lines = False
lines = get_sucpect_lines_ids(document_ids, 'all', show_ignored_lines)
return jsonify(lines)
@bp.route('/get_annotated_lines', methods=['GET'])
@login_required
def get_annotated_lines():
show_ignored_lines = request.headers.get('show-ignored-lines')
document_ids = json.loads(request.headers.get('documents'))
document_ids = get_documents_with_granted_acces(document_ids, current_user)
if show_ignored_lines == 'true':
show_ignored_lines = True
elif show_ignored_lines == 'false':
show_ignored_lines = False
lines = get_sucpect_lines_ids(document_ids, 'annotated', show_ignored_lines)
return jsonify(lines)
@bp.route('/get_not_annotated_lines', methods=['GET'])
@login_required
def get_not_annotated_lines():
show_ignored_lines = request.headers.get('show-ignored-lines')
document_ids = json.loads(request.headers.get('documents'))
document_ids = get_documents_with_granted_acces(document_ids, current_user)
if show_ignored_lines == 'true':
show_ignored_lines = True
elif show_ignored_lines == 'false':
show_ignored_lines = False
lines = get_sucpect_lines_ids(document_ids, 'not_annotated', show_ignored_lines)
return jsonify(lines)
@bp.route('/get_cropped_image/<string:line_id>')
@login_required
def get_cropped_image(line_id):
if not is_granted_acces_for_line(line_id, current_user):
flash(u'You do not have sufficient rights to this document!', 'danger')
return redirect(url_for('main.index'))
image = get_line_image_by_id(line_id)
    return send_file(BytesIO(image), attachment_filename='{}.jpeg'.format(line_id), mimetype='image/jpeg', as_attachment=True)
@bp.route('/compute_scores/<string:document_id>')
@login_required
def compute_scores(document_id):
if not is_user_trusted(current_user):
flash(u'You do not have sufficient rights to this document!', 'danger')
return redirect(url_for('main.index'))
    _thread.start_new_thread(compute_scores_of_doc, (document_id,))
flash(u'Computing scores!', 'info')
return jsonify('success')
@bp.route('/skip_line/<string:line_id>')
@login_required
def skip_line(line_id):
if not is_granted_acces_for_line(line_id, current_user):
flash(u'You do not have sufficient rights to this document!', 'danger')
return redirect(url_for('main.index'))
skip_textline(line_id)
return jsonify({'status': 'success'})
@bp.route('/get_line_info/<string:line_id>')
@login_required
def get_line_info(line_id):
if not is_granted_acces_for_line(line_id, current_user):
flash(u'You do not have sufficient rights to this document!', 'danger')
return redirect(url_for('main.index'))
lines = get_line(line_id)
return jsonify(lines)
@bp.route('/search', methods=['GET', 'POST'])
def search_bar():
query = ""
lines = []
if is_user_trusted(current_user):
user_documents = get_all_documents()
else:
user_documents = get_user_documents(current_user)
selected = [False for _ in user_documents]
if request.method == 'POST':
query = request.form['query']
document_ids = request.form.getlist('documents')
user_document_ids = []
for i, document in enumerate(user_documents):
if document_ids != []:
if str(document.id) in document_ids:
selected[i] = True
user_document_ids.append(str(document.id))
else:
user_document_ids.append(str(document.id))
lines = find_textlines(query, current_user, user_document_ids)
return render_template('document/search_lines.html', query=query, lines=lines, documents=enumerate(user_documents), selected=selected)
| 38.376956
| 156
| 0.725305
| 3,706
| 26,979
| 4.966811
| 0.076902
| 0.043462
| 0.033031
| 0.018254
| 0.733634
| 0.676536
| 0.607649
| 0.567447
| 0.528875
| 0.49617
| 0
| 0.004363
| 0.167427
| 26,979
| 702
| 157
| 38.431624
| 0.81511
| 0.001705
| 0
| 0.483146
| 0
| 0
| 0.195811
| 0.075198
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082397
| false
| 0
| 0.039326
| 0.007491
| 0.284644
| 0.001873
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47e38cc73a4ca6342b90794377e31733e0fe8cef
| 4,898
|
py
|
Python
|
src/superdatabase3000/packet.py
|
JeanMax/SuperDatabase3000
|
836395c9b6ea2a5d53f81c22bb126e299f3e1bfc
|
[
"MIT"
] | 1
|
2020-03-30T13:49:29.000Z
|
2020-03-30T13:49:29.000Z
|
src/superdatabase3000/packet.py
|
JeanMax/SuperDatabase3000
|
836395c9b6ea2a5d53f81c22bb126e299f3e1bfc
|
[
"MIT"
] | 5
|
2020-03-30T14:32:48.000Z
|
2020-03-31T12:01:02.000Z
|
src/superdatabase3000/packet.py
|
JeanMax/SuperDatabase3000
|
836395c9b6ea2a5d53f81c22bb126e299f3e1bfc
|
[
"MIT"
] | null | null | null |
"""
This module defines a packet structure
(composed of: canari, checksum, payload_size, payload, and possibly an extra payload).
You'll find a 'pack' function allowing you to create a packet
from a payload (bytes object) you want to send, and an 'unpack' function
that can extract a payload from a packet (as a bytes object too) after
validating the packet structure (canari, checksum, length).
packet[64]: abcd abcdefghabcdefghabcd abcdefgh
^ ^ ^
canari[4] checksum[20] payload_size[8]
payload_size
<------------------------------------------->
abcdefghabcdefghabcdefghabcdefgh [...]
^ ^
payload[32] extra_payload
"""
import collections
import struct
import hashlib
CANARI = 0xdeadbeef
CANARI_SIZE = 4 # unsigned int
CHECKSUM_SIZE = 20 # sha1
INT_SIZE = 8 # unsigned long long
PAYLOAD_MIN_SIZE = 32 # TODO: tweak me based on DbClient requests size: 256-32
PACKET_MIN_SIZE = (
CANARI_SIZE + CHECKSUM_SIZE + INT_SIZE
+ PAYLOAD_MIN_SIZE
) # 64
CHECKSUM_OFFSET = CANARI_SIZE + CHECKSUM_SIZE  # we'll start hashing from there
STRUCT_FORMAT = (
"!"
"I" # canari
f"{CHECKSUM_SIZE}s" # checksum
"Q" # payload_size
"{payload_size}s" # payload: complete its size using format
)
Packet = collections.namedtuple(
"Packet",
["canari", "checksum", "payload_size", "payload"]
)
def _checksum(bytes_buf):
"""Return the sha1 digest of the given 'bytes_buf'."""
return hashlib.sha1(bytes_buf[CHECKSUM_OFFSET:]).digest()
def _verify_checksum(ctrl_checksum, bytes_buf):
"""
Return True if the given 'ctrl_checksum' matches the checksum
of 'bytes_buf', otherwise throw a ValueError.
"""
if ctrl_checksum != _checksum(bytes_buf):
raise ValueError("packet: invalid checksum")
return True
def pack(payload, with_checksum=True):
"""
Create a packet from the given 'payload' byte object that you want to send.
If the 'with_checksum' argument is True, the checksum of the payload will
be calculated and inserted in the packet, otherwise the checksum will be
set to zeros.
Returns a bytes object of the created packet (ready to send).
"""
packet = Packet(
canari=CANARI,
checksum=b"\x00" * CHECKSUM_SIZE,
payload_size=len(payload),
payload=payload.ljust(PAYLOAD_MIN_SIZE, b"\x00")
)
payload_size = max(packet.payload_size, PAYLOAD_MIN_SIZE)
try:
bytes_buf = struct.pack(
STRUCT_FORMAT.format(payload_size=payload_size),
*packet
)
except struct.error as e:
raise ValueError(f"packet: {e}")
if with_checksum:
packet = packet._replace(checksum=_checksum(bytes_buf))
bytes_buf = struct.pack(
STRUCT_FORMAT.format(payload_size=payload_size),
*packet
)
return bytes_buf
def unpack(bytes_buf, with_checksum=True):
"""
    Extract the payload from the given 'bytes_buf' packet.
    If the 'with_checksum' argument is True, the checksum in the packet will be
    checked against a calculated checksum of the packet payload. Otherwise it
    will just be ignored.
    Returns the unpacked Packet; its 'payload' field holds the extracted
    payload as a bytes object.
    A ValueError will be thrown if an invalid packet is given as 'bytes_buf'
    (invalid canari, checksum, payload length).
"""
# first, we try to unpack as if it was a 64 bytes packet
try:
packet = struct.unpack(
STRUCT_FORMAT.format(payload_size=PAYLOAD_MIN_SIZE),
bytes_buf[:PACKET_MIN_SIZE]
)
except struct.error as e:
raise ValueError(f"packet: {e}")
packet = Packet(*packet)
if packet.canari != CANARI:
raise ValueError("packet: the canari is dead")
# payload can fit in a 64 bytes packet: just verify checksum, then job done
if packet.payload_size <= PAYLOAD_MIN_SIZE:
if with_checksum:
_verify_checksum(packet.checksum, bytes_buf)
packet = packet._replace(
payload=packet.payload[:packet.payload_size]
)
return packet
# packet is actually bigger than 64 bytes (extra_payload)
if len(bytes_buf) <= PACKET_MIN_SIZE:
return packet # the payload is incomplete, and checksum not verified
try:
packet = struct.unpack(
STRUCT_FORMAT.format(payload_size=packet.payload_size),
bytes_buf
)
except struct.error as e:
raise ValueError(f"packet: {e}")
packet = Packet(*packet)
if with_checksum:
_verify_checksum(packet.checksum, bytes_buf)
return packet # complete packet with extra payload
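# A minimal usage sketch (not part of the original module), exercising the
# pack/unpack round trip for both a small payload (padded to the 64-byte
# minimum packet) and a payload larger than PAYLOAD_MIN_SIZE:
if __name__ == "__main__":
    small = pack(b"hello")
    assert len(small) == PACKET_MIN_SIZE  # 4 + 20 + 8 + 32 = 64 bytes
    assert unpack(small).payload == b"hello"
    big_payload = b"x" * 100  # > PAYLOAD_MIN_SIZE, so an extra_payload is used
    assert unpack(pack(big_payload)).payload == big_payload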
| 32.223684
| 79
| 0.636178
| 609
| 4,898
| 4.962233
| 0.231527
| 0.06552
| 0.041694
| 0.023825
| 0.246525
| 0.230311
| 0.191595
| 0.191595
| 0.191595
| 0.098279
| 0
| 0.009893
| 0.277664
| 4,898
| 151
| 80
| 32.437086
| 0.844262
| 0.454675
| 0
| 0.325301
| 0
| 0
| 0.064188
| 0
| 0
| 0
| 0.003914
| 0.006623
| 0
| 1
| 0.048193
| false
| 0
| 0.036145
| 0
| 0.156627
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47e43b3b4e3f0031df6f61702eae33c0a872be24
| 1,095
|
py
|
Python
|
microcosm_flask/swagger/api.py
|
Sinon/microcosm-flask
|
c1404ebc94459c8156b04f5e04490a330117524c
|
[
"Apache-2.0"
] | 11
|
2017-01-30T21:53:20.000Z
|
2020-05-29T22:39:19.000Z
|
microcosm_flask/swagger/api.py
|
Sinon/microcosm-flask
|
c1404ebc94459c8156b04f5e04490a330117524c
|
[
"Apache-2.0"
] | 139
|
2016-03-09T19:09:59.000Z
|
2021-09-03T17:14:00.000Z
|
microcosm_flask/swagger/api.py
|
Sinon/microcosm-flask
|
c1404ebc94459c8156b04f5e04490a330117524c
|
[
"Apache-2.0"
] | 10
|
2016-12-19T22:39:42.000Z
|
2021-03-09T19:23:15.000Z
|
"""
API interfaces for swagger operations.
"""
from typing import (
Any,
Iterable,
Mapping,
Tuple,
)
from marshmallow import Schema
from marshmallow.fields import Field
from microcosm_flask.swagger.parameters import Parameters
from microcosm_flask.swagger.schemas import Schemas
def build_schema(schema: Schema, strict_enums: bool = True) -> Mapping[str, Any]:
"""
Build JSON schema from a marshmallow schema.
"""
builder = Schemas(build_parameter=build_parameter, strict_enums=strict_enums)
return builder.build(schema)
def iter_schemas(schema: Schema, strict_enums: bool = True) -> Iterable[Tuple[str, Any]]:
"""
Build zero or more JSON schemas for a marshmallow schema.
Generates: name, schema pairs.
"""
builder = Schemas(build_parameter=build_parameter, strict_enums=strict_enums)
return builder.iter_schemas(schema)
def build_parameter(field: Field, **kwargs) -> Mapping[str, Any]:
"""
Build JSON parameter from a marshmallow field.
"""
builder = Parameters(**kwargs)
return builder.build(field)
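# A minimal usage sketch (not part of the original module): a toy marshmallow
# schema fed through build_schema above. The schema name and its fields are
# illustrative assumptions, not part of microcosm_flask.
from marshmallow import fields

class _PersonSchema(Schema):
    name = fields.String(required=True)
    age = fields.Integer()

# build_schema returns a JSON-schema-style mapping for the whole schema;
# iter_schemas additionally generates (name, schema) pairs for nested schemas.
# print(build_schema(_PersonSchema()))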
| 23.804348
| 89
| 0.717808
| 133
| 1,095
| 5.789474
| 0.300752
| 0.085714
| 0.042857
| 0.064935
| 0.337662
| 0.280519
| 0.2
| 0.2
| 0.2
| 0.2
| 0
| 0
| 0.185388
| 1,095
| 45
| 90
| 24.333333
| 0.863229
| 0.200913
| 0
| 0.105263
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.157895
| false
| 0
| 0.263158
| 0
| 0.578947
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47e4a20a59666f230f44fc593c648ca410af9651
| 1,286
|
py
|
Python
|
converter.py
|
GuzTech/uart_to_hdmi
|
b6ea4efa85a06e59406ffc3b034028f00d5a7cbf
|
[
"MIT"
] | 1
|
2020-07-04T01:09:00.000Z
|
2020-07-04T01:09:00.000Z
|
converter.py
|
GuzTech/uart_to_hdmi
|
b6ea4efa85a06e59406ffc3b034028f00d5a7cbf
|
[
"MIT"
] | null | null | null |
converter.py
|
GuzTech/uart_to_hdmi
|
b6ea4efa85a06e59406ffc3b034028f00d5a7cbf
|
[
"MIT"
] | null | null | null |
import sys
import struct
if(len(sys.argv) != 5):
print("Usage: python converter.py <num_pixels_x> <num_pixels_y> <input file> <output file>\n")
else:
num_pixels_x = int(sys.argv[1])
num_pixels_y = int(sys.argv[2])
print(num_pixels_x)
has_alpha_channel = False
infile = open(sys.argv[3], "rb")
data = infile.read()
data_len = len(data)
    if ((data_len != (num_pixels_x * num_pixels_y * 4)) and
            (data_len != (num_pixels_x * num_pixels_y * 3))):
        raise AssertionError(
            "File size does not match given resolution, or does not use 8bpp.")
if(data_len == (num_pixels_x * num_pixels_y * 4)):
has_alpha_channel = True
outfile = open(sys.argv[4], "wb")
infile.seek(0)
for y in range(num_pixels_y):
for x in range(num_pixels_x):
r = (int.from_bytes(infile.read(1), 'little') >> 5) & 0x7
g = (int.from_bytes(infile.read(1), 'little') >> 5) & 0x7
b = (int.from_bytes(infile.read(1), 'little') >> 6) & 0x3
if(has_alpha_channel):
# Alpha channel, we don't use this
_ = infile.read(1)
pixel = (b << 6) | (g << 3) | r
outfile.write(pixel.to_bytes(1, 'little'))
infile.close()
outfile.close()
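# Worked example of the RGB332 packing above (added for clarity): a pure red
# input pixel (255, 0, 0) gives r = (255 >> 5) & 0x7 = 7, g = 0, b = 0, so the
# packed byte is (0 << 6) | (0 << 3) | 7 = 0x07.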
| 30.619048
| 98
| 0.573872
| 192
| 1,286
| 3.630208
| 0.364583
| 0.167862
| 0.10043
| 0.074605
| 0.289813
| 0.289813
| 0.261119
| 0.219512
| 0.180775
| 0.086083
| 0
| 0.028047
| 0.27916
| 1,286
| 41
| 99
| 31.365854
| 0.72384
| 0.024883
| 0
| 0
| 0
| 0.032258
| 0.141374
| 0
| 0
| 0
| 0.007189
| 0
| 0.032258
| 1
| 0
| false
| 0
| 0.064516
| 0
| 0.064516
| 0.064516
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47e4f4051c67291e2bfd6264123e2f3ba68f0903
| 319
|
py
|
Python
|
project_handwritten-character-recognition-with-convolutional-neural-network-master/Codes/remove_old_image.py
|
akash519-gif/Handwritten-letter-detection.
|
f49240bc3dcea5eb8f53bade66ccb49bf8809be6
|
[
"Apache-2.0"
] | null | null | null |
project_handwritten-character-recognition-with-convolutional-neural-network-master/Codes/remove_old_image.py
|
akash519-gif/Handwritten-letter-detection.
|
f49240bc3dcea5eb8f53bade66ccb49bf8809be6
|
[
"Apache-2.0"
] | null | null | null |
project_handwritten-character-recognition-with-convolutional-neural-network-master/Codes/remove_old_image.py
|
akash519-gif/Handwritten-letter-detection.
|
f49240bc3dcea5eb8f53bade66ccb49bf8809be6
|
[
"Apache-2.0"
] | null | null | null |
# import required packages
import os
def remove_content(directory):
for file in os.scandir(directory):
print(file.path)
os.remove(file.path)
print("Old images has been deleted")
upload_dir = './uploads'
remove_content(upload_dir)
images_dir = './static/images'
remove_content(images_dir)
| 18.764706
| 44
| 0.705329
| 43
| 319
| 5.069767
| 0.534884
| 0.178899
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.188088
| 319
| 16
| 45
| 19.9375
| 0.841699
| 0.068966
| 0
| 0
| 0
| 0
| 0.172881
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.2
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47e633e7aabb9cbd31dd0cb29459787e531f57cc
| 15,271
|
py
|
Python
|
src/scaffoldfitter/fitterstepfit.py
|
zekh167/scaffoldfitter
|
357a312948464399433f29f19cdac4d7fd6061ef
|
[
"Apache-2.0"
] | null | null | null |
src/scaffoldfitter/fitterstepfit.py
|
zekh167/scaffoldfitter
|
357a312948464399433f29f19cdac4d7fd6061ef
|
[
"Apache-2.0"
] | null | null | null |
src/scaffoldfitter/fitterstepfit.py
|
zekh167/scaffoldfitter
|
357a312948464399433f29f19cdac4d7fd6061ef
|
[
"Apache-2.0"
] | null | null | null |
"""
Fit step for gross alignment and scale.
"""
from opencmiss.utils.zinc.field import assignFieldParameters, createFieldsDisplacementGradients
from opencmiss.utils.zinc.general import ChangeManager
from opencmiss.zinc.field import Field, FieldFindMeshLocation
from opencmiss.zinc.optimisation import Optimisation
from opencmiss.zinc.result import RESULT_OK
from scaffoldfitter.fitterstep import FitterStep
class FitterStepFit(FitterStep):
_jsonTypeId = "_FitterStepFit"
def __init__(self):
super(FitterStepFit, self).__init__()
self._lineWeight = 10.0
self._markerWeight = 1.0
self._strainPenaltyWeight = 0.0
self._curvaturePenaltyWeight = 0.0
self._edgeDiscontinuityPenaltyWeight = 0.0
self._numberOfIterations = 1
self._maximumSubIterations = 1
self._updateReferenceState = False
@classmethod
def getJsonTypeId(cls):
return cls._jsonTypeId
def decodeSettingsJSONDict(self, dctIn : dict):
"""
Decode definition of step from JSON dict.
"""
assert self._jsonTypeId in dctIn
# ensure all new options are in dct
dct = self.encodeSettingsJSONDict()
dct.update(dctIn)
self._lineWeight = dct["lineWeight"]
self._markerWeight = dct["markerWeight"]
self._strainPenaltyWeight = dct["strainPenaltyWeight"]
self._curvaturePenaltyWeight = dct["curvaturePenaltyWeight"]
self._edgeDiscontinuityPenaltyWeight = dct["edgeDiscontinuityPenaltyWeight"]
self._numberOfIterations = dct["numberOfIterations"]
self._maximumSubIterations = dct["maximumSubIterations"]
self._updateReferenceState = dct["updateReferenceState"]
def encodeSettingsJSONDict(self) -> dict:
"""
Encode definition of step in dict.
:return: Settings in a dict ready for passing to json.dump.
"""
return {
self._jsonTypeId : True,
"lineWeight" : self._lineWeight,
"markerWeight" : self._markerWeight,
"strainPenaltyWeight" : self._strainPenaltyWeight,
"curvaturePenaltyWeight" : self._curvaturePenaltyWeight,
"edgeDiscontinuityPenaltyWeight" : self._edgeDiscontinuityPenaltyWeight,
"numberOfIterations" : self._numberOfIterations,
"maximumSubIterations" : self._maximumSubIterations,
"updateReferenceState" : self._updateReferenceState
}
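    # With the defaults set in __init__, encodeSettingsJSONDict() returns
    # (illustrative only):
    # {"_FitterStepFit": True, "lineWeight": 10.0, "markerWeight": 1.0,
    #  "strainPenaltyWeight": 0.0, "curvaturePenaltyWeight": 0.0,
    #  "edgeDiscontinuityPenaltyWeight": 0.0, "numberOfIterations": 1,
    #  "maximumSubIterations": 1, "updateReferenceState": False}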
def getLineWeight(self):
return self._lineWeight
def setLineWeight(self, weight):
assert weight >= 0.0
if weight != self._lineWeight:
self._lineWeight = weight
return True
return False
def getMarkerWeight(self):
return self._markerWeight
def setMarkerWeight(self, weight):
assert weight >= 0.0
if weight != self._markerWeight:
self._markerWeight = weight
return True
return False
def getStrainPenaltyWeight(self):
return self._strainPenaltyWeight
def setStrainPenaltyWeight(self, weight):
assert weight >= 0.0
if weight != self._strainPenaltyWeight:
self._strainPenaltyWeight = weight
return True
return False
def getCurvaturePenaltyWeight(self):
return self._curvaturePenaltyWeight
def setCurvaturePenaltyWeight(self, weight):
assert weight >= 0.0
if weight != self._curvaturePenaltyWeight:
self._curvaturePenaltyWeight = weight
return True
return False
def getEdgeDiscontinuityPenaltyWeight(self):
return self._edgeDiscontinuityPenaltyWeight
def setEdgeDiscontinuityPenaltyWeight(self, weight):
assert weight >= 0.0
if weight != self._edgeDiscontinuityPenaltyWeight:
self._edgeDiscontinuityPenaltyWeight = weight
return True
return False
def getNumberOfIterations(self):
return self._numberOfIterations
def setNumberOfIterations(self, numberOfIterations):
assert numberOfIterations > 0
if numberOfIterations != self._numberOfIterations:
self._numberOfIterations = numberOfIterations
return True
return False
def getMaximumSubIterations(self):
return self._maximumSubIterations
def setMaximumSubIterations(self, maximumSubIterations):
assert maximumSubIterations > 0
if maximumSubIterations != self._maximumSubIterations:
self._maximumSubIterations = maximumSubIterations
return True
return False
def isUpdateReferenceState(self):
return self._updateReferenceState
def setUpdateReferenceState(self, updateReferenceState):
if updateReferenceState != self._updateReferenceState:
self._updateReferenceState = updateReferenceState
return True
return False
def run(self, modelFileNameStem=None):
"""
Fit model geometry parameters to data.
:param modelFileNameStem: Optional name stem of intermediate output file to write.
"""
        self._fitter.assignDataWeights(self._lineWeight, self._markerWeight)
fieldmodule = self._fitter._region.getFieldmodule()
optimisation = fieldmodule.createOptimisation()
optimisation.setMethod(Optimisation.METHOD_NEWTON)
optimisation.addDependentField(self._fitter.getModelCoordinatesField())
optimisation.setAttributeInteger(Optimisation.ATTRIBUTE_MAXIMUM_ITERATIONS, self._maximumSubIterations)
#FunctionTolerance = optimisation.getAttributeReal(Optimisation.ATTRIBUTE_FUNCTION_TOLERANCE)
#GradientTolerance = optimisation.getAttributeReal(Optimisation.ATTRIBUTE_GRADIENT_TOLERANCE)
#StepTolerance = optimisation.getAttributeReal(Optimisation.ATTRIBUTE_STEP_TOLERANCE)
MaximumStep = optimisation.getAttributeReal(Optimisation.ATTRIBUTE_MAXIMUM_STEP)
MinimumStep = optimisation.getAttributeReal(Optimisation.ATTRIBUTE_MINIMUM_STEP)
#LinesearchTolerance = optimisation.getAttributeReal(Optimisation.ATTRIBUTE_LINESEARCH_TOLERANCE)
#TrustRegionSize = optimisation.getAttributeReal(Optimisation.ATTRIBUTE_TRUST_REGION_SIZE)
dataScale = self._fitter.getDataScale()
#tol_scale = dataScale # *dataScale
#FunctionTolerance *= tol_scale
#optimisation.setAttributeReal(Optimisation.ATTRIBUTE_FUNCTION_TOLERANCE, FunctionTolerance)
#GradientTolerance /= tol_scale
#optimisation.setAttributeReal(Optimisation.ATTRIBUTE_GRADIENT_TOLERANCE, GradientTolerance)
#StepTolerance *= tol_scale
#optimisation.setAttributeReal(Optimisation.ATTRIBUTE_STEP_TOLERANCE, StepTolerance)
MaximumStep *= dataScale
optimisation.setAttributeReal(Optimisation.ATTRIBUTE_MAXIMUM_STEP, MaximumStep)
MinimumStep *= dataScale
optimisation.setAttributeReal(Optimisation.ATTRIBUTE_MINIMUM_STEP, MinimumStep)
#LinesearchTolerance *= dataScale
#optimisation.setAttributeReal(Optimisation.ATTRIBUTE_LINESEARCH_TOLERANCE, LinesearchTolerance)
#TrustRegionSize *= dataScale
#optimisation.setAttributeReal(Optimisation.ATTRIBUTE_TRUST_REGION_SIZE, TrustRegionSize)
#if self.getDiagnosticLevel() > 0:
# print("Function Tolerance", FunctionTolerance)
# print("Gradient Tolerance", GradientTolerance)
# print("Step Tolerance", StepTolerance)
# print("Maximum Step", MaximumStep)
# print("Minimum Step", MinimumStep)
# print("Linesearch Tolerance", LinesearchTolerance)
# print("Trust Region Size", TrustRegionSize)
dataObjective = None
deformationPenaltyObjective = None
edgeDiscontinuityPenaltyObjective = None
with ChangeManager(fieldmodule):
dataObjective = self.createDataObjectiveField()
result = optimisation.addObjectiveField(dataObjective)
assert result == RESULT_OK, "Fit Geometry: Could not add data objective field"
if (self._strainPenaltyWeight > 0.0) or (self._curvaturePenaltyWeight > 0.0):
deformationPenaltyObjective = self.createDeformationPenaltyObjectiveField()
result = optimisation.addObjectiveField(deformationPenaltyObjective)
assert result == RESULT_OK, "Fit Geometry: Could not add strain/curvature penalty objective field"
if self._edgeDiscontinuityPenaltyWeight > 0.0:
print("WARNING! Edge discontinuity penalty is not supported by NEWTON solver - skipping")
#edgeDiscontinuityPenaltyObjective = self.createEdgeDiscontinuityPenaltyObjectiveField()
#result = optimisation.addObjectiveField(edgeDiscontinuityPenaltyObjective)
#assert result == RESULT_OK, "Fit Geometry: Could not add edge discontinuity penalty objective field"
fieldcache = fieldmodule.createFieldcache()
objectiveFormat = "{:12e}"
for iter in range(self._numberOfIterations):
iterName = str(iter + 1)
if self.getDiagnosticLevel() > 0:
print("-------- Iteration " + iterName)
if self.getDiagnosticLevel() > 0:
result, objective = dataObjective.evaluateReal(fieldcache, 1)
print(" Data objective", objectiveFormat.format(objective))
if deformationPenaltyObjective:
result, objective = deformationPenaltyObjective.evaluateReal(fieldcache, deformationPenaltyObjective.getNumberOfComponents())
print(" Deformation penalty objective", objectiveFormat.format(objective))
result = optimisation.optimise()
if self.getDiagnosticLevel() > 1:
solutionReport = optimisation.getSolutionReport()
print(solutionReport)
assert result == RESULT_OK, "Fit Geometry: Optimisation failed with result " + str(result)
if modelFileNameStem:
self._fitter.writeModel(modelFileNameStem + "_fit" + iterName + ".exf")
self._fitter.calculateDataProjections(self)
if self.getDiagnosticLevel() > 0:
print("--------")
result, objective = dataObjective.evaluateReal(fieldcache, 1)
print(" END Data objective", objectiveFormat.format(objective))
if deformationPenaltyObjective:
result, objective = deformationPenaltyObjective.evaluateReal(fieldcache, deformationPenaltyObjective.getNumberOfComponents())
print(" END Deformation penalty objective", objectiveFormat.format(objective))
if self._updateReferenceState:
self._fitter.updateModelReferenceCoordinates()
self.setHasRun(True)
def createDataObjectiveField(self):
"""
Get FieldNodesetSum objective for data projected onto mesh, including markers with fixed locations.
Assumes ChangeManager(fieldmodule) is in effect.
:return: Zinc FieldNodesetSum.
"""
fieldmodule = self._fitter.getFieldmodule()
delta = self._fitter.getDataDeltaField()
weight = self._fitter.getDataWeightField()
deltaSq = fieldmodule.createFieldDotProduct(delta, delta)
#dataProjectionInDirection = fieldmodule.createFieldDotProduct(dataProjectionDelta, self._fitter.getDataProjectionDirectionField())
#dataProjectionInDirection = fieldmodule.createFieldMagnitude(dataProjectionDelta)
#dataProjectionInDirection = dataProjectionDelta
#dataProjectionInDirection = fieldmodule.createFieldConstant([ weight/dataScale ]*dataProjectionDelta.getNumberOfComponents()) * dataProjectionDelta
dataProjectionObjective = fieldmodule.createFieldNodesetSum(weight*deltaSq, self._fitter.getActiveDataNodesetGroup())
dataProjectionObjective.setElementMapField(self._fitter.getDataHostLocationField())
return dataProjectionObjective
def createDeformationPenaltyObjectiveField(self):
"""
Only call if (self._strainPenaltyWeight > 0.0) or (self._curvaturePenaltyWeight > 0.0)
:return: Zinc FieldMeshIntegral, or None if not weighted.
Assumes ChangeManager(fieldmodule) is in effect.
"""
numberOfGaussPoints = 3
fieldmodule = self._fitter.getFieldmodule()
mesh = self._fitter.getHighestDimensionMesh()
dataScale = 1.0
dimension = mesh.getDimension()
# future: eliminate effect of model scale
#linearDataScale = self._fitter.getDataScale()
#for d in range(dimension):
# dataScale /= linearDataScale
displacementGradient1, displacementGradient2 = createFieldsDisplacementGradients(self._fitter.getModelCoordinatesField(), self._fitter.getModelReferenceCoordinatesField(), mesh)
deformationTerm = None
if self._strainPenaltyWeight > 0.0:
# future: allow variable alpha components
alpha = fieldmodule.createFieldConstant([ self._strainPenaltyWeight*dataScale ]*displacementGradient1.getNumberOfComponents())
wtSqDeformationGradient1 = fieldmodule.createFieldDotProduct(alpha, displacementGradient1*displacementGradient1)
assert wtSqDeformationGradient1.isValid()
deformationTerm = wtSqDeformationGradient1
if self._curvaturePenaltyWeight > 0.0:
# future: allow variable beta components
beta = fieldmodule.createFieldConstant([ self._curvaturePenaltyWeight*dataScale ]*displacementGradient2.getNumberOfComponents())
wtSqDeformationGradient2 = fieldmodule.createFieldDotProduct(beta, displacementGradient2*displacementGradient2)
assert wtSqDeformationGradient2.isValid()
deformationTerm = (deformationTerm + wtSqDeformationGradient2) if deformationTerm else wtSqDeformationGradient2
        deformationPenaltyObjective = fieldmodule.createFieldMeshIntegral(deformationTerm, self._fitter.getModelReferenceCoordinatesField(), mesh)
deformationPenaltyObjective.setNumbersOfPoints(numberOfGaussPoints)
return deformationPenaltyObjective
def createEdgeDiscontinuityPenaltyObjectiveField(self):
"""
Only call if self._edgeDiscontinuityPenaltyWeight > 0.0
Assumes ChangeManager(fieldmodule) is in effect.
:return: Zinc FieldMeshIntegralSquares, or None if not weighted.
"""
numberOfGaussPoints = 3
fieldmodule = self._fitter.getFieldmodule()
lineMesh = fieldmodule.findMeshByDimension(1)
edgeDiscontinuity = fieldmodule.createFieldEdgeDiscontinuity(self._fitter.getModelCoordinatesField())
dataScale = self._fitter.getDataScale()
weightedEdgeDiscontinuity = edgeDiscontinuity*fieldmodule.createFieldConstant(self._edgeDiscontinuityPenaltyWeight/dataScale)
edgeDiscontinuityPenaltyObjective = fieldmodule.createFieldMeshIntegralSquares(weightedEdgeDiscontinuity, self._fitter.getModelReferenceCoordinatesField(), lineMesh)
edgeDiscontinuityPenaltyObjective.setNumbersOfPoints(numberOfGaussPoints)
return edgeDiscontinuityPenaltyObjective
| 49.26129
| 185
| 0.71017
| 1,137
| 15,271
| 9.41161
| 0.218118
| 0.021493
| 0.010466
| 0.015699
| 0.213158
| 0.152229
| 0.094571
| 0.084104
| 0.074572
| 0.045977
| 0
| 0.005962
| 0.220156
| 15,271
| 309
| 186
| 49.420712
| 0.892602
| 0.209482
| 0
| 0.181373
| 0
| 0
| 0.060217
| 0.008796
| 0
| 0
| 0
| 0
| 0.063725
| 1
| 0.117647
| false
| 0
| 0.029412
| 0.044118
| 0.29902
| 0.039216
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47eb8f87adf534b765a9c50c9659d9424a7c2ade
| 1,315
|
py
|
Python
|
createDB.py
|
ansh-mehta/COVID-19-Vaccine-Slot-Notifier
|
b09d163ebee960089edbd8b894e3b956745504df
|
[
"Apache-2.0"
] | null | null | null |
createDB.py
|
ansh-mehta/COVID-19-Vaccine-Slot-Notifier
|
b09d163ebee960089edbd8b894e3b956745504df
|
[
"Apache-2.0"
] | 1
|
2021-09-11T18:06:33.000Z
|
2021-09-11T18:06:33.000Z
|
createDB.py
|
ansh-mehta/COVID-19-Vaccine-Slot-Notifier
|
b09d163ebee960089edbd8b894e3b956745504df
|
[
"Apache-2.0"
] | null | null | null |
import requests
import json
from pymongo import MongoClient, collection
client = MongoClient("mongodb://localhost:27017")
database = client["temp"]
states_districts = database["states_districts"]
states_districts.remove({})
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"
}
response = requests.get(
"https://cdn-api.co-vin.in/api/v2/admin/location/states", headers=headers
)
states = json.loads(response.text)["states"]
custom_state_id = 1
for state in states:
state_id = state["state_id"]
state_name = state["state_name"].strip()
print(state_name)
response = requests.get(
"https://cdn-api.co-vin.in/api/v2/admin/location/districts/" + str(state_id),
headers=headers,
)
    custom_district_id = 1
    districts = json.loads(response.text)["districts"]
    for district in districts:
        district_id = district["district_id"]
        district_name = district["district_name"].strip()
        data = {
            "state_name": state_name,
            "custom_state_id": custom_state_id,
            "district_name": district_name,
            "custom_district_id": custom_district_id,
            "actual_district_id": district_id,
        }
        states_districts.insert_one(data)
        custom_district_id += 1
    custom_state_id += 1
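# Example of one inserted document (all values illustrative only):
# {"state_name": "Goa", "custom_state_id": 7, "district_name": "North Goa",
#  "custom_district_id": 1, "actual_district_id": 189}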
| 35.540541
| 175
| 0.719392
| 181
| 1,315
| 5.005525
| 0.364641
| 0.0883
| 0.057395
| 0.05298
| 0.121413
| 0.121413
| 0.121413
| 0.121413
| 0.121413
| 0.121413
| 0
| 0.032947
| 0.146008
| 1,315
| 37
| 176
| 35.540541
| 0.77382
| 0
| 0
| 0.060606
| 0
| 0.090909
| 0.317629
| 0.018997
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0.030303
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47ecac75bfa5b5456323216191e97427a888010b
| 3,989
|
py
|
Python
|
model_conv.py
|
isn350/e_hir_GAN
|
53cc7530b1c4bb7ee5250d7fc057b71ceb5726b4
|
[
"MIT"
] | null | null | null |
model_conv.py
|
isn350/e_hir_GAN
|
53cc7530b1c4bb7ee5250d7fc057b71ceb5726b4
|
[
"MIT"
] | null | null | null |
model_conv.py
|
isn350/e_hir_GAN
|
53cc7530b1c4bb7ee5250d7fc057b71ceb5726b4
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
def xavier_init(size):
in_dim = size[0]
xavier_stddev = 1. / tf.sqrt(in_dim / 2.)
# return tf.random_normal(shape=size, stddev=xavier_stddev)
return xavier_stddev
def conv(x, w, b, stride, name):
with tf.variable_scope('conv'):
tf.summary.histogram('weight', w)
tf.summary.histogram('biases', b)
return tf.nn.conv2d(x,
filter=w,
strides=[1, stride, stride, 1],
padding='SAME',
name=name) + b
def deconv(x, w, b, shape, stride, name):
with tf.variable_scope('deconv'):
tf.summary.histogram('weight', w)
tf.summary.histogram('biases', b)
return tf.nn.conv2d_transpose(x,
filter=w,
output_shape=shape,
strides=[1, stride, stride, 1],
padding='SAME',
name=name) + b
def lrelu(x, alpha=0.2):
with tf.variable_scope('leakyReLU'):
return tf.maximum(x, alpha * x)
def discriminator(X,dim, reuse=False):
with tf.variable_scope('discriminator'):
if reuse:
tf.get_variable_scope().reuse_variables()
K = 64
M = 128
N = 256
W1 = tf.get_variable('D_W1', [4, 4, 1, K], initializer=tf.random_normal_initializer(stddev=0.1))
B1 = tf.get_variable('D_B1', [K], initializer=tf.constant_initializer())
W2 = tf.get_variable('D_W2', [4, 4, K, M], initializer=tf.random_normal_initializer(stddev=0.1))
B2 = tf.get_variable('D_B2', [M], initializer=tf.constant_initializer())
W3 = tf.get_variable('D_W3', [16*16*M, N], initializer=tf.random_normal_initializer(stddev=0.1))
B3 = tf.get_variable('D_B3', [N], initializer=tf.constant_initializer())
W4 = tf.get_variable('D_W4', [N, 1], initializer=tf.random_normal_initializer(stddev=0.1))
B4 = tf.get_variable('D_B4', [1], initializer=tf.constant_initializer())
X = tf.reshape(X, [-1, dim, dim, 1], 'reshape')
conv1 = conv(X, W1, B1, stride=2, name='conv1')
bn1 = tf.contrib.layers.batch_norm(conv1)
conv2 = conv(tf.nn.dropout(lrelu(bn1), 0.4), W2, B2, stride=2, name='conv2')
bn2 = tf.contrib.layers.batch_norm(conv2)
flat = tf.reshape(tf.nn.dropout(lrelu(bn2), 0.4), [-1, 16*16*M], name='flat')
dense = lrelu(tf.matmul(flat, W3) + B3)
logits = tf.matmul(dense, W4) + B4
prob = tf.nn.sigmoid(logits)
return prob, logits
def generator(X, dim, batch_size=64):
with tf.variable_scope('generator'):
K = 256
L = 128
M = 64
W1 = tf.get_variable('G_W1', [100, 16*16*K], initializer=tf.random_normal_initializer(stddev=0.1))
B1 = tf.get_variable('G_B1', [16*16*K], initializer=tf.constant_initializer())
W2 = tf.get_variable('G_W2', [4, 4, M, K], initializer=tf.random_normal_initializer(stddev=0.1))
B2 = tf.get_variable('G_B2', [M], initializer=tf.constant_initializer())
W3 = tf.get_variable('G_W3', [4, 4, 1, M], initializer=tf.random_normal_initializer(stddev=0.1))
B3 = tf.get_variable('G_B3', [1], initializer=tf.constant_initializer())
X = lrelu(tf.matmul(X, W1) + B1)
X = tf.reshape(X, [batch_size, 16, 16, K])
print(X)
deconv1 = deconv(X, W2, B2, shape=[batch_size, 32, 32, M], stride=2, name='deconv1')
bn1 = tf.contrib.layers.batch_norm(deconv1)
deconv2 = deconv(tf.nn.dropout(lrelu(bn1), 0.4), W3, B3, shape=[batch_size, dim, dim, 1], stride=2, name='deconv2')
XX = tf.reshape(deconv2, [-1, dim*dim], 'reshape')
return tf.nn.sigmoid(XX)
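# Shape note (added for clarity, not in the original): both networks assume
# dim == 64. The discriminator's two stride-2 convolutions take 64x64 ->
# 32x32 -> 16x16, which is why the dense layer expects 16*16*M features;
# the generator mirrors this, reshaping to 16x16 and deconvolving twice
# back up to dim x dim.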
| 40.292929
| 124
| 0.552018
| 536
| 3,989
| 3.972015
| 0.177239
| 0.035228
| 0.091592
| 0.052607
| 0.503992
| 0.488962
| 0.404415
| 0.384688
| 0.364021
| 0.31752
| 0
| 0.056611
| 0.300326
| 3,989
| 98
| 125
| 40.704082
| 0.706199
| 0.014289
| 0
| 0.169014
| 0
| 0
| 0.044624
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084507
| false
| 0
| 0.014085
| 0
| 0.183099
| 0.014085
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47ed721213e9d40abe12f67af339888e2d8b6e5e
| 12,488
|
py
|
Python
|
squad_utils.py
|
ashtonteng/squad_exp
|
0cdcb3e41783026e805fedbe671a9a69a90d8a86
|
[
"MIT"
] | 1
|
2019-01-08T16:41:54.000Z
|
2019-01-08T16:41:54.000Z
|
squad_utils.py
|
ashtonteng/squad_exp
|
0cdcb3e41783026e805fedbe671a9a69a90d8a86
|
[
"MIT"
] | null | null | null |
squad_utils.py
|
ashtonteng/squad_exp
|
0cdcb3e41783026e805fedbe671a9a69a90d8a86
|
[
"MIT"
] | null | null | null |
import numpy as np
# import matplotlib.pyplot as plt
# import pylab
import re
import itertools
import json
import collections
import multiprocessing as mp
import random
import sys
#sys.path.append("./src/")
#from proto import io as protoio
#from utils.multiprocessor_cpu import MultiProcessorCPU
'''
some general pre/post processing tips:
1. should strip the space at the beginning or end
2. consider the influence of punctuation at the end
3. be careful about empty string when using lib re functions
'''
def LoadJsonData(filePath):
'''
Load the file.
@param filePath: filePath string
'''
with open(filePath) as dataFile:
data = json.load(dataFile)
return data
def LoadProtoData(filePath):
data = protoio.ReadArticles(filePath)
dataDict = dict()
for article in data:
title = article.title
dataDict[title] = article
return dataDict
def DumpJsonPrediction(filePath, predictions):
'''
    currently only supports the top-1 prediction.
    the output goes in the following format:
{id : answer string}
'''
predDict = dict()
for title in predictions.keys():
for pred in predictions[title]:
if len(pred["prediction"] ) == 0:
continue
predDict[pred["id"] ] = pred["prediction"][0]
with open(filePath, "w") as outFile:
json.dump(predDict, outFile)
def StripPunct(sentence):
sentence = sentence.replace("...", "<elli>")
if sentence[-1] == '.'\
or sentence[-1] == '?' \
or sentence[-1] == '!' \
or sentence[-1] == ';' \
or sentence[-1] == ",":
sentence = sentence[:-1]
sentence = sentence.replace("<elli>", "...")
return sentence
def ParseJsonData(data):
'''
@param data is a json object. This is the version before
visualization functionality.
'''
dataPerArticle = dict()
for article in data:
text = ""
# process articles to a list of sentences represented by list of words
for paragraph in article["paragraphs"]:
text += paragraph["context"].strip() + " "
textInSentences = TextToSentence(text)
queries = list()
answers = list()
qaIds = list()
for paragraph in article["paragraphs"]:
for qaPair in paragraph["qas"]:
# turn everything into lower cases
queries.append(StripPunct(qaPair["question"].lower().strip() ) )
answers.append(StripPunct(qaPair["answers"][0]["text"].lower().strip() ) )
qaIds.append(qaPair["id"] )
dataPerArticle[article["title"] ] = { \
"textInSentences": textInSentences,
"queries": queries,
"answers": answers,
"qaIds": qaIds
}
return dataPerArticle
def TextToSentence(text):
'''
    cut a document into sentences, protecting abbreviations, acronyms,
    numbers, and websites from false splits
    @param text: the document as a single string
    @return sentences: list of sentences, each a full string
'''
caps = "([A-Z])"
prefixes = "(Mr|St|Mrs|Ms|Dr)[.]"
suffixes = "(Inc|Ltd|Jr|Sr|Co|Corp)"
starters = "(Mr|Mrs|Ms|Dr|He\s|She\s|It\s|They\s|Their\s|Our\s|We\s|But\s|However\s|That\s|This\s|Wherever)"
acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
websites = "[.](com|net|org|io|gov)"
numbers = "([-+]?)([0-9]+)(\.)([0-9]+)"
text = " " + text + " "
text = text.replace("\n"," ")
text = re.sub(prefixes,"\\1<prd>",text)
text = re.sub(websites,"<prd>\\1",text)
if "Ph.D" in text: text = text.replace("Ph.D.","Ph<prd>D<prd>")
text = re.sub("\s" + caps + "[.] "," \\1<prd> ",text)
text = re.sub(acronyms+" "+starters,"\\1<stop> \\2",text)
text = re.sub(caps + "[.]" + caps + "[.]" + caps + "[.]","\\1<prd>\\2<prd>\\3<prd>",text)
text = re.sub(caps + "[.]" + caps + "[.]","\\1<prd>\\2<prd>",text)
text = re.sub(caps + "[.] " + caps + "[.] " + caps + "[.] ","\\1<prd> \\2<prd> \\3<prd>",text)
text = re.sub(caps + "[.] " + caps + "[.] ","\\1<prd> \\2<prd>",text)
text = re.sub(" "+suffixes+"[.] "+starters," \\1<stop> \\2",text)
text = re.sub(" "+suffixes+"[.]"," \\1<prd>",text)
text = re.sub(" " + caps + "[.]"," \\1<prd>",text)
text = re.sub(numbers, "\\g<1>\\g<2><prd>\\g<4>", text)
    # specific to the current SQuAD dataset
text = text.lower()
suffixesSupp = "(\.)([a-z]+)"
text = re.sub(suffixesSupp,"<prd>\\2",text)
text = text.replace("...", "<elli>")
text = text.replace("i.e.", "i<prd>e<prd>")
text = text.replace("etc.", "etc<prd>")
text = text.replace("u.s.", "u<prd>s<prd>")
text = text.replace("v.s.", "v<prd>s<prd>")
text = text.replace("vs.", "vs<prd>")
text = text.replace(" v. ", " v<prd> ")
text = text.replace("med.sc.d", "med<prd>sc<prd>d")
text = text.replace("ecl.", "ecl<prd>")
text = text.replace("hma.", "hma<prd>")
text = text.replace("(r.", "(r<prd>") # for some year related staff
text = text.replace("(d.", "(d<prd>")
if "\"" in text: text = text.replace(".\"","\".")
if "!" in text: text = text.replace("!\"","\"!")
if "?" in text: text = text.replace("?\"","\"?")
text = text.replace(".",".<stop>")
text = text.replace("?","?<stop>")
text = text.replace("!","!<stop>")
text = text.replace("<prd>",".")
text = text.replace("<elli>", "...")
sentences = text.split("<stop>")
sentences = [s.strip() \
for s in sentences if s.strip() != '']
return sentences
def SentenceToWord(sentences):
'''
cut sentences to list of words
@param sentences: a list of sentences
@return sentencesInWords: a list containing list of words
'''
delimiters = "[ ,;\"\n\(\)]+"
sentencesInWords = list()
for sentence in sentences:
sentence = StripPunct(sentence)
sentence = sentence.replace("...", " ...")
sentencesInWords.append(re.split(delimiters, sentence) )
# omit the empty word produced by re.split
sentencesInWords[-1] = [s.strip().lower() for s in sentencesInWords[-1] if s.strip() != '']
return sentencesInWords
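# A minimal usage sketch (not part of the original module), tracing the two
# helpers above on a passage with an abbreviation and a decimal number, both
# of which the substitution rules protect from false sentence splits:
# >>> TextToSentence("Dr. Smith pays $3.50. He is happy!")
# ['dr. smith pays $3.50.', 'he is happy!']
# >>> SentenceToWord(['dr. smith pays $3.50.'])
# [['dr.', 'smith', 'pays', '$3.50']]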
############### helper to multiprocess per article task with MultiprocessorCPU
def MultipleProcess(agent, titleList, targetFunc, conservative=True, debug=False):
'''
target function is the one we want to execute
for each article. When conservative == True, the num of threads
is equal to the number of cores on the machine
'''
procs = []
manager = mp.Manager()
returnDict = manager.dict()
if debug:
for title in titleList:
targetFunc(agent, title, returnDict)
else:
for title in titleList:
p = mp.Process(target=targetFunc,
args=(agent, title, returnDict) )
procs.append(p)
processor = MultiProcessorCPU(procs)
processor.run(conservative)
return returnDict
################ helpers for protobuf based dataset#################
def ReconstructStrFromSpan(tokens, span=None):
'''
@param tokens: a protobuf object representing a list of tokens
@param span: a pair (beginId, endId). Note endId is excluded.
'''
if span is None:
span = (0, len(tokens))
string = ""
beginId, endId = span
for i in range(beginId, endId):
string += tokens[i].word + tokens[i].after
string = string.strip()
return string
def GetContextBigram(article):
'''
    article is a protobuf object for a specific article
'''
bigram = []
for paragraph in article.paragraphs:
bigramByPara = list()
for s in paragraph.context.sentence:
bigramByPara.append(GetBigramBySentence(s.token) )
bigram.append(bigramByPara)
return bigram
def GetContextUnigram(article):
unigram = []
for paragraph in article.paragraphs:
unigramByPara = list()
for s in paragraph.context.sentence:
unigramBySentence = [token.word.lower() for token in s.token]
unigramByPara.append(unigramBySentence)
unigram.append(unigramByPara)
return unigram
def GetBigramBySentence(tokens):
'''
tokens is a list of proto message object tokens
'''
bigram = []
for i in range(len(tokens) - 1):
bigram.append( (tokens[i].word.lower(), tokens[i + 1].word.lower() ) )
return bigram
def GetContextConstituentSpan(article):
'''
@return span: the spans are organized by the following hierarchy
span = [spanByPara1, spanByPara2, ...] Where
spanByPara1 = [spanBySentence1, spanBySentence2, ...]
spanBySentence1 is a list of spans extracted from the parsing tree
'''
span = []
for paragraph in article.paragraphs:
spanByPara = list()
for s in paragraph.context.sentence:
# tokens = [token.word for token in s.token]
spanBySentence = GetConstituentSpanBySentence(s.parseTree)
spanByPara.append(spanBySentence)
span.append(spanByPara)
return span
def GetConstituentSpanBySentence(parseTree):
'''
@param parseTree: a protobuf object
extract span represented by nodes in the parsing trees
'''
def AddSpanToParseTree(parseTree, nextLeaf):
'''
@param parseTree: a protobuf object
fill in the yieldBeginIndex and yieldEndIndex fields for parsing trees
'''
if len(parseTree.child) == 0:
parseTree.yieldBeginIndex = nextLeaf
parseTree.yieldEndIndex = nextLeaf + 1
return parseTree, nextLeaf + 1
else:
for i in range(len(parseTree.child) ):
child, nextLeaf = \
AddSpanToParseTree(parseTree.child[i], nextLeaf)
parseTree.child[i].CopyFrom(child)
parseTree.yieldBeginIndex = parseTree.child[0].yieldBeginIndex
parseTree.yieldEndIndex = parseTree.child[-1].yieldEndIndex
return parseTree, nextLeaf
parseTree, _ = AddSpanToParseTree(parseTree, nextLeaf=0)
spans = list()
visitList = list()
visitList.append(parseTree)
tokenList = list()
while len(visitList) != 0:
node = visitList.pop(0)
spans.append( (node.yieldBeginIndex, node.yieldEndIndex) )
for subTree in node.child:
visitList.append(subTree)
spansUniq = []
[spansUniq.append(span) for span in spans if span not in spansUniq]
return spansUniq
# some functions for debug
def GetCandidateAnsListInStr(candDataPerArticle, origDataPerArtice, ids, predId):
'''
for detailed use browse to prediction function of context rnn
'''
ansList = list()
for idx in ids:
predInfo = candDataPerArticle.candidateAnswers[idx]
predParaId = predInfo.paragraphIndex
predSenId = predInfo.sentenceIndex
predSpanStart = predInfo.spanBeginIndex
predSpanEnd = predInfo.spanBeginIndex + predInfo.spanLength
tokens = origDataPerArticle.paragraphs[predParaId].context.sentence[predSenId].token[predSpanStart:predSpanEnd]
predStr = ReconstructStrFromSpan(tokens, (0, len(tokens) ) )
ansList.append(predStr)
return ansList
# for serializing complex results
def ObjDict(obj):
return obj.__dict__
# display proto tokens
def PrintProtoToken(tokens):
print([t.word for t in tokens])
# remove a leading "The"/"the" and a trailing "." from tokens
def StandarizeToken(tokens):
if tokens[-1].word == ".":
tokens = tokens[:-1]
if len(tokens) > 0 and (tokens[0].word == "The" or tokens[0].word == "the"):
tokens = tokens[1:]
return tokens
def GetLongestCommonSubList(s1, s2):
    m = [[0] * (1 + len(s2)) for _ in range(1 + len(s1))]
    longest, x_longest = 0, 0
    for x in range(1, 1 + len(s1)):
        for y in range(1, 1 + len(s2)):
            if s1[x - 1] == s2[y - 1]:
                m[x][y] = m[x - 1][y - 1] + 1
                if m[x][y] > longest:
                    longest = m[x][y]
                    x_longest = x
            else:
                m[x][y] = 0
    return s1[x_longest - longest: x_longest]
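# Example (added for clarity): GetLongestCommonSubList([1, 2, 3, 4], [0, 2, 3, 5])
# returns [2, 3], the longest contiguous run shared by the two lists.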
def UnkrizeData(data, rate, padId, unkId):
'''
artificially set non-<pad> tokens to <unk>. The portion of
the artificial <unk> is indicated by rate.
'''
mask = np.random.uniform(low=0.0, high=1.0, size=data.shape)
mask = np.logical_and( (data != padId), (mask >= (1 - rate) ) )
data[mask] = unkId
return data
| 32.605744
| 119
| 0.596493
| 1,463
| 12,488
| 5.084074
| 0.255639
| 0.043022
| 0.044367
| 0.017478
| 0.1452
| 0.080936
| 0.070449
| 0.051089
| 0.045577
| 0.045577
| 0
| 0.01018
| 0.252723
| 12,488
| 382
| 120
| 32.691099
| 0.786862
| 0.170964
| 0
| 0.103734
| 0
| 0.004149
| 0.092795
| 0.025141
| 0
| 0
| 0
| 0
| 0
| 1
| 0.087137
| false
| 0
| 0.033195
| 0.004149
| 0.20332
| 0.004149
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47f00c4575c588196fb02578a13c75df9196c8ba
| 476
|
py
|
Python
|
super_nft/blueprints/datasprint/datasprint.py
|
Blockchain-Key/Super-NFT
|
3983621127636bf9d4da740a5ac60451a3e5bbe8
|
[
"MIT"
] | 5
|
2021-05-02T00:06:41.000Z
|
2021-11-30T10:34:08.000Z
|
super_nft/blueprints/datasprint/datasprint.py
|
Blockchain-Key/Super-NFT
|
3983621127636bf9d4da740a5ac60451a3e5bbe8
|
[
"MIT"
] | 3
|
2021-05-06T09:31:49.000Z
|
2021-05-11T05:14:32.000Z
|
super_nft/blueprints/datasprint/datasprint.py
|
Blockchain-Key/Super-NFT
|
3983621127636bf9d4da740a5ac60451a3e5bbe8
|
[
"MIT"
] | 1
|
2021-05-06T15:34:24.000Z
|
2021-05-06T15:34:24.000Z
|
# -*- coding: utf-8 -*-
"""User views."""
from flask import Blueprint, render_template, jsonify
from flask_login import login_required
from super_nft.extensions import csrf_protect
datasprint_bp = Blueprint("datasprint", __name__, url_prefix="/datasprint", static_folder="../static")
@csrf_protect.exempt
@datasprint_bp.route("/", methods=["GET", "POST"])
def index():
data = {
"result": "hello, world",
"code": "200"
}
return jsonify(data), 200
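# Hedged sketch (not from the original repo): wiring the blueprint into an
# application. `create_app` is an illustrative name; super_nft's real app
# factory may differ.
from flask import Flask

def create_app():
    app = Flask(__name__)
    app.register_blueprint(datasprint_bp)  # serves GET/POST at /datasprint/
    return app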
| 28
| 102
| 0.682773
| 57
| 476
| 5.45614
| 0.701754
| 0.057878
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017456
| 0.157563
| 476
| 16
| 103
| 29.75
| 0.758105
| 0.071429
| 0
| 0
| 0
| 0
| 0.144495
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.25
| 0
| 0.416667
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47f0f24b25872b88a91afd63b72991904ea663bc
| 658
|
py
|
Python
|
python/AULAS/aula20.py
|
Robert-Marchinhaki/primeiros-passos-Python
|
515c2c418bfb941bd9af14cf598eca7fe2985592
|
[
"MIT"
] | null | null | null |
python/AULAS/aula20.py
|
Robert-Marchinhaki/primeiros-passos-Python
|
515c2c418bfb941bd9af14cf598eca7fe2985592
|
[
"MIT"
] | null | null | null |
python/AULAS/aula20.py
|
Robert-Marchinhaki/primeiros-passos-Python
|
515c2c418bfb941bd9af14cf598eca7fe2985592
|
[
"MIT"
] | null | null | null |
def linhas(cor, txt):
if pintar in cores:
print(cores[pintar])
print(txt)
cores = {'vermelho': '\033[31m',
'azul': '\033[34m',
'amarelo': '\033[33m',
'branco': '\033[30m',
'roxo': '\033[35m',
'verde': '\033[32m',
'ciano': '\033[36m',
'limpa': '\033[m',
'preto e branco': '\033[7;30;m'}
pintar = str(input('Deseja pintar o seu texto com qual cor? ')).lower()
while pintar not in cores:
pintar = str(input('Erro! Essa cor não existe. Tente novamente: '))
if pintar in cores:
break
texto = str(input('Digite seu texto: '))
linhas(cor=pintar, txt=texto)
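# Hedged aside (not part of the original lesson file): the '\033[..m' values
# in the dict above are standard ANSI SGR escape sequences; '\033[m' resets
# terminal styling.
print('\033[31m' + 'red text' + '\033[m')  # renders "red text" in red on ANSI terminals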
| 26.32
| 71
| 0.542553
| 88
| 658
| 4.056818
| 0.556818
| 0.058824
| 0.056022
| 0.084034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091667
| 0.270517
| 658
| 24
| 72
| 27.416667
| 0.652083
| 0
| 0
| 0.1
| 0
| 0
| 0.354642
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0
| 0
| 0.05
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47f182f38e59b731af6d6326b1c317ab14b2b7e5
| 992
|
py
|
Python
|
FatherSon/HelloWorld2_source_code/Listing_20-2.py
|
axetang/AxePython
|
3b517fa3123ce2e939680ad1ae14f7e602d446a6
|
[
"Apache-2.0"
] | 1
|
2019-01-04T05:47:50.000Z
|
2019-01-04T05:47:50.000Z
|
FatherSon/HelloWorld2_source_code/Listing_20-2.py
|
axetang/AxePython
|
3b517fa3123ce2e939680ad1ae14f7e602d446a6
|
[
"Apache-2.0"
] | null | null | null |
FatherSon/HelloWorld2_source_code/Listing_20-2.py
|
axetang/AxePython
|
3b517fa3123ce2e939680ad1ae14f7e602d446a6
|
[
"Apache-2.0"
] | null | null | null |
# Listing_20-2.py
# Copyright Warren & Carter Sande, 2013
# Released under MIT license http://www.opensource.org/licenses/mit-license.php
# Version $version ----------------------------
# Adding an event handler for the button
import sys
from PyQt4 import QtCore, QtGui, uic
form_class = uic.loadUiType("MyFirstGui.ui")[0]
# Class definition for the main window
class MyWindowClass(QtGui.QMainWindow, form_class):
def __init__(self, parent=None):
QtGui.QMainWindow.__init__(self, parent)
self.setupUi(self)
self.pushButton.clicked.connect(self.button_clicked) # connect the event handler
# the event handler for the button click
def button_clicked(self):
x = self.pushButton.x()
y = self.pushButton.y()
x += 50
y += 50
self.pushButton.move(x, y) # Move the button when we click it
app = QtGui.QApplication(sys.argv)
myWindow = MyWindowClass()
myWindow.show()
app.exec_()
| 31
| 90
| 0.654234
| 127
| 992
| 5
| 0.535433
| 0.088189
| 0.047244
| 0.056693
| 0.075591
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016861
| 0.222782
| 992
| 31
| 91
| 32
| 0.806744
| 0.356855
| 0
| 0
| 0
| 0
| 0.021776
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.277778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47f231b8a668477769e2a9abd3723ae4eedc3e54
| 1,072
|
py
|
Python
|
raspberry/serial_stub.py
|
idf/Robot-In-Maze
|
2301021c39f36a01ff97af26c54d41fedbe1608c
|
[
"MIT"
] | 16
|
2015-04-04T15:26:01.000Z
|
2019-10-15T16:13:03.000Z
|
raspberry/serial_stub.py
|
idf/Robot-In-Maze
|
2301021c39f36a01ff97af26c54d41fedbe1608c
|
[
"MIT"
] | null | null | null |
raspberry/serial_stub.py
|
idf/Robot-In-Maze
|
2301021c39f36a01ff97af26c54d41fedbe1608c
|
[
"MIT"
] | 7
|
2015-10-12T21:23:12.000Z
|
2021-10-13T02:41:25.000Z
|
from serial_comminication import *
from utils.decorators import Override
__author__ = 'Danyang'
class SerialAPIStub(SerialAPI):
@Override(SerialAPI)
def __init__(self):
super(SerialAPIStub, self).__init__(production=False)
@Override(SerialAPI)
def command_put(self, function, parameter):
        if function == 10:
self.responses_outgoing.put([False, SENSOR, json.dumps({
"sensors": [{"sensor": 0, "value": 20}, {"sensor": 1, "value": 20}, {"sensor": 2, "value": 40},
{"sensor": 10, "value": 10}, {"sensor": 11, "value": 30}, {"sensor": 12, "value": 30}]})])
self.responses_outgoing.put([True, FUNCTION, json.dumps({"function": function, "status": 200})])
else:
self.responses_outgoing.put([True, FUNCTION, json.dumps({"function": function, "status": 200})])
@Override(SerialAPI)
def response_pop(self):
"""
:return: [ack, type_data, data] : [bool, int, json_str]
"""
return super(SerialAPIStub, self).response_pop()
| 38.285714
| 118
| 0.602612
| 115
| 1,072
| 5.434783
| 0.452174
| 0.0816
| 0.096
| 0.1152
| 0.224
| 0.224
| 0.224
| 0.224
| 0.224
| 0.224
| 0
| 0.035152
| 0.23041
| 1,072
| 27
| 119
| 39.703704
| 0.722424
| 0.051306
| 0
| 0.263158
| 0
| 0
| 0.108871
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.157895
| false
| 0
| 0.105263
| 0
| 0.368421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47f2d05914db9e80d9759d21867bc5761abeee91
| 1,550
|
py
|
Python
|
algorithms/counting_sort.py
|
ArziPL/Other
|
1319ac85b19a5c49fb70e902e3e37f2e7a192d0b
|
[
"MIT"
] | null | null | null |
algorithms/counting_sort.py
|
ArziPL/Other
|
1319ac85b19a5c49fb70e902e3e37f2e7a192d0b
|
[
"MIT"
] | null | null | null |
algorithms/counting_sort.py
|
ArziPL/Other
|
1319ac85b19a5c49fb70e902e3e37f2e7a192d0b
|
[
"MIT"
] | null | null | null |
# Best : O(n + k)
# Avg : O(n + k)
# Worst O(n + k)
# Space worst : O(k) - CAN GET VERY BIG BIG
# k - range of values in array
# Take every number in arr then add += 1 to index of that number in temporary arrays, then
# for every index in temporary arrays add to final_arr that amount of that index number of
# how big number at that index is - if arr[23] = 3 then add 23 23 23 and same for negatives
# 1. The whole thing can get very ineffective if numbers in arr are big
# 2. Possibility of sorting negatives greatly increase time/space complexity
# create array len of min(to_sort), do the same as positives, then for final result multiply by -1 to get negatives
# and reverse them because we were counting them as positive => [1,5,10] => [-1,-5,-10] => [-10,-5,-1] and
# add that array at beginning of positive_sorted
to_sort = [52, 63, 12, 6, 631, 6, 24, 637,
64, 421, 74, 124, 0, -5, 523, -10, -529]
def counting_sort(arr: list):
positive_list = [0] * (max(arr)+1)
negative_list = [0] * (-1*(min(arr)-1))
final_positive = []
final_negative = []
for i in arr:
if i < 0:
ti = -i
negative_list[ti] += 1
else:
positive_list[i] += 1
for inx, i in enumerate(positive_list):
final_positive.append(i*[inx])
for inx, i in enumerate(negative_list):
final_negative.append(i*[-inx])
final_negative.reverse()
return [num for sublist in final_negative + final_positive for num in sublist]
print(counting_sort(to_sort))
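# Hedged sanity check (not in the original file): the result should agree with
# Python's built-in sort.
assert counting_sort(to_sort) == sorted(to_sort)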
| 34.444444
| 119
| 0.642581
| 259
| 1,550
| 3.76834
| 0.393822
| 0.053279
| 0.009221
| 0.018443
| 0.036885
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059329
| 0.249677
| 1,550
| 44
| 120
| 35.227273
| 0.77988
| 0.52129
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0
| 0
| 0.1
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47f4980d53b9e0ce1e873da3c9bbca1b3052a8de
| 5,881
|
py
|
Python
|
scheduler/notebooks/figures/evaluation/utils.py
|
akshayka/gavel
|
40a22a725f2e70478483e98c9b07c6fc588e0c40
|
[
"MIT"
] | 67
|
2020-09-07T11:50:03.000Z
|
2022-03-31T04:09:08.000Z
|
scheduler/notebooks/figures/evaluation/utils.py
|
akshayka/gavel
|
40a22a725f2e70478483e98c9b07c6fc588e0c40
|
[
"MIT"
] | 7
|
2020-09-27T01:41:59.000Z
|
2022-03-25T05:16:43.000Z
|
scheduler/notebooks/figures/evaluation/utils.py
|
akshayka/gavel
|
40a22a725f2e70478483e98c9b07c6fc588e0c40
|
[
"MIT"
] | 12
|
2020-10-13T14:31:01.000Z
|
2022-02-14T05:44:38.000Z
|
import os
import random
import re
import numpy as np
np.set_printoptions(precision=3, suppress=True)
import sys; sys.path.append("../../..")
from job_table import JobTable
def get_logfile_paths_helper(directory_name):
logfile_paths = []
for root, _, file_names in os.walk(directory_name):
if len(file_names) > 0:
logfile_paths.extend(
[os.path.join(root, file_name)
for file_name in file_names])
return logfile_paths
def get_logfile_paths(directory_name, static_trace=False):
logfile_paths = []
for logfile_path in get_logfile_paths_helper(directory_name):
if static_trace:
m = re.match(
r'.*v100=(\d+)\.p100=(\d+)\.k80=(\d+)/(.*)/seed=(\d+)/'
                r'num_total_jobs=(\d+)\.log', logfile_path)
else:
m = re.match(
r'.*v100=(\d+)\.p100=(\d+)\.k80=(\d+)/(.*)/seed=(\d+)/'
                r'lambda=(\d+\.\d+)\.log', logfile_path)
if m is None: continue
v100s = int(m.group(1))
p100s = int(m.group(2))
k80s = int(m.group(3))
policy = m.group(4)
seed = int(m.group(5))
lambda_or_num_total_jobs = float(m.group(6))
logfile_paths.append((v100s, p100s, k80s, policy, seed,
lambda_or_num_total_jobs, logfile_path))
return logfile_paths
def prune(logfile_paths, v100s, p100s, k80s, policy, seed=None):
if seed is None:
return sorted([(x[5], x[6], x[4]) for x in logfile_paths
if x[0] == v100s and x[1] == p100s and
x[2] == k80s and x[3] == policy])
else:
return sorted([(x[5], x[6]) for x in logfile_paths
if x[0] == v100s and x[1] == p100s and
x[2] == k80s and x[3] == policy and
x[4] == seed])
def average_jct_fn(logfile_path, min_job_id=None, max_job_id=None):
job_completion_times = []
with open(logfile_path, 'r') as f:
lines = f.readlines()
for line in lines[-10000:]:
m = re.match(r'Job (\d+): (\d+\.\d+)', line)
if m is not None:
job_id = int(m.group(1))
job_completion_time = float(m.group(2))
if min_job_id is None or min_job_id <= job_id:
if max_job_id is None or job_id <= max_job_id:
job_completion_times.append(
job_completion_time)
if len(job_completion_times) == 0:
return None
return np.mean(job_completion_times) / 3600
def average_jct_low_priority_fn(logfile_path, min_job_id=None,
max_job_id=None):
job_completion_times = []
with open(logfile_path, 'rb') as f:
f.seek(-8192, os.SEEK_END)
text = f.read().decode('utf-8')
lines = text.split('\n')
for line in lines[-5:]:
m = re.match(r'Average job completion time \(low priority\): (\d+\.\d+) seconds', line)
if m is not None:
return float(m.group(1)) / 3600
return None
def average_jct_high_priority_fn(logfile_path, min_job_id=None,
max_job_id=None):
job_completion_times = []
with open(logfile_path, 'rb') as f:
f.seek(-8192, os.SEEK_END)
text = f.read().decode('utf-8')
lines = text.split('\n')
for line in lines[-5:]:
m = re.match(r'Average job completion time \(high priority\): (\d+\.\d+) seconds', line)
if m is not None:
return float(m.group(1)) / 3600
return None
def makespan_fn(logfile_path):
job_completion_times = []
with open(logfile_path, 'r') as f:
lines = f.readlines()
for line in lines[-10000:]:
m = re.match(r'Total duration: (\d+\.\d+) seconds', line)
if m is not None:
makespan = float(m.group(1)) / 3600.
return makespan
return None
def get_job_durations(seed, generate_multigpu_jobs):
job_generator = random.Random()
job_generator.seed(seed+2)
job_durations = []
for i in range(5000):
r = job_generator.uniform(0, 1)
scale_factor = 1
if 0.7 <= r <= 0.8:
scale_factor = 2
elif 0.8 <= r <= 0.95:
scale_factor = 4
elif 0.95 <= r:
scale_factor = 8
if not generate_multigpu_jobs:
scale_factor = 1
if job_generator.random() >= 0.8:
job_duration = 60 * (10 ** job_generator.uniform(3, 4))
else:
job_duration = 60 * (10 ** job_generator.uniform(1.5, 3))
while True:
job_template = job_generator.choice(JobTable)
if (scale_factor == 1 or
(scale_factor > 1 and job_template.distributed)):
break
job_durations.append((job_duration, job_template, scale_factor))
return job_durations
def get_jcts(logfile_path, seed, min_job_id=None, max_job_id=None):
job_completion_times = []
job_durations = get_job_durations(seed, generate_multigpu_jobs=True)
with open(logfile_path, 'r') as f:
lines = f.readlines()
for line in lines[-10000:]:
m = re.match(r'Job (\d+): (\d+\.\d+)', line)
if m is not None:
job_id = int(m.group(1))
job_completion_time = float(m.group(2))
if min_job_id is None or min_job_id <= job_id:
if max_job_id is None or job_id <= max_job_id:
job_duration, job_template, scale_factor = job_durations[job_id]
job_completion_times.append(
(job_completion_time, job_duration))
return [(x[0] / 3600.0, x[1] / 3600.0) for x in job_completion_times]
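# Hedged illustration (not in the original module): the logfile-path shape the
# static-trace branch of get_logfile_paths expects. The path below is made up.
_demo_path = 'logs/v100=4.p100=4.k80=4/max_min_fairness/seed=0/num_total_jobs=100.log'
_demo_match = re.match(
    r'.*v100=(\d+)\.p100=(\d+)\.k80=(\d+)/(.*)/seed=(\d+)/'
    r'num_total_jobs=(\d+)\.log', _demo_path)
assert _demo_match is not None and _demo_match.group(4) == 'max_min_fairness'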
| 38.188312
| 100
| 0.550757
| 814
| 5,881
| 3.762899
| 0.163391
| 0.037545
| 0.058766
| 0.020568
| 0.582109
| 0.553379
| 0.492981
| 0.445315
| 0.445315
| 0.409729
| 0
| 0.045798
| 0.324265
| 5,881
| 153
| 101
| 38.437909
| 0.724962
| 0
| 0
| 0.463768
| 0
| 0
| 0.065465
| 0.025676
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065217
| false
| 0
| 0.043478
| 0
| 0.210145
| 0.007246
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47f4fb021dc13ce9ce0d5ff354639ce8927eaf9b
| 883
|
py
|
Python
|
scripts/practice/FB-reRun/ MoveZeroesToEnd.py
|
bhimeshchauhan/competitive_programming
|
e0777bb0c425ffa03d8173a83e50ca55c4a3fcf5
|
[
"MIT"
] | null | null | null |
scripts/practice/FB-reRun/ MoveZeroesToEnd.py
|
bhimeshchauhan/competitive_programming
|
e0777bb0c425ffa03d8173a83e50ca55c4a3fcf5
|
[
"MIT"
] | 8
|
2020-09-05T16:04:31.000Z
|
2022-02-27T09:57:51.000Z
|
scripts/practice/FB-reRun/ MoveZeroesToEnd.py
|
bhimeshchauhan/competitive_programming
|
e0777bb0c425ffa03d8173a83e50ca55c4a3fcf5
|
[
"MIT"
] | null | null | null |
"""
Move Zeroes - https://leetcode.com/problems/move-zeroes/
Given an integer array nums, move all 0's to the end of it while maintaining the relative order of the non-zero elements.
Note that you must do this in-place without making a copy of the array.
Example 1:
Input: nums = [0,1,0,3,12]
Output: [1,3,12,0,0]
Example 2:
Input: nums = [0]
Output: [0]
Constraints:
1 <= nums.length <= 10^4
-2^31 <= nums[i] <= 2^31 - 1
Follow up: Could you minimize the total number of operations done?
"""
from typing import List


class Solution:
def moveZeroes(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
pos = 0
for idx, val in enumerate(nums):
if val != 0:
                if pos != idx:
                    nums[idx], nums[pos] = nums[pos], nums[idx]
                pos += 1
return
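# Hedged usage sketch (not part of the original solution): moveZeroes mutates
# the list in place and returns None.
_nums = [0, 1, 0, 3, 12]
Solution().moveZeroes(_nums)
assert _nums == [1, 3, 12, 0, 0]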
| 20.534884
| 121
| 0.583239
| 133
| 883
| 3.87218
| 0.578947
| 0.038835
| 0.038835
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050081
| 0.298981
| 883
| 42
| 122
| 21.02381
| 0.781906
| 0.621744
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47f6c684577e0c1a7c425c6ef180e8ab456e667e
| 1,503
|
py
|
Python
|
django_stormpath/id_site.py
|
stormpath/stormpath-django
|
af60eb5da2115d94ac313613c5d4e6b9f3d16157
|
[
"Apache-2.0"
] | 36
|
2015-01-13T00:21:07.000Z
|
2017-11-07T11:45:25.000Z
|
django_stormpath/id_site.py
|
stormpath/stormpath-django
|
af60eb5da2115d94ac313613c5d4e6b9f3d16157
|
[
"Apache-2.0"
] | 55
|
2015-01-07T09:53:50.000Z
|
2017-02-07T00:31:20.000Z
|
django_stormpath/id_site.py
|
stormpath/stormpath-django
|
af60eb5da2115d94ac313613c5d4e6b9f3d16157
|
[
"Apache-2.0"
] | 24
|
2015-01-06T16:17:33.000Z
|
2017-04-21T14:00:16.000Z
|
from django.contrib.auth import login as django_login
from django.contrib.auth import logout as django_logout
from django.http import HttpResponseRedirect
from django.shortcuts import resolve_url
from django.conf import settings
from .backends import StormpathIdSiteBackend
ID_SITE_STATUS_AUTHENTICATED = 'AUTHENTICATED'
ID_SITE_STATUS_LOGOUT = 'LOGOUT'
ID_SITE_STATUS_REGISTERED = 'REGISTERED'
ID_SITE_AUTH_BACKEND = 'django_stormpath.backends.StormpathIdSiteBackend'
def _get_django_user(account):
backend = StormpathIdSiteBackend()
return backend.authenticate(account=account)
def _handle_authenticated(request, id_site_response):
user = _get_django_user(id_site_response.account)
user.backend = ID_SITE_AUTH_BACKEND
django_login(request, user)
redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)
return HttpResponseRedirect(redirect_to)
def _handle_logout(request, id_site_response):
django_logout(request)
redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)
return HttpResponseRedirect(redirect_to)
_handle_registered = _handle_authenticated
def handle_id_site_callback(request, id_site_response):
if id_site_response:
action = CALLBACK_ACTIONS[id_site_response.status]
return action(request, id_site_response)
else:
return None
CALLBACK_ACTIONS = {
ID_SITE_STATUS_AUTHENTICATED: _handle_authenticated,
ID_SITE_STATUS_LOGOUT: _handle_logout,
ID_SITE_STATUS_REGISTERED: _handle_registered,
}
| 28.358491
| 73
| 0.809714
| 185
| 1,503
| 6.151351
| 0.221622
| 0.084359
| 0.086116
| 0.073814
| 0.332162
| 0.140598
| 0.140598
| 0.140598
| 0.140598
| 0.140598
| 0
| 0
| 0.133067
| 1,503
| 52
| 74
| 28.903846
| 0.873369
| 0
| 0
| 0.114286
| 0
| 0
| 0.051231
| 0.031936
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114286
| false
| 0
| 0.171429
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47f782c40ce2bf55510e810deac00bf9b89ac029
| 445
|
py
|
Python
|
dp/sequence/perfect-square.py
|
windowssocket/py_leetcode
|
241dbf8d7dab7db5215c2526321fcdb378b45492
|
[
"Apache-2.0"
] | 3
|
2018-05-29T02:29:40.000Z
|
2020-02-05T03:28:16.000Z
|
dp/sequence/perfect-square.py
|
xidongc/py_leetcode
|
241dbf8d7dab7db5215c2526321fcdb378b45492
|
[
"Apache-2.0"
] | 1
|
2019-03-08T13:22:32.000Z
|
2019-03-08T13:22:32.000Z
|
dp/sequence/perfect-square.py
|
xidongc/py_leetcode
|
241dbf8d7dab7db5215c2526321fcdb378b45492
|
[
"Apache-2.0"
] | 3
|
2018-05-29T11:50:24.000Z
|
2018-11-27T12:31:01.000Z
|
# https://leetcode.com/problems/perfect-squares/description/
# dp alg, time complexity: O(n^2)
class Solution(object):
def numSquares(self, n):
"""
:type n: int
:rtype: int
"""
dp = [n for _ in range(n+1)]
dp[0] = 0
for i in range(1, n+1):
j = 1
while i-j*j >= 0:
dp[i] = min(dp[i], dp[i-j*j] + 1)
j += 1
return dp[n]
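# Hedged usage sketch (not in the original file): 12 = 4 + 4 + 4, so three
# perfect squares suffice.
assert Solution().numSquares(12) == 3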
| 22.25
| 60
| 0.438202
| 65
| 445
| 2.984615
| 0.507692
| 0.030928
| 0.030928
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037736
| 0.404494
| 445
| 19
| 61
| 23.421053
| 0.69434
| 0.260674
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47f8383750414c949b888bd3081dff4a804800b1
| 1,416
|
py
|
Python
|
Projetos_Pessoais/projeto_sorteio/sorteio.py
|
thiagomath/Python
|
dd73154e347c75a65a74e047ba880cc1f7dc1f91
|
[
"MIT"
] | null | null | null |
Projetos_Pessoais/projeto_sorteio/sorteio.py
|
thiagomath/Python
|
dd73154e347c75a65a74e047ba880cc1f7dc1f91
|
[
"MIT"
] | null | null | null |
Projetos_Pessoais/projeto_sorteio/sorteio.py
|
thiagomath/Python
|
dd73154e347c75a65a74e047ba880cc1f7dc1f91
|
[
"MIT"
] | null | null | null |
# Raffle program
from tkinter import *
'''import PySimpleGUI as sg'''
'''
#Layout
layout = [
[sg.Text('Nome:'), sg.Input()],
[sg.Button('OK')]
]
#Window
janela = sg.Window('Janela teste', layout)
#Interaction
eventos, valores = janela.Read()
#Message
print(f'Olá {valores[0]}, obrigado por usar PySimpleGUI!')
#Closing the window
janela.close()
'''
'''
cont = 0
participantes = dict()
for cont in range(0, 2):
participantes["nome"] = str(input('Digite o nome do participante: '))
participantes["numero"] = int(input('Digite o número do participante: '))
cont += 1
print(f'{cont} pessoas concorrendo ao sorteio!')
print(participantes)
'''
'''theme_name_list = sg.theme_list()
print(theme_name_list)'''
def pegar_cotacoes():
texto = 'xxx'
texto_cotacoes["text"] = texto
# Always start with:
janela = Tk()
janela.title('Sorteio T-force')
janela.geometry("400x400")
# Guidance text:
texto_de_orientacao = Label(janela, text='Clique no botão para ver as cotações das moedas')
# Text position:
texto_de_orientacao.grid(column=0, row=0, padx=10, pady=10)
# Button + function
botao = Button(janela, text="Buscar cotações Dólar, Euro e BTC", command=pegar_cotacoes)
botao.grid(column=0, row=1, padx=10, pady=10)
# Text for the exchange rates:
texto_cotacoes = Label(janela, text="")
texto_cotacoes.grid(column=0, row=2, padx=10, pady=10)
# Always end with:
janela.mainloop()
| 24.413793
| 91
| 0.699859
| 200
| 1,416
| 4.885
| 0.485
| 0.039918
| 0.033777
| 0.042989
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023908
| 0.143362
| 1,416
| 57
| 92
| 24.842105
| 0.781533
| 0.094633
| 0
| 0
| 0
| 0
| 0.186325
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.071429
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47fa30bd997d1a1670c7fee27600bd53764e519c
| 481
|
py
|
Python
|
flask_tutorial/flask_sqlite3/__init__.py
|
ftconan/python3
|
eb63ba33960072f792ecce6db809866b38c402f8
|
[
"MIT"
] | 1
|
2018-12-19T22:07:56.000Z
|
2018-12-19T22:07:56.000Z
|
flask_tutorial/flask_sqlite3/__init__.py
|
ftconan/python3
|
eb63ba33960072f792ecce6db809866b38c402f8
|
[
"MIT"
] | 12
|
2020-03-14T05:32:26.000Z
|
2022-03-12T00:08:49.000Z
|
flask_tutorial/flask_sqlite3/__init__.py
|
ftconan/python3
|
eb63ba33960072f792ecce6db809866b38c402f8
|
[
"MIT"
] | 1
|
2018-12-19T22:08:00.000Z
|
2018-12-19T22:08:00.000Z
|
"""
@author: magician
@file: __init__.py.py
@date: 2020/9/7
"""
from flask import Flask
from flask_tutorial.flask_sqlite3.flask_sqlite3 import SQLite3
app = Flask(__name__)
app.config.from_pyfile('the-config.cfg')
db = SQLite3(app)
@app.route('/')
def show_all():
"""
show_all
@return:
"""
# cur = db.connection.cursor()
# cur.execute('SELECT 1=1')
with app.app_context():
cur = db.connection.cursor()
cur.execute('SELECT 1=1')
| 17.814815
| 62
| 0.636175
| 66
| 481
| 4.409091
| 0.515152
| 0.061856
| 0.103093
| 0.14433
| 0.268041
| 0.268041
| 0.268041
| 0.268041
| 0.268041
| 0
| 0
| 0.036458
| 0.201663
| 481
| 26
| 63
| 18.5
| 0.721354
| 0.276507
| 0
| 0
| 0
| 0
| 0.078125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.2
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47fd0f1fa3538b0a659731489a61f441085833ad
| 516
|
py
|
Python
|
str_to_ num.py
|
maiconloure/Learning_Python
|
2999508909ace5f8ca0708cdea93b82abaaeafb2
|
[
"MIT"
] | null | null | null |
str_to_ num.py
|
maiconloure/Learning_Python
|
2999508909ace5f8ca0708cdea93b82abaaeafb2
|
[
"MIT"
] | null | null | null |
str_to_ num.py
|
maiconloure/Learning_Python
|
2999508909ace5f8ca0708cdea93b82abaaeafb2
|
[
"MIT"
] | null | null | null |
"""Transformando um string de numeros, em uma lista
com conjunto de numeros separads por \n"""
matrix = "1 2 3 4\n4 5 6 5\n7 8 9 6\n8 7 6 7"
print(matrix)
matrix = matrix.split("\n")
print(matrix)
matrix2 = []
for n in range(len(matrix)):
matrix[n] = matrix[n].split()
    matrix[n] = list(map(int, matrix[n]))  # e.g. turns the string '1' into the integer 1
    matrix2.append(matrix[n][0])  # take the first element/number of each row
for index in range(4):
print(matrix[index])
print()
print(matrix2)
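# Hedged sanity check (not in the original exercise): the first column
# collected in matrix2 should be [1, 4, 7, 8].
assert matrix2 == [1, 4, 7, 8]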
| 28.666667
| 85
| 0.672481
| 91
| 516
| 3.813187
| 0.549451
| 0.100865
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052133
| 0.182171
| 516
| 18
| 86
| 28.666667
| 0.770142
| 0.341085
| 0
| 0.153846
| 0
| 0
| 0.108434
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.384615
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
47ff4464ecaa8b0b0480823b9cf4bf43b54abcec
| 3,520
|
py
|
Python
|
pdf_poc/search.py
|
cr0hn/TestingBench
|
37975343cf9ccb019e8dc42404b5b321285b04b3
|
[
"BSD-3-Clause"
] | 5
|
2018-05-10T19:50:29.000Z
|
2018-05-10T20:07:08.000Z
|
pdf_poc/search.py
|
cr0hn/TestingBench
|
37975343cf9ccb019e8dc42404b5b321285b04b3
|
[
"BSD-3-Clause"
] | null | null | null |
pdf_poc/search.py
|
cr0hn/TestingBench
|
37975343cf9ccb019e8dc42404b5b321285b04b3
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from collections import defaultdict
from PyPDF2 import PdfFileReader
from PyPDF2.pdf import PageObject, ContentStream, TextStringObject, u_, i, b_
def is_continuation(content, item):
if content.operations[item - 1][1] == b_("Tm"):
# Search previous "Tm"
for bef in range(-2, -15, -1):
try:
if content.operations[item - bef][1] == b_("Tm"):
prev_val = content.operations[item - bef][0]
break
except IndexError:
return False
else:
return False
key_1_preve = '{0:.5f}'.format(prev_val[4]).split(".")[1]
key_2_preve = '{0:.5f}'.format(prev_val[5]).split(".")[1]
prev_curr = content.operations[item - 1][0]
key_1_curr = '{0:.5f}'.format(prev_curr[4]).split(".")[1]
key_2_curr = '{0:.5f}'.format(prev_curr[5]).split(".")[1]
# if key_1_curr != key_1_preve or key_2_curr != key_2_preve:
if key_1_curr == key_1_preve:
return True
return False
def is_header(content, item):
if content.operations[item - 1][1] == b_("Td"):
return True
elif content.operations[item - 1][1] == b_("Tm") and \
content.operations[item - 2][1] == b_("Tf"):
if content.operations[item - 3][1] == b_("BT") or \
content.operations[item - 3][1] == b_("scn"):
return True
else:
return False
else:
return False
def extractText_with_separator(self, remove_headers=False):
text = u_("")
content = self["/Contents"].getObject()
if not isinstance(content, ContentStream):
content = ContentStream(content, self.pdf)
# Note: we check all strings are TextStringObjects. ByteStringObjects
# are strings where the byte->string encoding was unknown, so adding
# them to the text here would be gibberish.
for item, (operands, operator) in enumerate(content.operations):
if operator == b_("Tj"):
# Skip headers?
if is_header(content, item):
continue
if not is_continuation(content, item):
text += "\n"
_text = operands[0]
if isinstance(_text, TextStringObject):
text += _text
elif operator == b_("T*"):
text += "\n"
elif operator == b_("'"):
text += "\n"
_text = operands[0]
if isinstance(_text, TextStringObject):
text += operands[0]
elif operator == b_('"'):
_text = operands[2]
if isinstance(_text, TextStringObject):
text += "\n"
text += _text
elif operator == b_("TJ"):
# Skip headers?
if is_header(content, item):
continue
if not is_continuation(content, item):
text += "\n"
for i in operands[0]:
if isinstance(i, TextStringObject):
text += i
# text += "\n"
return text
PageObject.extractText_with_separator = extractText_with_separator
KEYWORDS = ["procesos electorales"]
def find_in_pdf(pdf_path, keywords):
"""
Try to find a word list into pdf file.
.. note:
The line number is approximately, not exactly.
:param pdf_path: path to pdf
:type pdf_path: str
:param keywords: list of keyword to search
:type keywords: list(str)
:return: a structure like this: { PAGE_NUM: { LINE_NUM: TEXT_OF_LINE}
:rtype: dict(str: dict(int: str))
"""
pdf = PdfFileReader(open(pdf_path, 'rb'))
matches = defaultdict(dict)
for page_no, page in enumerate(pdf.pages, 1):
text = page.extractText_with_separator()
line_no = 1
# search
for keyword in keywords:
for line in text.split("\n"):
if not line:
continue
line_no += 1
if keyword in line.lower():
matches["page_%s" % page_no][line_no] = line
return matches
if __name__ == '__main__':
r = find_in_pdf("BOE.pdf", KEYWORDS)
print(r)
| 23.311258
| 77
| 0.658523
| 501
| 3,520
| 4.447106
| 0.283433
| 0.076302
| 0.084829
| 0.041293
| 0.307899
| 0.245512
| 0.186266
| 0.156643
| 0.156643
| 0.123429
| 0
| 0.02012
| 0.19517
| 3,520
| 151
| 78
| 23.311258
| 0.766325
| 0.186932
| 0
| 0.340909
| 0
| 0
| 0.042209
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.034091
| 0
| 0.193182
| 0.011364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a0316a49bbe3e0c8ccbf65e47f3d0ad6d7d1eaf
| 6,299
|
py
|
Python
|
src/folio_migration_tools/folder_structure.py
|
chadmcinnis/folio_migration_tools
|
39ee044a713a34c323324a956e3e8b54ee05c194
|
[
"MIT"
] | 1
|
2022-03-30T07:48:33.000Z
|
2022-03-30T07:48:33.000Z
|
src/folio_migration_tools/folder_structure.py
|
chadmcinnis/folio_migration_tools
|
39ee044a713a34c323324a956e3e8b54ee05c194
|
[
"MIT"
] | 76
|
2022-02-04T16:36:49.000Z
|
2022-03-31T11:20:29.000Z
|
src/folio_migration_tools/folder_structure.py
|
chadmcinnis/folio_migration_tools
|
39ee044a713a34c323324a956e3e8b54ee05c194
|
[
"MIT"
] | 1
|
2022-02-02T17:19:05.000Z
|
2022-02-02T17:19:05.000Z
|
import logging
import sys
from pathlib import Path
import time
from folio_uuid.folio_namespaces import FOLIONamespaces
class FolderStructure:
def __init__(
self,
base_path: Path,
object_type: FOLIONamespaces,
migration_task_name: str,
iteration_identifier: str,
add_time_stamp_to_file_names: bool,
):
logging.info("Setting up folder structure")
self.object_type: FOLIONamespaces = object_type
self.migration_task_name = migration_task_name
self.add_time_stamp_to_file_names = add_time_stamp_to_file_names
self.iteration_identifier = iteration_identifier
self.base_folder = Path(base_path)
if not self.base_folder.is_dir():
logging.critical("Base Folder Path is not a folder. Exiting.")
sys.exit(1)
self.data_folder = self.base_folder / "data"
verify_folder(self.data_folder)
verify_folder(self.data_folder / str(FOLIONamespaces.instances.name).lower())
verify_folder(self.data_folder / str(FOLIONamespaces.holdings.name).lower())
verify_folder(self.data_folder / str(FOLIONamespaces.items.name).lower())
verify_folder(self.data_folder / str(FOLIONamespaces.users.name).lower())
self.archive_folder = self.base_folder / "archive"
        verify_folder(self.archive_folder)
self.results_folder = self.base_folder / "results"
verify_folder(self.results_folder)
self.reports_folder = self.base_folder / "reports"
verify_folder(self.reports_folder)
self.mapping_files_folder = self.base_folder / "mapping_files"
verify_folder(self.mapping_files_folder)
gitignore = self.base_folder / ".gitignore"
verify_git_ignore(gitignore)
def log_folder_structure(self):
logging.info("Mapping files folder is %s", self.mapping_files_folder)
logging.info("Git ignore is set up correctly")
logging.info("Base folder is %s", self.base_folder)
logging.info("Reports and logs folder is %s", self.reports_folder)
logging.info("Results folder is %s", self.results_folder)
logging.info("Data folder is %s", self.data_folder)
logging.info("Source records files folder is %s", self.legacy_records_folder)
logging.info("Log file will be located at %s", self.transformation_log_path)
logging.info("Extra data will be stored at%s", self.transformation_extra_data_path)
logging.info("Data issue reports %s", self.data_issue_file_path)
logging.info("Created objects will be stored at %s", self.created_objects_path)
logging.info("Migration report file will be saved at %s", self.migration_reports_file)
def setup_migration_file_structure(self, source_file_type: str = ""):
time_stamp = f'_{time.strftime("%Y%m%d-%H%M%S")}'
time_str = time_stamp if self.add_time_stamp_to_file_names else ""
file_template = f"{self.iteration_identifier}{time_str}_{self.migration_task_name}"
object_type_string = str(self.object_type.name).lower()
if source_file_type:
self.legacy_records_folder = self.data_folder / source_file_type
elif self.object_type == FOLIONamespaces.other:
self.legacy_records_folder = self.data_folder
else:
self.legacy_records_folder = self.data_folder / object_type_string
verify_folder(self.legacy_records_folder)
self.transformation_log_path = self.reports_folder / (
f"log_{object_type_string}_{file_template}.log"
)
self.failed_recs_path = (
self.results_folder / f"failed_records_{file_template}_{time_stamp}.txt"
)
self.transformation_extra_data_path = (
self.results_folder / f"extradata_{file_template}.extradata"
)
self.data_issue_file_path = (
self.reports_folder / f"data_issues_log_{object_type_string}_{file_template}.tsv"
)
self.created_objects_path = (
self.results_folder / f"folio_{object_type_string}_{file_template}.json"
)
self.failed_bibs_file = (
self.results_folder / f"failed_bib_records_{self.iteration_identifier}{time_str}.mrc"
)
self.failed_mfhds_file = (
self.results_folder / f"failed_mfhd_records_{self.iteration_identifier}{time_str}.mrc"
)
self.migration_reports_file = (
self.reports_folder / f"transformation_report_{object_type_string}_{file_template}.md"
)
self.srs_records_path = (
self.results_folder / f"folio_srs_{object_type_string}_{file_template}.json"
)
self.instance_id_map_path = (
self.results_folder / f"instance_id_map_{self.iteration_identifier}.json"
)
self.holdings_id_map_path = (
self.results_folder / f"holdings_id_map_{self.iteration_identifier}.json"
)
# Mapping files
self.temp_locations_map_path = self.mapping_files_folder / "temp_locations.tsv"
self.material_type_map_path = self.mapping_files_folder / "material_types.tsv"
self.loan_type_map_path = self.mapping_files_folder / "loan_types.tsv"
self.temp_loan_type_map_path = self.mapping_files_folder / "temp_loan_types.tsv"
self.statistical_codes_map_path = self.mapping_files_folder / "statcodes.tsv"
self.item_statuses_map_path = self.mapping_files_folder / "item_statuses.tsv"
def verify_git_ignore(gitignore: Path):
with open(gitignore, "r+") as f:
contents = f.read()
if "results/" not in contents:
f.write("results/\n")
if "archive/" not in contents:
f.write("archive/\n")
if "data/" not in contents:
f.write("data/\n")
if "*.data" not in contents:
f.write("*.data\n")
logging.info("Made sure there was a valid .gitignore file at %s", gitignore)
def verify_folder(folder_path: Path):
if not folder_path.is_dir():
logging.critical("There is no folder located at %s. Exiting.", folder_path)
logging.critical("Create a folder by calling\n\tmkdir %s", folder_path)
sys.exit(1)
else:
logging.info("Located %s", folder_path)
| 42.275168
| 98
| 0.680743
| 814
| 6,299
| 4.932432
| 0.170762
| 0.054795
| 0.038356
| 0.044832
| 0.423412
| 0.290162
| 0.196015
| 0.095143
| 0.054795
| 0.015193
| 0
| 0.000409
| 0.223845
| 6,299
| 148
| 99
| 42.560811
| 0.820822
| 0.002064
| 0
| 0.04918
| 0
| 0
| 0.223584
| 0.104233
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040984
| false
| 0
| 0.040984
| 0
| 0.090164
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a03f4a30283fc5811ab209f5fab981571d780d6
| 6,064
|
py
|
Python
|
ftrl_noise.py
|
google-research/DP-FTRL
|
513500a8e31e412972a7d457e9c66756e4a48348
|
[
"Apache-2.0"
] | 8
|
2021-04-09T18:00:18.000Z
|
2022-03-11T01:13:13.000Z
|
ftrl_noise.py
|
google-research/DP-FTRL
|
513500a8e31e412972a7d457e9c66756e4a48348
|
[
"Apache-2.0"
] | 1
|
2021-08-18T04:59:42.000Z
|
2021-12-08T00:24:24.000Z
|
ftrl_noise.py
|
google-research/DP-FTRL
|
513500a8e31e412972a7d457e9c66756e4a48348
|
[
"Apache-2.0"
] | 3
|
2021-11-05T15:42:31.000Z
|
2022-03-03T07:38:46.000Z
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The tree aggregation protocol for noise addition in DP-FTRL."""
import torch
from collections import namedtuple
from absl import app
class CummuNoiseTorch:
@torch.no_grad()
def __init__(self, std, shapes, device, test_mode=False):
"""
:param std: standard deviation of the noise
:param shapes: shapes of the noise, which is basically shape of the gradients
:param device: device for pytorch tensor
:param test_mode: if in test mode, noise will be 1 in each node of the tree
"""
assert std >= 0
self.std = std
self.shapes = shapes
self.device = device
self.step = 0
self.binary = [0]
self.noise_sum = [torch.zeros(shape).to(self.device) for shape in shapes]
self.recorded = [[torch.zeros(shape).to(self.device) for shape in shapes]]
self.test_mode = test_mode
@torch.no_grad()
def __call__(self):
"""
:return: the noise to be added by DP-FTRL
"""
if self.std <= 0 and not self.test_mode:
return self.noise_sum
self.step += 1
idx = 0
while idx < len(self.binary) and self.binary[idx] == 1:
self.binary[idx] = 0
for ns, re in zip(self.noise_sum, self.recorded[idx]):
ns -= re
idx += 1
if idx >= len(self.binary):
self.binary.append(0)
self.recorded.append([torch.zeros(shape).to(self.device) for shape in self.shapes])
for shape, ns, re in zip(self.shapes, self.noise_sum, self.recorded[idx]):
if not self.test_mode:
n = torch.normal(0, self.std, shape).to(self.device)
else:
n = torch.ones(shape).to(self.device)
ns += n
re.copy_(n)
self.binary[idx] = 1
return self.noise_sum
@torch.no_grad()
def proceed_until(self, step_target):
"""
Proceed until the step_target-th step. This is for the binary tree completion trick.
:return: the noise to be added by DP-FTRL
"""
if self.step >= step_target:
raise ValueError(f'Already reached {step_target}.')
while self.step < step_target:
noise_sum = self.__call__()
return noise_sum
Element = namedtuple('Element', 'height value')
class CummuNoiseEffTorch:
"""
The tree aggregation protocol with the trick in Honaker, "Efficient Use of Differentially Private Binary Trees", 2015
"""
@torch.no_grad()
def __init__(self, std, shapes, device):
"""
:param std: standard deviation of the noise
:param shapes: shapes of the noise, which is basically shape of the gradients
:param device: device for pytorch tensor
"""
self.std = std
self.shapes = shapes
self.device = device
self.step = 0
self.noise_sum = [torch.zeros(shape).to(self.device) for shape in shapes]
self.stack = []
@torch.no_grad()
def get_noise(self):
return [torch.normal(0, self.std, shape).to(self.device) for shape in self.shapes]
@torch.no_grad()
def push(self, elem):
for i in range(len(self.shapes)):
self.noise_sum[i] += elem.value[i] / (2.0 - 1 / 2 ** elem.height)
self.stack.append(elem)
@torch.no_grad()
def pop(self):
elem = self.stack.pop()
for i in range(len(self.shapes)):
self.noise_sum[i] -= elem.value[i] / (2.0 - 1 / 2 ** elem.height)
@torch.no_grad()
def __call__(self):
"""
:return: the noise to be added by DP-FTRL
"""
self.step += 1
# add new element to the stack
self.push(Element(0, self.get_noise()))
# pop the stack
while len(self.stack) >= 2 and self.stack[-1].height == self.stack[-2].height:
# create new element
left_value, right_value = self.stack[-2].value, self.stack[-1].value
new_noise = self.get_noise()
new_elem = Element(
self.stack[-1].height + 1,
[x + (y + z) / 2 for x, y, z in zip(new_noise, left_value, right_value)])
# pop the stack, update sum
self.pop()
self.pop()
# append to the stack, update sum
self.push(new_elem)
return self.noise_sum
@torch.no_grad()
def proceed_until(self, step_target):
"""
Proceed until the step_target-th step. This is for the binary tree completion trick.
:return: the noise to be added by DP-FTRL
"""
if self.step >= step_target:
raise ValueError(f'Already reached {step_target}.')
while self.step < step_target:
noise_sum = self.__call__()
return noise_sum
def main(argv):
# This is a small test. If we set the noise in each node as 1 (by setting
# test_mode=True), we should be seeing the returned noise as the number of
# 1s in the binary representations of i when cummu_noises is called i times.
def countSetBits(n):
count = 0
        while n:
n &= (n - 1)
count += 1
return count
cummu_noises = CummuNoiseTorch(1.0, [(1,)], 'cuda', test_mode=True)
for epoch in range(31):
random_noise = cummu_noises()
assert random_noise[0].cpu().numpy()[0] == countSetBits(epoch + 1)
if __name__ == '__main__':
app.run(main)
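# Hedged CPU-only spot check (not in the original file): after i calls, the
# test-mode noise sum equals the number of set bits in i, mirroring main().
_noise = CummuNoiseTorch(1.0, [(1,)], 'cpu', test_mode=True)
for _ in range(7):
    _out = _noise()
assert _out[0].numpy()[0] == 3  # 7 = 0b111 has three set bits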
| 32.956522
| 121
| 0.59812
| 845
| 6,064
| 4.184615
| 0.224852
| 0.029412
| 0.027998
| 0.035633
| 0.464649
| 0.444853
| 0.429581
| 0.429581
| 0.429581
| 0.369344
| 0
| 0.013596
| 0.296504
| 6,064
| 184
| 122
| 32.956522
| 0.815284
| 0.297823
| 0
| 0.411765
| 0
| 0
| 0.022614
| 0
| 0
| 0
| 0
| 0
| 0.019608
| 1
| 0.107843
| false
| 0
| 0.029412
| 0.009804
| 0.22549
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a04e9a41ace038d4a501f35036632f201b9f71d
| 2,782
|
py
|
Python
|
ladder/tests/test_models.py
|
jzahedieh/django-tennis-ladder
|
03a9fc9ec6d0830ac1d6648428eca11755eabb00
|
[
"MIT"
] | 13
|
2015-04-30T21:07:20.000Z
|
2021-01-08T13:52:14.000Z
|
ladder/tests/test_models.py
|
jzahedieh/django-tennis-ladder
|
03a9fc9ec6d0830ac1d6648428eca11755eabb00
|
[
"MIT"
] | 13
|
2015-04-05T22:48:14.000Z
|
2021-12-12T17:29:16.000Z
|
ladder/tests/test_models.py
|
jzahedieh/django-tennis-ladder
|
03a9fc9ec6d0830ac1d6648428eca11755eabb00
|
[
"MIT"
] | 5
|
2016-10-12T16:24:09.000Z
|
2019-11-26T10:16:44.000Z
|
from django.test import TestCase
from ladder.models import Player, Result, League, Season
from django.db.models import Avg
class PlayerModelTest(TestCase):
def test_player_stats(self):
"""
        Tests that a player's stats are calculated correctly.
"""
# fresh player test
player = Player(first_name='New', last_name='Player')
self.assertEqual(player.player_stats(), {
'played': "-",
'win_rate': "- %",
'average': "-"
})
# player with matches test
player = Player.objects.first()
stats = player.player_stats()
results = Result.objects.filter(player=player)
# assert games played is correct
games_played = results.count()
self.assertEqual(stats['played'], games_played)
# assert completion rate is correct
match_count = 0
for league in League.objects.filter(player=player):
match_count += league.ladder.league_set.count() - 1
self.assertEqual(stats['completion_rate'], "{0:.2f} %".format(games_played / match_count * 100.00))
# assert win rate is correct
won = player.result_player.filter(result=9).count()
self.assertEqual(stats['win_rate'], "{0:.2f} %".format(won / games_played * 100.00))
# assert average is correct
# two points for winning + 1 point for playing
additional_points = ((won * 2) + games_played) / games_played
average = list(player.result_player.aggregate(Avg('result')).values())[0]
average_with_additional = average + additional_points
self.assertEqual(stats['average'], "{0:.2f}".format(average_with_additional))
class SeasonModelTest(TestCase):
def test_season_stats(self):
season = Season.objects.first()
stats = season.get_stats()
player_count = 0
results_count = 0
total_games_count = 0.0
for ladder in season.ladder_set.all():
player_count += ladder.league_set.count()
results_count += ladder.result_set.count() / 2
total_games_count += (ladder.league_set.count() * (ladder.league_set.count() - 1)) / 2
# division stat assertion
self.assertEqual(stats['divisions'], season.ladder_set.count())
# perc played assertion
percentage_played = (results_count / total_games_count) * 100
self.assertEqual(stats['percentage_played'], "{0:.2f}".format(percentage_played))
# total games assertion
self.assertEqual(stats['total_games_count'], total_games_count)
# result count assertion
self.assertEqual(stats['results_count'], results_count)
# player count assertion
self.assertEqual(stats['player_count'], player_count)
| 36.605263
| 107
| 0.638749
| 321
| 2,782
| 5.35514
| 0.233645
| 0.08726
| 0.104712
| 0.046539
| 0.093077
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016206
| 0.245866
| 2,782
| 76
| 108
| 36.605263
| 0.803146
| 0.13156
| 0
| 0
| 0
| 0
| 0.074307
| 0
| 0
| 0
| 0
| 0
| 0.232558
| 1
| 0.046512
| false
| 0
| 0.069767
| 0
| 0.162791
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a06d9877e200d7e5cdcb16fe42f60b4884f0200
| 6,779
|
py
|
Python
|
src/authub/idp/google.py
|
fantix/authub
|
1f8a30fe32c579e556d2b962f258e0f99527a006
|
[
"BSD-3-Clause"
] | null | null | null |
src/authub/idp/google.py
|
fantix/authub
|
1f8a30fe32c579e556d2b962f258e0f99527a006
|
[
"BSD-3-Clause"
] | null | null | null |
src/authub/idp/google.py
|
fantix/authub
|
1f8a30fe32c579e556d2b962f258e0f99527a006
|
[
"BSD-3-Clause"
] | null | null | null |
"""Google OpenID Connect identity provider."""
from uuid import UUID
from fastapi import Depends, status, Request
from pydantic import BaseModel
from .base import IdPRouter, oauth
from ..http import get_edgedb_pool
from ..models import IdPClient, Identity as BaseIdentity, Href, User
from ..orm import ExtendedComputableProperty, ExclusiveConstraint, with_block
class Client(IdPClient):
client_id: str
client_secret: str
class Identity(BaseIdentity):
iss: str # "https://accounts.google.com"
azp: str # client_id
aud: str # client_id
sub: str # "112506503767939677396"
hd: str # "edgedb.com"
email: str
email_verified: bool
at_hash: str # "Gn_Xy8b7J7qdPrAPTSJxqA"
name: str
picture: str # URL
given_name: str
family_name: str
locale: str # "en"
iat: int
exp: int
access_token: str
expires_in: int
scope: str
token_type: str
id_token: str
expires_at: int
# We only need the second, refs edgedb/edgedb#1939
ExtendedComputableProperty("iss_sub", "(.iss, .sub)", exclusive=True)
ExclusiveConstraint("iss", "sub")
idp = IdPRouter("google")
class GoogleClientOut(BaseModel):
name: str
client_id: str
redirect_uri: str
@idp.get(
"/clients/{idp_client_id}",
response_model=GoogleClientOut,
responses={status.HTTP_404_NOT_FOUND: {}},
summary="Get details of the specified Google OIDC client.",
)
async def get_client(
idp_client_id: UUID, request: Request, db=Depends(get_edgedb_pool)
):
result = await db.query_one(
"""
SELECT google::Client {
name,
client_id,
} FILTER .id = <uuid>$id
""",
id=idp_client_id,
)
return GoogleClientOut(
redirect_uri=request.url_for(
f"{idp.name}.authorize", idp_client_id=idp_client_id
),
**Client.from_obj(result).dict(),
)
class GoogleClientIn(BaseModel):
name: str
client_id: str
client_secret: str
@idp.post(
"/clients",
response_model=Href,
status_code=status.HTTP_201_CREATED,
summary="Configure a new Google OIDC client.",
)
async def add_client(
client: GoogleClientIn, request: Request, db=Depends(get_edgedb_pool)
):
result = await db.query_one(
"""
INSERT google::Client {
name := <str>$name,
client_id := <str>$client_id,
client_secret := <str>$client_secret
}
""",
**client.dict(),
)
return Href(
href=request.url_for(f"{idp.name}.get_client", idp_client_id=result.id)
)
async def _get_google_client(db, idp_client_id):
try:
client = getattr(oauth, idp_client_id.hex)
except AttributeError:
result = await db.query_one(
"""
SELECT google::Client {
client_id,
client_secret,
} FILTER .id = <uuid>$id
""",
id=idp_client_id,
)
client = Client.from_obj(result)
client = oauth.register(
name=idp_client_id.hex,
server_metadata_url="https://accounts.google.com/.well-known/openid-configuration",
client_id=client.client_id,
client_secret=client.client_secret,
client_kwargs={"scope": "openid email profile"},
)
return client
@idp.get(
"/clients/{idp_client_id}/login",
summary="Login through the specified Google OIDC client.",
status_code=status.HTTP_307_TEMPORARY_REDIRECT,
)
async def login(
idp_client_id: UUID, request: Request, db=Depends(get_edgedb_pool)
):
google_client = await _get_google_client(db, idp_client_id)
return await google_client.authorize_redirect(
request,
request.url_for(f"{idp.name}.authorize", idp_client_id=idp_client_id),
)
@idp.get(
"/clients/{idp_client_id}/authorize",
summary="Google OIDC redirect URI.",
)
async def authorize(
idp_client_id: UUID, request: Request, db=Depends(get_edgedb_pool)
):
google_client = await _get_google_client(db, idp_client_id)
token = await google_client.authorize_access_token(request)
user = await google_client.parse_id_token(request, token)
identity = Identity.construct(**token, **user)
client = Client.select(filters=".id = <uuid>$client_id")
result = await db.query_one(
"SELECT ("
+ identity.insert(
user=User().insert(),
client=client,
conflict_on=".iss_sub",
conflict_else=identity.update(
exclude={"iss", "sub"}, client=client
),
)
+ ") { id, user: { id }, client: { id } }",
client_id=idp_client_id,
**identity.dict(exclude={"nonce"}, exclude_unset=True),
)
if "client_id" in request.session:
from authub.oauth2 import oauth2_authorized
return await oauth2_authorized(request, User.from_obj(result.user))
else:
identity = Identity(
id=result.id,
user=User.from_obj(result.user),
client=Client.from_obj(result.client),
**identity.dict(exclude_unset=True),
)
return identity.dict()
class IdentityOut(BaseModel):
iss: str # "https://accounts.google.com"
hd: str # "edgedb.com"
email: str
email_verified: bool
name: str
picture: str # URL
given_name: str
family_name: str
locale: str # "en"
@idp.get(
"/identities/{identity_id}",
response_model=IdentityOut,
response_model_exclude_unset=True,
response_model_exclude={"user", "client"},
summary="Get the profile of the specified Google identity.",
)
async def get_identity(identity_id: UUID, db=Depends(get_edgedb_pool)):
result = await db.query_one(
Identity.select(
*IdentityOut.schema()["properties"],
filters=".id = <uuid>$id",
),
id=identity_id,
)
return IdentityOut(**Identity.from_obj(result).dict())
@idp.patch(
"/identities/{identity_id}/utilize",
response_model=User,
summary="Update the user's profile with the specified Google identity.",
)
async def utilize_identity(identity_id: UUID, db=Depends(get_edgedb_pool)):
result = await db.query_one(
with_block(
identity=Identity.select(
"user: { id }",
"email",
"name",
filters=".id = <uuid>$identity_id",
)
)
+ "SELECT ("
+ User.construct().update(
filters=".id = identity.user.id",
email="identity.email",
name="identity.name",
)
+ ") { id, email, name }",
identity_id=identity_id,
)
return User.from_obj(result)
| 27.445344
| 95
| 0.621478
| 796
| 6,779
| 5.081658
| 0.198492
| 0.069221
| 0.051669
| 0.0267
| 0.374536
| 0.317676
| 0.218541
| 0.211619
| 0.184178
| 0.164895
| 0
| 0.008005
| 0.262871
| 6,779
| 246
| 96
| 27.556911
| 0.801481
| 0.038649
| 0
| 0.25
| 0
| 0
| 0.140287
| 0.027562
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.040816
| 0
| 0.30102
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a0859c884c636f6f47e39ee23feff85000d7d1d
| 656
|
py
|
Python
|
412.fizz-buzz.py
|
SprintGhost/LeetCode
|
cdf1a86c83f2daedf674a871c4161da7e8fad17c
|
[
"Unlicense"
] | 1
|
2019-03-26T13:49:14.000Z
|
2019-03-26T13:49:14.000Z
|
412.fizz-buzz.py
|
SprintGhost/LeetCode
|
cdf1a86c83f2daedf674a871c4161da7e8fad17c
|
[
"Unlicense"
] | 5
|
2020-01-04T15:13:06.000Z
|
2020-08-31T14:20:23.000Z
|
412.fizz-buzz.py
|
SprintGhost/LeetCode
|
cdf1a86c83f2daedf674a871c4161da7e8fad17c
|
[
"Unlicense"
] | null | null | null |
#
# @lc app=leetcode.cn id=412 lang=python3
#
# [412] Fizz Buzz
#
# Accepted
# 8/8 cases passed (48 ms)
# Your runtime beats 76.37 % of python3 submissions
# Your memory usage beats 25 % of python3 submissions (14.5 MB)
# @lc code=start
class Solution:
def fizzBuzz(self, n: int):
result = list()
        for each in range(1, n + 1):
if each % 15 == 0:
result.append("FizzBuzz")
elif each % 3 == 0:
result.append("Fizz")
elif each % 5 == 0:
result.append("Buzz")
else:
result.append(str(each))
return result
# @lc code=end
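# Hedged usage sketch (not part of the original submission):
assert Solution().fizzBuzz(5) == ['1', '2', 'Fizz', '4', 'Buzz']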
| 22.62069
| 63
| 0.532012
| 87
| 656
| 4.011494
| 0.632184
| 0.137536
| 0.111748
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0726
| 0.349085
| 656
| 28
| 64
| 23.428571
| 0.744731
| 0.349085
| 0
| 0
| 0
| 0
| 0.038647
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a08f540ce3f12537d5b6d4be1caf8051f4c1c27
| 5,875
|
py
|
Python
|
selector/from_model.py
|
uberkinder/Robusta-AutoML
|
9faee4c17ad9f37b09760f9fffea715cdbf2d1fb
|
[
"MIT"
] | 2
|
2019-04-26T19:40:31.000Z
|
2019-10-12T15:18:29.000Z
|
selector/from_model.py
|
uberkinder/Robusta-AutoML
|
9faee4c17ad9f37b09760f9fffea715cdbf2d1fb
|
[
"MIT"
] | null | null | null |
selector/from_model.py
|
uberkinder/Robusta-AutoML
|
9faee4c17ad9f37b09760f9fffea715cdbf2d1fb
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np

from sklearn.model_selection import check_cv
from sklearn.exceptions import NotFittedError
from sklearn.base import clone, is_classifier

from robusta.importance import get_importance
from robusta.crossval import crossval

from .base import _Selector


# Original: sklearn.feature_selection.SelectFromModel
class SelectFromModel(_Selector):
    """Meta-transformer for selecting features based on importance weights.

    Parameters
    ----------
    estimator : object
        The base estimator from which the transformer is built.
        This can be either a fitted estimator (if cv='prefit') or a
        non-fitted estimator. The estimator must expose either a
        <feature_importances_> or <coef_> attribute after fitting.

    threshold : string, float, optional (default None)
        The threshold value to use for feature selection. Features whose
        importance is greater than or equal to the threshold are kept;
        the others are discarded. If "median" (resp. "mean"), the
        <threshold> value is the median (resp. the mean) of the feature
        importances. A scaling factor (e.g., "1.25*mean") may also be
        used. If None, features are dropped based on <max_features> only.

    max_features : int, float or None, optional (default 0.5)
        The maximum number of features scoring above <threshold> to select.
        If float, interpreted as a proportion of all features.
        To disable <threshold> and select based on <max_features> alone,
        set <threshold> to -np.inf.

    cv : int, cross-validation generator, iterable or "prefit"
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

            - None, to disable cross-validation and train a single
              estimator on the whole dataset (default).
            - integer, to specify the number of folds.
            - An object to be used as a cross-validation generator.
            - An iterable yielding train/test splits.
            - "prefit" string constant.

        If "prefit" is passed, it is assumed that <estimator> has already
        been fitted and <fit> will raise an error.

    Attributes
    ----------
    estimator_ : list of fitted estimators, or single fitted estimator
        The original estimator if <cv> is 'prefit'. A single fitted
        estimator if <cv> is None. Otherwise a list of fitted estimators
        of length (n_folds, ).

    feature_importances_ : Series of shape (n_features, )
        Feature importances, extracted from the estimator(s).

    threshold_ : float
        The threshold value used for feature selection.

    max_features_ : int
        Maximum number of features used for feature selection.

    use_cols_ : list of str
        Columns to select.

    """
    def __init__(self, estimator, cv=None, threshold=None, max_features=None):
        self.estimator = estimator
        self.threshold = threshold
        self.max_features = max_features
        self.cv = cv

    def fit(self, X, y, groups=None):
        # 'is' tests identity, not equality; string comparison needs '=='
        if self.cv == 'prefit':
            raise NotFittedError("Since 'cv=prefit', call transform directly")

        elif self.cv is None:
            self.estimator_ = clone(self.estimator).fit(X, y)

        else:
            self.estimator_ = []
            # check_cv must inspect the unfitted template estimator, not the
            # (still empty) list of fitted estimators
            cv = check_cv(self.cv, y, is_classifier(self.estimator))

            for trn, _ in cv.split(X, y, groups):
                X_trn, y_trn = X.iloc[trn], y.iloc[trn]
                estimator = clone(self.estimator).fit(X_trn, y_trn)
                self.estimator_.append(estimator)

        return self

    @property
    def feature_importances_(self):
        imps = []

        if self.cv == 'prefit':
            estimators = [self.estimator]
        elif self.cv is None:
            estimators = [self.estimator_]
        else:
            estimators = self.estimator_

        for estimator in estimators:
            imp = get_importance(estimator)
            imps.append(imp)

        return pd.concat(imps, axis=1).mean(axis=1)

    def get_features(self):
        imp = self.feature_importances_

        self.threshold_ = _check_threshold(imp, self.threshold)
        threshold_mask = (imp >= self.threshold_)

        self.max_features_ = _check_max_features(imp, self.max_features)
        ranking_mask = (imp.rank(ascending=False) <= self.max_features_)

        use_cols = imp.index[threshold_mask & ranking_mask]
        return list(use_cols)


def _check_max_features(importances, max_features):
    """Interpret the max_features value"""
    n_features = len(importances)

    if max_features is None:
        max_features = n_features
    elif isinstance(max_features, int):
        max_features = min(n_features, max_features)
    elif isinstance(max_features, float):
        max_features = int(n_features * max_features)

    return max_features


def _check_threshold(importances, threshold):
    """Interpret the threshold value"""
    if threshold is None:
        threshold = -np.inf

    elif isinstance(threshold, str):
        if "*" in threshold:
            scale, reference = threshold.split("*")
            scale = float(scale.strip())
            reference = reference.strip()

            if reference == "median":
                reference = np.median(importances)
            elif reference == "mean":
                reference = np.mean(importances)
            else:
                raise ValueError("Unknown reference: " + reference)

            threshold = scale * reference

        elif threshold == "median":
            threshold = np.median(importances)

        elif threshold == "mean":
            threshold = np.mean(importances)

        else:
            raise ValueError("Expected threshold='mean' or threshold='median' "
                             "got %s" % threshold)

    else:
        threshold = float(threshold)

    return threshold
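# Hedged usage sketch (not part of the module): assumes the robusta package is
# importable and that get_importance supports the chosen estimator. Fold
# importances from 5-fold CV are averaged; features clearing 1.25x the mean
# importance are kept, capped at half of all features.
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier

X_arr, y_arr = make_classification(n_samples=200, n_features=10, random_state=0)
X = pd.DataFrame(X_arr, columns=[f"f{i}" for i in range(10)])
y = pd.Series(y_arr)

selector = SelectFromModel(RandomForestClassifier(n_estimators=50, random_state=0),
                           cv=5, threshold="1.25*mean", max_features=0.5)
selector.fit(X, y)
print(selector.get_features())  # names of the retained columns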
| 30.440415
| 79
| 0.637787
| 696
| 5,875
| 5.258621
| 0.280172
| 0.069126
| 0.018579
| 0.009836
| 0.072131
| 0.036612
| 0
| 0
| 0
| 0
| 0
| 0.001656
| 0.280681
| 5,875
| 192
| 80
| 30.598958
| 0.864411
| 0.389617
| 0
| 0.109756
| 0
| 0
| 0.043927
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073171
| false
| 0
| 0.219512
| 0
| 0.365854
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a0a7b6a486f4199dd2e8181f3e83788c1d07d18
| 1,875
|
py
|
Python
|
trainer.py
|
jinxixiang/PC-TMB
|
c6f2fc62629c7f026865774cdfb9d826464397ea
|
[
"MIT"
] | null | null | null |
trainer.py
|
jinxixiang/PC-TMB
|
c6f2fc62629c7f026865774cdfb9d826464397ea
|
[
"MIT"
] | null | null | null |
trainer.py
|
jinxixiang/PC-TMB
|
c6f2fc62629c7f026865774cdfb9d826464397ea
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch_optimizer as optim
import pandas as pd

# customized libs
import criterions
import models
import datasets


def get_model(conf):
    net = getattr(models, conf.Model.base)
    return net(**conf.Model.params)


def get_loss(conf):
    conf_loss = conf.Loss.base_loss
    assert hasattr(nn, conf_loss.name) or hasattr(criterions, conf_loss.name)
    loss = None
    if hasattr(nn, conf_loss.name):
        loss = getattr(nn, conf_loss.name)
    elif hasattr(criterions, conf_loss.name):
        loss = getattr(criterions, conf_loss.name)

    if len(conf_loss.weight) > 0:
        weight = torch.Tensor(conf_loss.weight)
        conf_loss["weight"] = weight

    return loss(**conf_loss.params)


def get_optimizer(conf):
    conf_optim = conf.Optimizer
    name = conf_optim.optimizer.name
    if hasattr(torch.optim, name):
        optimizer_cls = getattr(torch.optim, name)
    else:
        optimizer_cls = getattr(optim, name)

    if hasattr(conf_optim, "lr_scheduler"):
        scheduler_cls = getattr(torch.optim.lr_scheduler, conf_optim.lr_scheduler.name)
    else:
        scheduler_cls = None
    return optimizer_cls, scheduler_cls


def get_dataset(conf, kfold, mode='train'):
    folds_csv = pd.read_csv(conf.General.folds)
    if conf.General.cross_validation:
        if mode == 'train':
            data_idx = folds_csv[folds_csv['fold'] != kfold].index
        else:
            data_idx = folds_csv[folds_csv['fold'] == kfold].index
    else:
        data_idx = folds_csv[folds_csv['fold'] == mode].index

    name = conf.Data.dataset.name
    dataset_cls = getattr(datasets, name)
    dataset_ = dataset_cls(folds_csv.loc[data_idx].reset_index(drop=True),
                           folds_csv.loc[data_idx].reset_index(drop=True)[conf.General.target_col],
                           conf)
    return dataset_
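# Hedged sketch (not from the repo): the factories above only require a config
# object with attribute access, e.g. an omegaconf.DictConfig. The keys mirror
# the lookups in the code; the concrete names and values are illustrative.
from omegaconf import OmegaConf

conf = OmegaConf.create({
    "Optimizer": {
        "optimizer": {"name": "Adam"},
        "lr_scheduler": {"name": "StepLR"},
    },
})
optimizer_cls, scheduler_cls = get_optimizer(conf)
print(optimizer_cls, scheduler_cls)  # torch.optim.Adam, StepLR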
| 29.296875
| 99
| 0.670933
| 255
| 1,875
| 4.72549
| 0.227451
| 0.079668
| 0.059751
| 0.034855
| 0.254772
| 0.204979
| 0.150207
| 0.150207
| 0.150207
| 0.090456
| 0
| 0.000687
| 0.224
| 1,875
| 64
| 100
| 29.296875
| 0.827491
| 0.008
| 0
| 0.081633
| 0
| 0
| 0.021517
| 0
| 0
| 0
| 0
| 0
| 0.020408
| 1
| 0.081633
| false
| 0
| 0.142857
| 0
| 0.306122
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a0b750755a4f2eb69f71eb1f7890678edaaee12
| 1,733
|
py
|
Python
|
falmer/search/queries.py
|
sussexstudent/services-api
|
ae735bd9d6177002c3d986e5c19a78102233308f
|
[
"MIT"
] | 2
|
2017-04-27T19:35:59.000Z
|
2017-06-13T16:19:33.000Z
|
falmer/search/queries.py
|
sussexstudent/falmer
|
ae735bd9d6177002c3d986e5c19a78102233308f
|
[
"MIT"
] | 975
|
2017-04-13T11:31:07.000Z
|
2022-02-10T07:46:18.000Z
|
falmer/search/queries.py
|
sussexstudent/services-api
|
ae735bd9d6177002c3d986e5c19a78102233308f
|
[
"MIT"
] | 3
|
2018-05-09T06:42:25.000Z
|
2020-12-10T18:29:30.000Z
|
import graphene
from fuzzywuzzy import process

from falmer.search.types import SearchQuery
from falmer.search.utils import get_falmer_results_for_term, get_msl_results_for_term, \
    SearchTermResponseData


def get_item_id(item):
    model = item.__class__.__name__ if hasattr(item, '__class__') else 'MSL'
    if model == 'Page':
        model = 'PageResult'
    id = item.pk if hasattr(item, 'pk') else item.uuid
    return f'{model}_{id}'


def get_item_title(item):
    if hasattr(item, 'title'):
        return item.title
    if hasattr(item, 'name'):
        return item.name
    return ''


class Query(graphene.ObjectType):
    search = graphene.Field(SearchQuery, query=graphene.String())

    def resolve_search(self, info, query):
        falmer_results = get_falmer_results_for_term(query)
        msl_results = get_msl_results_for_term(query)

        all_unsorted = falmer_results.content \
            + falmer_results.groups \
            + falmer_results.events \
            + msl_results.pages \
            + msl_results.news

        title_map = {}
        for item in all_unsorted:
            title_map[get_item_title(item)] = get_item_id(item)

        try:
            fuzz_sorted = process.extract(query, title_map.keys(), limit=15)
            top = [title_map[fuzz_result[0]] for fuzz_result in fuzz_sorted]
        except RuntimeError:
            top = []

        results = SearchTermResponseData(
            content=falmer_results.content,
            events=falmer_results.events,
            groups=falmer_results.groups,
            pages=msl_results.pages,
            news=msl_results.news,
            top=top,
        )

        return results
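# Hedged sketch (not part of the repo): exercising the resolver directly,
# outside the GraphQL layer. Requires the falmer Django app's search backends
# to be reachable; the query term is illustrative.
results = Query().resolve_search(None, 'live music')
print(results.top[:5])  # the best fuzzy-matched item ids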
| 28.883333
| 88
| 0.623774
| 201
| 1,733
| 5.079602
| 0.298507
| 0.114594
| 0.054848
| 0.037218
| 0.084231
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002433
| 0.288517
| 1,733
| 59
| 89
| 29.372881
| 0.825629
| 0
| 0
| 0
| 0
| 0
| 0.028275
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068182
| false
| 0
| 0.090909
| 0
| 0.318182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a0ef79b4f00de681e34f8ae67dfe78a084e7151
| 700
|
py
|
Python
|
SYMBOLS/heart.py
|
charansaim1819/Python_Patterns
|
02e636855003346ec84c3d69f2be174dc9e9e3cb
|
[
"MIT"
] | null | null | null |
SYMBOLS/heart.py
|
charansaim1819/Python_Patterns
|
02e636855003346ec84c3d69f2be174dc9e9e3cb
|
[
"MIT"
] | null | null | null |
SYMBOLS/heart.py
|
charansaim1819/Python_Patterns
|
02e636855003346ec84c3d69f2be174dc9e9e3cb
|
[
"MIT"
] | null | null | null |
# Shape of heart:
def for_heart():
    """Print the shape of a heart using a for loop."""
    for row in range(6):
        for col in range(7):
            if row - col == 2 or row + col == 8 or col % 3 != 0 and row == 0 or col % 3 == 0 and row == 1:
                print("*", end=" ")
            else:
                print(" ", end=" ")
        print()


def while_heart():
    """Print the shape of a heart using a while loop."""
    i = 0
    while i < 6:
        j = 0
        while j < 7:
            if i - j == 2 or i + j == 8 or j % 3 != 0 and i == 0 or j % 3 == 0 and i == 1:
                print("*", end=" ")
            else:
                print(" ", end=" ")
            j += 1
        print()
        i += 1
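# Hedged demo (not in the original file): render both variants back to back.
if __name__ == "__main__":
    for_heart()
    print()
    while_heart()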
| 24.137931
| 87
| 0.4
| 103
| 700
| 2.699029
| 0.271845
| 0.028777
| 0.071942
| 0.143885
| 0.52518
| 0.52518
| 0
| 0
| 0
| 0
| 0
| 0.060914
| 0.437143
| 700
| 28
| 88
| 25
| 0.64467
| 0.141429
| 0
| 0.4
| 0
| 0
| 0.014235
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.1
| 0.3
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a14a2d004a0836d3daffc7ee2ad09d95986fb4d
| 2,190
|
py
|
Python
|
runtests.py
|
ojii/django-statictemplate
|
73a541b19ff39e92b02de5d2ee74e4df7d486d81
|
[
"BSD-3-Clause"
] | 4
|
2015-09-28T10:06:45.000Z
|
2019-09-20T05:53:03.000Z
|
runtests.py
|
ojii/django-statictemplate
|
73a541b19ff39e92b02de5d2ee74e4df7d486d81
|
[
"BSD-3-Clause"
] | 8
|
2015-06-15T13:06:43.000Z
|
2018-12-23T13:37:20.000Z
|
runtests.py
|
ojii/django-statictemplate
|
73a541b19ff39e92b02de5d2ee74e4df7d486d81
|
[
"BSD-3-Clause"
] | 2
|
2015-09-23T05:07:00.000Z
|
2015-10-20T15:43:19.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys

urlpatterns = [
]

DEFAULT_SETTINGS = dict(
    INSTALLED_APPS=[
        'django.contrib.auth',
        'django.contrib.contenttypes',
        'django.contrib.sites',
        'statictemplate',
    ],
    DATABASES={
        'default': {
            'ENGINE': 'django.db.backends.sqlite3'
        }
    },
    LANGUAGES=(
        ('en-us', 'English'),
        ('it', 'Italian'),
    ),
    ROOT_URLCONF='runtests',
    SITE_ID=1,
    MIDDLEWARE_CLASSES=[
        'django.middleware.http.ConditionalGetMiddleware',
        'django.contrib.sessions.middleware.SessionMiddleware',
        'django.contrib.auth.middleware.AuthenticationMiddleware',
        'django.contrib.messages.middleware.MessageMiddleware',
        'django.middleware.csrf.CsrfViewMiddleware',
        'django.middleware.locale.LocaleMiddleware',
        'django.middleware.common.CommonMiddleware',
    ],
)


def runtests():
    import django
    from django.conf import settings

    DEFAULT_SETTINGS['TEMPLATES'] = [{
        'NAME': 'django',
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.i18n',
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
            ],
        }
    }]

    # Compatibility with Django 1.7's stricter initialization
    if not settings.configured:
        settings.configure(**DEFAULT_SETTINGS)
    if hasattr(django, 'setup'):
        django.setup()

    from django.test.runner import DiscoverRunner
    test_args = ['statictemplate']
    failures = DiscoverRunner(
        verbosity=1, interactive=True, failfast=False
    ).run_tests(test_args)
    sys.exit(failures)


if __name__ == '__main__':
    runtests()
| 28.815789
| 70
| 0.622374
| 192
| 2,190
| 6.958333
| 0.5
| 0.114521
| 0.094311
| 0.139222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004893
| 0.253425
| 2,190
| 75
| 71
| 29.2
| 0.812232
| 0.044749
| 0
| 0.048387
| 0
| 0
| 0.447797
| 0.364943
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016129
| false
| 0
| 0.064516
| 0
| 0.080645
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a1583710b1d1ad4cc13f28020664d7f22387e1e
| 585
|
py
|
Python
|
Courses/YandexAlgo/1/petya_the_inventor.py
|
searayeah/sublime-snippets
|
deff53a06948691cd5e5d7dcfa85515ddd8fab0b
|
[
"MIT"
] | null | null | null |
Courses/YandexAlgo/1/petya_the_inventor.py
|
searayeah/sublime-snippets
|
deff53a06948691cd5e5d7dcfa85515ddd8fab0b
|
[
"MIT"
] | null | null | null |
Courses/YandexAlgo/1/petya_the_inventor.py
|
searayeah/sublime-snippets
|
deff53a06948691cd5e5d7dcfa85515ddd8fab0b
|
[
"MIT"
] | null | null | null |
x = input()
z = input()

splitter = [x[i:] for i in range(len(x))]
found_splitter = False
next_z = ""

for i in range(1, len(x) + 1):
    if z[:i] in splitter:
        found_splitter = True
        next_z = z[i:]
        if next_z[: len(x)] == x:
            break
    if i == len(z):
        break

if next_z == "":
    if found_splitter is False:
        print(z)
else:
    if found_splitter is True:
        while True:
            if next_z[0 : len(x)] == x:
                next_z = next_z.replace(x, "", 1)
            else:
                print(next_z)
                break
| 19.5
| 49
| 0.471795
| 87
| 585
| 3.034483
| 0.252874
| 0.151515
| 0.079545
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011268
| 0.393162
| 585
| 29
| 50
| 20.172414
| 0.732394
| 0
| 0
| 0.208333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a1837e6b67fec245ab3af4f52d7d449ca21cff5
| 4,013
|
py
|
Python
|
nvtabular/ds_writer.py
|
benfred/NVTabular
|
5ab6d557868ac01eda26e9725a1a6e5bf7eda007
|
[
"Apache-2.0"
] | null | null | null |
nvtabular/ds_writer.py
|
benfred/NVTabular
|
5ab6d557868ac01eda26e9725a1a6e5bf7eda007
|
[
"Apache-2.0"
] | null | null | null |
nvtabular/ds_writer.py
|
benfred/NVTabular
|
5ab6d557868ac01eda26e9725a1a6e5bf7eda007
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import os

import cudf
import numpy as np
import pyarrow.parquet as pq

try:
    import cupy as cp
except ImportError:
    import numpy as cp


class FileIterator:
    def __init__(self, path, nfiles, shuffle=True, **kwargs):
        self.path = path
        self.nfiles = nfiles
        self.shuffle = shuffle
        self.ind = 0
        self.inds = np.arange(self.nfiles)
        if self.shuffle:
            np.random.shuffle(self.inds)

    def __iter__(self):
        self.ind = 0
        self.inds = np.arange(self.nfiles)
        if self.shuffle:
            np.random.shuffle(self.inds)
        return self

    def __next__(self):
        if self.ind >= self.nfiles:
            raise StopIteration
        self.ind += 1
        # if self.name, return that naming convention.
        return "%s/ds_part.%d.parquet" % (self.path, self.ind - 1)


class DatasetWriter:
    def __init__(self, path, nfiles=1, **kwargs):
        self.path = path
        self.nfiles = nfiles
        self.writers = {fn: None for fn in FileIterator(path, nfiles)}
        self.shared_meta_path = str(path) + "/_metadata"
        self.metadata = None
        self.new_metadata = {fn: [] for fn in FileIterator(path, nfiles)}

        # Check for _metadata
        metafile = glob.glob(self.shared_meta_path)
        if metafile:
            self.metadata = pq.ParquetDataset(metafile[0]).metadata

    def write(self, gdf, shuffle=True):
        # Shuffle the dataframe
        gdf_size = len(gdf)
        if shuffle:
            sort_key = "__sort_index__"
            arr = cp.arange(gdf_size)
            cp.random.shuffle(arr)
            gdf[sort_key] = cudf.Series(arr)
            gdf = gdf.sort_values(sort_key).drop(columns=[sort_key])

        # Write the frame out in nfiles roughly equal chunks
        chunk_size = int(gdf_size / self.nfiles)
        for i, fn in enumerate(FileIterator(self.path, self.nfiles)):
            s1 = i * chunk_size
            s2 = (i + 1) * chunk_size
            if i == (self.nfiles - 1):
                s2 = gdf_size
            chunk = gdf[s1:s2]
            pa_table = chunk.to_arrow()
            if self.writers[fn] is None:
                self.writers[fn] = pq.ParquetWriter(
                    fn, pa_table.schema, metadata_collector=self.new_metadata[fn],
                )
            self.writers[fn].write_table(pa_table)

    def write_metadata(self):
        self.close_writers()  # Writers must be closed to get metadata
        fns = [fn for fn in FileIterator(self.path, self.nfiles, shuffle=False)]
        if self.metadata is not None:
            _meta = self.metadata
            i_start = 0
        else:
            _meta = self.new_metadata[fns[0]]
            if _meta:
                _meta = _meta[0]
            i_start = 1
        for i in range(i_start, len(fns)):
            _meta_new = self.new_metadata[fns[i]]
            if _meta_new:
                _meta.append_row_groups(_meta_new[0])
        with open(self.shared_meta_path, "wb") as fil:
            _meta.write_metadata_file(fil)
        self.metadata = _meta
        return

    def close_writers(self):
        for fn, writer in self.writers.items():
            if writer is not None:
                writer.close()
                # Set row-group file paths
                self.new_metadata[fn][0].set_file_path(os.path.basename(fn))
                # Drop the stored reference so __del__ cannot close twice
                # (rebinding the loop variable alone would not do this).
                self.writers[fn] = None

    def __del__(self):
        self.close_writers()
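# Hedged usage sketch (not in the original module): requires a GPU cudf
# environment. Splits a small frame across two shuffled parquet parts and
# writes the shared _metadata footer. The output path is illustrative.
import os
import cudf

os.makedirs("/tmp/ds_out", exist_ok=True)
gdf = cudf.DataFrame({"a": list(range(10)), "b": list(range(10))})
writer = DatasetWriter("/tmp/ds_out", nfiles=2)
writer.write(gdf)
writer.write_metadata()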
| 32.104
| 82
| 0.595315
| 526
| 4,013
| 4.382129
| 0.315589
| 0.039046
| 0.032538
| 0.024729
| 0.167896
| 0.118872
| 0.093709
| 0.093709
| 0.060738
| 0.060738
| 0
| 0.009747
| 0.309743
| 4,013
| 124
| 83
| 32.362903
| 0.822383
| 0.179666
| 0
| 0.159091
| 0
| 0
| 0.014373
| 0.006422
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.090909
| 0
| 0.238636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a18608a6d3310b926afa6ca71ff25504d52035f
| 481
|
py
|
Python
|
example/my_hook.py
|
Globidev/github-docker-hook
|
716de2f79ca30221edd2b70f3f7c85e5d033bae9
|
[
"MIT"
] | 2
|
2015-09-24T07:38:07.000Z
|
2015-11-05T18:33:43.000Z
|
example/my_hook.py
|
Globidev/github-docker-hook
|
716de2f79ca30221edd2b70f3f7c85e5d033bae9
|
[
"MIT"
] | 2
|
2015-11-04T17:34:14.000Z
|
2015-11-09T02:05:31.000Z
|
example/my_hook.py
|
Globidev/github-docker-hook
|
716de2f79ca30221edd2b70f3f7c85e5d033bae9
|
[
"MIT"
] | null | null | null |
ROUTE = '/push'
PORT = 4242
IMAGE_NAME = 'globidocker/github-hook'
import docker
cli = docker.Client()
from lib.git import clone_tmp
def on_push(data, logger):
url = data['repository']['html_url']
logger.info('Cloning repository: "{}"...'.format(url))
with clone_tmp(url) as repo:
logger.info('Building image...')
cli.build(repo.path, IMAGE_NAME)
logger.info('Pushing image...')
cli.push(IMAGE_NAME)
logger.info('done')
| 19.24
| 58
| 0.634096
| 63
| 481
| 4.730159
| 0.571429
| 0.134228
| 0.100671
| 0.127517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010499
| 0.2079
| 481
| 24
| 59
| 20.041667
| 0.771654
| 0
| 0
| 0
| 0
| 0
| 0.22869
| 0.047817
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.133333
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a1861ac2df97b1bcfbdb3654e5d9c31f32e9e49
| 12,403
|
py
|
Python
|
scripts/TestSuite/run_tests.py
|
ghorwin/MasterSim
|
281b71e228435ca8fa02319bf2ce86b66b8b2b45
|
[
"BSD-3-Clause"
] | 5
|
2021-11-17T07:12:54.000Z
|
2022-03-16T15:06:39.000Z
|
scripts/TestSuite/run_tests.py
|
ghorwin/MasterSim
|
281b71e228435ca8fa02319bf2ce86b66b8b2b45
|
[
"BSD-3-Clause"
] | 25
|
2021-09-09T07:39:13.000Z
|
2022-01-23T13:00:19.000Z
|
scripts/TestSuite/run_tests.py
|
ghorwin/MasterSim
|
281b71e228435ca8fa02319bf2ce86b66b8b2b45
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# Solver test suite runner script, used for
# * regression tests (default)
# * test-init runs (with --test-init option)
# * performance evaluation (with --performance option)
#
# 1. Regression tests (the default)
# - runs set of projects and compares physical results and solver stats
# - meant to be run with either sequential or parallel solver
# - performance is monitored, but not so important (very short tests!)
# - expects jobs to have reference result directory, otherwise warning is issued
#   and simulation is skipped (with --run-all option all simulations are done even without
# reference result dirs)
# - result of script:
# for each job show old/new stats and metrics
# show summary table with timings for all successful jobs
#
# 2. Initialization tests
# - checks if solver can initialize set of project files
# - script parses directory structure, generates list of test-init jobs
# and executes test initialization
# - result of script:
# for each job result status and time needed for test init (only for information)
#
# 3. Performance tests
# - collects list of jobs, runs each job 3 times and stores timings for all cases
# - result of script:
#   for each job print individual timings and best evaluation time in a table
#
# License:
# BSD License
#
# Authors:
# Andreas Nicolai <andreas.nicolai@tu-dresden.de>
#
# Syntax:
# > python run_tests.py --path <path/to/testsuite> --solver <path/to/solver/binary> --extension <project file extension>
#
# Example:
# > python run_tests.py --path ../../data/tests --solver ./DelphinSolver --extension d6p
# > python run_tests.py -p ../../data/tests -s ./DelphinSolver -e d6p
#
# Returns:
# 0 - if all tests could be simulated successfully and if all solver results/metrics match those of reference results
# 1 - if anything failed
#
# Note: if run with --run-all option, test cases without reference results will always be accepted.
#
import subprocess  # import the module for calling external programs (creating subprocesses)
import sys
import os
import os.path
import shutil
import filecmp  # for result file comparison
import argparse
import platform  # to detect current OS

from colorama import *
from SolverStats import *
from print_funcs import *
from config import USE_COLORS


def configCommandLineArguments():
    """
    This method sets the available input parameters and parses them.
    Returns a configured argparse.ArgumentParser object.
    """
    parser = argparse.ArgumentParser("run_tests.py")
    parser.description = '''
Runs the regression test suite. Can be used for init-tests (--test-init)
or performance evaluation (--performance) as well.'''
    parser.add_argument('-p', '--path', dest='path', required=True, type=str,
                        help='Path to test suite root directory.')
    parser.add_argument('-s', '--solver', dest='solver', required=True, type=str,
                        help='Path to solver binary.')
    parser.add_argument('-e', '--extension', dest="extension", required=True, type=str,
                        help='Project file extension.')
    parser.add_argument('--no-colors', dest="no_colors", action='store_true',
                        help='Disables colored console output.')
    parser.add_argument('--test-init', dest="test_init", action='store_true',
                        help='Enables test-initialization mode (runs solvers with --test-init argument and '
                             'skips result evaluation).')
    parser.add_argument('--performance', dest="performance", action='store_true',
                        help='Enables performance evaluation mode (runs solvers three times '
                             'without result evaluation and dumps timings of all cases and best-of-three timings).')
    parser.add_argument('--run-all', dest="run_all", action='store_true',
                        help='If set (in regression test mode), also the test cases without reference results '
                             'are simulated (can be used to generate reference results for all cases).')

    return parser.parse_args()


def checkResults(dir1, dir2, evalTimes):
    """
    Compares two result directories for equal contents.

    Compared are:
    - physical results
    - solver counters (/log/summary.txt)

    This function uses IBK.SolverStats

    Arguments:
    * dir1 (reference results) and dir2 (computed results)
    * evalTimes is a dictionary with filepath (key) and wall clock time (value),
      new entries are always added to the dictionary

    Returns: True on success, False on error
    """
    try:
        # open stat files and compare them
        stats1 = SolverStats()
        if not stats1.read(dir1 + "/log/summary.txt"):
            return False
        stats2 = SolverStats()
        if not stats2.read(dir2 + "/log/summary.txt"):
            return False

        if not SolverStats.compareStats(stats1, stats2, []):
            printError("Mismatching statistics.")
            return False

        # compare all result files (d60, tsv), if any reference result files exist
        if os.path.exists(dir1 + "/results"):
            if not SolverStats.compareResults(dir1 + "/results", dir2 + "/results"):
                printError("Mismatching values.")
                return False
        evalTimes[dir2] = stats2.timers['WallClockTime']
    except Exception as e:
        printError("Error comparing simulation results, error: {}".format(e))
    return True


def run_performance_evaluation(args, projects):
    # we basically do the same as the main script, but this time we run all test cases
    # whether they have reference results or not and simply remember the run times

    # we store evaluation times in a dictionary, key is the path to the project file,
    # value is a list of evaluation times
    eval_times = dict()
    failed_projects = []

    ITERATIONS = 3

    for iter in range(ITERATIONS):
        for project in projects:
            print(project)
            path, fname = os.path.split(project)
            # print("Path    : " + path)
            # print("Project : " + fname)

            cmdline = [args.solver, project]
            # compose path of result folder up front, it is also needed in the
            # error message below
            resultsFolder = project[:-(1 + len(args.extension))]

            # try to read commandline file
            cmdlineFilePath = project + ".cmdline"
            if os.path.exists(cmdlineFilePath):
                fobj = open(cmdlineFilePath)
                cmdlineAddOn = fobj.readline()
                del fobj
                cmdline.append(cmdlineAddOn)
                print("Applying cmdline addon: " + cmdlineAddOn)

            try:
                # run solver
                FNULL = open(os.devnull, 'w')
                if platform.system() == "Windows":
                    cmdline.append("-x")
                    cmdline.append("--verbosity-level=0")
                    retcode = subprocess.call(cmdline, creationflags=subprocess.CREATE_NEW_CONSOLE)
                else:
                    retcode = subprocess.call(cmdline, stdout=FNULL, stderr=subprocess.STDOUT)
                # check return code
                if retcode == 0:
                    # read summary file and remember wall clock time
                    stats1 = SolverStats()
                    if stats1.read(resultsFolder + "/log/summary.txt"):
                        # dict.has_key() is Python 2 only; use the 'in' operator
                        if project not in eval_times:
                            eval_times[project] = []
                        eval_times[project].append(stats1.timers['WallClockTime'])
                else:
                    # mark project as failed
                    failed_projects.append(project)
                    # and print error message
                    printError("Simulation failed, see screenlog file {}".format(
                        os.path.join(os.getcwd(), resultsFolder + "/log/screenlog.txt")))
            except OSError as e:
                printError("Error starting solver executable '{}', error: {}".format(args.solver, e))
                exit(1)

    print("\nSuccessful projects:\n")
    print("{:60s} {}".format("Project path", "Wall clock times [s], last column is min of all runs"))
    # dict views have no .sort() in Python 3; use sorted() instead
    filenames = sorted(eval_times.keys())
    perfstats = open(os.path.join(args.path, "performance_stats.txt"), 'w')
    for filename in filenames:
        fname = os.path.basename(filename)
        onedir = os.path.join(os.path.basename(os.path.dirname(filename)), os.path.basename(filename))
        s = "{:65s}".format(onedir)
        minVal = 1e20
        for t in range(len(eval_times[filename])):
            duration = eval_times[filename][t]
            s = s + (" {:>10.3f}".format(duration))
            minVal = min(minVal, duration)
        s = s + (" {:>10.3f}".format(minVal))
        printNotification(s)
        perfstats.write(s + '\n')
    del perfstats

    if len(failed_projects) > 0:
        print("\nFailed projects:")
        for p in failed_projects:
            printError(p)
        print("\n")
        printError("*** Failure ***")
        exit(1)

    return 0


# *** main script ***

args = configCommandLineArguments()

if not args.no_colors:
    init()  # init ANSI code filtering for windows
    config.USE_COLORS = True
    printNotification("Enabling colored console output")

if args.test_init and args.performance:
    printError("Either use --test-init or --performance, but not both together.")
    exit(1)

# process all directories under test suite directory
currentOS = platform.system()
compilerID = None
if currentOS == "Linux":
    compilerID = "gcc_linux"
elif currentOS == "Windows":
    compilerID = "VC14_win64"
elif currentOS == "Darwin":
    compilerID = "gcc_mac"

if compilerID is None:
    printError("Unknown/unsupported platform")
    exit(1)
else:
    print("Compiler ID            : " + compilerID)

print("Test suite             : " + args.path)
print("Solver                 : " + args.solver)
print("Project file extension : " + args.extension)

# walk all subdirectories (except .svn) within testsuite and collect project file names
projects = []
for root, dirs, files in os.walk(args.path, topdown=False):
    for name in files:
        if name.endswith('.' + args.extension):
            projectFilePath = os.path.join(root, name)
            projects.append(projectFilePath)

projects.sort()
print("Number of projects     : {}\n".format(len(projects)))

# performance tests?
if args.performance:
    res = run_performance_evaluation(args, projects)
    exit(res)

failed_projects = []
eval_times = dict()  # key - file path to project, value - eval time in [s]

for project in projects:
    print(project)
    path, fname = os.path.split(project)
    # print("Path    : " + path)
    # print("Project : " + fname)

    # compose path of result folder
    resultsFolder = project[:-(1 + len(args.extension))]

    # remove entire directory with previous results
    if os.path.exists(resultsFolder):
        shutil.rmtree(resultsFolder)

    cmdline = [args.solver, project]

    # if in test-init mode, append --test-init to command line
    if args.test_init:
        cmdline.append("--test-init")
        skipResultCheck = True
        args.run_all = True
    else:
        skipResultCheck = False

    referenceFolder = resultsFolder + "." + compilerID
    if not os.path.exists(referenceFolder):
        if not args.run_all:
            failed_projects.append(project)
            printError("Missing reference data directory '{}'".format(os.path.split(referenceFolder)[1]))
            continue
        else:
            skipResultCheck = True

    try:
        # run solver
        FNULL = open(os.devnull, 'w')
        if platform.system() == "Windows":
            cmdline.append("-x")
            cmdline.append("--verbosity-level=0")
            retcode = subprocess.call(cmdline, creationflags=subprocess.CREATE_NEW_CONSOLE)
        else:
            if args.test_init:
                # in test-init mode we want to see the output
                retcode = subprocess.call(cmdline)
            else:
                retcode = subprocess.call(cmdline, stdout=FNULL, stderr=subprocess.STDOUT)
        # check return code
        if retcode == 0:
            # successful run
            if not skipResultCheck:
                # now check against reference results
                if not checkResults(referenceFolder, resultsFolder, eval_times):
                    if project not in failed_projects:
                        failed_projects.append(project)  # mark as failed
                    printError("Mismatching results.")
        else:
            # mark project as failed
            failed_projects.append(project)
            # and print error message
            printError("Simulation failed, see screenlog file {}".format(
                os.path.join(os.getcwd(), resultsFolder + "/log/screenlog.txt")))
    except OSError as e:
        printError("Error starting solver executable '{}', error: {}".format(args.solver, e))
        exit(1)

print("\nSuccessful projects:\n")
print("{:80s} {}".format("Project path", "Wall clock time [s]"))
filenames = sorted(eval_times.keys())
for filename in filenames:
    fname = os.path.basename(filename)
    onedir = os.path.join(os.path.basename(os.path.dirname(filename)), os.path.basename(filename))
    printNotification("{:80s} {:>10.3f}".format(onedir, eval_times[filename]))

if len(failed_projects) > 0:
    print("\nFailed projects:")
    for p in failed_projects:
        printError(p)
    print("\n")
    printError("*** Failure ***")
    exit(1)

printNotification("*** Success ***")
exit(0)
| 33.252011
| 120
| 0.689511
| 1,582
| 12,403
| 5.365992
| 0.246523
| 0.01555
| 0.014018
| 0.016492
| 0.284486
| 0.233007
| 0.215809
| 0.208976
| 0.208976
| 0.198139
| 0
| 0.007303
| 0.194066
| 12,403
| 372
| 121
| 33.341398
| 0.841937
| 0.299605
| 0
| 0.410138
| 0
| 0
| 0.231685
| 0.00245
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013825
| false
| 0
| 0.0553
| 0
| 0.101382
| 0.165899
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a1b2360ed8259c0fd8d46c53ed3e0ed659879cf
| 4,815
|
py
|
Python
|
sigr/Anechoic.py
|
JerameyATyler/sigR
|
25c895648c5f90f57baa95f2cdd097cd33259a07
|
[
"MIT"
] | null | null | null |
sigr/Anechoic.py
|
JerameyATyler/sigR
|
25c895648c5f90f57baa95f2cdd097cd33259a07
|
[
"MIT"
] | null | null | null |
sigr/Anechoic.py
|
JerameyATyler/sigR
|
25c895648c5f90f57baa95f2cdd097cd33259a07
|
[
"MIT"
] | null | null | null |
from torch.utils.data import Dataset


class Anechoic(Dataset):
    def __init__(self, root, ttv, download=False, transform=None, target_transform=None, columns=None,
                 output_path=None):
        from pathlib import Path
        import os

        ttvs = ['train', 'test', 'validate']
        assert ttv in ttvs, f'Acceptable values for ttv are {", ".join(ttvs)}'
        self.ttv = ttv
        self.transform = transform
        self.target_transform = target_transform

        self.root = Path(root).__str__()
        self.data_path = (Path(self.root) / self.ttv).__str__()
        self.label_path = f'{self.data_path}_recipe'
        self.output_path = output_path

        if download:
            self.download()
        else:
            assert os.path.isdir(self.root), f'Root directory {self.root} must exist if download=False'
            assert os.path.isdir(self.data_path), f'Data directory {self.data_path} must exist if download=False'
            assert os.path.isdir(self.label_path), f'Label directory {self.label_path} must exist if download=False'

        self.labels = self.set_labels(columns)

    def download(self):
        from pathlib import Path
        import requests
        import zipfile
        import io
        import shutil
        import os

        if not os.path.isdir(self.root):
            os.mkdir(self.root)

        _download_url = 'https://reflections.speakeasy.services'
        print(f'Downloading dataset at {_download_url}/{self.ttv}.zip')
        r = requests.get(f'{_download_url}/{self.ttv}.zip', stream=True)
        z = zipfile.ZipFile(io.BytesIO(r.content))
        print('Finished downloading')

        if not os.path.isdir(self.data_path):
            os.mkdir(self.data_path)
        if not os.path.isdir(self.label_path):
            os.mkdir(self.label_path)

        print('Extracting dataset')
        for f in z.namelist():
            filename = Path(f).name
            if not filename:
                continue
            source = z.open(f)
            if filename.endswith('.zip'):
                target = open((Path(self.root) / filename).__str__(), 'wb')
            else:
                target = open((Path(self.data_path) / filename).__str__(), 'wb')
            print(f'\tExtracting file: {filename}')
            with source, target:
                shutil.copyfileobj(source, target)

        assert os.path.isfile(f'{self.label_path}.zip'), f'{self.label_path}.zip missing'
        z = zipfile.ZipFile(f'{self.label_path}.zip')
        z.extractall(self.label_path)

    def set_labels(self, columns):
        from data_loader import read_recipe

        if columns is not None:
            # wrap a single column name in a list before indexing the recipe;
            # the original 'type(columns) is not None' check was always true
            if not isinstance(columns, list):
                columns = [columns]
            if 'filepath' not in columns:
                columns.append('filepath')
            return read_recipe(self.label_path)[columns]
        return read_recipe(self.label_path)

    def __len__(self):
        return self.labels.shape[0]

    def __getitem__(self, item):
        from pydub import AudioSegment
        from pathlib import Path
        from utils import audiosegment_to_array

        labels = self.labels.iloc[item]
        audio = AudioSegment.from_wav((Path(self.data_path) / f"{labels['filepath']}.wav").__str__())

        if self.transform:
            audio = self.transform(audio)
        else:
            audio = audiosegment_to_array(audio)
        if self.target_transform:
            labels = self.target_transform(labels)

        return audio, labels

    def play_sample(self, item):
        from pathlib import Path
        from pydub import AudioSegment
        from utils import play_audio
        from IPython.display import display
        import os

        filepath = f'{(Path(self.data_path) / self.labels.iloc[item]["filepath"]).__str__()}.wav'
        assert os.path.isfile(filepath), f'{filepath} does not exist'
        audio = AudioSegment.from_wav(filepath)
        return display(play_audio(audio))


def get_ttv(root, download=False, transform=None, target_transform=None, columns=None, batch_size=60):
    from torch.utils.data import DataLoader

    train = DataLoader(
        Anechoic(root, 'train', download=download, transform=transform, target_transform=target_transform,
                 columns=columns), batch_size=batch_size, shuffle=True)
    test = DataLoader(
        Anechoic(root, 'test', download=download, transform=transform, target_transform=target_transform,
                 columns=columns), batch_size=batch_size, shuffle=False)
    validate = DataLoader(
        Anechoic(root, 'validate', download=download, transform=transform, target_transform=target_transform,
                 columns=columns), batch_size=batch_size, shuffle=True)
    return train, test, validate
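# Hedged usage sketch (not part of the module): fetch the 'train' split into
# an illustrative local directory and inspect one sample. Assumes the helper
# modules (data_loader, utils) used above are importable.
ds = Anechoic('./anechoic_data', 'train', download=True)
print(len(ds))
audio, labels = ds[0]
print(labels['filepath'])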
| 37.038462
| 119
| 0.627414
| 584
| 4,815
| 5
| 0.195205
| 0.061644
| 0.048973
| 0.030822
| 0.353082
| 0.240753
| 0.17774
| 0.17774
| 0.17774
| 0.139384
| 0
| 0.000851
| 0.267705
| 4,815
| 129
| 120
| 37.325581
| 0.827283
| 0
| 0
| 0.138614
| 0
| 0.009901
| 0.142887
| 0.05026
| 0
| 0
| 0
| 0
| 0.059406
| 1
| 0.069307
| false
| 0
| 0.188119
| 0.009901
| 0.326733
| 0.039604
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a1b3e2dbb66fc996ec081ab5ef13e302246dd49
| 1,410
|
py
|
Python
|
scripts/update_covid_tracking_data.py
|
TomGoBravo/covid-data-public
|
76cdf384f4e6b5088f0a8105a4fabc37c899015c
|
[
"MIT"
] | null | null | null |
scripts/update_covid_tracking_data.py
|
TomGoBravo/covid-data-public
|
76cdf384f4e6b5088f0a8105a4fabc37c899015c
|
[
"MIT"
] | null | null | null |
scripts/update_covid_tracking_data.py
|
TomGoBravo/covid-data-public
|
76cdf384f4e6b5088f0a8105a4fabc37c899015c
|
[
"MIT"
] | null | null | null |
import logging
import datetime
import pathlib

import pytz
import requests
import pandas as pd

DATA_ROOT = pathlib.Path(__file__).parent.parent / "data"

_logger = logging.getLogger(__name__)


class CovidTrackingDataUpdater(object):
    """Updates the covid tracking data."""
    HISTORICAL_STATE_DATA_URL = "http://covidtracking.com/api/states/daily"
    COVID_TRACKING_ROOT = DATA_ROOT / "covid-tracking"

    @property
    def output_path(self) -> pathlib.Path:
        return self.COVID_TRACKING_ROOT / "covid_tracking_states.csv"

    @property
    def version_path(self) -> pathlib.Path:
        return self.COVID_TRACKING_ROOT / "version.txt"

    @staticmethod
    def _stamp():
        # String of the current date and time.
        # So that we're consistent about how we mark these
        pacific = pytz.timezone('US/Pacific')
        d = datetime.datetime.now(pacific)
        return d.strftime('%A %b %d %I:%M:%S %p %Z')

    def update(self):
        _logger.info("Updating Covid Tracking data.")
        response = requests.get(self.HISTORICAL_STATE_DATA_URL)
        data = response.json()
        df = pd.DataFrame(data)
        df.to_csv(self.output_path, index=False)

        version_path = self.version_path
        version_path.write_text(f"Updated at {self._stamp()}\n")


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    CovidTrackingDataUpdater().update()
| 30
| 75
| 0.685816
| 179
| 1,410
| 5.156425
| 0.513966
| 0.098592
| 0.055255
| 0.047671
| 0.099675
| 0.099675
| 0.099675
| 0.099675
| 0.099675
| 0
| 0
| 0
| 0.205674
| 1,410
| 46
| 76
| 30.652174
| 0.824107
| 0.085816
| 0
| 0.060606
| 0
| 0
| 0.150546
| 0.019501
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121212
| false
| 0
| 0.181818
| 0.060606
| 0.484848
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a21fd4ed5ae1c86fb6e590a1edd2f37df8e132c
| 1,220
|
py
|
Python
|
CustomerProfiles/delete-customer-profile.py
|
adavidw/sample-code-python
|
e02f8856c11439cebd67d98fb43431cd4b95316e
|
[
"MIT"
] | 36
|
2015-11-18T22:35:39.000Z
|
2022-03-21T10:13:23.000Z
|
CustomerProfiles/delete-customer-profile.py
|
adavidw/sample-code-python
|
e02f8856c11439cebd67d98fb43431cd4b95316e
|
[
"MIT"
] | 23
|
2016-02-02T06:09:16.000Z
|
2020-03-06T22:54:55.000Z
|
CustomerProfiles/delete-customer-profile.py
|
adavidw/sample-code-python
|
e02f8856c11439cebd67d98fb43431cd4b95316e
|
[
"MIT"
] | 82
|
2015-11-22T11:46:33.000Z
|
2022-03-18T02:46:48.000Z
|
import os, sys
import imp
from authorizenet import apicontractsv1
from authorizenet.apicontrollers import *

constants = imp.load_source('modulename', 'constants.py')


def delete_customer_profile(customerProfileId):
    merchantAuth = apicontractsv1.merchantAuthenticationType()
    merchantAuth.name = constants.apiLoginId
    merchantAuth.transactionKey = constants.transactionKey

    deleteCustomerProfile = apicontractsv1.deleteCustomerProfileRequest()
    deleteCustomerProfile.merchantAuthentication = merchantAuth
    deleteCustomerProfile.customerProfileId = customerProfileId

    controller = deleteCustomerProfileController(deleteCustomerProfile)
    controller.execute()

    response = controller.getresponse()

    if (response.messages.resultCode == "Ok"):
        print("Successfully deleted customer with customer profile id %s" % deleteCustomerProfile.customerProfileId)
    else:
        print(response.messages.message[0]['text'].text)
        print("Failed to delete customer profile with customer profile id %s" % deleteCustomerProfile.customerProfileId)

    return response


if (os.path.basename(__file__) == os.path.basename(sys.argv[0])):
    delete_customer_profile(constants.customerProfileId)
| 38.125
| 120
| 0.788525
| 108
| 1,220
| 8.824074
| 0.481481
| 0.078699
| 0.066107
| 0.044071
| 0.125918
| 0.125918
| 0.125918
| 0
| 0
| 0
| 0
| 0.004717
| 0.131148
| 1,220
| 31
| 121
| 39.354839
| 0.89434
| 0
| 0
| 0
| 0
| 0
| 0.119672
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.173913
| 0
| 0.26087
| 0.130435
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a25b6f7b3f250cb0ca3c95cee4acba5e53203f1
| 3,659
|
py
|
Python
|
python/xskipper/indexbuilder.py
|
guykhazma/xskipper
|
058712e744e912bd5b22bc337b9d9ff2fc6b1036
|
[
"Apache-2.0"
] | 31
|
2021-01-27T15:03:18.000Z
|
2021-12-13T11:09:58.000Z
|
python/xskipper/indexbuilder.py
|
guykhazma/xskipper
|
058712e744e912bd5b22bc337b9d9ff2fc6b1036
|
[
"Apache-2.0"
] | 20
|
2021-02-01T16:42:17.000Z
|
2022-01-26T10:48:59.000Z
|
python/xskipper/indexbuilder.py
|
guykhazma/xskipper
|
058712e744e912bd5b22bc337b9d9ff2fc6b1036
|
[
"Apache-2.0"
] | 12
|
2021-01-27T14:50:11.000Z
|
2021-08-10T22:13:46.000Z
|
# Copyright 2021 IBM Corp.
# SPDX-License-Identifier: Apache-2.0
from pyspark.sql.dataframe import DataFrame
from py4j.java_collections import MapConverter


class IndexBuilder:
    """
    Helper class for building indexes

    :param sparkSession: SparkSession object
    :param uri: the URI of the dataset / the identifier of the hive table
                on which the index is defined
    :param xskipper: the xskipper instance associated with this IndexBuilder
    """
    def __init__(self, spark, uri, xskipper):
        self._jindexBuilder = spark._jvm.io.xskipper.index.execution.IndexBuilder(
            spark._jsparkSession, uri, xskipper.xskipper)
        self.xskipper = xskipper
        self.spark = spark

    def addMinMaxIndex(self, col, keyMetadata=None):
        """
        Adds a MinMax index for the given column

        :param col: the column to add the index on
        :param keyMetadata: optional key metadata
        """
        if keyMetadata:
            self._jindexBuilder.addMinMaxIndex(col, keyMetadata)
        else:
            self._jindexBuilder.addMinMaxIndex(col)
        return self

    def addValueListIndex(self, col, keyMetadata=None):
        """
        Adds a ValueList index on the given column

        :param col: the column to add the index on
        :param keyMetadata: optional key metadata
        """
        if keyMetadata:
            self._jindexBuilder.addValueListIndex(col, keyMetadata)
        else:
            self._jindexBuilder.addValueListIndex(col)
        return self

    def addBloomFilterIndex(self, col, keyMetadata=None):
        """
        Adds a BloomFilter index on the given column

        :param col: the column to add the index on
        :param keyMetadata: optional key metadata
        """
        if keyMetadata:
            self._jindexBuilder.addBloomFilterIndex(col, keyMetadata)
        else:
            self._jindexBuilder.addBloomFilterIndex(col)
        return self

    def addCustomIndex(self, indexClass, cols, params, keyMetadata=None):
        """
        Adds a Custom index on the given columns

        :param cols: a sequence of cols
        :param params: a map of index specific parameters
        :param keyMetadata: optional key metadata
        """
        gateway = self.spark.sparkContext._gateway
        jmap = MapConverter().convert(params, gateway._gateway_client)
        objCls = gateway.jvm.String
        colsArr = gateway.new_array(objCls, len(cols))
        for i in range(len(cols)):
            colsArr[i] = cols[i]
        if keyMetadata:
            self._jindexBuilder.addCustomIndex(indexClass, colsArr, jmap, keyMetadata)
        else:
            self._jindexBuilder.addCustomIndex(indexClass, colsArr, jmap)
        return self

    def build(self, reader=None):
        """
        Builds the index

        :param reader: if the uri in the xskipper instance is a table identifier, \
               a DataFrameReader instance to enable reading the URI as a DataFrame.
               Note: The reader is assumed to have all of the parameters configured.
               `reader.load(Seq(<path>))` will be used by the indexing code to read each
               object separately
        :return: dataFrame object containing statistics about the build operation
        """
        if reader:
            return DataFrame(self._jindexBuilder.build(reader._jreader), self.spark._wrapped)
        else:
            # build for tables
            return DataFrame(self._jindexBuilder.build(), self.spark._wrapped)
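# Hedged usage sketch (not part of this module): in xskipper the builder is
# normally obtained from an Xskipper instance rather than constructed directly.
# The dataset path, the column names, and an active SparkSession `spark` are
# assumptions for illustration.
from xskipper import Xskipper

xskipper = Xskipper(spark, "s3a://bucket/dataset")
reader = spark.read.format("parquet")
stats_df = (xskipper.indexBuilder()
            .addMinMaxIndex("temperature")
            .addValueListIndex("city")
            .build(reader))
stats_df.show()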
| 38.114583
| 108
| 0.625854
| 391
| 3,659
| 5.792839
| 0.329923
| 0.082561
| 0.033554
| 0.03532
| 0.32936
| 0.234879
| 0.153201
| 0.153201
| 0.153201
| 0.153201
| 0
| 0.00276
| 0.306914
| 3,659
| 95
| 109
| 38.515789
| 0.890379
| 0.378245
| 0
| 0.302326
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.139535
| false
| 0
| 0.046512
| 0
| 0.348837
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a26ea77dac1512349aaac759f21f3e326122e27
| 746
|
py
|
Python
|
src/graphs/python/bfs/src/bfs.py
|
djeada/GraphAlgorithms
|
0961303ec20430f90053a4efb9074185f96dfddc
|
[
"MIT"
] | 2
|
2021-05-31T13:01:33.000Z
|
2021-12-20T19:48:18.000Z
|
src/graphs/python/bfs/src/bfs.py
|
djeada/GraphAlgorithms
|
0961303ec20430f90053a4efb9074185f96dfddc
|
[
"MIT"
] | null | null | null |
src/graphs/python/bfs/src/bfs.py
|
djeada/GraphAlgorithms
|
0961303ec20430f90053a4efb9074185f96dfddc
|
[
"MIT"
] | null | null | null |
from graph import Graph


def bfs(graph, source, destination):
    if not (graph.contains(source) and graph.contains(destination)):
        return float("inf")

    distances = dict()
    visited = dict()

    for vertex in graph.vertices():
        distances[vertex] = float("inf")
        visited[vertex] = False

    queue = [source]
    distances[source] = 0
    visited[source] = True

    while queue:
        u = queue.pop(0)
        for edge in graph.edges_from_vertex(u):
            v = edge.destination
            if not visited[v]:
                visited[v] = True
                distances[v] = distances[u] + edge.distance
                queue.append(v)

    return distances[destination]
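# Hedged note and sketch (not in the original file): list.pop(0) is O(n); on
# larger graphs a collections.deque gives O(1) pops from the left. Same
# traversal, assuming the same Graph interface as above.
from collections import deque


def bfs_deque(graph, source, destination):
    if not (graph.contains(source) and graph.contains(destination)):
        return float("inf")
    distances = {v: float("inf") for v in graph.vertices()}
    visited = {v: False for v in graph.vertices()}
    queue = deque([source])
    distances[source] = 0
    visited[source] = True
    while queue:
        u = queue.popleft()  # O(1) instead of list.pop(0)'s O(n)
        for edge in graph.edges_from_vertex(u):
            v = edge.destination
            if not visited[v]:
                visited[v] = True
                distances[v] = distances[u] + edge.distance
                queue.append(v)
    return distances[destination]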
| 21.314286
| 69
| 0.548257
| 81
| 746
| 5.024691
| 0.407407
| 0.063882
| 0.078624
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004124
| 0.349866
| 746
| 34
| 70
| 21.941176
| 0.835052
| 0
| 0
| 0
| 0
| 0
| 0.008427
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.047619
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a27eb495106ade83e880e4a8a449d48c322f96d
| 2,708
|
py
|
Python
|
bin/main.py
|
ubern-mia/point-cloud-segmentation-miccai2019
|
b131b62dc5016de53611f3a743c56cc0061e050f
|
[
"MIT"
] | 20
|
2019-10-14T06:03:10.000Z
|
2022-02-04T04:44:38.000Z
|
bin/main.py
|
ubern-mia/point-cloud-segmentation-miccai2019
|
b131b62dc5016de53611f3a743c56cc0061e050f
|
[
"MIT"
] | 11
|
2019-06-10T12:31:23.000Z
|
2022-03-12T00:04:28.000Z
|
bin/main.py
|
fabianbalsiger/point-cloud-segmentation-miccai2019
|
b131b62dc5016de53611f3a743c56cc0061e050f
|
[
"MIT"
] | 3
|
2019-11-06T14:06:44.000Z
|
2021-08-11T18:46:25.000Z
|
import argparse
import os.path
import sys

sys.path.append(
    os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))

import pymia.deeplearning.logging as log
import tensorflow as tf

import pc.configuration.config as cfg
import pc.data.handler as hdlr
import pc.data.split as split
import pc.model.point_cnn as net
import pc.utilities.filesystem as fs
import pc.utilities.seeding as seed
import pc.utilities.training as train


def main(config_file: str):
    config = cfg.load(config_file, cfg.Configuration)

    # set up directories and logging
    model_dir, result_dir = fs.prepare_directories(config_file, cfg.Configuration,
                                                   lambda: fs.get_directory_name(config))
    config.model_dir = model_dir
    config.result_dir = result_dir
    print(config)

    # set seed before model instantiation
    print('Set seed to {}'.format(config.seed))
    seed.set_seed(config.seed, config.cudnn_determinism)

    # load train and valid subjects from split file
    subjects_train, subjects_valid, _ = split.load_split(config.split_file)
    print('Train subjects:', subjects_train)
    print('Valid subjects:', subjects_valid)

    # set up data handling
    data_handler = hdlr.PointCloudDataHandler(config, subjects_train, subjects_valid, None)

    with tf.Session() as sess:
        # extract a sample for model initialization
        data_handler.dataset.set_extractor(data_handler.extractor_train)
        data_handler.dataset.set_transform(data_handler.extraction_transform_train)
        sample = data_handler.dataset[0]

        model = net.PointCNN(sess, sample, config)
        logger = log.TensorFlowLogger(config.model_dir, sess,
                                      model.epoch_summaries(),
                                      model.batch_summaries(),
                                      model.visualization_summaries())

        # trainer = train.AssemblingTester(data_handler, logger, config, model, sess)
        trainer = train.SegmentationTrainer(data_handler, logger, config, model, sess)
        # finalize to ensure that no ops are added during training,
        # which would lead to a growing graph
        tf.get_default_graph().finalize()
        trainer.train()
        logger.close()


if __name__ == '__main__':
    """The program's entry point.

    Parse the arguments and run the program.
    """
    parser = argparse.ArgumentParser(description='Deep learning for shape learning on point clouds')

    parser.add_argument(
        '--config_file',
        type=str,
        default='./bin/config.json',
        help='Path to the configuration file.'
    )

    args = parser.parse_args()
    main(args.config_file)
| 33.85
| 113
| 0.679838
| 336
| 2,708
| 5.303571
| 0.375
| 0.055556
| 0.02862
| 0.029181
| 0.035915
| 0.035915
| 0
| 0
| 0
| 0
| 0
| 0.000482
| 0.233383
| 2,708
| 79
| 114
| 34.278481
| 0.8579
| 0.124077
| 0
| 0
| 0
| 0
| 0.070614
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02
| false
| 0
| 0.24
| 0
| 0.26
| 0.08
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a287484855658cc91349375e1c4b8e475ab1fe0
| 1,317
|
py
|
Python
|
manage_env.py
|
sandeep-gh/OpenBSDRemoteIT
|
1690e67b6e2eb106c5350c75915065457fb1b9b2
|
[
"MIT"
] | null | null | null |
manage_env.py
|
sandeep-gh/OpenBSDRemoteIT
|
1690e67b6e2eb106c5350c75915065457fb1b9b2
|
[
"MIT"
] | null | null | null |
manage_env.py
|
sandeep-gh/OpenBSDRemoteIT
|
1690e67b6e2eb106c5350c75915065457fb1b9b2
|
[
"MIT"
] | null | null | null |
import os
import pickle
from deployConfig import workDir
import sys

env_fp = f"{workDir}/env.pickle"


def add_to_env(varname, path):
    with open(env_fp, "rb") as fh:
        envvars = pickle.load(fh)
    if varname in envvars.keys():
        if path not in envvars[varname]:
            envvars[varname].append(path)
    else:
        envvars[varname] = []
        envvars[varname].append(path)
    with open(env_fp, "wb") as fh:
        pickle.dump(envvars, fh)


def build_env():
    with open(env_fp, "rb") as fh:
        envvars = pickle.load(fh)
    # env_str = "\n".join(
    #     [f"""export {key}={":".join(envvars[key])}:${key}""" for key in envvars.keys()])
    env_str = ""
    for key in envvars.keys():
        suffix = ""
        if key in ["PATH", "LD_LIBRARY_PATH"]:
            suffix = f":${key}:"
        env_str = f"""{env_str}\nexport {key}={":".join(envvars[key])}{suffix}"""
    return env_str


if not os.path.exists(env_fp):
    env = {}
    with open(env_fp, "wb") as fh:
        pickle.dump(env, fh)

# add_to_env("LD_LIBRARY_PATH", "/usr/local/lib/eopenssl11/")
# add_to_env("LD_LIBRARY_PATH", f"{project_root}/Builds/Python-3.10.0/")
# add_to_env("PATH", f"{project_root}/Builds/Python-3.10.0/bin")
# add_to_env("PATH", f"{project_root}/Builds/postgresql-14.0/bin")
| 28.630435
| 90
| 0.59757
| 196
| 1,317
| 3.857143
| 0.290816
| 0.039683
| 0.05291
| 0.068783
| 0.526455
| 0.462963
| 0.312169
| 0.312169
| 0.261905
| 0.100529
| 0
| 0.012745
| 0.225513
| 1,317
| 45
| 91
| 29.266667
| 0.728431
| 0.279423
| 0
| 0.266667
| 0
| 0
| 0.117834
| 0.04034
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.133333
| 0
| 0.233333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a29485e3ae58c67b4c0c486240c276c76016ab2
| 3,328
|
py
|
Python
|
redress/tests/test_geometries.py
|
maximlamare/REDRESS
|
a6caa9924d0f6df7ed49f188b35a7743fde1486e
|
[
"MIT"
] | 1
|
2021-09-16T08:03:31.000Z
|
2021-09-16T08:03:31.000Z
|
redress/tests/test_geometries.py
|
maximlamare/REDRESS
|
a6caa9924d0f6df7ed49f188b35a7743fde1486e
|
[
"MIT"
] | null | null | null |
redress/tests/test_geometries.py
|
maximlamare/REDRESS
|
a6caa9924d0f6df7ed49f188b35a7743fde1486e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Unittests for the GDAL tools.

This file is part of the REDRESS algorithm
M. Lamare, M. Dumont, G. Picard (IGE, CEN).
"""
import pytest
from geojson import Polygon, Feature, FeatureCollection, dump
from redress.geospatial.gdal_ops import (build_poly_from_coords,
                                         build_poly_from_geojson,
                                         geom_contains)


@pytest.fixture(scope="session")
def write_geojson(tmpdir_factory):
    """Write a geojson file with predetermined coordinates."""
    # Create a polygon
    poly = Polygon([[(5.51, 44.71), (6.91, 44.71),
                     (6.91, 45.46), (5.51, 45.46)]])

    # Create a feature
    features = []
    features.append(Feature(geometry=poly))

    # Add to collection
    feature_collection = FeatureCollection(features)

    # Write to file
    fn = tmpdir_factory.mktemp("data").join("poly.geojson")
    with open(fn, 'w') as f:
        dump(feature_collection, f)

    return fn


class Test_polygons(object):
    """Test the extent of built polygons and if they overlap."""

    def test_built_poly(self):
        """Test that a polygon is correctly built from coordinates."""
        # Create 4 coordinates that form a rectangle
        coord_box = [(5.51, 44.71), (6.91, 44.71),
                     (6.91, 45.46), (5.51, 45.46)]

        # Build a polygon
        poly = build_poly_from_coords(coord_box)

        # Test if the coords are correctly built
        assert min(poly.GetEnvelope()) == coord_box[0][0]
        assert max(poly.GetEnvelope()) == coord_box[-1][1]

    def test_geojson_poly(self, write_geojson):
        """Test that a polygon is correctly built from a geojson file."""
        # Get polygon from file
        poly = build_poly_from_geojson(str(write_geojson))

        # Create base coordinates
        coord_box = [(5.51, 44.71), (6.91, 44.71),
                     (6.91, 45.46), (5.51, 45.46)]

        # Test if the coords are correctly built
        assert min(poly.GetEnvelope()) == coord_box[0][0]
        assert max(poly.GetEnvelope()) == coord_box[-1][1]

    def test_poly_contains(self):
        """Test if a created polygon is contained within another."""
        # Create a first polygon (not a rectangle)
        main_coord_box = [(6.2869, 45.2729), (6.6165, 45.0735),
                          (6.0919, 45.0191)]
        main_poly = build_poly_from_coords(main_coord_box)

        # Create a polygon inside of the main one
        inside_coords = [(6.2855, 45.1316), (6.3871, 45.1316),
                         (6.3871, 45.1810), (6.2855, 45.1810)]
        inside_poly = build_poly_from_coords(inside_coords)

        # Create a polygon that overlaps the main one
        overlap_coords = [(5.9559, 45.0977), (6.0026, 44.9259),
                          (6.6384, 45.0356)]
        overlap_poly = build_poly_from_coords(overlap_coords)

        # Create a polygon outside the main one
        outside_coords = [(6.6439, 45.7119), (6.8829, 45.7119),
                          (6.8829, 45.8708), (6.6439, 45.8708)]
        outside_poly = build_poly_from_coords(outside_coords)

        assert geom_contains(main_poly, inside_poly)
        assert not geom_contains(main_poly, overlap_poly)
        assert not geom_contains(main_poly, outside_poly)
| 36.571429
| 73
| 0.60607
| 455
| 3,328
| 4.279121
| 0.298901
| 0.03698
| 0.053416
| 0.058552
| 0.315871
| 0.232152
| 0.232152
| 0.198254
| 0.161274
| 0.161274
| 0
| 0.103277
| 0.275541
| 3,328
| 90
| 74
| 36.977778
| 0.704272
| 0.255409
| 0
| 0.177778
| 0
| 0
| 0.009877
| 0
| 0
| 0
| 0
| 0
| 0.155556
| 1
| 0.088889
| false
| 0
| 0.066667
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a2cec396ceac73b9f9e17a3fefcecf0959ae15d
| 33,258
|
py
|
Python
|
utility/visualize.py
|
richban/behavioral.neuroevolution
|
bb850bda919a772538dc86a9624a6e86623f9b80
|
[
"Apache-2.0"
] | null | null | null |
utility/visualize.py
|
richban/behavioral.neuroevolution
|
bb850bda919a772538dc86a9624a6e86623f9b80
|
[
"Apache-2.0"
] | 2
|
2020-03-31T01:45:13.000Z
|
2020-09-25T23:39:43.000Z
|
utility/visualize.py
|
richban/behavioral.neuroevolution
|
bb850bda919a772538dc86a9624a6e86623f9b80
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import os
import csv
import graphviz
import numpy as np
import plotly.graph_objs as go
import plotly
import plotly.plotly as py
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import copy
import warnings
import matplotlib as mpl
from plotly.offline import download_plotlyjs, plot, iplot
mpl.use('TkAgg')
plotly.tools.set_credentials_file(username=os.environ['PLOTLY_USERNAME'],
api_key=os.environ['PLOTLY_API_KEY'])
def plot_stats(statistics, ylog=False, view=False, filename='avg_fitness.svg'):
""" Plots the population's average and best fitness. """
if plt is None:
warnings.warn(
"This display is not available due to a missing optional dependency (matplotlib)")
return
generation = range(len(statistics.most_fit_genomes))
best_fitness = [c.fitness for c in statistics.most_fit_genomes]
avg_fitness = np.array(statistics.get_fitness_mean())
stdev_fitness = np.array(statistics.get_fitness_stdev())
median_fitness = np.array(statistics.get_fitness_median())
plt.figure(figsize=(12, 9))
ax = plt.subplot(111)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
plt.plot(generation, avg_fitness, 'b-', label="average")
plt.plot(generation, avg_fitness - stdev_fitness, 'g-.', label="-1 sd")
plt.plot(generation, avg_fitness + stdev_fitness, 'g-.', label="+1 sd")
plt.plot(generation, best_fitness, 'r-', label="best")
plt.plot(generation, median_fitness, 'y-', label="median")
plt.title("Population's average and best fitness")
plt.xlabel("Generations")
plt.ylabel("Fitness")
plt.grid()
plt.legend(loc="best")
if ylog:
plt.gca().set_yscale('symlog')
plt.savefig(filename)
if view:
plt.show()
plt.close()
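# Usage sketch (assuming `stats` is a neat-python StatisticsReporter attached
# to the population during evolution):
#   stats = neat.StatisticsReporter()
#   population.add_reporter(stats)
#   plot_stats(stats, ylog=False, view=True, filename='avg_fitness.svg')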
def plot_spikes(spikes, view=False, filename=None, title=None):
""" Plots the trains for a single spiking neuron. """
t_values = [t for t, I, v, u, f in spikes]
v_values = [v for t, I, v, u, f in spikes]
u_values = [u for t, I, v, u, f in spikes]
I_values = [I for t, I, v, u, f in spikes]
f_values = [f for t, I, v, u, f in spikes]
fig = plt.figure()
plt.subplot(4, 1, 1)
plt.ylabel("Potential (mv)")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, v_values, "g-")
if title is None:
plt.title("Izhikevich's spiking neuron model")
else:
plt.title("Izhikevich's spiking neuron model ({0!s})".format(title))
plt.subplot(4, 1, 2)
plt.ylabel("Fired")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, f_values, "r-")
plt.subplot(4, 1, 3)
plt.ylabel("Recovery (u)")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, u_values, "r-")
plt.subplot(4, 1, 4)
plt.ylabel("Current (I)")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, I_values, "r-o")
if filename is not None:
plt.savefig(filename)
if view:
plt.show()
plt.close()
fig = None
plt.close()
return fig
def plot_species(statistics, view=False, filename='speciation.svg'):
""" Visualizes speciation throughout evolution. """
if plt is None:
warnings.warn(
"This display is not available due to a missing optional dependency (matplotlib)")
return
species_sizes = statistics.get_species_sizes()
num_generations = len(species_sizes)
curves = np.array(species_sizes).T
    # plt.subplots() opens its own figure, so the size must be passed here;
    # a separate plt.figure(figsize=...) beforehand would be created and ignored
    _, ax = plt.subplots(figsize=(12, 9))
ax.stackplot(range(num_generations), *curves)
plt.title("Speciation")
plt.ylabel("Size per Species")
plt.xlabel("Generations")
plt.savefig(filename)
if view:
plt.show()
plt.close()
def draw_net(config, genome, view=False, filename=None, node_names=None, show_disabled=True, prune_unused=False,
node_colors=None, fmt='svg'):
""" Receives a genome and draws a neural network with arbitrary topology. """
# Attributes for network nodes.
if graphviz is None:
warnings.warn(
"This display is not available due to a missing optional dependency (graphviz)")
return
if node_names is None:
node_names = {}
assert type(node_names) is dict
if node_colors is None:
node_colors = {}
assert type(node_colors) is dict
node_attrs = {
'shape': 'circle',
'fontsize': '9',
'height': '0.2',
'width': '0.2'}
dot = graphviz.Digraph(format=fmt, node_attr=node_attrs)
inputs = set()
for k in config.genome_config.input_keys:
inputs.add(k)
name = node_names.get(k, str(k))
input_attrs = {'style': 'filled',
'shape': 'box'}
input_attrs['fillcolor'] = node_colors.get(k, 'lightgray')
dot.node(name, _attributes=input_attrs)
outputs = set()
for k in config.genome_config.output_keys:
outputs.add(k)
name = node_names.get(k, str(k))
node_attrs = {'style': 'filled'}
node_attrs['fillcolor'] = node_colors.get(k, 'lightblue')
dot.node(name, _attributes=node_attrs)
if prune_unused:
connections = set()
for cg in genome.connections.values():
if cg.enabled or show_disabled:
connections.add((cg.in_node_id, cg.out_node_id))
used_nodes = copy.copy(outputs)
pending = copy.copy(outputs)
while pending:
new_pending = set()
for a, b in connections:
if b in pending and a not in used_nodes:
new_pending.add(a)
used_nodes.add(a)
pending = new_pending
else:
used_nodes = set(genome.nodes.keys())
for n in used_nodes:
if n in inputs or n in outputs:
continue
attrs = {'style': 'filled',
'fillcolor': node_colors.get(n, 'white')}
dot.node(str(n), _attributes=attrs)
for cg in genome.connections.values():
if cg.enabled or show_disabled:
# if cg.input not in used_nodes or cg.output not in used_nodes:
# continue
            in_node, out_node = cg.key
            a = node_names.get(in_node, str(in_node))
            b = node_names.get(out_node, str(out_node))
style = 'solid' if cg.enabled else 'dotted'
color = 'green' if cg.weight > 0 else 'red'
width = str(0.1 + abs(cg.weight / 5.0))
dot.edge(a, b, _attributes={
'style': style, 'color': color, 'penwidth': width})
dot.render(filename, view=view)
return dot
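# Usage sketch (hypothetical node ids; in neat-python negative keys are inputs):
#   names = {-1: 'sensor_left', -2: 'sensor_right', 0: 'motor'}
#   draw_net(config, winner, view=True, filename='winner-net', node_names=names)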
def plot_species_stagnation(body, imgfilename):
body = body[3:-2]
stagnation = []
id = []
fitness = []
size = []
adj_fit = []
age = []
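    # Each report line is assumed to carry the neat-python species columns:
    # ID, age, size, fitness, adjusted fitness, stagnation.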
for line in body:
line = line.split(' ')
line = [x for x in line if x]
line[-1] = line[-1].strip()
id.append(line[0])
age.append(line[1])
size.append(line[2])
fitness.append(line[3])
adj_fit.append(line[4])
stagnation.append(line[5])
if len(id) < 2:
return None
plt.figure(figsize=(12, 9))
    stagnation = np.array(stagnation).astype(float)  # np.float is removed in modern NumPy
id = np.array(id)
points = plt.bar(id, stagnation, width=0.7)
for ind, bar in enumerate(points):
height = bar.get_height()
plt.text(bar.get_x() + bar.get_width() / 2, height + 1, 'fit {} / size {}'.format(fitness[ind], size[ind]),
ha='center', va='bottom', rotation=90, fontsize=7)
plt.ylabel('stagnation')
plt.xlabel('Species ID')
plt.axis([0, plt.axis()[1], 0, plt.axis()[3] + 20])
plt.xticks(rotation='vertical')
plt.tight_layout()
plt.subplots_adjust(top=0.85)
plt.savefig(imgfilename)
plt.clf()
plt.close('all')
return imgfilename
def plot_fitness_over_gen(file, imgfilename):
with open(file, 'r') as csvfile:
data = csv.reader(csvfile)
gen = []
avg_fit = []
stdv = []
max_fit = []
median = []
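        # Each CSV row is assumed to hold: generation, average fitness,
        # standard deviation, max fitness, median fitness.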
for row in data:
gen.append(int(row[0]))
avg_fit.append(float(row[1]))
stdv.append(float(row[2]))
max_fit.append(float(row[3]))
median.append(float(row[4]))
if len(gen) < 2:
return None
plt.figure(figsize=(12, 9))
ax = plt.subplot(111)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
plt.grid()
plt.plot(gen, avg_fit, 'b', linewidth=0.5,)
plt.plot(gen, stdv, 'g', linewidth=0.5,)
plt.plot(gen, max_fit, 'r', linewidth=0.5,)
plt.plot(gen, median, 'y', linewidth=0.5,)
plt.plot(gen, max_fit, 'r', markersize=5, label='Max fitness')
plt.plot(gen, avg_fit, 'b', markersize=5, label='Average fitness')
plt.plot(gen, stdv, 'g', markersize=5, label='Standard deviation')
plt.plot(gen, median, 'y', markersize=5, label='Median')
plt.ylabel('Fitness')
plt.xlabel('Generation')
# xmin, xmax, ymin, ymax = plt.axis()
# plt.axis([xmin, xmax, ymin, ymax])
plt.legend(bbox_to_anchor=(1, 1), loc='best')
plt.tight_layout()
plt.savefig(imgfilename)
plt.clf()
plt.close('all')
return imgfilename
def plot_single_run_scatter(scatter, dt, title):
"""Plots a single run with MAX, AVG, MEDIAN, All individuals"""
l = []
y = []
N = len(scatter.gen.unique())
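    # one evenly spaced HSL hue per generation, so the scatter forms a color gradient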
c = ['hsl('+str(h)+',50%'+',50%)' for h in np.linspace(0, 360, N)]
for i in range(int(N)):
subset = scatter.loc[scatter['gen'] == i]
trace0 = go.Scatter(
x=subset.loc[:, 'gen'],
y=subset.loc[:, 'fitness'],
mode='markers',
marker=dict(size=7,
line=dict(width=1),
color=c[i],
opacity=0.5
),
name='gen {}'.format(i),
text=subset.loc[:, 'genome']
)
l.append(trace0)
trace0 = go.Scatter(
x=dt.loc[:, 'gen'],
y=dt.loc[:, 'max'],
mode='lines',
name='Max',
line=dict(
color="rgb(204, 51, 51)",
dash="solid",
shape="spline",
smoothing=1.0,
width=2
),
)
trace1 = go.Scatter(
x=dt.loc[:, 'gen'],
y=dt.loc[:, 'median'],
mode='lines',
name='Median',
line=dict(
color="rgb(173, 181, 97)",
shape="spline",
dash="solid",
smoothing=1.0,
width=2
)
)
trace2 = go.Scatter(
x=dt.loc[:, 'gen'],
y=dt.loc[:, 'avg'],
mode='lines',
name='Average',
line=dict(
color="rgb(62, 173, 212)",
shape="spline",
dash="solid",
smoothing=1.0,
width=2
)
)
data = [trace0, trace1, trace2]
layout = go.Layout(
title='Fitness of Population Individuals - {}'.format(title),
hovermode='closest',
xaxis=dict(
title='Generations',
ticklen=5,
zeroline=False,
gridwidth=2,
),
yaxis=dict(
title='Fitness',
ticklen=5,
gridwidth=1,
),
showlegend=False
)
fig = go.Figure(data=data+l, layout=layout)
return py.iplot(fig, filename='single-run-scater-line-plot', layout=layout)
def _set_plot_params(title, ratio):
# Optionally fix the aspect ratio
if ratio:
plt.figure(figsize=plt.figaspect(ratio))
mpl.style.use('seaborn-dark-palette')
if title:
plt.title(title)
def _save_or_show(save):
if save:
plt.savefig(save)
else:
plt.show()
# exit()
def plot_single_run(gen, fit_mins, fit_avgs, fit_maxs, title=None, ratio=None, save=None):
_set_plot_params(title, ratio)
line1 = plt.plot(gen, fit_mins, 'C1:', label="Minimum Fitness")
line2 = plt.plot(gen, fit_avgs, "C2-", label="Average Fitness")
line3 = plt.plot(gen, fit_maxs, "C3:", label="Max Fitness")
lns = line1 + line2 + line3
labs = [l.get_label() for l in lns]
plt.legend(lns, labs, loc="lower right")
_save_or_show(save)
def plot_runs(dt, title, offline=True):
"""Plots the Max/Average/Median"""
trace0 = go.Scatter(
x=dt.index,
y=dt.loc[:, 'max'],
mode='lines',
name='Max',
line=dict(
color="rgb(204, 51, 51)",
dash="solid",
shape="spline",
smoothing=0.0,
width=2
),
)
trace1 = go.Scatter(
x=dt.index,
y=dt.loc[:, 'median'],
mode='lines',
name='Median',
line=dict(
color="rgb(173, 181, 97)",
shape="spline",
dash="solid",
smoothing=0.0,
width=2
)
)
trace2 = go.Scatter(
x=dt.index,
y=dt.loc[:, 'avg'],
mode='lines',
name='Average',
line=dict(
color="rgb(62, 173, 212)",
shape="spline",
dash="solid",
smoothing=0.0,
width=2
)
)
layout = go.Layout(
showlegend=True,
hovermode='closest',
title=title,
xaxis=dict(
autorange=False,
range=[0, 20],
showspikes=False,
title="Generations",
ticklen=5,
gridwidth=1,
),
yaxis=dict(
autorange=True,
title="Fitness",
ticklen=5,
gridwidth=1,
),
)
data = [trace0, trace1, trace2]
fig = go.Figure(data, layout=layout)
return py.iplot(fig, filename=title)
def plot_runs_scatter(scatter, dt, title):
    """Plots every individual per generation plus Max/Median/Average lines.

    NOTE: the enclosing ``def`` line is missing in the source; this name and
    signature are assumed from the body, which mirrors plot_single_run_scatter.
    """
    l = []
y = []
N = len(scatter.gen.unique())
c = ['hsl('+str(h)+',50%'+',50%)' for h in np.linspace(0, 360, N)]
for i in range(int(N)):
subset = scatter.loc[scatter['gen'] == i]
trace0 = go.Scatter(
x=subset.loc[:, 'gen'],
y=subset.loc[:, 'fitness'],
mode='markers',
marker=dict(size=7,
line=dict(width=1),
color=c[i],
opacity=0.5
),
name='gen {}'.format(i),
text=subset.loc[:, 'genome']
)
l.append(trace0)
trace0 = go.Scatter(
x=dt.loc[:, 'gen'],
y=dt.loc[:, 'max'],
mode='lines',
name='Max',
line=dict(
color="rgb(204, 51, 51)",
dash="solid",
shape="spline",
smoothing=0.0,
width=2
),
)
trace1 = go.Scatter(
x=dt.loc[:, 'gen'],
y=dt.loc[:, 'median'],
mode='lines',
name='Median',
line=dict(
color="rgb(173, 181, 97)",
shape="spline",
dash="solid",
smoothing=0.0,
width=2
)
)
trace2 = go.Scatter(
x=dt.loc[:, 'gen'],
y=dt.loc[:, 'avg'],
mode='lines',
name='Average',
line=dict(
color="rgb(62, 173, 212)",
shape="spline",
dash="solid",
smoothing=0.0,
width=2
)
)
data = [trace0, trace1, trace2]
layout = go.Layout(
title='Fitness of Population Individuals - {}'.format(title),
hovermode='closest',
xaxis=dict(
title='Generations',
ticklen=5,
zeroline=False,
gridwidth=2,
),
yaxis=dict(
title='Fitness',
ticklen=5,
gridwidth=1,
),
showlegend=False
)
fig = go.Figure(data=data+l, layout=layout)
return py.iplot(fig, filename='fitness-average-n-runs', layout=layout)
def plot_scatter(dt, title):
"""Plots a Scatter plot of each individual in the population"""
l = []
y = []
N = len(dt.gen.unique())
c = ['hsl('+str(h)+',50%'+',50%)' for h in np.linspace(0, 360, N)]
for i in range(int(N)):
subset = dt.loc[dt['gen'] == i]
trace0 = go.Scatter(
x=subset.loc[:, 'gen'],
y=subset.loc[:, 'fitness'],
mode='markers',
marker=dict(size=14,
line=dict(width=1),
color=c[i],
opacity=0.3
),
name='gen {}'.format(i),
text=subset.loc[:, 'genome'],
)
l.append(trace0)
layout = go.Layout(
title='Fitness of Population Individuals - {}'.format(title),
hovermode='closest',
xaxis=dict(
title='Generations',
ticklen=5,
zeroline=False,
gridwidth=2,
),
yaxis=dict(
title='Fitness',
ticklen=5,
gridwidth=1,
),
showlegend=False
)
fig = go.Figure(data=l, layout=layout)
return py.iplot(fig, filename='population-scatter')
def plot_grid(grid):
trace = go.Heatmap(z=grid, colorscale='Viridis')
data = [trace]
layout = go.Layout(
title='Environment and obstacles',
showlegend=False
)
return py.iplot(data, filename='grid-heatmap', layout=layout)
def plot_fitness(dt, title):
upper_bound = go.Scatter(
name='75%',
x=dt.index.values,
y=dt.loc[:, 'q3'],
mode='lines',
marker=dict(color="#444"),
line=dict(width=0),
fillcolor='rgba(68, 68, 68, 0.3)',
fill='tonexty')
trace = go.Scatter(
name='Median',
x=dt.index.values,
y=dt.loc[:, 'q2'],
mode='lines',
line=dict(color='rgb(31, 119, 180)'),
fillcolor='rgba(68, 68, 68, 0.3)',
fill='tonexty')
lower_bound = go.Scatter(
name='25%',
x=dt.index.values,
y=dt.loc[:, 'q1'],
marker=dict(color="#444"),
line=dict(width=0),
mode='lines')
trace_max = go.Scatter(
x=dt.index.values,
y=dt.loc[:, 'q4'],
mode='lines',
name='Max',
line=dict(
color="rgb(204, 51, 51)",
dash="solid",
shape="spline",
smoothing=0.0,
width=2
),
)
data = [lower_bound, trace, upper_bound, trace_max]
layout = go.Layout(
title=title,
hovermode='closest',
xaxis=dict(
title='Generations',
ticklen=5,
zeroline=False,
gridwidth=1,
),
yaxis=dict(
title='Fitness',
ticklen=5,
gridwidth=1,
),
showlegend=True
)
fig = go.Figure(data=data, layout=layout)
return py.iplot(fig, filename='fitness-graph-quartile')
def plot_n_fitness(dt_list, title):
rgb_colors = [
"rgb(204, 51, 51)",
"rgb(255, 153, 204)",
"rgb(255, 204, 102)",
"rgb(102, 204, 0)",
"rgb(51, 51, 255)"
]
data = [
go.Scatter(
name=str(dt.genome_id.iloc[0]),
x=dt.index.values,
y=dt.loc[:, 'fitness'],
mode='lines',
line=dict(
color=rgb,
dash="solid",
shape="spline",
smoothing=0.0,
width=2
)
)
for (rgb, dt) in zip(rgb_colors, dt_list)
]
layout = go.Layout(
title=title,
hovermode='closest',
xaxis=dict(
title='# of the post-evaluation',
ticklen=5,
zeroline=False,
gridwidth=1,
),
yaxis=dict(
title='Fitness',
ticklen=5,
gridwidth=1,
),
showlegend=True
)
fig = go.Figure(data=data, layout=layout)
return py.iplot(fig, filename='fitness-post-evaluated-individuals')
def plot_boxplot_sensors(dt):
colors = [
"#3D9970",
"#FF4136",
"#ff9933",
"#6666ff",
"#33cccc",
"#39e600",
"#3333cc"
]
data = [
go.Box(
y=dt.loc[:, 's{}'.format(i+1)],
name='sensor {}'.format(i+1),
marker=dict(color=color)
)
for i, color in enumerate(colors)
]
layout = go.Layout(
yaxis=dict(
title='Sensors Activations',
zeroline=False
),
title='Sensors Behavioral Features of Individual {}'.format(
dt.loc[:, 'genome_id'].iloc[0]),
)
fig = go.Figure(data=data, layout=layout)
return py.iplot(fig)
def plot_boxplot_fitness(dt_list):
colors = [
"#3D9970",
"#FF4136",
"#ff9933",
"#6666ff",
"#33cccc",
"#39e600",
"#3333cc"
]
data = [
go.Box(
y=dt.loc[:, 'fitness'],
name='Individual {}'.format(dt.loc[:, 'genome_id'].iloc[0]),
marker=dict(color=color)
)
for (color, dt) in zip(colors, dt_list)
]
layout = go.Layout(
yaxis=dict(
title='Fitness',
zeroline=False
),
title='Noise in fitness performance of best controllers.',
)
fig = go.Figure(data=data, layout=layout)
return py.iplot(fig)
def plot_boxplot_wheels(dt_list):
data = [
go.Box(
x=['individual {0}'.format(genome_id)
for genome_id in dt.loc[:, 'genome_id']],
y=dt.loc[:, '{}'.format(wheel)],
name='individual {0} {1}'.format(
dt.loc[:, 'genome_id'].iloc[0], wheel),
marker=dict(color=color),
)
for dt in dt_list
for (color, wheel) in zip(['#FF9933', '#6666FF'], ['avg_left', 'avg_right'])
]
layout = go.Layout(
yaxis=dict(
title='Wheel Speed Activation Values',
zeroline=False
),
boxmode='group'
)
fig = go.Figure(data=data, layout=layout)
return py.iplot(fig)
def plot_path(genomes, title):
colors = [
"#3D9970",
"#FF4136",
"#ff9933",
"#6666ff",
"#33cccc",
"#39e600",
"#3333cc",
"#42f498",
"#3c506d",
"#ada387"
]
data = [
go.Scatter(
x=np.array(genome.position)[:, 0],
y=np.array(genome.position)[:, 1],
mode='lines',
name='path {0} {1}'.format(genome.key, genome.evaluation),
marker=dict(color=color)
) for (color, genome) in zip(colors, genomes)
]
layout = go.Layout(
title=title, #.format(genomes[0].key),
xaxis=dict(
zeroline=True,
showline=True,
mirror='ticks',
zerolinecolor='#969696',
zerolinewidth=4,
linecolor='#636363',
linewidth=6,
range=[0.06, 1.10]
),
yaxis=dict(
zeroline=True,
showline=True,
mirror='ticks',
zerolinecolor='#969696',
zerolinewidth=4,
linecolor='#636363',
linewidth=6,
range=[0.0, 0.78]
),
shapes=[
# filled Rectangle
dict(
type='rect',
x0=0.83,
y0=0.0,
x1= 0.89,
y1= 0.3,
line=dict(
color="rgba(128, 0, 128, 1)",
width=2,
),
fillcolor='rgba(128, 0, 128, 0.7)',
),
dict(
type='rect',
x0=0.06,
y0=0.40,
x1= 0.33,
y1= 0.46,
line=dict(
color="rgba(128, 0, 128, 1)",
width=2,
),
fillcolor='rgba(128, 0, 128, 0.7)',
),
dict(
type='rect',
x0=0.57,
y0=0.40,
x1= 0.68,
y1= 0.78,
line=dict(
color="rgba(128, 0, 128, 1)",
width=2,
),
fillcolor='rgba(128, 0, 128, 0.7)',
)
]
)
fig = go.Figure(data=data, layout=layout)
return iplot(fig, filename='path-traveled-genomes')
def plot_thymio_fitness(thymio1, thymio2, title):
thymio1 = go.Scatter(
name='Thymio 1 - genome_id: {0}'.format(thymio1.genome_id.iloc[0]),
x=thymio1.index.values,
y=thymio1.loc[:, 'fitness'],
mode='lines',
line=dict(
color="rgb(255, 204, 102)",
dash="solid",
shape="spline",
smoothing=0.0,
width=2
)
)
thymio2 = go.Scatter(
name='Thymio 2 - genome_id: {0}'.format(thymio2.genome_id.iloc[0]),
x=thymio2.index.values,
y=thymio2.loc[:, 'fitness'],
mode='lines',
line=dict(
color="rgb(102, 204, 0)",
dash="solid",
shape="spline",
smoothing=0.0,
width=2
)
)
data = [thymio1, thymio2]
layout = go.Layout(
title=title,
hovermode='closest',
xaxis=dict(
title='# of the post-evaluation',
ticklen=5,
zeroline=False,
gridwidth=1,
),
yaxis=dict(
title='Fitness',
ticklen=5,
gridwidth=1,
),
showlegend=True
)
fig = go.Figure(data=data, layout=layout)
return py.iplot(fig, filename='fitness-difference-thymio1-thymio2')
def plot_thymio_behaviors(behaviors_list):
colors = [
"#3D9970",
"#FF4136",
"#ff9933",
"#6666ff",
"#33cccc",
"#39e600",
"#3333cc",
"#42f498",
"#3c506d",
"#ada387"
]
data = [
go.Box(
y=dt.iloc[:, 2:].sum(axis=1),
name='Behavioral Features {0}'.format(dt.loc[:, 'genome_id'].iloc[0]),
marker=dict(color=color)
)
for (color, dt) in zip(colors, behaviors_list)
]
# thymio1 = go.Box(
# y=thymio1,
# name='Behavioral Features Thymio 1',
# marker=dict(color="#FF4136")
# )
# thymio2 = go.Box(
# y=thymio2,
# name='Behavioral Features Thymio 2',
# marker=dict(color="#39e600")
# )
# data = [thymio1, thymio2]
layout = go.Layout(
yaxis=dict(
            title='Summed Behavioral Features of 10 runs',
zeroline=False
),
title='Behavioral differences of controllers'
)
fig = go.Figure(data=data, layout=layout)
return py.iplot(fig)
def plot_moea_fitness(fitness_data, hof, title='Evaluation objectives. MOEA. Transferability.'):
trace1 = go.Scatter3d(
x=fitness_data.loc[:, 'fitness'],
y=fitness_data.loc[:, 'str_disparity'],
z=fitness_data.loc[:, 'diversity'],
mode='markers',
marker=dict(
size=4,
# color=fitness_data.loc[:, 'diversity'], # set color to an array/list of desired values
# colorscale='Viridis', # choose a colorscale
opacity=0.8
),
text=fitness_data.loc[:, 'genome_id'],
)
data = [trace1]
layout = go.Layout(
title=title,
margin=dict(
l=0,
r=0,
b=0,
t=0
),
scene = dict(
xaxis = dict(
title='Task-fitness'),
yaxis = dict(
title='STR Disparity'),
zaxis = dict(
title='Diversity'),
annotations= [dict(
showarrow = True,
x = ind.fitness.values[0],
y = ind.fitness.values[1],
z = ind.fitness.values[2],
text = ind.key,
xanchor = "left",
xshift = 10,
opacity = 0.7,
textangle = 0,
ax = 0,
ay = -75,
font = dict(
color = "black",
size = 12
),
arrowcolor = "black",
arrowsize = 3,
arrowwidth = 1,
arrowhead = 1
) for ind in hof
]
),
showlegend=True
)
fig = go.Figure(data=data, layout=layout)
return py.iplot(fig, filename='3d-scatter-colorscale')
def plot_surrogate_model(fitness_data, title='STR Disparity Over Generations'):
dt = fitness_data[['gen', 'str_disparity']].groupby('gen').first()
trace0 = go.Scatter(
x=dt.index,
y=dt.loc[:, 'str_disparity'],
mode='lines',
name='STR Disparity',
line=dict(
color="rgb(204, 51, 51)",
dash="solid",
shape="spline",
smoothing=0.0,
width=2
),
)
layout = go.Layout(
showlegend=True,
hovermode='closest',
title=title,
xaxis=dict(
autorange=False,
range=[0, 20],
showspikes=False,
title="Generations",
ticklen=5,
gridwidth=1,
),
yaxis=dict(
autorange=True,
title="Approximated STR Disparity",
ticklen=5,
gridwidth=1,
),
)
data = [trace0]
fig = go.Figure(data, layout=layout)
return iplot(fig, filename=title)
def plot_str_disparity(str_disparities, title='STR Disparities of transferred controllers'):
    genome_id = np.array([record[1] for record in str_disparities])
    str_disparity = np.array([record[3] for record in str_disparities])
    real_disparity = np.array([record[4] for record in str_disparities])
trace0 = go.Scatter(
x=str_disparity,
y=real_disparity,
mode='markers',
name='STR Disparity values',
line=dict(
color="rgb(204, 51, 51)",
dash="solid",
shape="spline",
smoothing=0.0,
width=2
),
text=genome_id
)
trace1 = go.Scatter(
x=np.arange(0, 15),
y=np.arange(0, 15),
mode='lines',
line=dict(color='rgb(31, 119, 180)'),
)
layout = go.Layout(
showlegend=True,
hovermode='closest',
title=title,
xaxis=dict(
autorange=False,
range=[0, 20],
showspikes=False,
title="Approximated STR Disparity value",
ticklen=5,
gridwidth=1,
),
yaxis=dict(
autorange=True,
title="Exact STR Disparity value",
ticklen=5,
gridwidth=1,
),
)
data = [trace0, trace1]
fig = go.Figure(data, layout=layout)
return iplot(fig, filename=title)
def plot_moea_fitness_2d(fitness_data, hof, title='Evaluation objectives. MOEA. Transferability.'):
trace1 = go.Scatter(
x=fitness_data.loc[:, 'fitness'],
y=fitness_data.loc[:, 'str_disparity'],
mode='markers',
marker=dict(
size=6,
opacity=0.8
),
text=fitness_data.loc[:, 'genome_id'],
name='Individuals'
)
pareto_x = [ind.fitness.values[0] for ind in hof]
pareto_y = [ind.fitness.values[1] for ind in hof]
pareto_ids = [ind.key for ind in hof]
pareto_front = go.Scatter(
x=pareto_x,
y=pareto_y,
mode='lines+markers',
marker=dict(
size=6,
opacity=0.8,
color="red"
),
text=pareto_ids,
name='Pareto-front'
)
data = [trace1, pareto_front]
layout = go.Layout(
title=title,
xaxis = dict(
title='Task-fitness'),
yaxis = dict(
title='STR Disparity'),
# annotations= [dict(
# showarrow = True,
# x = ind.fitness.values[0],
# y = ind.fitness.values[1],
# xref = 'x',
# yref = 'y',
# text = ind.key,
# ax = 0,
# ay = -40,
# font = dict(
# color = "black",
# size = 12
# ),
# arrowcolor = "#636363",
# arrowsize = 1,
# arrowwidth = 1,
# arrowhead = 7
# ) for ind in hof
# ],
showlegend=True
)
fig = go.Figure(data=data, layout=layout)
return iplot(fig, filename='moea-scatter-colorscale')
| 25.7017 | 115 | 0.500992 | 3,806 | 33,258 | 4.306096 | 0.130321 | 0.017024 | 0.015864 | 0.016597 | 0.56782 | 0.536396 | 0.498383 | 0.470682 | 0.44359 | 0.407468 | 0 | 0.039828 | 0.356786 | 33,258 | 1,294 | 116 | 25.7017 | 0.7263 | 0.04002 | 0 | 0.583015 | 0 | 0 | 0.124329 | 0.006406 | 0 | 0 | 0 | 0 | 0.001908 | 1 | 0.023855 | false | 0 | 0.013359 | 0 | 0.062023 | 0.000954 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9a2d4e4783b1e8d97223132070735cfa9ed1e2ca | 1,683 | py | Python | CUMCM2014/Problem-A/2014-A-Python_SC/梯度图.py | Amoiensis/Mathmatic_Modeling_CUMCM | c64ec097d764ec3ae14e26e840bf5642be372d7c | ["Apache-2.0"] | 27 | 2019-08-30T07:09:53.000Z | 2021-08-29T07:37:24.000Z | CUMCM2014/Problem-A/2014-A-Python_SC/梯度图.py | Amoiensis/Mathmatic_Modeling_CUMCM | c64ec097d764ec3ae14e26e840bf5642be372d7c | ["Apache-2.0"] | 2 | 2020-08-10T03:11:32.000Z | 2020-08-24T13:39:24.000Z | CUMCM2014/Problem-A/2014-A-Python_SC/梯度图.py | Amoiensis/Mathmatic_Modeling_CUMCM | c64ec097d764ec3ae14e26e840bf5642be372d7c | ["Apache-2.0"] | 28 | 2019-12-14T03:54:42.000Z | 2022-03-12T14:38:22.000Z |
# -*- coding: utf-8 -*-
"""
---------------------------------------------
File Name:     粗避障 (coarse obstacle avoidance)
Description:
Author: fanzhiwei
date: 2019/9/5 9:58
---------------------------------------------
Change Activity: 2019/9/5 9:58
---------------------------------------------
"""
import numpy as np
import math
import matplotlib.pyplot as plt
from scipy import ndimage
from PIL import Image
LongRangeScanRaw = plt.imread("./1.tif")
ShortRangeScanRaw = plt.imread("./2.tif")
ShortRangeScanMean = ndimage.median_filter(ShortRangeScanRaw, 10)
LongRangeScanMean = ndimage.median_filter(LongRangeScanRaw, 10)
SizeLong = math.sqrt(LongRangeScanRaw.size)
SizeShort = math.sqrt(ShortRangeScanRaw.size)
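# Binarization idea: a pixel becomes an obstacle (255) when its height deviates
# from the mean by more than ~0.7 sigma, or its gradient magnitude exceeds
# ~0.84 sigma of the gradient map; everything else is treated as flat (0).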
def ToBinary(map_data):
mean = map_data.mean()
diff = np.abs(map_data - mean)
variance = math.sqrt(map_data.var()) * 0.7
x, y = np.gradient(map_data)
    graded_map = np.hypot(x, y)  # gradient magnitude map (梯度图)
mean_graded = graded_map.mean()
diff_graded = np.abs(graded_map - 0)
variance_graded = math.sqrt(graded_map.var()) * 0.84
low_value_indices = diff < variance
high_value_indices = diff >= variance
map_data[low_value_indices] = 0
map_data[high_value_indices] = 255
low_value_indices = diff_graded < variance_graded
high_value_indices = diff_graded >= variance_graded
# map_data[low_value_indices] = 0
map_data[high_value_indices] = 255
return map_data
if __name__ == "__main__":
Longimage = Image.fromarray(ToBinary(LongRangeScanMean))
Shortimage = Image.fromarray(ToBinary(ShortRangeScanMean))
Longimage.save("new_1.bmp")
Shortimage.save("new_2.bmp")
| 29.017241 | 65 | 0.633393 | 203 | 1,683 | 5.009852 | 0.374384 | 0.06883 | 0.058997 | 0.013766 | 0.184857 | 0.167158 | 0.096362 | 0.096362 | 0.096362 | 0.096362 | 0 | 0.029971 | 0.187166 | 1,683 | 58 | 66 | 29.017241 | 0.71345 | 0.215092 | 0 | 0.060606 | 0 | 0 | 0.030488 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0 | 0.151515 | 0 | 0.212121 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9a337713256137d5fcba2e7758391c4a3d42f204 | 4,156 | py | Python | scripts/figures/kernels.py | qbhan/sample_based_MCdenoising | 92f5220802ef0668105cdee5fd7e2af8a66201db | ["Apache-2.0"] | 78 | 2019-10-02T01:34:46.000Z | 2022-03-21T11:18:04.000Z | scripts/figures/kernels.py | qbhan/sample_based_MCdenoising | 92f5220802ef0668105cdee5fd7e2af8a66201db | ["Apache-2.0"] | 17 | 2019-10-04T17:04:00.000Z | 2021-05-17T19:02:12.000Z | scripts/figures/kernels.py | qbhan/sample_based_MCdenoising | 92f5220802ef0668105cdee5fd7e2af8a66201db | ["Apache-2.0"] | 18 | 2019-10-03T05:02:21.000Z | 2021-06-22T15:54:15.000Z |
import os
import argparse
import logging
import numpy as np
import torch as th
from torch.utils.data import DataLoader
from torchvision import transforms
import ttools
from ttools.modules.image_operators import crop_like
import rendernet.dataset as dset
import rendernet.modules.preprocessors as pre
import rendernet.modules.models as models
import rendernet.interfaces as interfaces
import rendernet.callbacks as cb
import rendernet.viz as viz
from sbmc.utils import make_variable
import skimage.io as skio
log = logging.getLogger("rendernet")
def main(args):
log.info("Loading model {}".format(args.checkpoint))
meta_params = ttools.Checkpointer.load_meta(args.checkpoint)
spp = meta_params["spp"]
use_p = meta_params["use_p"]
use_ld = meta_params["use_ld"]
use_bt = meta_params["use_bt"]
# use_coc = meta_params["use_coc"]
mode = "sample"
if "DisneyPreprocessor" == meta_params["preprocessor"]:
mode = "disney_pixel"
elif "SampleDisneyPreprocessor" == meta_params["preprocessor"]:
mode = "disney_sample"
log.info("Rendering at {} spp".format(spp))
log.info("Setting up dataloader, p:{} bt:{} ld:{}".format(use_p, use_bt, use_ld))
data = dset.FullImageDataset(args.data, dset.RenderDataset, spp=spp, use_p=use_p, use_ld=use_ld, use_bt=use_bt)
preprocessor = pre.get(meta_params["preprocessor"])(data)
xforms = transforms.Compose([dset.ToTensor(), preprocessor])
data.transform = xforms
dataloader = DataLoader(data, batch_size=1,
shuffle=False, num_workers=0,
pin_memory=True)
model = models.get(preprocessor, meta_params["model_params"])
model.cuda()
model.train(False)
checkpointer = ttools.Checkpointer(args.checkpoint, model, None)
extras, meta = checkpointer.load_latest()
log.info("Loading latest checkpoint {}".format("failed" if meta is None else "success"))
for scene_id, batch in enumerate(dataloader):
batch_v = make_variable(batch, cuda=True)
with th.no_grad():
klist = []
out_ = model(batch_v, kernel_list=klist)
lowspp = batch["radiance"]
target = batch["target_image"]
out = out_["radiance"]
cx = 70
cy = 20
c = 128
target = crop_like(target, out)
lowspp = crop_like(lowspp.squeeze(), out)
lowspp = lowspp[..., cy:cy+c, cx:cx+c]
lowspp = lowspp.permute(1, 2, 0, 3)
chan, h, w, s = lowspp.shape
lowspp = lowspp.contiguous().view(chan, h, w*s)
sum_r = []
sum_w = []
max_w = []
maxi = crop_like(klist[-1]["max_w"].unsqueeze(1), out)
kernels = []
updated_kernels = []
for k in klist:
kernels.append(th.exp(crop_like(k["kernels"], out)-maxi))
updated_kernels.append(th.exp(crop_like(k["updated_kernels"], out)-maxi))
out = out[..., cy:cy+c, cx:cx+c]
target = target[..., cy:cy+c, cx:cx+c]
updated_kernels = [k[..., cy:cy+c, cx:cx+c] for k in updated_kernels]
kernels = [k[..., cy:cy+c, cx:cx+c] for k in kernels]
u_kernels_im = viz.kernels2im(kernels)
kmean = u_kernels_im.mean(0)
kvar = u_kernels_im.std(0)
n, h, w = u_kernels_im.shape
u_kernels_im = u_kernels_im.permute(1, 0, 2).contiguous().view(h, w*n)
fname = os.path.join(args.output, "lowspp.png")
save(fname, lowspp)
fname = os.path.join(args.output, "target.png")
save(fname, target)
fname = os.path.join(args.output, "output.png")
save(fname, out)
fname = os.path.join(args.output, "kernels_gather.png")
save(fname, u_kernels_im)
fname = os.path.join(args.output, "kernels_variance.png")
print(kvar.max())
save(fname, kvar)
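        # deliberate breakpoint: drop into ipdb to inspect the saved tensors by hand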
import ipdb; ipdb.set_trace()
break
def save(fname, im):
os.makedirs(os.path.dirname(fname), exist_ok=True)
im = im.squeeze().cpu()
if len(im.shape) >= 3:
im = im.permute(1, 2, 0)
im = th.clamp(im, 0, 1).numpy()
skio.imsave(fname, im)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint", required=True)
parser.add_argument("--data", required=True)
parser.add_argument("--output", required=True)
args = parser.parse_args()
ttools.set_logger(True)
main(args)
| 31.24812 | 113 | 0.677334 | 606 | 4,156 | 4.493399 | 0.292079 | 0.036724 | 0.025707 | 0.012853 | 0.144326 | 0.099523 | 0.060962 | 0.017628 | 0.017628 | 0.017628 | 0 | 0.007906 | 0.178296 | 4,156 | 132 | 114 | 31.484848 | 0.789458 | 0.0077 | 0 | 0 | 0 | 0 | 0.102377 | 0.005822 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018519 | false | 0 | 0.166667 | 0 | 0.185185 | 0.009259 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9a3726435cdad9b9e21619560262a26d9cbff99c | 299 | py | Python | scripts/alan/clean_pycache.py | Pix-00/olea | 98bee1fd8866a3929f685a139255afb7b6813f31 | ["Apache-2.0"] | 2 | 2020-06-18T03:25:52.000Z | 2020-06-18T07:33:45.000Z | scripts/alan/clean_pycache.py | Pix-00/olea | 98bee1fd8866a3929f685a139255afb7b6813f31 | ["Apache-2.0"] | 15 | 2021-01-28T07:11:04.000Z | 2021-05-24T07:11:37.000Z | scripts/alan/clean_pycache.py | Pix-00/olea | 98bee1fd8866a3929f685a139255afb7b6813f31 | ["Apache-2.0"] | null | null | null |
def clean_pycache(dir_, ignores=''):
import shutil
for path in dir_.glob('**/__pycache__'):
if ignores and path.match(ignores):
continue
shutil.rmtree(path)
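# Example (illustrative pattern): clean_pycache(Path('.'), ignores='venv/*')
# skips any __pycache__ sitting directly inside a directory named venv,
# because Path.match() compares the pattern against the tail of the path.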
if __name__ == "__main__":
from pathlib import Path
clean_pycache(Path(__file__).parents[2])
| 19.933333 | 44 | 0.638796 | 36 | 299 | 4.75 | 0.638889 | 0.140351 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004425 | 0.244147 | 299 | 14 | 45 | 21.357143 | 0.752212 | 0 | 0 | 0 | 0 | 0 | 0.073579 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9a4004b98dc117b5e58a273f30a560e340d87721 | 1,345 | py | Python | csv_merge_col.py | adrianpope/VelocityCompression | eb35f586b18890da93a7ad2e287437118c0327a2 | ["BSD-3-Clause"] | null | null | null | csv_merge_col.py | adrianpope/VelocityCompression | eb35f586b18890da93a7ad2e287437118c0327a2 | ["BSD-3-Clause"] | null | null | null | csv_merge_col.py | adrianpope/VelocityCompression | eb35f586b18890da93a7ad2e287437118c0327a2 | ["BSD-3-Clause"] | null | null | null |
import sys
import numpy as np
import pandas as pd
def df_add_keys(df):
ax = df['fof_halo_angmom_x']
ay = df['fof_halo_angmom_y']
az = df['fof_halo_angmom_z']
mag = np.sqrt(ax**2 + ay**2 + az**2)
dx = ax/mag
dy = ay/mag
dz = az/mag
df['fof_halo_angmom_dx'] = dx
df['fof_halo_angmom_dy'] = dy
df['fof_halo_angmom_dz'] = dz
df['fof_halo_angmom_mag'] = mag
mass = df['fof_halo_mass']
df['fof_halo_specific_angmom_mag'] = mag/mass
return df
def df_merge(df1, df1_suffix, df2, df2_suffix):
merged = pd.DataFrame()
kl = df1.keys()
    for k in kl:
k1 = k + '_' + df1_suffix
k2 = k + '_' + df2_suffix
merged[k1] = df1[k]
merged[k2] = df2[k]
return merged
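# Example (hypothetical frames): if df1 and df2 both carry a column 'x',
# df_merge(df1, 'raw', df2, 'cmp') yields columns 'x_raw' and 'x_cmp'
# holding the two versions side by side.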
if __name__ == '__main__':
argv = sys.argv
if len(argv) < 7:
print('USAGE: %s <in1_name> <in1_suffix> <in2_name> <in2_suffix> <out_name> <add_keys>'%argv[0])
sys.exit(-1)
in1_name = argv[1]
in1_suffix = argv[2]
in2_name = argv[3]
in2_suffix = argv[4]
out_name = argv[5]
add_keys = int(argv[6])
in1 = pd.read_csv(in1_name)
in2 = pd.read_csv(in2_name)
if add_keys:
df_add_keys(in1)
df_add_keys(in2)
merged = df_merge(in1, in1_suffix, in2, in2_suffix)
merged.to_csv(out_name)
| 24.907407 | 104 | 0.594052 | 224 | 1,345 | 3.254464 | 0.294643 | 0.061728 | 0.111111 | 0.144033 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043611 | 0.266915 | 1,345 | 53 | 105 | 25.377358 | 0.69574 | 0 | 0 | 0 | 0 | 0.021739 | 0.188848 | 0.020818 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.065217 | 0 | 0.152174 | 0.021739 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9a4099a116dd4efb8f2b5619fb34ffe71a578a58 | 1,845 | py | Python | scripts/check-silknow-urls.py | silknow/crawler | d2632cea9b98ab64a8bca56bc70b34edd3c2de31 | ["Apache-2.0"] | 1 | 2019-04-21T07:09:52.000Z | 2019-04-21T07:09:52.000Z | scripts/check-silknow-urls.py | silknow/crawler | d2632cea9b98ab64a8bca56bc70b34edd3c2de31 | ["Apache-2.0"] | 35 | 2019-01-21T23:53:52.000Z | 2022-02-12T04:28:17.000Z | scripts/check-silknow-urls.py | silknow/crawler | d2632cea9b98ab64a8bca56bc70b34edd3c2de31 | ["Apache-2.0"] | null | null | null |
import argparse
import csv
import os
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', help="Input path of the missing urls CSV file")
parser.add_argument('-o', '--output', help="Output directory where the new CSV files will be stored")
parser.add_argument('-q', '--quiet', action='store_true', help="Do not print the list of missing files")
args = parser.parse_args()
with open(args.input) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
missing_urls_output = os.path.join(args.output, 'silknow-missing-urls.csv')
missing_files_output = os.path.join(args.output, 'silknow-missing-files.csv')
with open(missing_urls_output, mode='w') as missing_url:
missing_url_writer = csv.writer(missing_url, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
with open(missing_files_output, mode='w') as missing_file:
missing_file_writer = csv.writer(missing_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
header = next(csv_reader)
            missing_file_writer.writerow(header)
filepath_cache = []
for row in csv_reader:
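                # row[3] is assumed to hold the image URL; its sixth
                # slash-separated segment (index 5) names the museum folder on disk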
museum = row[3].split('/')[5]
filename = os.path.basename(row[3])
filepath = os.path.normpath(os.path.join(museum, filename))
filepath_cache.append(filepath)
if not os.path.exists(filepath):
missing_file_writer.writerow(row)
if not args.quiet:
print(filepath + ' does not exist in files')
for root, dirs, files in os.walk('./'):
for file in files:
if file.endswith('.jpg'):
filepath = os.path.normpath(os.path.join(root, file))
if filepath not in filepath_cache:
missing_url_writer.writerow([filepath])
if not args.quiet:
print(filepath + ' does not exist in query result')
| 38.4375 | 105 | 0.666667 | 253 | 1,845 | 4.715415 | 0.312253 | 0.040235 | 0.033529 | 0.026823 | 0.283319 | 0.24979 | 0.189438 | 0.135792 | 0.068734 | 0.068734 | 0 | 0.002041 | 0.203252 | 1,845 | 47 | 106 | 39.255319 | 0.809524 | 0 | 0 | 0.055556 | 0 | 0 | 0.156098 | 0.026558 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9a40c18aa2fcf755b162532d605ac1593ac74650 | 2,302 | py | Python | Trabajo 3/auxFunc.py | francaracuel/UGR-GII-CCIA-4-VC-Vision_por_computador-17-18-Practicas | cb801eb5dfc4a8ea0300eae66a3b9bb2943fe8ab | ["Apache-2.0"] | 1 | 2019-01-28T09:43:41.000Z | 2019-01-28T09:43:41.000Z | Trabajo 3/auxFunc.py | francaracuel/UGR-GII-CCIA-4-VC-Vision_por_computador-17-18-Practicas | cb801eb5dfc4a8ea0300eae66a3b9bb2943fe8ab | ["Apache-2.0"] | null | null | null | Trabajo 3/auxFunc.py | francaracuel/UGR-GII-CCIA-4-VC-Vision_por_computador-17-18-Practicas | cb801eb5dfc4a8ea0300eae66a3b9bb2943fe8ab | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 21 11:20:06 2017
@author: NPB
"""
import cv2
import pickle
def loadDictionary(filename):
with open(filename,"rb") as fd:
feat=pickle.load(fd)
return feat["accuracy"],feat["labels"], feat["dictionary"]
def loadAux(filename, flagPatches):
if flagPatches:
with open(filename,"rb") as fd:
feat=pickle.load(fd)
return feat["descriptors"],feat["patches"]
else:
with open(filename,"rb") as fd:
feat=pickle.load(fd)
return feat["descriptors"]
def click_and_draw(event,x,y,flags,param):
global refPt, imagen,FlagEND
# if the left mouse button was clicked, record the starting
# (x, y) coordinates and indicate that cropping is being
# performed
if event == cv2.EVENT_LBUTTONDBLCLK:
FlagEND= False
cv2.destroyWindow("image")
elif event == cv2.EVENT_LBUTTONDOWN:
refPt.append((x, y))
#cropping = True
print("rfePt[0]",refPt[0])
elif (event == cv2.EVENT_MOUSEMOVE) & (len(refPt) > 0) & FlagEND:
# check to see if the mouse move
clone=imagen.copy()
nPt=(x,y)
print("npt",nPt)
sz=len(refPt)
cv2.line(clone,refPt[sz-1],nPt,(0, 255, 0), 2)
cv2.imshow("image", clone)
cv2.waitKey(0)
elif event == cv2.EVENT_RBUTTONDOWN:
# record the ending (x, y) coordinates and indicate that
# the cropping operation is finished
refPt.append((x, y))
#cropping = False
sz=len(refPt)
print("refPt[sz]",sz,refPt[sz-1])
cv2.line(imagen,refPt[sz-2],refPt[sz-1],(0, 255, 0), 2)
cv2.imshow("image", imagen)
cv2.waitKey(0)
def extractRegion(image):
global refPt, imagen,FlagEND
imagen=image.copy()
# load the image and setup the mouse callback function
refPt=[]
FlagEND=True
#image = cv2.imread(filename)
cv2.namedWindow("image")
# keep looping until the 'q' key is pressed
cv2.setMouseCallback("image", click_and_draw)
#
while FlagEND:
# display the image and wait for a keypress
cv2.imshow("image", image)
cv2.waitKey(0)
#
print('FlagEND', FlagEND)
refPt.pop()
refPt.append(refPt[0])
cv2.destroyWindow("image")
return refPt
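# Usage sketch: pts = extractRegion(cv2.imread('scene.jpg')). Add vertices with
# left clicks, commit a segment with a right click, and finish the selection
# with a double left click (which ends the event loop above).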
| 26.45977 | 69 | 0.613814 | 307 | 2,302 | 4.576547 | 0.374593 | 0.008541 | 0.037011 | 0.038434 | 0.241993 | 0.186477 | 0.146619 | 0.118149 | 0.118149 | 0.118149 | 0 | 0.031414 | 0.253258 | 2,302 | 86 | 70 | 26.767442 | 0.785922 | 0.222415 | 0 | 0.320755 | 0 | 0 | 0.068439 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075472 | false | 0 | 0.037736 | 0 | 0.188679 | 0.075472 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |