Schema (113 columns):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
hexsha: 018e37a3271bbe0ac811dfe2f2b0248dd13424ad | size: 5,123 | ext: py | lang: Python
max_stars_repo: path tests/ut/python/dataset_deprecated/test_map.py, name httpsgithu/mindspore, head c29d6bb764e233b427319cb89ba79e420f1e2c64, licenses ["Apache-2.0"] | max_stars_count: 1 | stars events: 2022-02-23T09:13:43.000Z to 2022-02-23T09:13:43.000Z
max_issues_repo: path tests/ut/python/dataset_deprecated/test_map.py, name 949144093/mindspore, head c29d6bb764e233b427319cb89ba79e420f1e2c64, licenses ["Apache-2.0"] | max_issues_count: null | issues events: null to null
max_forks_repo: path tests/ut/python/dataset_deprecated/test_map.py, name 949144093/mindspore, head c29d6bb764e233b427319cb89ba79e420f1e2c64, licenses ["Apache-2.0"] | max_forks_count: null | forks events: null to null
content:
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import pytest
import mindspore.dataset as ds
from mindspore.dataset.transforms import c_transforms
from mindspore.dataset.transforms import py_transforms
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.py_transforms as py_vision
DATA_DIR = "../data/dataset/testPK/data"
def test_map_c_transform_exception():
    """
    Feature: test c error op def
    Description: op defined like c_vision.HWC2CHW
    Expectation: success
    """
    data_set = ds.ImageFolderDataset(DATA_DIR, num_parallel_workers=1, shuffle=True)
    train_image_size = 224
    mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
    std = [0.229 * 255, 0.224 * 255, 0.225 * 255]
    # define map operations
    random_crop_decode_resize_op = c_vision.RandomCropDecodeResize(train_image_size,
                                                                   scale=(0.08, 1.0),
                                                                   ratio=(0.75, 1.333))
    random_horizontal_flip_op = c_vision.RandomHorizontalFlip(prob=0.5)
    normalize_op = c_vision.Normalize(mean=mean, std=std)
    hwc2chw_op = c_vision.HWC2CHW  # exception
    data_set = data_set.map(operations=random_crop_decode_resize_op, input_columns="image", num_parallel_workers=1)
    data_set = data_set.map(operations=random_horizontal_flip_op, input_columns="image", num_parallel_workers=1)
    data_set = data_set.map(operations=normalize_op, input_columns="image", num_parallel_workers=1)
    with pytest.raises(ValueError) as info:
        data_set = data_set.map(operations=hwc2chw_op, input_columns="image", num_parallel_workers=1)
    assert "Parameter operations's element of method map should be a " in str(info.value)
    # compose exception
    with pytest.raises(ValueError) as info:
        c_transforms.Compose([
            c_vision.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
            c_vision.RandomHorizontalFlip,
            c_vision.Normalize(mean=mean, std=std),
            c_vision.HWC2CHW()])
    assert " should be a " in str(info.value)
    # randomapply exception
    with pytest.raises(ValueError) as info:
        c_transforms.RandomApply([
            c_vision.RandomCropDecodeResize,
            c_vision.RandomHorizontalFlip(prob=0.5),
            c_vision.Normalize(mean=mean, std=std),
            c_vision.HWC2CHW()])
    assert " should be a " in str(info.value)
    # randomchoice exception
    with pytest.raises(ValueError) as info:
        c_transforms.RandomChoice([
            c_vision.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
            c_vision.RandomHorizontalFlip(prob=0.5),
            c_vision.Normalize,
            c_vision.HWC2CHW()])
    assert " should be a " in str(info.value)
def test_map_py_transform_exception():
    """
    Feature: test python error op def
    Description: op defined like py_vision.RandomHorizontalFlip
    Expectation: success
    """
    data_set = ds.ImageFolderDataset(DATA_DIR, num_parallel_workers=1, shuffle=True)
    # define map operations
    decode_op = py_vision.Decode()
    random_horizontal_flip_op = py_vision.RandomHorizontalFlip  # exception
    to_tensor_op = py_vision.ToTensor()
    trans = [decode_op, random_horizontal_flip_op, to_tensor_op]
    with pytest.raises(ValueError) as info:
        data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=1)
    assert "Parameter operations's element of method map should be a " in str(info.value)
    # compose exception
    with pytest.raises(ValueError) as info:
        py_transforms.Compose([
            py_vision.Decode,
            py_vision.RandomHorizontalFlip(),
            py_vision.ToTensor()])
    assert " should be a " in str(info.value)
    # randomapply exception
    with pytest.raises(ValueError) as info:
        py_transforms.RandomApply([
            py_vision.Decode(),
            py_vision.RandomHorizontalFlip,
            py_vision.ToTensor()])
    assert " should be a " in str(info.value)
    # randomchoice exception
    with pytest.raises(ValueError) as info:
        py_transforms.RandomChoice([
            py_vision.Decode(),
            py_vision.RandomHorizontalFlip(),
            py_vision.ToTensor])
    assert " should be a " in str(info.value)
if __name__ == '__main__':
    test_map_c_transform_exception()
    test_map_py_transform_exception()
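A note on the failure mode these tests exercise: every expected ValueError comes from passing a transform class (e.g. c_vision.HWC2CHW) where an instance (c_vision.HWC2CHW()) is required. A minimal standalone sketch of that class-vs-instance check, with a hypothetical validator standing in for MindSpore's real argument validation (runs without MindSpore; all names here are illustrative):

```python
# Hypothetical validator illustrating the class-vs-instance distinction;
# MindSpore's actual check lives inside dataset.map() and the
# Compose/RandomApply/RandomChoice wrappers.
import inspect

def validate_operation(op):
    if inspect.isclass(op):
        raise ValueError("Parameter operations's element of method map should be a "
                         "transform instance, got the class %r" % op)
    return op

class HWC2CHW:  # stand-in for c_vision.HWC2CHW
    def __call__(self, img):
        return img

validate_operation(HWC2CHW())    # OK: an instance was passed
try:
    validate_operation(HWC2CHW)  # raises: the class itself was passed
except ValueError as err:
    print(err)
```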
avg_line_length: 40.65873 | max_line_length: 115 | alphanum_fraction: 0.68007
qsc_*_quality_signal values: num_words 659; num_chars 5,123; mean_word_length 5.07739; frac_words_unique 0.233687; frac_chars_top_2grams 0.037657; frac_chars_top_3grams 0.038255; frac_chars_top_4grams 0.062164; frac_chars_dupe_5grams 0.653915; frac_chars_dupe_6grams 0.600717; frac_chars_dupe_7grams 0.590855; frac_chars_dupe_8grams 0.534071; frac_chars_dupe_9grams 0.522116; frac_chars_dupe_10grams 0.485057; frac_chars_replacement_symbols 0; frac_chars_digital 0.027135; frac_chars_whitespace 0.215889; size_file_byte 5,123; num_lines 125; num_chars_line_max 116; num_chars_line_mean 40.984; frac_chars_alphabet 0.805825; frac_chars_comments 0.203396; cate_xml_start 0; frac_lines_dupe_lines 0.44; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.062827; frac_chars_long_word_length 0.006731; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0.106667; codepython cate_ast 1; codepython frac_lines_func_ratio 0.026667; codepython cate_var_zero false; codepython frac_lines_pass 0; codepython frac_lines_import 0.08; codepython frac_lines_simplefunc 0; codepython score_lines_no_logic 0.106667; codepython frac_lines_print 0
unsuffixed qsc_code_*/qsc_codepython_* columns: all 0 (qsc_code_frac_words_unique: null; qsc_code_frac_lines_string_concat: null)
effective: 1 | hits: 0
hexsha: 018ecde16201a4f4c059f4251f120ee69a80438a | size: 7,685 | ext: py | lang: Python
max_stars_repo: path overlays/holo-nixpkgs/hpos-admin/hpos-admin.py, name samrose/holo-nixpkgs, head 057c92fcef9934d1ba2310e77579b78e61271a59, licenses ["MIT"] | max_stars_count: null | stars events: null to null
max_issues_repo: path overlays/holo-nixpkgs/hpos-admin/hpos-admin.py, name samrose/holo-nixpkgs, head 057c92fcef9934d1ba2310e77579b78e61271a59, licenses ["MIT"] | max_issues_count: null | issues events: null to null
max_forks_repo: path overlays/holo-nixpkgs/hpos-admin/hpos-admin.py, name samrose/holo-nixpkgs, head 057c92fcef9934d1ba2310e77579b78e61271a59, licenses ["MIT"] | max_forks_count: null | forks events: null to null
content:
from base64 import b64encode
from flask import Flask, jsonify, request
from functools import reduce
from gevent import subprocess, pywsgi, queue, socket, spawn, lock
from gevent.subprocess import CalledProcessError
from hashlib import sha512
from pathlib import Path
from tempfile import mkstemp
import json
import os
import subprocess
import toml
import requests
import asyncio
import websockets
PROFILES_TOML_PATH = '/etc/nixos/hpos-admin-features.toml'
app = Flask(__name__)
rebuild_queue = queue.PriorityQueue()
state_lock = lock.Semaphore()
def rebuild_worker():
    while True:
        (_, cmd) = rebuild_queue.get()
        rebuild_queue.queue.clear()
        subprocess.run(cmd)
def rebuild(priority, args):
    rebuild_queue.put((priority, ['nixos-rebuild', 'switch'] + args))
def get_state_path():
    hpos_config_file_symlink = os.getenv('HPOS_CONFIG_PATH')
    hpos_config_file = os.path.realpath(hpos_config_file_symlink)
    return hpos_config_file
def get_state_data():
    with open(get_state_path(), 'r') as f:
        return json.loads(f.read())
def cas_hash(data):
    dump = json.dumps(data, separators=(',', ':'), sort_keys=True)
    return b64encode(sha512(dump.encode()).digest()).decode()
@app.route('/config', methods=['GET'])
def get_settings():
    return jsonify(get_state_data()['v1']['settings'])
def replace_file_contents(path, data):
    fd, tmp_path = mkstemp(dir=os.path.dirname(path))
    with open(fd, 'w') as f:
        f.write(data)
    os.rename(tmp_path, path)
@app.route('/config', methods=['PUT'])
def put_settings():
    with state_lock:
        state = get_state_data()
        expected_cas = cas_hash(state['v1']['settings'])
        received_cas = request.headers.get('x-hpos-admin-cas')
        if received_cas != expected_cas:
            app.logger.warning('CAS mismatch: {} != {}'.format(received_cas, expected_cas))
            return '', 409
        state['v1']['settings'] = request.get_json(force=True)
        state_json = json.dumps(state, indent=2)
        try:
            subprocess.run(['hpos-config-is-valid'], check=True, input=state_json, text=True)
        except CalledProcessError:
            return '', 400
        replace_file_contents(get_state_path(), state_json)
        # FIXME: see next FIXME
        # rebuild(priority=5, args=[])
        return '', 200
# Toggling HPOS features
def read_profiles():
    if Path(PROFILES_TOML_PATH).is_file():
        return toml.load(PROFILES_TOML_PATH)
    else:
        return {}
def write_profiles(profiles):
    with open(PROFILES_TOML_PATH, 'w') as f:
        f.write(toml.dumps(profiles))
def set_feature_state(profile, feature, enable=True):
    profiles = read_profiles()
    profiles.update({
        profile: {
            'features': {
                feature: {
                    'enable': enable
                }
            }
        }
    })
    write_profiles(profiles)
    return jsonify({
        'enabled': enable
    })
@app.route('/profiles', methods=['GET'])
def get_profiles():
    return jsonify({
        'profiles': read_profiles()
    })
@app.route('/profiles/<profile>/features/<feature>', methods=['GET'])
def get_feature_state(profile, feature):
    profiles = read_profiles()
    keys = [profile, 'features', feature, 'enable']
    enabled = reduce(lambda d, key: d.get(key) if d else None, keys, profiles) or False
    return jsonify({
        'enabled': enabled
    })
@app.route('/profiles/<profile>/features/<feature>', methods=['PUT'])
def enable_feature(profile, feature):
    return set_feature_state(profile, feature, True)
@app.route('/profiles/<profile>/features/<feature>', methods=['DELETE'])
def disable_feature(profile, feature):
    return set_feature_state(profile, feature, False)
def hosted_happs():
    conductor_config = toml.load('/var/lib/holochain-conductor/conductor-config.toml')
    return [dna for dna in conductor_config['dnas'] if dna['holo-hosted']]
def hosted_instances():
    conductor_config = toml.load('/var/lib/holochain-conductor/conductor-config.toml')
    return [instance for instance in conductor_config['instances'] if instance['holo-hosted']]
async def hc_call(method, params):
    uri = "ws://localhost:42222"
    m = {'jsonrpc': '2.0', 'id': '0', 'method': method, 'params': params}
    data = json.dumps(m, indent=2)
    async with websockets.connect(uri) as websocket:
        await websocket.send(bytes(data, encoding="utf-8"))
        response = await websocket.recv()
        return json.loads(response)
TRAFFIC_NULL_STATE = {'start_date': None, 'total_zome_calls': 0, 'value': []}
def get_traffic_service_logger_call(instance_id):
    response = asyncio.get_event_loop().run_until_complete(hc_call('call', {"instance_id": instance_id, "zome": "service", "function": "get_traffic", "args": {"filter": "DAY"}}))
    if 'result' in response:
        return json.loads(response['result'])['Ok']
    else:
        return TRAFFIC_NULL_STATE
@app.route('/hosted_happs', methods=['GET'])
def get_hosted_happs():
    hosted_happs_list = hosted_happs()
    hosted_instances_list = hosted_instances()
    if len(hosted_happs_list) > 0:
        for hosted_happ in hosted_happs_list:
            if len(hosted_instances_list) > 0:
                num_instances = sum(hosted_happ['id'] in hosted_instance['id'] for hosted_instance in hosted_instances_list)
                hosted_happ['stats'] = {"traffic": get_traffic_service_logger_call(hosted_happ['id'] + "::servicelogger")}
            else:
                num_instances = 0
                hosted_happ['stats'] = {"traffic": TRAFFIC_NULL_STATE}
            hosted_happ['number_instances'] = num_instances
    return jsonify({
        'hosted_happs': hosted_happs_list
    })
def hydra_channel():
    with open('/root/.nix-channels') as f:
        channel_url = f.read()
    return channel_url.split('/')[6]
def hydra_revision():
    channel = hydra_channel()
    eval_url = 'https://hydra.holo.host/jobset/holo-nixpkgs/' + channel + '/latest-eval'
    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/json'
    }
    eval_summary = requests.get(eval_url, headers=headers).json()
    return eval_summary['jobsetevalinputs']['holo-nixpkgs']['revision']
def local_revision():
    try:
        with open('/root/.nix-revision') as f:
            local_revision = f.read()
    except:
        local_revision = 'unversioned'
    return local_revision
def zerotier_info():
    proc = subprocess.run(['zerotier-cli', '-j', 'info'],
                          capture_output=True, check=True)
    return json.loads(proc.stdout)
@app.route('/status', methods=['GET'])
def status():
    return jsonify({
        'holo_nixpkgs': {
            'channel': {
                'name': hydra_channel(),
                'rev': hydra_revision()
            },
            'current_system': {
                'rev': local_revision()
            }
        },
        'zerotier': zerotier_info()
    })
@app.route('/upgrade', methods=['POST'])
def upgrade():
    # FIXME: calling nixos-rebuild fails
    # rebuild(priority=1, args=['--upgrade'])
    return '', 503  # service unavailable
@app.route('/reset', methods=['POST'])
def reset():
    try:
        subprocess.run(['hpos-reset'], check=True)
    except CalledProcessError:
        return '', 500
def unix_socket(path):
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    if os.path.exists(path):
        os.remove(path)
    sock.bind(path)
    sock.listen()
    return sock
if __name__ == '__main__':
    spawn(rebuild_worker)
    pywsgi.WSGIServer(unix_socket('/run/hpos-admin.sock'), app).serve_forever()
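The PUT /config handler rejects writes whose x-hpos-admin-cas header does not match cas_hash of the current settings. A standalone check (cas_hash re-declared from the code above) of why the canonical dump matters: key order must not change the digest, or equivalent settings objects would fail the compare-and-swap test:

```python
from base64 import b64encode
from hashlib import sha512
import json

def cas_hash(data):
    # Canonical JSON: compact separators, sorted keys, then SHA-512 -> base64.
    dump = json.dumps(data, separators=(',', ':'), sort_keys=True)
    return b64encode(sha512(dump.encode()).digest()).decode()

# Illustrative settings payloads, not real HPOS fields.
a = {"name": "holoport", "sshAccess": True}
b = {"sshAccess": True, "name": "holoport"}  # same settings, different key order
assert cas_hash(a) == cas_hash(b)
print(cas_hash(a)[:16] + "...")
```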
avg_line_length: 28.462963 | max_line_length: 180 | alphanum_fraction: 0.645413
qsc_*_quality_signal values: num_words 930; num_chars 7,685; mean_word_length 5.136559; frac_words_unique 0.269892; frac_chars_top_2grams 0.016747; frac_chars_top_3grams 0.013607; frac_chars_top_4grams 0.013398; frac_chars_dupe_5grams 0.114298; frac_chars_dupe_6grams 0.081851; frac_chars_dupe_7grams 0.081851; frac_chars_dupe_8grams 0.05359; frac_chars_dupe_9grams 0.05359; frac_chars_dupe_10grams 0.030144; frac_chars_replacement_symbols 0; frac_chars_digital 0.007935; frac_chars_whitespace 0.212882; size_file_byte 7,685; num_lines 269; num_chars_line_max 181; num_chars_line_mean 28.568773; frac_chars_alphabet 0.781782; frac_chars_comments 0.021861; cate_xml_start 0; frac_lines_dupe_lines 0.116751; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.143675; frac_chars_long_word_length 0.033156; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0.003717; frac_lines_assert 0; codepython cate_ast 1; codepython frac_lines_func_ratio 0.137056; codepython cate_var_zero false; codepython frac_lines_pass 0; codepython frac_lines_import 0.076142; codepython frac_lines_simplefunc 0.030457; codepython score_lines_no_logic 0.35533; codepython frac_lines_print 0
unsuffixed qsc_code_*/qsc_codepython_* columns: all 0 (qsc_code_frac_words_unique: null; qsc_code_frac_lines_string_concat: null)
effective: 1 | hits: 0
hexsha: 0196ed4e4760ab9bf312a9416801f8b71d0a5124 | size: 2,471 | ext: py | lang: Python
max_stars_repo: path downloader/download.py, name inverthermit/sec_edgar_analysis, head ffdf43b30ab53b0a024790757c8ef0c989acf67a, licenses ["MIT"] | max_stars_count: 1 | stars events: 2018-02-03T00:28:53.000Z to 2018-02-03T00:28:53.000Z
max_issues_repo: path downloader/download.py, name inverthermit/sec_edgar_analysis, head ffdf43b30ab53b0a024790757c8ef0c989acf67a, licenses ["MIT"] | max_issues_count: null | issues events: null to null
max_forks_repo: path downloader/download.py, name inverthermit/sec_edgar_analysis, head ffdf43b30ab53b0a024790757c8ef0c989acf67a, licenses ["MIT"] | max_forks_count: null | forks events: null to null
content:
import urllib
import time
from multiprocessing.dummy import Pool as ThreadPool
excelFolder = 'F://SecExcelDownload2/'
compListUrl = 'C://Users/l1111/Desktop/AlphaCapture/downloadFileUrl.txt'
successFile = excelFolder+'/success.txt'
failFile = excelFolder+'/fail.txt'
logFile = excelFolder+'/log.txt'
def getAlreadyDownload():
    lineList = []
    count = 0
    with open(successFile) as f:
        for line in f:
            line = line.strip()
            lineList.append(line)
    return lineList
downloadedList = getAlreadyDownload()
def downloadFile(line):
    compName = line.split(',')[0]
    cik = line.split(',')[1]
    doc = line.split(',')[2]
    url = line.split(',')[3]
    if url in downloadedList:
        return 0
    fileURLOpener = urllib.URLopener()
    try:
        fileURLOpener.retrieve(url, excelFolder+compName+'-'+cik+'-'+doc+'.xlsx')
        with open(successFile, "a") as myfile:
            myfile.write(url+'\n')
    except:
        print('Error: not a xlsx file. Downloading xls file')
        try:
            fileURLOpener.retrieve(url.replace('.xlsx','.xls'), excelFolder+compName+'-'+cik+'-'+doc+'.xls')
            with open(successFile, "a") as myfile:
                myfile.write(url+'\n')
        except:
            print('Error: download failed')
            with open(failFile, "a") as myfile:
                myfile.write(url+'\n')
def slowSingleThread():
    lineList = []
    count = 0
    with open(compListUrl) as f:
        for line in f:
            line = line.strip()
            lineList.append(line)
            # downloadFile(line)
            # break
    # print(len(lineList))
    # total = len(lineList)
    # with open(compListUrl) as f:
    #     for line in f:
    #         line = line.strip()
    #         with open(logFile, "a") as myfile:
    #             myfile.write(str(count)+'/'+str(total)+':'+line+'\n')
    #         count+=1
    #         downloadFile(line)
    for line in lineList:
        downloadFile(line)
def fastMultiThread():
    lineList = []
    count = 0
    with open(compListUrl) as f:
        for line in f:
            line = line.strip()
            lineList.append(line)
    # make the Pool of workers
    pool = ThreadPool(10)
    # open the urls in their own threads
    # and return the results
    results = pool.map(downloadFile, lineList)
    # close the pool and wait for the work to finish
    pool.close()
    pool.join()
fastMultiThread()
# slowSingleThread()
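One portability note: urllib.URLopener is Python 2 API and is gone in Python 3. A hedged sketch of downloadFile's happy path for Python 3, assuming the same comma-separated name,cik,doc,url line format (download_file_py3 is an illustrative name, not part of the original script):

```python
from urllib.request import urlretrieve

def download_file_py3(line, folder='./'):
    # Same field layout as downloadFile above: "compName,cik,doc,url".
    comp_name, cik, doc, url = line.split(',')[:4]
    dest = '%s%s-%s-%s.xlsx' % (folder, comp_name, cik, doc)
    urlretrieve(url, dest)  # raises on failure instead of passing silently
    return dest
```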
avg_line_length: 28.402299 | max_line_length: 108 | alphanum_fraction: 0.583974
qsc_*_quality_signal values: num_words 277; num_chars 2,471; mean_word_length 5.209386; frac_words_unique 0.33574; frac_chars_top_2grams 0.044352; frac_chars_top_3grams 0.031185; frac_chars_top_4grams 0.02772; frac_chars_dupe_5grams 0.295911; frac_chars_dupe_6grams 0.266805; frac_chars_dupe_7grams 0.266805; frac_chars_dupe_8grams 0.250173; frac_chars_dupe_9grams 0.250173; frac_chars_dupe_10grams 0.250173; frac_chars_replacement_symbols 0; frac_chars_digital 0.00905; frac_chars_whitespace 0.2845; size_file_byte 2,471; num_lines 86; num_chars_line_max 109; num_chars_line_mean 28.732558; frac_chars_alphabet 0.807127; frac_chars_comments 0.180089; cate_xml_start 0; frac_lines_dupe_lines 0.433333; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.103534; frac_chars_long_word_length 0.038825; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0; codepython cate_ast 1; codepython frac_lines_func_ratio 0.066667; codepython cate_var_zero false; codepython frac_lines_pass 0; codepython frac_lines_import 0.05; codepython frac_lines_simplefunc 0; codepython score_lines_no_logic 0.15; codepython frac_lines_print 0.033333
unsuffixed qsc_code_*/qsc_codepython_* columns: all 0 (qsc_code_frac_words_unique: null; qsc_code_frac_lines_string_concat: null)
effective: 1 | hits: 0
hexsha: 0197d01c354f66f49415a9ef3d542eb61ea7a772 | size: 20,495 | ext: py | lang: Python
max_stars_repo: path HOST/py/tc_TcpEcho.py, name cloudFPGA/cFp_HelloKale, head 949f8c3005d2824b8bc65345b77ea97bd0b6e692, licenses ["Apache-2.0"] | max_stars_count: null | stars events: null to null
max_issues_repo: path HOST/py/tc_TcpEcho.py, name cloudFPGA/cFp_HelloKale, head 949f8c3005d2824b8bc65345b77ea97bd0b6e692, licenses ["Apache-2.0"] | max_issues_count: 6 | issues events: 2022-01-22T10:04:18.000Z to 2022-02-01T21:28:19.000Z
max_forks_repo: path HOST/py/tc_TcpEcho.py, name cloudFPGA/cFp_HelloKale, head 949f8c3005d2824b8bc65345b77ea97bd0b6e692, licenses ["Apache-2.0"] | max_forks_count: null | forks events: null to null
content:
#/*
# * Copyright 2016 -- 2021 IBM Corporation
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# *
# *****************************************************************************
# * @file : tc_TcpEcho.py
# * @brief : A multi-threaded script to send and receive traffic on the
# *          TCP connection of an FPGA module.
# *
# * System: : cloudFPGA
# * Component : cFp_BringUp/ROLE
# * Language : Python 3
# *
# *****************************************************************************
# ### REQUIRED PYTHON PACKAGES ################################################
import argparse
import datetime
import errno
import filecmp
import socket
import threading
import time
# ### REQUIRED TESTCASE MODULES ###############################################
from tc_utils import *
# ### GLOBAL VARIABLES ########################################################
gEchoRxPath = './echoRx.dat'
gEchoTxPath = './echoTx.dat'
def tcp_tx(sock, message, count, verbose=False):
    """TCP Tx Thread.
    :param sock, the socket to send to.
    :param message, the random string to sent.
    :param count, the number of segments to send.
    :param verbose, enables verbosity.
    :return None"""
    if verbose:
        print("The following message of %d bytes will be sent out %d times:\n Message=%s\n" %
              (len(message), count, message.decode('ascii')))
    # Create a Tx Reference File
    echoTxFile = open(gEchoTxPath, 'w')
    if count <= 1000:
        loop = 0
        while loop < count:
            echoTxFile.write(message.decode('ascii'))
            loop += 1
    # Start Data Transmission
    loop = 0
    startTime = datetime.datetime.now()
    while loop < count:
        try:
            sock.sendall(message)
        finally:
            pass
        loop += 1
    endTime = datetime.datetime.now()
    elapseTime = endTime - startTime
    bandwidth = len(message) * 8 * count * 1.0 / (elapseTime.total_seconds() * 1024 * 1024)
    print("##################################################")
    print("#### TCP TX DONE with bandwidth = %6.1f Mb/s ####" % bandwidth)
    print("##################################################")
    print()
    # Close the Tx Reference File
    echoTxFile.close()
    # Push a few more bytes to force the FPGA to flush its buffers
    try:
        sock.sendall(message)
    finally:
        pass
def tcp_rx(sock, message, count, verbose):
    """TCP Rx Thread.
    :param sock, the socket to receive from.
    :param message, the expected string message to be received.
    :param count, the number of segment to receive.
    :param verbose, enables verbosity.
    :return None"""
    # Create an Rx Test File
    echoRxFile = open(gEchoRxPath, 'w')
    # Start Data Reception
    loop = 0
    rxBytes = 0
    expectedBytes = count*len(message)
    startTime = datetime.datetime.now()
    while rxBytes < expectedBytes:
        try:
            data = sock.recv(expectedBytes - rxBytes)
            rxBytes += len(data)
            if count <= 1000:
                echoRxFile.write(data.decode('ascii'))
        except socket.error as exc:
            print("[EXCEPTION] Socket error while receiving :: %s" % exc)
        else:
            if verbose:
                print("Loop=%d | RxBytes=%d" % (loop, rxBytes))
        loop += 1
    endTime = datetime.datetime.now()
    elapseTime = endTime - startTime
    bandwidth = len(message) * 8 * count * 1.0 / (elapseTime.total_seconds() * 1024 * 1024)
    print("##################################################")
    print("#### TCP RX DONE with bandwidth = %6.1f Mb/s ####" % bandwidth)
    print("##################################################")
    print()
    # Close the Rx Test File
    echoRxFile.close()
def waitUntilSocketPairCanBeReused(ipFpga, portFpga):
    """Check and wait until the a socket pair can be reused.
    [INFO] When a client or a server initiates an active close, then the same destination socket
        (i.e. the same IP address / TCP port number) cannot be re-used immediately because
        of security issues. Therefore, a closed connection must linger in a 'TIME_WAIT' or
        'FIN_WAIT' state for as long as 2xMSL (Maximum Segment Lifetime), which corresponds
        to twice the time a TCP segment might exist in the internet system. The MSL is
        arbitrarily defined to be 2 minutes long.
    :param ipFpga: the IP address of FPGA.
    :param portFpga: the TCP port of the FPGA.
    :return: nothing
    """
    wait = True
    # NETSTAT example: rc = os.system("netstat | grep '10.12.200.163:8803' | grep TIME_WAIT")
    cmdStr = "netstat | grep \'" + str(ipFpga) + ":" + str(portFpga) + "\' | grep \'TIME_WAIT\|FIN_WAIT\' "
    while wait:
        rc = os.system(cmdStr)
        if rc == 0:
            print("[INFO] Cannot reuse this socket as long as it is in the \'TIME_WAIT\' or \'FIN_WAIT\' state.")
            print(" Let's sleep for 5 sec...")
            time.sleep(5)
        else:
            wait = False
def tcp_txrx_loop(sock, message, count, verbose=False):
    """TCP Tx-Rx Single-Thread Loop.
    :param sock The socket to send/receive to/from.
    :param message The message string to sent.
    :param count The number of segments send.
    :param verbose Enables verbosity.
    :return None"""
    if verbose:
        print("[INFO] The following message of %d bytes will be sent out %d times:\n Message=%s\n" %
              (len(message), count, message.decode('ascii')))
    nrErr = 0
    txMssgCnt = 0
    rxMssgCnt = 0
    rxByteCnt = 0
    txStream = ""
    rxStream = ""
    # Init the Tx reference stream
    for i in range(count):
        txStream = txStream + message.decode('ascii')
    startTime = datetime.datetime.now()
    while rxByteCnt < (count * len(message)):
        if txMssgCnt < count:
            # Send a new message
            # ------------------------
            try:
                tcpSock.sendall(message)
                txMssgCnt += 1
            finally:
                pass
        # Receive a segment
        # --------------------
        try:
            data = tcpSock.recv(len(message))
            rxByteCnt += len(data)
            rxMssgCnt += 1
            if verbose:
                print("%d:%s" % (rxMssgCnt, data.decode('ascii')))
        except IOError as e:
            # On non blocking connections - when there are no incoming data, error is going to be
            # raised. Some operating systems will indicate that using AGAIN, and some using
            # WOULDBLOCK error code. We are going to check for both - if one of them - that's
            # expected, means no incoming data, continue as normal. If we got different error code,
            # something happened
            if e.errno != errno.EAGAIN and e.errno != errno.EWOULDBLOCK:
                print('[ERROR] Socket reading error: {}'.format(str(e)))
                exit(1)
            # We just did not receive anything
            continue
        except socket.error as exc:
            # Any other exception
            print("[EXCEPTION] Socket error while receiving :: %s" % exc)
            # exit(1)
        finally:
            pass
        rxStream = rxStream + data.decode('ascii')
    endTime = datetime.datetime.now()
    if verbose:
        print("\n")
    # Compare Tx and Rx stream
    if rxStream != txStream:
        print(" KO | Received stream = %s" % data.decode('ascii'))
        print(" | Expected stream = %s" % rxStream)
        nrErr += 1
    elif verbose:
        print(" OK | Received %d bytes in %d messages." % (rxByteCnt, rxMssgCnt))
    elapseTime = endTime - startTime
    bandwidth = len(message) * 8 * count * 1.0 / (elapseTime.total_seconds() * 1024 * 1024)
    print("[INFO] Transferred a total of %d bytes." % rxByteCnt)
    print("#####################################################")
    print("#### TCP Tx/Rx DONE with bandwidth = %6.1f Mb/s ####" % bandwidth)
    print("#####################################################")
    print()
def tcp_txrx_ramp(sock, message, count, verbose=False):
    """TCP Tx-Rx Single-Thread Ramp.
    :param sock The socket to send/receive to/from.
    :param message The message string to sent.
    :param count The number of segments to send.
    :param verbose Enables verbosity.
    :return None"""
    if verbose:
        print("[INFO] The following message of %d bytes will be sent out incrementally %d times:\n Message=%s\n" %
              (len(message), count, message.decode('ascii')))
    nrErr = 0
    loop = 0
    rxByteCnt = 0
    startTime = datetime.datetime.now()
    while loop < count:
        i = 1
        while i <= len(message):
            subMsg = message[0:i]
            # Send datagram
            # -------------------
            try:
                tcpSock.sendall(subMsg)
            finally:
                pass
            # Receive datagram
            # -------------------
            try:
                data = tcpSock.recv(len(subMsg))
                rxByteCnt += len(data)
                if data == subMsg:
                    if verbose:
                        print("Loop=%d | RxBytes=%d" % (loop, len(data)))
                else:
                    print("Loop=%d | RxBytes=%d" % (loop, len(data)))
                    print(" KO | Received Message=%s" % data.decode('ascii'))
                    print(" | Expecting Message=%s" % subMsg)
                    nrErr += 1
            except IOError as e:
                # On non blocking connections - when there are no incoming data, error is going to be raised
                # Some operating systems will indicate that using AGAIN, and some using WOULDBLOCK error code
                # We are going to check for both - if one of them - that's expected, means no incoming data,
                # continue as normal. If we got different error code - something happened
                if e.errno != errno.EAGAIN and e.errno != errno.EWOULDBLOCK:
                    print('[ERROR] Socket reading error: {}'.format(str(e)))
                    exit(1)
                # We just did not receive anything
                continue
            except socket.error as exc:
                # Any other exception
                print("[EXCEPTION] Socket error while receiving :: %s" % exc)
                # exit(1)
            finally:
                pass
            i += 1
        loop += 1
    endTime = datetime.datetime.now()
    elapseTime = endTime - startTime
    bandwidth = (rxByteCnt * 8 * count * 1.0) / (elapseTime.total_seconds() * 1024 * 1024)
    megaBytes = (rxByteCnt * 1.0) / (1024 * 1024 * 1.0)
    print("[INFO] Transferred a total of %.1f MB." % megaBytes)
    print("#####################################################")
    print("#### TCP Tx/Rx DONE with bandwidth = %6.1f Mb/s ####" % bandwidth)
    print("#####################################################")
    print()
###############################################################################
#                                                                             #
#                                  MAIN                                       #
#                                                                             #
###############################################################################
rc = 0
# STEP-1: Parse the command line strings into Python objects
# -----------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='A script to send/receive TCP data to/from an FPGA module.')
parser.add_argument('-fi', '--fpga_ipv4', type=str, default='',
                    help='The destination IPv4 address of the FPGA (a.k.a image_ip / e.g. 10.12.200.163)')
parser.add_argument('-fp', '--fpga_port', type=int, default=8803,
                    help='The TCP destination port of the FPGA (default is 8803)')
parser.add_argument('-ii', '--inst_id', type=int, default=0,
                    help='The instance ID assigned by the cloudFPGA Resource Manager (range is 1-32)')
parser.add_argument('-lc', '--loop_count', type=int, default=10,
                    help='The number of times to run run the test (default is 10)')
parser.add_argument('-mi', '--mngr_ipv4', type=str, default='10.12.0.132',
                    help='The IP address of the cloudFPGA Resource Manager (default is 10.12.0.132)')
parser.add_argument('-mp', '--mngr_port', type=int, default=8080,
                    help='The TCP port of the cloudFPGA Resource Manager (default is 8080)')
parser.add_argument('-mt', '--multi_threading', action="store_true",
                    help='Enable multi_threading')
parser.add_argument('-sd', '--seed', type=int, default=-1,
                    help='The initial number to seed the pseudo-random number generator.')
parser.add_argument('-sz', '--size', type=int, default=-1,
                    help='The size of the segment to generate.')
parser.add_argument('-un', '--user_name', type=str, default='',
                    help='A user name as used to log in ZYC2 (.e.g \'fab\')')
parser.add_argument('-up', '--user_passwd', type=str, default='',
                    help='The ZYC2 password attached to the user name')
parser.add_argument('-v', '--verbose', action="store_true",
                    help='Enable verbosity')
args = parser.parse_args()
if args.user_name == '' or args.user_passwd == '':
    print("\nWARNING: You must provide a ZYC2 user name and the corresponding password for this script to execute.\n")
    exit(1)
# STEP-2a: Retrieve the IP address of the FPGA module (this will be the SERVER)
# ------------------------------------------------------------------------------
ipFpga = getFpgaIpv4(args)
# STEP-2b: Retrieve the instance Id assigned by the cloudFPGA Resource Manager
# -----------------------------------------------------------------------------
instId = getInstanceId(args)
# STEP-2c: Retrieve the IP address of the cF Resource Manager
# -----------------------------------------------------------------------------
ipResMngr = getResourceManagerIpv4(args)
# STEP-3a: Retrieve the TCP port of the FPGA server
# -----------------------------------------------------------------------------
portFpga = getFpgaPort(args)
# STEP-3b: Retrieve the TCP port of the cloudFPGA Resource Manager
# -----------------------------------------------------------------------------
portResMngr = getResourceManagerPort(args)
# STEP-?: Configure the application registers
# -----------------------------------------------------------------------------
# TODO print("\nNow: Configuring the application registers.")
# TODO tcpEchoPathThruMode = (0x0 << 4)  # See DIAG_CTRL_2 register
# STEP-4: Trigger the FPGA role to restart (i.e. perform SW reset of the role)
# -----------------------------------------------------------------------------
restartApp(instId, ipResMngr, portResMngr, args.user_name, args.user_passwd)
# STEP-5: Ping the FPGA
# -----------------------------------------------------------------------------
pingFpga(ipFpga)
# STEP-6a: Set the FPGA socket association
# -----------------------------------------------------------------------------
tcpDP = 8803  # 8803=0x2263 and 0x6322=25378
fpgaAssociation = (str(ipFpga), tcpDP)
# STEP-6b: Set the HOST socket association (optional)
# Info: Linux selects a source port from an ephemeral port range, which by
#       default is a set to range from 32768 to 61000. You can check it
#       with the command:
#         > cat /proc/sys/net/ipv4/ip_local_port_range
#       If we want to force the source port ourselves, we must use the
#       "bind before connect" trick.
# -----------------------------------------------------------------------------
if 0:
    tcpSP = tcpDP + 49152  # 8803 + 0xC000
    hostAssociation = (ipSaStr, tcpSP)
# STEP-7: Wait until the current socket can be reused
# -----------------------------------------------------------------------------
if 0:
    waitUntilSocketPairCanBeReused(ipFpga, portFpga)
# STEP-8a: Create a TCP/IP socket for the TCP/IP connection
# -----------------------------------------------------------------------------
try:
    tcpSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except Exception as exc:
    print("[EXCEPTION] %s" % exc)
    exit(1)
# Step-8b: Allow this socket to be re-used and disable the Nagle's algorithm
# ----------------------------------------------------------------------------
tcpSock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
tcpSock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
# STEP-8c: Bind before connect (optional).
# This trick enables us to ask the kernel to select a specific source IP and
# source PORT by calling bind() before calling connect().
# -----------------------------------------------------------------------------
if 0:
    try:
        tcpSock.bind(hostAssociation)
        print('Binding the socket address of the HOST to {%s, %d}' % hostAssociation)
    except Exception as exc:
        print("[EXCEPTION] %s" % exc)
        exit(1)
# STEP-9: Connect to the remote FPGA
# -----------------------------------------------------------------------------
try:
    tcpSock.connect(fpgaAssociation)
except Exception as exc:
    print("[EXCEPTION] %s" % exc)
    exit(1)
else:
    print('\nSuccessful connection with socket address of FPGA at {%s, %d} \n' % fpgaAssociation)
# STEP-10: Setup the test
# -------------------------------
print("[INFO] Testcase `%s` is run with:" % (os.path.basename(__file__)))
seed = args.seed
if seed == -1:
    seed = random.randint(0, 100000)
random.seed(seed)
print("\t\t seed = %d" % seed)
size = args.size
if size == -1:
    size = random.randint(1, ZYC2_MSS)
elif size > ZYC2_MSS:
    print('\nERROR: ')
    print("[ERROR] This test-case expects the transfer of segment which are less or equal to MSS (.i.e %d bytes).\n" % ZYC2_MSS)
    exit(1)
print("\t\t size = %d" % size)
count = args.loop_count
print("\t\t loop = %d" % count)
if seed % 1:
    message = str_static_gen(size)
else:
    message = str_rand_gen(size)
verbose = args.verbose
print("[INFO] This testcase is sending traffic from HOST-to-FPGA and back from FPGA-to-HOST.")
if args.multi_threading:
    print("[INFO] This run is executed in multi-threading mode.\n")
    # STEP-11: Create Rx and Tx threads
    # ----------------------------------
    tx_thread = threading.Thread(target=tcp_tx, args=(tcpSock, message, count, args.verbose))
    rx_thread = threading.Thread(target=tcp_rx, args=(tcpSock, message, count, args.verbose))
    # STEP-12: Start the threads
    # ---------------------------
    tx_thread.start()
    rx_thread.start()
    # STEP-13: Wait for threads to terminate
    # ----------------------------------------
    tx_thread.join()
    rx_thread.join()
    # STEP-14: Compare Rx and Tx files
    # ----------------------------------------
    result = filecmp.cmp(gEchoTxPath, gEchoRxPath, shallow=False)
    if not result:
        print("\n[ERROR] Rx file \'%s\' differs from Tx file \'%s\'.\n" % (gEchoRxPath, gEchoTxPath))
        rc = 1
    else:
        os.remove(gEchoRxPath)
        os.remove(gEchoTxPath)
else:
    print("[INFO] The run is executed in single-threading mode.\n")
    # STEP-11: Set the socket in non-blocking mode
    # ----------------------------------------------
    tcpSock.setblocking(False)
    tcpSock.settimeout(5)
    if seed == 0:
        tcp_txrx_ramp(tcpSock, message, count, args.verbose)
    else:
        tcp_txrx_loop(tcpSock, message, count, args.verbose)
# STEP-14: Close socket
# -----------------------
time.sleep(2)
tcpSock.close()
exit(rc)
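All three bandwidth printouts above share one formula: bits moved divided by elapsed seconds, scaled by 1024 * 1024 to report Mb/s. Isolated here as a quick sanity check:

```python
# Standalone restatement of the bandwidth computation used by tcp_tx/tcp_rx.
def bandwidth_mbps(msg_len_bytes, count, elapsed_s):
    return msg_len_bytes * 8 * count * 1.0 / (elapsed_s * 1024 * 1024)

# e.g. 1460-byte segments sent 10,000 times in 2.5 s:
print("%6.1f Mb/s" % bandwidth_mbps(1460, 10000, 2.5))  # ~44.6 Mb/s
```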
avg_line_length: 40.99 | max_line_length: 128 | alphanum_fraction: 0.528178
qsc_*_quality_signal values: num_words 2,308; num_chars 20,495; mean_word_length 4.652946; frac_words_unique 0.225303; frac_chars_top_2grams 0.006053; frac_chars_top_3grams 0.018996; frac_chars_top_4grams 0.00419; frac_chars_dupe_5grams 0.375826; frac_chars_dupe_6grams 0.336065; frac_chars_dupe_7grams 0.288854; frac_chars_dupe_8grams 0.281404; frac_chars_dupe_9grams 0.253003; frac_chars_dupe_10grams 0.240153; frac_chars_replacement_symbols 0; frac_chars_digital 0.01964; frac_chars_whitespace 0.257185; size_file_byte 20,495; num_lines 499; num_chars_line_max 129; num_chars_line_mean 41.072144; frac_chars_alphabet 0.685759; frac_chars_comments 0.362967; cate_xml_start 0; frac_lines_dupe_lines 0.429066; cate_autogen 0; frac_lines_long_string 0.027682; frac_chars_string_length 0.255321; frac_chars_long_word_length 0.034937; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0.002004; frac_lines_assert 0; codepython cate_ast 1; codepython frac_lines_func_ratio 0.017301; codepython cate_var_zero false; codepython frac_lines_pass 0.038062; codepython frac_lines_import 0.027682; codepython frac_lines_simplefunc 0; codepython score_lines_no_logic 0.044983; codepython frac_lines_print 0.186851
unsuffixed qsc_code_*/qsc_codepython_* columns: all 0 (qsc_code_frac_words_unique: null; qsc_code_frac_lines_string_concat: null)
effective: 1 | hits: 0
hexsha: 019ba56645c86cd5f76a825624bb4c712e44806d | size: 967 | ext: py | lang: Python
max_stars_repo: path ymir/command/mir/scm/__init__.py, name Zhang-SJ930104/ymir, head dd6481be6f229ade4cf8fba64ef44a15357430c4, licenses ["Apache-2.0"] | max_stars_count: 64 | stars events: 2021-11-15T03:48:00.000Z to 2022-03-25T07:08:46.000Z
max_issues_repo: path ymir/command/mir/scm/__init__.py, name Zhang-SJ930104/ymir, head dd6481be6f229ade4cf8fba64ef44a15357430c4, licenses ["Apache-2.0"] | max_issues_count: 35 | issues events: 2021-11-23T04:14:35.000Z to 2022-03-26T09:03:43.000Z
max_forks_repo: path ymir/command/mir/scm/__init__.py, name Aryalfrat/ymir, head d4617ed00ef67a77ab4e1944763f608bface4be6, licenses ["Apache-2.0"] | max_forks_count: 57 | forks events: 2021-11-11T10:15:40.000Z to 2022-03-29T07:27:54.000Z
content:
import os
from mir.scm.cmd import CmdScm
from mir.tools.code import MirCode
from mir.tools.errors import MirRuntimeError
def Scm(root_dir: str, scm_executable: str = None) -> CmdScm:
    """Returns SCM instance that corresponds to a repo at the specified
    path.
    Args:
        root_dir (str): path to a root directory of the repo.
        scm_excutable(str): "git".
    Returns:
        mir.scm.cmd.BaseScm: SCM instance.
    """
    if scm_executable not in ["git"]:
        raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS,
                              error_message=f"args error: expected git, not {scm_executable}")
    if not os.path.exists(root_dir):
        os.makedirs(root_dir)
    if not os.path.isdir(root_dir):
        raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS,
                              error_message=f"can not create dir: {root_dir}")
    return CmdScm(root_dir, scm_executable)
avg_line_length: 35.814815 | max_line_length: 94 | alphanum_fraction: 0.646329
qsc_*_quality_signal values: num_words 132; num_chars 967; mean_word_length 4.568182; frac_words_unique 0.386364; frac_chars_top_2grams 0.08126; frac_chars_top_3grams 0.029851; frac_chars_top_4grams 0.096186; frac_chars_dupe_5grams 0.215589; frac_chars_dupe_6grams 0.215589; frac_chars_dupe_7grams 0.215589; frac_chars_dupe_8grams 0.215589; frac_chars_dupe_9grams 0.215589; frac_chars_dupe_10grams 0.215589; frac_chars_replacement_symbols 0; frac_chars_digital 0; frac_chars_whitespace 0.267839; size_file_byte 967; num_lines 26; num_chars_line_max 95; num_chars_line_mean 37.192308; frac_chars_alphabet 0.851695; frac_chars_comments 0.220269; cate_xml_start 0; frac_lines_dupe_lines 0.142857; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.114327; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0; codepython cate_ast 1; codepython frac_lines_func_ratio 0.071429; codepython cate_var_zero false; codepython frac_lines_pass 0; codepython frac_lines_import 0.285714; codepython frac_lines_simplefunc 0; codepython score_lines_no_logic 0.428571; codepython frac_lines_print 0
unsuffixed qsc_code_*/qsc_codepython_* columns: all 0 (qsc_code_frac_words_unique: null; qsc_code_frac_lines_string_concat: null)
effective: 1 | hits: 0
hexsha: 6d67721cacf03f0b19c2edfa7d94b286095c3b16 | size: 724 | ext: py | lang: Python
max_stars_repo: path readability_transformers/features/lf/utils.py, name OneTheta/readability-transformers, head 3c122c98a90c67add8eafad16563b269d5e3124a, licenses ["Apache-2.0"] | max_stars_count: 1 | stars events: 2022-01-26T10:55:59.000Z to 2022-01-26T10:55:59.000Z
max_issues_repo: path readability_transformers/features/lf/utils.py, name OneTheta/readability-transformers, head 3c122c98a90c67add8eafad16563b269d5e3124a, licenses ["Apache-2.0"] | max_issues_count: null | issues events: null to null
max_forks_repo: path readability_transformers/features/lf/utils.py, name OneTheta/readability-transformers, head 3c122c98a90c67add8eafad16563b269d5e3124a, licenses ["Apache-2.0"] | max_forks_count: 2 | forks events: 2021-10-14T22:53:57.000Z to 2022-01-26T10:53:32.000Z
content:
"""
Software: LingFeat - Comprehensive Linguistic Features for Readability Assessment
Page: utils.py
License: CC-BY-SA 4.0
Original Author: Bruce W. Lee (이웅성) @brucewlee
Affiliation 1: LXPER AI, Seoul, South Korea
Affiliation 2: University of Pennsylvania, PA, USA
Contributing Author: -
Affiliation : -
"""
import re
import math
def division(x, y):
try:
result = x/y
except:
result = 0
return result
def nan_check(result):
for key in result:
if math.isnan(float(result[key])):
result[key] = 0
return result
def count_syllables(word:str):
return len(
re.findall('(?!e$)[aeiouy]+', word, re.I) +
re.findall('^[^aeiouy]*e$', word, re.I)
)
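A quick standalone check of count_syllables (re-declared so it runs on its own): the first regex counts vowel groups while refusing to start a match at a trailing 'e', and the second credits one syllable to words that are nothing but consonants plus a final 'e':

```python
import re

def count_syllables(word: str):
    return len(
        re.findall('(?!e$)[aeiouy]+', word, re.I) +
        re.findall('^[^aeiouy]*e$', word, re.I)
    )

assert count_syllables("readability") == 5  # ea, a, i, i, y
assert count_syllables("the") == 1          # consonants plus a final e
```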
avg_line_length: 22.625 | max_line_length: 81 | alphanum_fraction: 0.638122
qsc_*_quality_signal values: num_words 98; num_chars 724; mean_word_length 4.693878; frac_words_unique 0.663265; frac_chars_top_2grams 0.008696; frac_chars_top_3grams 0.056522; frac_chars_top_4grams 0.069565; frac_chars_dupe_5grams 0; frac_chars_dupe_6grams 0; frac_chars_dupe_7grams 0; frac_chars_dupe_8grams 0; frac_chars_dupe_9grams 0; frac_chars_dupe_10grams 0; frac_chars_replacement_symbols 0; frac_chars_digital 0.01087; frac_chars_whitespace 0.237569; size_file_byte 724; num_lines 32; num_chars_line_max 82; num_chars_line_mean 22.625; frac_chars_alphabet 0.822464; frac_chars_comments 0.414365; cate_xml_start 0; frac_lines_dupe_lines 0.111111; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.067146; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0; codepython cate_ast 1; codepython frac_lines_func_ratio 0.166667; codepython cate_var_zero false; codepython frac_lines_pass 0; codepython frac_lines_import 0.111111; codepython frac_lines_simplefunc 0.055556; codepython score_lines_no_logic 0.444444; codepython frac_lines_print 0
unsuffixed qsc_code_*/qsc_codepython_* columns: all 0 (qsc_code_frac_words_unique: null; qsc_code_frac_lines_string_concat: null)
effective: 1 | hits: 0
hexsha: 6d69e794272f0966fc5025bf7ae39b7bd8cdeaea | size: 1,173 | ext: py | lang: Python
max_stars_repo: path src/ex05/bwfilter.py, name satvik007/Scanner_OP, head c146f67e3851cd537d62989842abfee7d34de2c0, licenses ["MIT"] | max_stars_count: null | stars events: null to null
max_issues_repo: path src/ex05/bwfilter.py, name satvik007/Scanner_OP, head c146f67e3851cd537d62989842abfee7d34de2c0, licenses ["MIT"] | max_issues_count: null | issues events: null to null
max_forks_repo: path src/ex05/bwfilter.py, name satvik007/Scanner_OP, head c146f67e3851cd537d62989842abfee7d34de2c0, licenses ["MIT"] | max_forks_count: 1 | forks events: 2021-05-10T10:14:27.000Z to 2021-05-10T10:14:27.000Z
content:
# Usage:
# python bwfilter.py --input=./data/test1.jpg
import cv2
import numpy as np
import argparse
def parse_args():
    parser = argparse.ArgumentParser(add_help=True, description='testing B&W filter')
    required_named = parser.add_argument_group('required named arguments')
    required_named.add_argument('-i', '--input', type=str, help='path of the input image', required=True)
    return parser.parse_args()
def show_img(img):
    cv2.namedWindow("output", cv2.WINDOW_NORMAL)
    cv2.resizeWindow('output', 900, 900)
    cv2.imshow("output", img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def bwfilter(bwimg):
    # blur the image
    bwimg = cv2.GaussianBlur(bwimg, (7, 7), 7)
    cv2.imwrite('blur.png', bwimg)
    bwimg = cv2.bilateralFilter(bwimg, 9, 75, 75)
    cv2.imwrite('bilat.png', bwimg)
    # adaptive threshholding
    bwimg = cv2.adaptiveThreshold(bwimg, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
    cv2.imwrite('adaptth.png', bwimg)
    return bwimg
if __name__ == '__main__':
    args = parse_args()
    img = cv2.imread(args.input, 0)
    bwimg = bwfilter(img)
    show_img(bwimg)
    cv2.imwrite('bwimg.png', bwimg)
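For experimenting without a test image on disk, the same blur / bilateral / adaptive-threshold chain can be run on a synthetic array (a hedged sketch; assumes opencv-python and numpy are installed, and writes nothing to disk):

```python
import cv2
import numpy as np

img = np.full((64, 64), 255, np.uint8)   # white single-channel canvas
cv2.putText(img, 'A', (8, 48), cv2.FONT_HERSHEY_SIMPLEX, 1.5, 0, 2)
img = cv2.GaussianBlur(img, (7, 7), 7)
img = cv2.bilateralFilter(img, 9, 75, 75)
img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                            cv2.THRESH_BINARY, 11, 2)
print(img.shape, img.dtype)              # (64, 64) uint8
```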
avg_line_length: 26.066667 | max_line_length: 105 | alphanum_fraction: 0.6948
qsc_*_quality_signal values: num_words 160; num_chars 1,173; mean_word_length 4.94375; frac_words_unique 0.48125; frac_chars_top_2grams 0.040455; frac_chars_top_3grams 0; frac_chars_top_4grams 0; frac_chars_dupe_5grams 0; frac_chars_dupe_6grams 0; frac_chars_dupe_7grams 0; frac_chars_dupe_8grams 0; frac_chars_dupe_9grams 0; frac_chars_dupe_10grams 0; frac_chars_replacement_symbols 0; frac_chars_digital 0.041026; frac_chars_whitespace 0.168798; size_file_byte 1,173; num_lines 45; num_chars_line_max 106; num_chars_line_mean 26.066667; frac_chars_alphabet 0.770256; frac_chars_comments 0.076726; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.127087; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0; codepython cate_ast 1; codepython frac_lines_func_ratio 0.107143; codepython cate_var_zero false; codepython frac_lines_pass 0; codepython frac_lines_import 0.107143; codepython frac_lines_simplefunc 0; codepython score_lines_no_logic 0.285714; codepython frac_lines_print 0
unsuffixed qsc_code_*/qsc_codepython_* columns: all 0 (qsc_code_frac_words_unique: null; qsc_code_frac_lines_string_concat: null)
effective: 1 | hits: 0
hexsha: 6d6f7df41b42997a8642b881e20683971aa08d5d | size: 1,065 | ext: py | lang: Python
max_stars_repo: path resotolib/test/test_graph_extensions.py, name someengineering/resoto, head ee17313f5376e9797ed305e7fdb62d40139a6608, licenses ["Apache-2.0"] | max_stars_count: 126 | stars events: 2022-01-13T18:22:03.000Z to 2022-03-31T11:03:14.000Z
max_issues_repo: path resotolib/test/test_graph_extensions.py, name someengineering/resoto, head ee17313f5376e9797ed305e7fdb62d40139a6608, licenses ["Apache-2.0"] | max_issues_count: 110 | issues events: 2022-01-13T22:27:55.000Z to 2022-03-30T22:26:50.000Z
max_forks_repo: path resotolib/test/test_graph_extensions.py, name someengineering/resoto, head ee17313f5376e9797ed305e7fdb62d40139a6608, licenses ["Apache-2.0"] | max_forks_count: 8 | forks events: 2022-01-15T10:28:16.000Z to 2022-03-30T16:38:21.000Z
content:
from networkx import DiGraph
from pytest import fixture
from resotolib.graph.graph_extensions import dependent_node_iterator
@fixture
def graph() -> DiGraph:
    g = DiGraph()
    for i in range(1, 14):
        g.add_node(i)
    g.add_edges_from([(1, 2), (1, 3), (2, 3)])  # island 1
    g.add_edges_from([(4, 5), (4, 6), (6, 7)])  # island 2
    g.add_edges_from(
        [(8, 9), (9, 10), (9, 11), (8, 12), (12, 11), (12, 13)]
    )  # island 3
    return g
def test_reversed_directed_traversal(graph: DiGraph):
    result = list(dependent_node_iterator(graph))
    assert len(result) == 3  # 3 steps to complete
    assert result == [
        [3, 5, 7, 10, 11, 13],  # step 1
        [2, 6, 9, 12],  # step 2
        [1, 4, 8],  # step 3
    ]
def test_delete_nodes(graph: DiGraph):
    to_delete = graph.copy()
    for parallel in dependent_node_iterator(graph):
        for node in parallel:
            to_delete.remove_node(node)
    assert len(to_delete.nodes) == 0
def test_empty_graph():
    assert list(dependent_node_iterator(DiGraph())) == []
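The fixture's expected output suggests dependent_node_iterator yields the graph in reverse-dependency layers: all sinks first, then nodes whose successors have already been emitted, and so on. A hedged pure-networkx sketch of that contract (layers_leaves_first is an illustrative name, not resotolib API):

```python
from networkx import DiGraph

def layers_leaves_first(g: DiGraph):
    g = g.copy()
    while len(g.nodes) > 0:
        # Sinks have no outgoing edges; peel them off layer by layer.
        leaves = [n for n in g.nodes if g.out_degree(n) == 0]
        yield leaves
        g.remove_nodes_from(leaves)

g = DiGraph([(1, 2), (1, 3), (2, 3)])  # "island 1" from the fixture
assert list(layers_leaves_first(g)) == [[3], [2], [1]]
```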
avg_line_length: 26.625 | max_line_length: 68 | alphanum_fraction: 0.605634
qsc_*_quality_signal values: num_words 161; num_chars 1,065; mean_word_length 3.838509; frac_words_unique 0.341615; frac_chars_top_2grams 0.084142; frac_chars_top_3grams 0.135922; frac_chars_top_4grams 0.063107; frac_chars_dupe_5grams 0; frac_chars_dupe_6grams 0; frac_chars_dupe_7grams 0; frac_chars_dupe_8grams 0; frac_chars_dupe_9grams 0; frac_chars_dupe_10grams 0; frac_chars_replacement_symbols 0; frac_chars_digital 0.075094; frac_chars_whitespace 0.249765; size_file_byte 1,065; num_lines 39; num_chars_line_max 69; num_chars_line_mean 27.307692; frac_chars_alphabet 0.698373; frac_chars_comments 0.062911; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0.133333; codepython cate_ast 1; codepython frac_lines_func_ratio 0.133333; codepython cate_var_zero false; codepython frac_lines_pass 0; codepython frac_lines_import 0.1; codepython frac_lines_simplefunc 0; codepython score_lines_no_logic 0.266667; codepython frac_lines_print 0
unsuffixed qsc_code_*/qsc_codepython_* columns: all 0 (qsc_code_frac_words_unique: null; qsc_code_frac_lines_string_concat: null)
effective: 1 | hits: 0
hexsha: 6d72243ededbe77bc217b7d87bcc872254da5cff | size: 3,806 | ext: py | lang: Python
max_stars_repo: path cross_sums.py, name minddrive/random_math, head b5dececaf48ec80d8250d0f5fde0485e1b9e73c2, licenses ["MIT"] | max_stars_count: null | stars events: null to null
max_issues_repo: path cross_sums.py, name minddrive/random_math, head b5dececaf48ec80d8250d0f5fde0485e1b9e73c2, licenses ["MIT"] | max_issues_count: null | issues events: null to null
max_forks_repo: path cross_sums.py, name minddrive/random_math, head b5dececaf48ec80d8250d0f5fde0485e1b9e73c2, licenses ["MIT"] | max_forks_count: null | forks events: null to null
content:
#!/usr/bin/env python3.4
import functools
@functools.total_ordering
class CrossSum:
    def __init__(self, digits, total=0, addends=''):
        self._digits = digits
        self._base = len(digits)
        self.total = total
        self.addends = addends
    def _convert_total(self):
        num_total = self.total
        total_digits = []
        while num_total:
            total_digits.append(self._digits[num_total % self._base])
            num_total //= self._base
        return ''.join(total_digits[::-1])
    # This assumes that the new addend is larger than others in the sum
    def add_addend(self, addend):
        d = self._digits.index(addend)
        return CrossSum(self._digits, self.total + d, self.addends + addend)
    def has_total(self, total):
        if isinstance(total, str):
            total_str = total
            total = 0
            for digit in total_str:
                total = total * self._base + self._digits.index(digit)
        return self.total == total
    def has_addends(self, addends):
        if not isinstance(addends, set):
            addends = set(addends)
        return set(self.addends).issuperset(addends)
    @property
    def num_addends(self):
        return len(self.addends)
    @staticmethod
    def _is_valid_operand(other):
        return hasattr(other, 'total') and hasattr(other, 'addends')
    def __eq__(self, other):
        if not self._is_valid_operand(other):
            return NotImplemented
        return (self.total, self.addends) == (other.total, other.addends)
    def __lt__(self, other):
        if not self._is_valid_operand(other):
            return NotImplemented
        return (self.total, self.addends) < (other.total, other.addends)
    def __repr__(self):
        return ("<CrossSum(digits='%s', total='%s', addends='%s'>"
                % (self._digits, self._convert_total(), self.addends))
    def __str__(self):
        total_str = self._convert_total()
        return '%s = %s' % (total_str, ' + '.join(self.addends))
class CrossSums:
    def __init__(self, digits='0123456789', cross_sums=None):
        self._digits = digits
        self._base = len(digits)
        if cross_sums is None:
            cross_sums = [CrossSum(digits)]
            for digit in digits[1:]:
                cross_sums += [cs.add_addend(digit) for cs in cross_sums]
            cross_sums = [cs for cs in cross_sums if cs.num_addends > 1]
        self._cross_sums = sorted(cross_sums)
    def filter(self, total=None, num_addends=None, addends=None):
        cross_sums = self._cross_sums
        if total:
            cross_sums = [cs for cs in cross_sums if cs.has_total(total)]
        if num_addends:
            cross_sums = [cs for cs in cross_sums
                          if cs.num_addends == num_addends]
        if addends:
            addends = set(addends)
            cross_sums = [cs for cs in cross_sums if cs.has_addends(addends)]
        return CrossSums(self._digits, cross_sums)
    @property
    def max_sum(self):
        return self._cross_sums[-1].total
    def __iter__(self):
        return self._cross_sums.__iter__()
    def __len__(self):
        return len(self._cross_sums)
if __name__ == '__main__':
    doz_sums = CrossSums('0123456789XE')
    print('Sums totalling 15:')
    for ds in doz_sums.filter(total='15'):
        print(' ', ds)
    print('\nSums containing addends 3-X inclusive:')
    for ds in doz_sums.filter(addends='3456789X'):
        print(' ', ds)
    print('\nSums containing ten addends:')
    for ds in doz_sums.filter(num_addends=10):
        print(' ', ds)
    print('\nSums totaling 1X with five addends including 2 and 3:')
    for ds in doz_sums.filter(total='1X', num_addends=5, addends='23'):
        print(' ', ds)
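The totals passed to filter() are dozenal strings over the digits '0123456789XE', so '15' means seventeen and '1X' means twenty-two. A standalone restatement of the string-to-int conversion has_total performs:

```python
def to_int(total_str, digits='0123456789XE'):
    # Mirrors CrossSum.has_total: interpret the string in base len(digits).
    total = 0
    for d in total_str:
        total = total * len(digits) + digits.index(d)
    return total

assert to_int('15') == 17   # 1*12 + 5
assert to_int('1X') == 22   # 1*12 + 10
```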
avg_line_length: 27.781022 | max_line_length: 77 | alphanum_fraction: 0.607725
qsc_*_quality_signal values: num_words 486; num_chars 3,806; mean_word_length 4.49177; frac_words_unique 0.197531; frac_chars_top_2grams 0.086578; frac_chars_top_3grams 0.030234; frac_chars_top_4grams 0.027485; frac_chars_dupe_5grams 0.310124; frac_chars_dupe_6grams 0.245534; frac_chars_dupe_7grams 0.22721; frac_chars_dupe_8grams 0.174072; frac_chars_dupe_9grams 0.174072; frac_chars_dupe_10grams 0.174072; frac_chars_replacement_symbols 0; frac_chars_digital 0.017903; frac_chars_whitespace 0.280872; size_file_byte 3,806; num_lines 136; num_chars_line_max 78; num_chars_line_mean 27.985294; frac_chars_alphabet 0.779686; frac_chars_comments 0.023384; cate_xml_start 0; frac_lines_dupe_lines 0.175824; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.071332; frac_chars_long_word_length 0.005922; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0; codepython cate_ast 1; codepython frac_lines_func_ratio 0.175824; codepython cate_var_zero false; codepython frac_lines_pass 0; codepython frac_lines_import 0.010989; codepython frac_lines_simplefunc 0.065934; codepython score_lines_no_logic 0.384615; codepython frac_lines_print 0.087912
unsuffixed qsc_code_*/qsc_codepython_* columns: all 0 (qsc_code_frac_words_unique: null; qsc_code_frac_lines_string_concat: null)
effective: 1 | hits: 0
hexsha: 6d7355fa775ea3bb8dea2a5a98443123ea1e47bf | size: 1,177 | ext: py | lang: Python
max_stars_repo: path functions.py, name XomaDev/asteroid-bot, head 2e0743fc3c51027b54b8f2e9aedf632395fdbc31, licenses ["Apache-2.0"] | max_stars_count: null | stars events: null to null
max_issues_repo: path functions.py, name XomaDev/asteroid-bot, head 2e0743fc3c51027b54b8f2e9aedf632395fdbc31, licenses ["Apache-2.0"] | max_issues_count: 2 | issues events: 2021-05-12T05:37:24.000Z to 2021-06-02T05:39:21.000Z
max_forks_repo: path functions.py, name XomaDev/asteroid-bot, head 2e0743fc3c51027b54b8f2e9aedf632395fdbc31, licenses ["Apache-2.0"] | max_forks_count: 5 | forks events: 2021-05-12T11:39:09.000Z to 2021-10-06T06:49:05.000Z
content:
import base64
import re
def encode(text):
    return base64.b64encode(text.encode("ASCII")).decode()
def enhanceText(text):
    text = text.replace('.', '.', text.count('.')).replace(',', ', ', text.count(','))
    text = " ".join(text.split()).replace(" . ", ". ")
    return text
def stylish_text(text):
    text = text.lower()
    style_text = list('𝗮𝗯𝗰𝗱𝗲𝗳𝗴𝗵𝗶𝗷𝗸𝗹𝗺𝗻𝗼𝗽𝗾𝗿𝘀𝘁𝘂𝘃𝘄𝘅𝘆𝘇')
    normal_text = list('abcdefghijklmnopqrstuvwxyz')
    result = []
    for char in list(text):
        if char in normal_text:
            result.append(style_text[normal_text.index(char)])
        else:
            result.append(char)
    return ''.join(result)
def checkForURLs(string):
    regex = r"(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'\".,<>?«»“”‘’]))"
    url = re.findall(regex, string)
    return [x[0] for x in url]
def replace_special_slash(text):
    characters = '!@#$%^&*()-+?_=,<>/".' + "''"
    new_string = ""
    for i in text:
        if i in characters:
            new_string += '\\'
        new_string += i
    return new_string
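A standalone check of replace_special_slash (re-declared to run on its own): every character in the special set gets a backslash prefix, which looks like MarkdownV2-style escaping:

```python
def replace_special_slash(text):
    characters = '!@#$%^&*()-+?_=,<>/".' + "''"
    new_string = ""
    for i in text:
        if i in characters:
            new_string += '\\'
        new_string += i
    return new_string

assert replace_special_slash("a.b") == "a\\.b"
print(replace_special_slash("hi! (test)"))  # hi\! \(test\)
```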
avg_line_length: 25.586957 | max_line_length: 197 | alphanum_fraction: 0.514019
qsc_*_quality_signal values: num_words 135; num_chars 1,177; mean_word_length 4.4; frac_words_unique 0.407407; frac_chars_top_2grams 0.06734; frac_chars_top_3grams 0.020202; frac_chars_top_4grams 0.020202; frac_chars_dupe_5grams 0.010101; frac_chars_dupe_6grams 0; frac_chars_dupe_7grams 0; frac_chars_dupe_8grams 0; frac_chars_dupe_9grams 0; frac_chars_dupe_10grams 0; frac_chars_replacement_symbols 0; frac_chars_digital 0.013963; frac_chars_whitespace 0.209006; size_file_byte 1,177; num_lines 45; num_chars_line_max 198; num_chars_line_mean 26.155556; frac_chars_alphabet 0.621912; frac_chars_comments 0; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0; frac_lines_long_string 0.032258; frac_chars_string_length 0.2226; frac_chars_long_word_length 0.203908; frac_lines_string_concat 0.032258; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0; codepython cate_ast 1; codepython frac_lines_func_ratio 0.16129; codepython cate_var_zero false; codepython frac_lines_pass 0; codepython frac_lines_import 0.064516; codepython frac_lines_simplefunc 0.032258; codepython score_lines_no_logic 0.387097; codepython frac_lines_print 0
unsuffixed qsc_code_*/qsc_codepython_* columns: all 0 (qsc_code_frac_words_unique: null; qsc_code_frac_lines_string_concat: null)
effective: 1 | hits: 0
hexsha: 6d768c2ea44a94e626129ce1ad7462b5def358ad | size: 1,697 | ext: py | lang: Python
max_stars_repo: path docsrc/source/_static/Practice Problem Solutions/Connecting Python and Excel/xlwings/capm_returns/capm_returns.py, name whoopnip/fin-model-course, head e6c5ae313bba601c4aca0f334818b61cc0393118, licenses ["MIT"] | max_stars_count: 5 | stars events: 2020-08-29T15:28:39.000Z to 2021-12-01T16:53:25.000Z
max_issues_repo: path docsrc/source/_static/Practice Problem Solutions/Connecting Python and Excel/xlwings/capm_returns/capm_returns.py, name whoopnip/fin-model-course, head e6c5ae313bba601c4aca0f334818b61cc0393118, licenses ["MIT"] | max_issues_count: 16 | issues events: 2020-02-26T16:03:47.000Z to 2021-06-15T15:17:37.000Z
max_forks_repo: path docsrc/source/_static/Practice Problem Solutions/Connecting Python and Excel/xlwings/capm_returns/capm_returns.py, name whoopnip/fin-model-course, head e6c5ae313bba601c4aca0f334818b61cc0393118, licenses ["MIT"] | max_forks_count: 3 | forks events: 2021-01-22T19:38:36.000Z to 2021-09-28T08:14:00.000Z
content:
import xlwings as xw
import random
import pandas as pd
@xw.func
@xw.arg('num_periods', numbers=int)
@xw.ret(expand='table')
def n_random_normal(mean, stdev, num_periods, horizontal=False):
    random_values = []
    for i in range(num_periods):
        num = random.normalvariate(mean, stdev)
        if not horizontal:
            num = [num]
        random_values.append(num)
    return random_values
@xw.func
@xw.arg('nper', numbers=int)
@xw.ret(expand='horizontal')
def n_random_uniform(bot, top, nper):
    nums = []
    for i in range(nper):
        num = random.uniform(bot, top)
        nums.append(num)
    return nums
def capm(risk_free, beta, market_ret, epsilon):
    return risk_free + beta * (market_ret - risk_free) + epsilon
def capm_auto_epsilon(risk_free, beta, market_ret, epsilon_stdev):
    epsilon = random.normalvariate(0, epsilon_stdev)
    return capm(risk_free, beta, market_ret, epsilon)
@xw.func
@xw.arg('betas', expand='horizontal')
@xw.arg('epsilon_stdevs', expand='horizontal')
@xw.arg('market_rets', expand='vertical')
@xw.arg('num_assets', numbers=int)
@xw.ret(expand='table', index=False)
def multi_capm(risk_free, betas, market_rets, epsilon_stdevs, num_assets):
    df = pd.DataFrame()
    for i in range(num_assets):
        beta = betas[i]
        epsilon_stdev = epsilon_stdevs[i]
        returns = [capm_auto_epsilon(risk_free, beta, market_ret, epsilon_stdev) for market_ret in market_rets]
        df[f'Asset {i + 1}'] = returns
    return df
@xw.func
@xw.arg('data', pd.DataFrame, expand='table', index=False)
@xw.ret(expand='table')
def correlations(data):
    return data.corr()
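The capm helper above is the standard CAPM line, expected return = risk_free + beta * (market_ret - risk_free), plus a noise term. A standalone check with epsilon fixed at zero for determinism:

```python
def capm(risk_free, beta, market_ret, epsilon=0.0):
    return risk_free + beta * (market_ret - risk_free) + epsilon

# 2% risk-free rate, beta 1.5, 10% market return -> 2% + 1.5 * 8% = 14%
assert abs(capm(0.02, 1.5, 0.10) - 0.14) < 1e-12
```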
avg_line_length: 27.819672 | max_line_length: 112 | alphanum_fraction: 0.659988
qsc_*_quality_signal values: num_words 239; num_chars 1,697; mean_word_length 4.514644; frac_words_unique 0.259414; frac_chars_top_2grams 0.032437; frac_chars_top_3grams 0.055607; frac_chars_top_4grams 0.083411; frac_chars_dupe_5grams 0.281742; frac_chars_dupe_6grams 0.196478; frac_chars_dupe_7grams 0.148285; frac_chars_dupe_8grams 0.088971; frac_chars_dupe_9grams 0.088971; frac_chars_dupe_10grams 0.088971; frac_chars_replacement_symbols 0; frac_chars_digital 0.001493; frac_chars_whitespace 0.210371; size_file_byte 1,697; num_lines 60; num_chars_line_max 113; num_chars_line_mean 28.283333; frac_chars_alphabet 0.803731; frac_chars_comments 0; cate_xml_start 0; frac_lines_dupe_lines 0.12766; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.079462; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0; codepython cate_ast 1; codepython frac_lines_func_ratio 0.12766; codepython cate_var_zero false; codepython frac_lines_pass 0; codepython frac_lines_import 0.06383; codepython frac_lines_simplefunc 0.042553; codepython score_lines_no_logic 0.319149; codepython frac_lines_print 0
unsuffixed qsc_code_*/qsc_codepython_* columns: all 0 (qsc_code_frac_words_unique: null; qsc_code_frac_lines_string_concat: null)
effective: 1 | hits: 0
hexsha: 6d7f27d4b21a33d924da32d2c0841520bdc52d0d | size: 4,635 | ext: py | lang: Python
max_stars_repo: path shapey/utils/customdataset.py, name njw0709/ShapeY, head f2272f799fe779c3e4b3d0d06e88ecde9e4b039c, licenses ["MIT"] | max_stars_count: 1 | stars events: 2022-03-22T17:19:57.000Z to 2022-03-22T17:19:57.000Z
max_issues_repo: path shapey/utils/customdataset.py, name njw0709/ShapeY, head f2272f799fe779c3e4b3d0d06e88ecde9e4b039c, licenses ["MIT"] | max_issues_count: null | issues events: null to null
max_forks_repo: path shapey/utils/customdataset.py, name njw0709/ShapeY, head f2272f799fe779c3e4b3d0d06e88ecde9e4b039c, licenses ["MIT"] | max_forks_count: null | forks events: null to null
content:
import torchvision.datasets as datasets
from torch.utils.data import Dataset
from itertools import combinations
import math
import psutil
class CombinationDataset(Dataset):
def __init__(self, dataset):
self.dataset = dataset
self.comb = list(combinations(dataset, 2))
def __getitem__(self, index):
img1, img2 = self.comb[index]
return img1, img2
def __len__(self):
return len(self.comb)
def cut_dataset(self, index):
self.comb = self.comb[index:]
class ImageFolderWithPaths(datasets.ImageFolder):
"""Custom dataset that includes image file paths. Extends
torchvision.datasets.ImageFolder
"""
# override the __getitem__ method. this is the method that dataloader calls
def __getitem__(self, index):
# this is what ImageFolder normally returns
original_tuple = super(ImageFolderWithPaths, self).__getitem__(index)
# the image file path
path = self.imgs[index][0]
# make a new tuple that includes original and the path
tuple_with_path = original_tuple + (path,)
return tuple_with_path
class FeatureTensorDatasetWithImgName(Dataset):
def __init__(self, feature_tensor, img_name_array):
self.feature_tensor = feature_tensor
self.imgnames = img_name_array
def __getitem__(self, index):
feat = self.feature_tensor[index, :]
imgname = self.imgnames[index]
return imgname, feat
def __len__(self):
return len(self.imgnames)
class PermutationIndexDataset(Dataset):
def __init__(self, datalen):
self.datalen = datalen
def __getitem__(self, index):
idx1 = int(math.floor(index / self.datalen))
idx2 = index % self.datalen
        return idx1, idx2

    def __len__(self):
        # Every (idx1, idx2) pair is reachable, so the dataset has datalen ** 2 items
        return self.datalen ** 2
class OriginalandPostProcessedPairsDataset(Dataset):
def __init__(self, original_feat_dataset, postprocessed_feat_dataset):
self.original = original_feat_dataset
self.postprocessed = postprocessed_feat_dataset
self.datalen = len(self.postprocessed)
def __getitem__(self, index):
idx1 = int(math.floor(index / self.datalen))
idx2 = index % self.datalen
s1 = self.original[idx1]
s2 = self.postprocessed[idx2]
return (idx1, s1), (idx2, s2)
def __len__(self):
return len(self.original) ** 2
class PermutationPairsDataset(Dataset):
def __init__(self, original_feat_dataset, postprocessed=None):
self.original = original_feat_dataset
self.datalen = len(self.original)
self.postprocessed = postprocessed
def __getitem__(self, index):
idx1 = int(math.floor(index / self.datalen))
idx2 = index % self.datalen
s1 = self.original[idx1]
if self.postprocessed is not None:
s2 = self.postprocessed[idx2]
else:
s2 = self.original[idx2]
return (idx1, s1), (idx2, s2)
def __len__(self):
return len(self.original) ** 2
class HDFDataset(Dataset):
def __init__(self, hdfstore, mem_usage=0.85):
self.hdfstore = hdfstore
self.datalen = len(self.hdfstore)
self.pull_data_to_cache(mem_usage)
if not self.all_in_cache:
print("initializing placeholder cache list")
            self.cache_length = int(
                psutil.virtual_memory().available * mem_usage / self.hdfstore[0].nbytes
            )
self.in_cache_idx = [None] * self.cache_length
self.in_cache = [None] * self.cache_length
self.cache_counter = 0
def __getitem__(self, index):
if not self.all_in_cache:
if index in self.in_cache_idx:
return self.in_cache[self.in_cache_idx.index(index)]
else:
self.in_cache_idx[self.cache_counter] = index
data = self.hdfstore[index]
self.in_cache[self.cache_counter] = data
self.cache_counter += 1
self.cache_counter %= self.cache_length
return data
return self.hdfstore[index]
def __len__(self):
return self.datalen
def pull_data_to_cache(self, mem_usage):
single_row = self.hdfstore[0]
if (
psutil.virtual_memory().available * mem_usage
< single_row.nbytes * self.datalen
):
print("Not enough memory to pull data to cache")
self.all_in_cache = False
else:
print("Pulling data to cache")
self.hdfstore = self.hdfstore[:]
self.all_in_cache = True
print("Done pulling data to cache")
| 32.1875
| 82
| 0.63754
| 541
| 4,635
| 5.186691
| 0.203327
| 0.050962
| 0.034925
| 0.047398
| 0.275837
| 0.245902
| 0.175695
| 0.175695
| 0.140057
| 0.140057
| 0
| 0.013103
| 0.275512
| 4,635
| 143
| 83
| 32.412587
| 0.822513
| 0.059763
| 0
| 0.305556
| 0
| 0
| 0.02788
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.185185
| false
| 0
| 0.046296
| 0.046296
| 0.425926
| 0.037037
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6d82b1b31f8d0b6b84847768e733ad87e9b8137d
| 19,761
|
py
|
Python
|
ehlit/writer/dump.py
|
lefta/reflex-prototype
|
9d9a34e222d9782815da529a8e2daa575c7c3eba
|
[
"MIT"
] | 1
|
2019-03-29T14:06:00.000Z
|
2019-03-29T14:06:00.000Z
|
ehlit/writer/dump.py
|
lefta/ehlit-prototype
|
9d9a34e222d9782815da529a8e2daa575c7c3eba
|
[
"MIT"
] | null | null | null |
ehlit/writer/dump.py
|
lefta/ehlit-prototype
|
9d9a34e222d9782815da529a8e2daa575c7c3eba
|
[
"MIT"
] | null | null | null |
# Copyright © 2017-2019 Cedric Legrand
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
from typing import Callable, cast, List, Sequence, Union
from ehlit.parser.c_header import CDefine, CMacroFunction, CAnyType
from ehlit.parser.ast import (
Alias, AnonymousArray, Array, ArrayAccess, Assignment, AST, BoolValue, Cast, Char,
ClassMethod, ClassProperty, CompoundIdentifier, Condition, ControlStructure, DecimalNumber,
Declaration, Dtor, EhClass, EhEnum, EhUnion, EnumField, Expression, ForDoLoop, FunctionCall,
Function, FunctionType, HeapAlloc, HeapDealloc, Identifier, Include, Import, InitializationList,
Namespace, Node, NullValue, Number, Operator, PrefixOperatorValue, ReferenceToType,
ReferenceToValue, Return, Sizeof, Statement, String, Struct, SuffixOperatorValue, SwitchCase,
SwitchCaseBody, SwitchCaseTest, Symbol, TemplatedIdentifier, VariableAssignment,
VariableDeclaration
)
IndentedFnType = Callable[['DumpWriter', Union[Node, str]], None]
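# The `indent` decorator below wraps a dump method so that each call pushes a
# tree-drawing prefix before dumping and pops it afterwards: '├─ ' marks a
# node with following siblings, '└─ ' the last child, and parent levels keep
# '│  ' (or blanks) so deeper lines stay attached to their branch.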
def indent(fn: IndentedFnType) -> Callable[..., None]:
def fn_wrapper(cls: 'DumpWriter', node: Union[Node, str], is_next: bool = True) -> None:
cls.increment_prefix(is_next)
fn(cls, node)
cls.decrement_prefix()
return fn_wrapper
class DumpWriter:
def __init__(self, ast: AST) -> None:
self.prefix: str = ''
logging.debug('')
logging.debug('--- AST ---')
i: int = 0
count: int = len(ast)
self.prev_have_next: bool = count > 1
self.upd_prefix: bool = False
while i < count:
self.print_node(ast[i], i < count - 1)
i += 1
def dump(self, string: str) -> None:
logging.debug('%s%s', self.prefix, string)
def decrement_prefix(self) -> None:
self.prefix = self.prefix[:-3]
self.upd_prefix = False
def increment_prefix(self, is_next: bool) -> None:
if self.upd_prefix:
self.prefix = self.prefix[:-3]
if self.prev_have_next:
self.prefix += '\u2502 '
else:
self.prefix += ' '
self.upd_prefix = False
self.prev_have_next = is_next
if is_next:
self.prefix += '\u251c' + '\u2500 '
else:
self.prefix += '\u2514' + '\u2500 '
self.upd_prefix = True
def print_node(self, node: Node, is_next: bool = True) -> None:
func = getattr(self, 'dump' + type(node).__name__)
func(node, is_next)
def print_node_list(self, string: str, lst: Sequence[Node], is_next: bool = True) -> None:
self.increment_prefix(is_next)
self.dump(string)
i: int = 0
cnt: int = len(lst)
while i < cnt:
self.print_node(lst[i], i < cnt - 1)
i += 1
self.decrement_prefix()
@indent
def print_str(self, s: Union[Node, str]) -> None:
s = cast(str, s)
self.dump(s)
@indent
def dumpInclude(self, inc: Union[Node, str]) -> None:
inc = cast(Include, inc)
self.dump('Include')
self.print_str('Path: {}'.format(inc.lib))
self.print_node_list('Symbols found', inc.syms, False)
@indent
def dumpImport(self, node: Union[Node, str]) -> None:
node = cast(Import, node)
self.dump('Import')
self.print_str('Path: {}'.format(node.lib))
self.print_node_list('Symbols found', node.syms, False)
def dump_declaration(self, decl: Union[Node, str], is_next: bool = True) -> None:
decl = cast(Declaration, decl)
self.print_node(decl.typ_src, decl.sym is not None or is_next)
if decl.sym is not None:
self.print_node(decl.sym, is_next)
def dump_variable_declaration(self, cls_name: str, decl: VariableDeclaration) -> None:
self.dump(cls_name)
if decl.private:
self.print_str('Modifiers: private')
if decl.static:
self.print_str('Modifiers: static')
if decl.assign is not None:
self.dump_declaration(decl)
self.print_node(decl.assign, False)
else:
self.dump_declaration(decl, False)
@indent
def dumpVariableDeclaration(self, decl: Union[Node, str]) -> None:
decl = cast(VariableDeclaration, decl)
self.dump_variable_declaration('VariableDeclaration', decl)
def dump_function(self, cls_name: str, fun: Function) -> None:
self.dump(cls_name)
if fun.body_str is None:
self.print_str('Declaration')
if fun.sym is not None:
self.print_node(fun.sym)
self.dump_qualifiers(fun)
self.print_node(fun.typ, fun.body_str is not None)
if fun.body_str is not None:
self.print_node_list('Body', fun.body, False)
@indent
def dumpFunction(self, fun: Union[Node, str]) -> None:
fun = cast(Function, fun)
self.dump_function('Function', fun)
@indent
def dumpStatement(self, stmt: Union[Node, str]) -> None:
stmt = cast(Statement, stmt)
self.dump('Statement')
self.print_node(stmt.expr, False)
def dumpExpression(self, expr: Union[Node, str], is_next: bool) -> None:
expr = cast(Expression, expr)
self.print_node_list('Expression', expr.contents, is_next)
def dumpInitializationList(self, node: Union[Node, str], is_next: bool) -> None:
node = cast(InitializationList, node)
self.print_node_list('InitializerList', node.contents, is_next)
@indent
def dumpCast(self, node: Union[Node, str]) -> None:
node = cast(Cast, node)
self.dump('Cast')
self.print_node(node.types[0])
self.print_node(node.arg, False)
@indent
def dumpFunctionCall(self, call: Union[Node, str]) -> None:
call = cast(FunctionCall, call)
self.dump('FunctionCall')
self.print_node(call.sym)
if call.cast:
self.increment_prefix(True)
self.dump('Automatic cast')
self.print_node(call.cast, False)
self.decrement_prefix()
self.print_node_list('Arguments', call.args, False)
@indent
def dumpArrayAccess(self, arr: Union[Node, str]) -> None:
arr = cast(ArrayAccess, arr)
self.dump('ArrayAccess')
self.print_node(arr.idx)
self.print_node(arr.child, False)
@indent
def dumpVariableAssignment(self, assign: Union[Node, str]) -> None:
assign = cast(VariableAssignment, assign)
self.dump('VariableAssignment')
self.print_node(assign.var)
self.print_node(assign.assign, False)
@indent
def dumpAssignment(self, assign: Union[Node, str]) -> None:
assign = cast(Assignment, assign)
self.dump('Assignment')
if assign.operator is not None:
self.print_node(assign.operator)
self.print_node(assign.expr, False)
@indent
def dumpControlStructure(self, struct: Union[Node, str]) -> None:
struct = cast(ControlStructure, struct)
self.dump('ControlStructure: ' + struct.name)
if struct.cond is not None:
self.print_node(struct.cond)
self.print_node_list("ControlStructureBody", struct.body, False)
def dumpDoWhileLoop(self, node: Union[Node, str], is_next: bool) -> None:
self.dumpControlStructure(node, is_next)
@indent
def dumpForDoLoop(self, node: Union[Node, str]) -> None:
node = cast(ForDoLoop, node)
self.dump('ControlStructure: ' + node.name)
self.print_node(node.cond)
self.print_node_list("Initializers", node.initializers)
self.print_node_list("Actions", node.actions)
self.print_node_list("ControlStructureBody", node.body, False)
def dumpCondition(self, cond: Union[Node, str], is_next: bool) -> None:
cond = cast(Condition, cond)
self.print_node_list("ConditionBranches", cond.branches, is_next)
@indent
def dumpSwitchCase(self, node: Union[Node, str]) -> None:
node = cast(SwitchCase, node)
self.dump('Case')
self.print_node_list('Tests', node.cases)
self.print_node(node.body, False)
def dumpSwitchCaseTest(self, node: Union[Node, str], is_next: bool) -> None:
node = cast(SwitchCaseTest, node)
if node.test is not None:
self.print_node(node.test, is_next)
else:
self.print_str('default', is_next)
def dumpSwitchCaseBody(self, node: Union[Node, str], _: bool) -> None:
node = cast(SwitchCaseBody, node)
self.print_str('Falls through: ' + ('yes' if node.fallthrough else 'no'))
self.print_node_list('Body', node.body, False)
@indent
def dumpReturn(self, ret: Union[Node, str]) -> None:
ret = cast(Return, ret)
self.dump('Return')
if ret.expr is not None:
self.print_node(ret.expr, False)
def dump_qualifiers(self, node: Union[Symbol, Function]) -> None:
qualifiers: List[str] = []
if node.qualifiers.is_const:
qualifiers.append('const')
if node.qualifiers.is_volatile:
qualifiers.append('volatile')
if node.qualifiers.is_restricted:
qualifiers.append('restrict')
if node.qualifiers.is_inline:
qualifiers.append('inline')
if node.qualifiers.is_private:
qualifiers.append('private')
if len(qualifiers) != 0:
self.print_str('Modifiers: {}'.format(', '.join(qualifiers)))
@indent
def dumpReferenceToType(self, ref: Union[Node, str]) -> None:
ref = cast(ReferenceToType, ref)
self.dump('Reference')
self.dump_qualifiers(ref)
self.print_node(ref.child, False)
@indent
def dumpReferenceToValue(self, ref: Union[Node, str]) -> None:
ref = cast(ReferenceToValue, ref)
self.dump('Reference')
self.print_node(ref.child, False)
@indent
def dumpOperator(self, op: Union[Node, str]) -> None:
op = cast(Operator, op)
self.dump('Operator: ' + op.op)
@indent
def dumpArray(self, arr: Union[Node, str]) -> None:
arr = cast(Array, arr)
self.dump('Array')
if arr.length is not None:
self.print_str('Sub-type:')
self.increment_prefix(True)
self.print_node(arr.child, False)
if arr.length is not None:
self.decrement_prefix()
self.print_str('Length:', False)
self.increment_prefix(False)
self.print_node(arr.length, False)
self.decrement_prefix()
@indent
def dumpFunctionType(self, node: Union[Node, str]) -> None:
node = cast(FunctionType, node)
self.dump('FunctionType')
self.print_node(node.ret, len(node.args) != 0 or node.is_variadic)
if len(node.args) != 0:
self.print_node_list('Arguments:', node.args, node.is_variadic)
if node.is_variadic:
if node.variadic_type is None:
self.print_str('Variadic (C)', False)
else:
self.print_str('Variadic:', False)
self.increment_prefix(False)
self.print_node(node.variadic_type, False)
self.decrement_prefix()
def dumpCompoundIdentifier(self, node: Union[Node, str], is_next: bool) -> None:
node = cast(CompoundIdentifier, node)
self.increment_prefix(is_next)
self.dump('CompoundIdentifier')
self.dump_qualifiers(node)
i = 0
while i < len(node.elems):
self.print_node(node.elems[i], i < len(node.elems) - 1)
i += 1
self.decrement_prefix()
@indent
def dumpIdentifier(self, node: Union[Node, str]) -> None:
node = cast(Identifier, node)
self.dump('Identifier: ' + node.name)
@indent
def dumpTemplatedIdentifier(self, node: Union[Node, str]) -> None:
node = cast(TemplatedIdentifier, node)
self.dump('TemplatedIdentifier: ' + node.name)
self.print_node_list('Types', node.types, False)
@indent
def dumpHeapAlloc(self, node: Union[Node, str]) -> None:
node = cast(HeapAlloc, node)
self.dump('HeapAlloc')
self.print_node(node.sym)
self.print_node_list('Arguments', node.args, False)
@indent
def dumpHeapDealloc(self, node: Union[Node, str]) -> None:
node = cast(HeapDealloc, node)
self.dump('HeapDealloc')
self.print_node(node.sym)
@indent
def dumpNumber(self, num: Union[Node, str]) -> None:
num = cast(Number, num)
self.dump('Number: ' + num.num)
@indent
def dumpDecimalNumber(self, node: Union[Node, str]) -> None:
node = cast(DecimalNumber, node)
self.dump('DecimalNumber: ' + node.num)
@indent
def dumpChar(self, char: Union[Node, str]) -> None:
char = cast(Char, char)
self.dump('Character: ' + char.char)
@indent
def dumpString(self, string: Union[Node, str]) -> None:
string = cast(String, string)
self.dump('String: ' + string.string)
@indent
def dumpNullValue(self, stmt: Union[Node, str]) -> None:
stmt = cast(NullValue, stmt)
self.dump('NullValue')
@indent
def dumpBoolValue(self, node: Union[Node, str]) -> None:
node = cast(BoolValue, node)
        self.dump('BoolValue: ' + ('true' if node.val is True else 'false'))
@indent
def dumpPrefixOperatorValue(self, val: Union[Node, str]) -> None:
val = cast(PrefixOperatorValue, val)
self.dump('PrefixOperatorValue')
self.print_str('Operator: %s' % val.op)
self.print_node(val.val, False)
@indent
def dumpSuffixOperatorValue(self, val: Union[Node, str]) -> None:
val = cast(SuffixOperatorValue, val)
self.dump('SuffixOperatorValue')
self.print_str('Operator: %s' % val.op)
self.print_node(val.val, False)
@indent
def dumpAnonymousArray(self, node: Union[Node, str]) -> None:
node = cast(AnonymousArray, node)
self.dump('AnonymousArray')
self.print_node_list('Contents:', node.contents, False)
@indent
def dumpSizeof(self, node: Union[Node, str]) -> None:
node = cast(Sizeof, node)
self.dump('Sizeof')
self.print_node(node.sz_typ, False)
@indent
def dumpAlias(self, node: Union[Node, str]) -> None:
node = cast(Alias, node)
self.dump('Alias')
self.print_str('From:')
self.increment_prefix(True)
self.print_node(node.src_sym, False)
self.decrement_prefix()
self.print_str('To:', False)
self.increment_prefix(False)
self.print_node(node.dst, False)
self.decrement_prefix()
@indent
def dumpStruct(self, node: Union[Node, str]) -> None:
node = cast(Struct, node)
self.dump('Struct')
self.print_node(node.sym)
if node.fields is None:
self.print_str('Forward declaration', False)
else:
self.print_node_list('Fields', node.fields, False)
@indent
def dumpEhUnion(self, node: Union[Node, str]) -> None:
node = cast(EhUnion, node)
self.dump('Union')
self.print_node(node.sym)
if node.fields is None:
self.print_str('Forward declaration', False)
else:
self.print_node_list('Fields', node.fields, False)
@indent
def dumpClassMethod(self, node: Union[Node, str]) -> None:
node = cast(ClassMethod, node)
self.dump_function('ClassMethod', node)
@indent
def dumpCtor(self, node: Union[Node, str]) -> None:
node = cast(ClassMethod, node)
self.dump('Constructor')
self.dump_qualifiers(node)
assert isinstance(node.typ, FunctionType)
if len(node.typ.args) != 0:
self.print_node_list('Arguments:', node.typ.args)
if node.typ.is_variadic:
self.print_str('Variadic:')
self.increment_prefix(False)
assert node.typ.variadic_type is not None
self.print_node(node.typ.variadic_type, False)
self.decrement_prefix()
self.print_node_list('Body', node.body, False)
@indent
def dumpDtor(self, node: Union[Node, str]) -> None:
node = cast(Dtor, node)
self.dump('Destructor')
self.dump_qualifiers(node)
self.print_node_list('Body', node.body, False)
@indent
def dumpClassProperty(self, node: Union[Node, str]) -> None:
node = cast(ClassProperty, node)
self.dump_variable_declaration('ClassProperty', node)
@indent
def dumpEhClass(self, node: Union[Node, str]) -> None:
node = cast(EhClass, node)
self.dump('Class')
self.print_node(node.sym)
if node.contents is None:
self.print_str('Forward declaration', False)
else:
self.print_node_list('Properties', node.properties)
self.print_node_list('Methods', node.methods, len(node.ctors) != 0)
for ctor in node.ctors:
self.print_node(ctor, ctor != node.ctors[-1] or node.dtor is not None)
if node.dtor is not None:
self.print_node(node.dtor, False)
@indent
def dumpEhEnum(self, node: Union[Node, str]) -> None:
node = cast(EhEnum, node)
self.dump('Enum')
self.print_node(node.sym)
if node.fields is None:
self.print_str('Forward declaration', False)
else:
self.print_node_list('Fields', node.fields, False)
@indent
def dumpEnumField(self, node: Union[Node, str]) -> None:
node = cast(EnumField, node)
self.dump(node.name)
@indent
def dumpNamespace(self, node: Union[Node, str]) -> None:
node = cast(Namespace, node)
self.dump('Namespace')
self.print_node(node.sym)
self.print_node_list('Contents', node.contents, False)
@indent
def dumpCDefine(self, node: Union[Node, str]) -> None:
node = cast(CDefine, node)
self.dump('C define')
if node.sym is not None:
self.print_node(node.sym, False)
@indent
def dumpCMacroFunction(self, node: Union[Node, str]) -> None:
node = cast(CMacroFunction, node)
self.dump('C function macro')
if node.sym is not None:
self.print_node(node.sym)
assert isinstance(node.typ, FunctionType)
self.print_str('Arg count: {}'.format(len(node.typ.args)), False)
@indent
def dumpCAnyType(self, node: Union[Node, str]) -> None:
node = cast(CAnyType, node)
self.dump('No type')
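# --- Illustrative only: the writer is driven by constructing it with a parsed
# AST; output goes through `logging.debug`, producing a tree along these
# (hypothetical) lines:
#   --- AST ---
#   ├─ Function
#   │  ├─ Identifier: main
#   │  └─ FunctionType
#   └─ ...
# Enable it with `logging.basicConfig(level=logging.DEBUG)` before calling
# `DumpWriter(ast)`.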
| 36.730483
| 100
| 0.621325
| 2,420
| 19,761
| 4.976446
| 0.142149
| 0.070996
| 0.078801
| 0.0651
| 0.362036
| 0.309474
| 0.269368
| 0.229428
| 0.10612
| 0.090924
| 0
| 0.003137
| 0.257983
| 19,761
| 537
| 101
| 36.798883
| 0.818114
| 0.055058
| 0
| 0.286996
| 0
| 0
| 0.06128
| 0
| 0.004484
| 0
| 0
| 0
| 0.006726
| 1
| 0.150224
| false
| 0.002242
| 0.017937
| 0
| 0.172646
| 0.219731
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6d833ac6a830a6bbcae3005bb5acb8b96a7801b5
| 1,859
|
py
|
Python
|
tutorials/04_Tutorial_Boolean.py
|
lmidolo/samplemaker
|
8211af0e4cea60aea8f5720d5ff0ee532c442123
|
[
"BSD-3-Clause"
] | null | null | null |
tutorials/04_Tutorial_Boolean.py
|
lmidolo/samplemaker
|
8211af0e4cea60aea8f5720d5ff0ee532c442123
|
[
"BSD-3-Clause"
] | null | null | null |
tutorials/04_Tutorial_Boolean.py
|
lmidolo/samplemaker
|
8211af0e4cea60aea8f5720d5ff0ee532c442123
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
04_Tutorial_Boolean
"""
# In this tutorial we learn how to do boolean operations between groups of
# polygons
# Let's import basic stuff
import samplemaker.layout as smlay # used for layout
import samplemaker.makers as sm # used for drawing
# Create a simple mask layout
themask = smlay.Mask("04_Tutorial_Boolean")
# Empty geometry
geomE = sm.GeomGroup()
# Let's make a large box
box0 = sm.make_rect(0,0,100,100,layer=1)
# And some text, because text is complex polygons!
text0 = sm.make_text(0, 0, "DIFF", 10, 2,angle=30,to_poly=True,layer=1)
# Let's take the boolean difference box-text
bdiff = box0.copy() # Note that boolean operations alter the original element so we need to make a copy first
bdiff.boolean_difference(text0, 1, 1)
# The first integer is the layer from which you should subtract and the second is the subtracted layer
# Now bdiff is box-text
geomE+=bdiff
# Now let's try intersection (AND operation)
# Let's use two overlapping texts, slightly larger
text1 = sm.make_text(0,0,"DIFF",11,3,angle=30,to_poly=True,layer=1)
text1.boolean_intersection(text0, 1, 1)
text1.translate(100, 0)
geomE+=text1
# XOR is also quite useful: it only keeps parts that are not in both
text2 = sm.make_text(50,0,"XOR",10,1,angle=0,to_poly=True,layer=1)
text2.boolean_xor(box0, 1, 1)
text2.translate(200, 0)
geomE+=text2
# Trapezoid slicing, useful for some e-beam export
trapz = text2.copy()
trapz.trapezoids(1)
trapz.translate(150, 0)
geomE+=trapz
# Union: we can re-unite all trapezoids from the previous step
uni1 = trapz.copy()
uni1.boolean_union(1)
uni1.translate(150, 0)
geomE+=uni1
# Just for fun, outlining the last result
out1 = uni1.copy()
out1.poly_outlining(1, 1)
out1.translate(150, 0)
geomE+=out1
# Let's add all to main cell
themask.addToMainCell(geomE)
# Export to GDS
themask.exportGDS()
# Finished!
| 26.557143
| 109
| 0.743948
| 319
| 1,859
| 4.285266
| 0.438871
| 0.017557
| 0.021946
| 0.032919
| 0.068764
| 0.057059
| 0.03365
| 0
| 0
| 0
| 0
| 0.057935
| 0.145777
| 1,859
| 70
| 110
| 26.557143
| 0.802897
| 0.486821
| 0
| 0
| 0
| 0
| 0.032432
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.064516
| 0
| 0.064516
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6d8a66fbf9d684f0b5b7c285749ed54196898dec
| 1,211
|
py
|
Python
|
day6.py
|
aslttml/30days-of-code
|
be6c894f8df4913413b7e6d9a6b0585e5884d35d
|
[
"MIT"
] | null | null | null |
day6.py
|
aslttml/30days-of-code
|
be6c894f8df4913413b7e6d9a6b0585e5884d35d
|
[
"MIT"
] | null | null | null |
day6.py
|
aslttml/30days-of-code
|
be6c894f8df4913413b7e6d9a6b0585e5884d35d
|
[
"MIT"
] | null | null | null |
#!/bin/python3
import math
import os
import random
import re
import sys
import string
if __name__ == '__main__':
    try:
        t = int(input().strip())
    except ValueError:
        sys.exit('Invalid input.')
if t>=1 and t<=10:
for a0 in range(t):
s = input().strip()
index = 0
if len(s)>=2 and len(s)<=10000:
while index<len(s):
#Loop should quit when it reaches the last character, which has an index of (length-1)
if index<2:
odd = s[index]
even = s[index + 1]
elif index>=2:
odd = odd + s[index]
#If string length is an odd number loop should stop at the even index
#Trying to add another character will give an IndexError
if index<len(s)-1:
even = even + s[index + 1]
index = index + 2
print(odd + ' ' + even)
else:
print('Constraint error. String is either too long or too short.')
a0 = a0 + 1
else:
print('Constraint error.')
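# --- Illustrative only: the index bookkeeping above can be replaced by two
# slices; s[::2] takes the even-indexed characters and s[1::2] the odd-indexed
# ones. A minimal sketch:
#
#     s = 'Hacker'
#     print(s[::2] + ' ' + s[1::2])   # -> 'Hce akr'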
| 31.051282
| 102
| 0.456647
| 145
| 1,211
| 3.758621
| 0.482759
| 0.029358
| 0.033028
| 0.040367
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033283
| 0.45417
| 1,211
| 38
| 103
| 31.868421
| 0.791225
| 0.182494
| 0
| 0.064516
| 0
| 0
| 0.098477
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.193548
| 0
| 0.193548
| 0.129032
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6d8cd75b30233cb95ea2e1005dd56109d735bde6
| 2,377
|
py
|
Python
|
FindAndReplaceByProjectWithExclusions.py
|
Zlatov/FindAndReplaceByProjectWithExclusions
|
f1209696d960bd1471420ed18f4e71e03b3df1b5
|
[
"MIT"
] | null | null | null |
FindAndReplaceByProjectWithExclusions.py
|
Zlatov/FindAndReplaceByProjectWithExclusions
|
f1209696d960bd1471420ed18f4e71e03b3df1b5
|
[
"MIT"
] | null | null | null |
FindAndReplaceByProjectWithExclusions.py
|
Zlatov/FindAndReplaceByProjectWithExclusions
|
f1209696d960bd1471420ed18f4e71e03b3df1b5
|
[
"MIT"
] | null | null | null |
import sublime, sublime_plugin
import os
import json
class FindAndReplaceByProjectWithExclusions(sublime_plugin.TextCommand):
print('reloading FindAndReplaceByProjectWithExclusions')
def run(self, edit, from_current_file_path=None):
        # Current Sublime window
window = self.view.window()
        # The window holds a project; grab its settings
dict_project = window.project_data()
        # Determine exclusions by safely extracting the setting of interest
        # from the nested dict via get().
exclusions_list = dict_project.get('settings', {}).get("find_and_replace_by_project_with_exclusions")
exclusions = None
if exclusions_list is not None:
exclusions = ', '.join('-' + exclusion for exclusion in exclusions_list)
        # Determine project_path: the first path among the folders attached
        # in the project file.
sublime_project_file_path = window.project_file_name()
is_project = sublime_project_file_path is not None
project_path = None
if is_project and dict_project is not None and 'folders' in dict_project and dict_project['folders'][0] is not None:
relative_first_folder_path = dict_project['folders'][0]['path']
if relative_first_folder_path == '.' or relative_first_folder_path == './':
relative_first_folder_path = ''
project_path = os.path.join(os.path.dirname(sublime_project_file_path), relative_first_folder_path)
        # Determine dir_path: the directory of the currently open file
        # (if one is open).
dir_path = None
file_path = self.view.file_name()
if file_path is not None:
dir_path = os.path.dirname(file_path)
        # Business logic
        # Derive the search path from the inputs gathered above
search_path = ""
if from_current_file_path == True and dir_path is not None:
search_path = dir_path
elif is_project and project_path is not None:
search_path = project_path
elif is_project:
search_path = "<project>"
        # Append the exclusions to the search path
where_string = search_path
if exclusions is not None:
where_string = search_path + ", " + exclusions
        # Arguments for opening the panel
panel_args = {
"panel": "find_in_files",
"regex": False,
"where": where_string
}
        # Show the panel configured via panel_args
self.view.window().run_command(
"show_panel",
panel_args
)
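# --- Illustrative only: the plugin reads its exclusion list from the project
# settings; a hypothetical .sublime-project might contain:
#
#   {
#       "folders": [{"path": "."}],
#       "settings": {
#           "find_and_replace_by_project_with_exclusions": ["node_modules/", "*.min.js"]
#       }
#   }
#
# Each entry is prefixed with '-' and appended to the "Where" field of the
# Find in Files panel.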
| 34.449275
| 120
| 0.715187
| 304
| 2,377
| 5.315789
| 0.361842
| 0.039604
| 0.044554
| 0.071163
| 0.082921
| 0.028465
| 0
| 0
| 0
| 0
| 0
| 0.00106
| 0.206563
| 2,377
| 68
| 121
| 34.955882
| 0.855779
| 0.207404
| 0
| 0
| 0
| 0
| 0.095187
| 0.042781
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023256
| false
| 0
| 0.069767
| 0
| 0.116279
| 0.023256
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6d8df2d3b1dcbda991e01aea09990e08fc942cf3
| 1,566
|
py
|
Python
|
schnell-nautilus.py
|
umangrajpara99/OpenInSchnell
|
5d48be8741f130471c892f1e77f19b9dad70a882
|
[
"MIT"
] | null | null | null |
schnell-nautilus.py
|
umangrajpara99/OpenInSchnell
|
5d48be8741f130471c892f1e77f19b9dad70a882
|
[
"MIT"
] | null | null | null |
schnell-nautilus.py
|
umangrajpara99/OpenInSchnell
|
5d48be8741f130471c892f1e77f19b9dad70a882
|
[
"MIT"
] | null | null | null |
# Schnell Nautilus Extension
#
# Place me in ~/.local/share/nautilus-python/extensions/,
# ensure you have the python-nautilus package, restart Nautilus, and enjoy :)
from gi import require_version
require_version('Gtk', '3.0')
require_version('Nautilus', '3.0')
from gi.repository import Nautilus, GObject
from subprocess import call
import os
# path to schnell
schnell = 'schnell'
# what name do you want to see in the context menu?
schnellname = 'Schnell'
# always create new window?
NEWWINDOW = False
class SchnellExtension(GObject.GObject, Nautilus.MenuProvider):
def schnellname(self, menu, files):
safepaths = ''
for file in files:
filepath = file.get_location().get_path()
safepaths += '"' + filepath + '" '
        # Launch schnell in the background with all selected paths
        # (quoted so paths with spaces survive the shell)
call(schnell + ' ' + safepaths + '&', shell=True)
def get_file_items(self, window, files):
item = Nautilus.MenuItem(
name='SchnellOpen',
label='Open In ' + schnellname,
tip='Opens the selected files with Schnell'
)
item.connect('activate', self.schnellname, files)
return [item]
def get_background_items(self, window, file_):
item = Nautilus.MenuItem(
name='SchnellOpenBackground',
label='Open in ' + schnellname,
tip='Opens Schnell in the current directory'
)
item.connect('activate', self.schnellname, [file_])
return [item]
| 27.473684
| 74
| 0.637292
| 182
| 1,566
| 5.423077
| 0.483516
| 0.042553
| 0.030395
| 0.048632
| 0.129686
| 0.06079
| 0
| 0
| 0
| 0
| 0
| 0.00346
| 0.261814
| 1,566
| 56
| 75
| 27.964286
| 0.850346
| 0.213282
| 0
| 0.125
| 0
| 0
| 0.143208
| 0.017185
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09375
| false
| 0
| 0.125
| 0
| 0.3125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6d9062da7cb5be608d72b13c37aef7c0131a8035
| 1,891
|
py
|
Python
|
Cogs/StaticMethods.py
|
pajratbej/hetman
|
1da634cdb94221bb81ceb0c29467cccce640bbb6
|
[
"MIT"
] | 2
|
2019-12-19T17:11:29.000Z
|
2020-02-22T17:55:13.000Z
|
Cogs/StaticMethods.py
|
pajratbej/hetman
|
1da634cdb94221bb81ceb0c29467cccce640bbb6
|
[
"MIT"
] | 5
|
2019-12-08T21:42:12.000Z
|
2022-03-11T23:58:29.000Z
|
Cogs/StaticMethods.py
|
pajratbej/hetman
|
1da634cdb94221bb81ceb0c29467cccce640bbb6
|
[
"MIT"
] | null | null | null |
from pymongo import MongoClient
import random as r
import os
client = MongoClient(os.environ["MONGO_LAB"])
db = client.get_database("hetmanbot")
collection = db['data_base']
class StaticMethods():
@staticmethod
def push_record(name, txt, number):
records = collection.find_one({"document_id": number})
        # Check every stored quote for a duplicate before inserting
        for i in records[name]:
            if txt[10:] == str(list(i.values()))[2:-2]:
                return "Ten cytat już istnieje"  # "This quote already exists"
        size = len(records[name])
        collection.update({"document_id": number}, {'$push': {name: {str(size): txt[10:]}}})
        return "Dodano nowy cytat"  # "Added a new quote"
@staticmethod
def get_random_record(name, number):
        records = collection.find_one({"document_id": number})
        return str(list(r.choice(records[name]).values()))
@staticmethod
def get_specific_record(name, number, r_number):
records = collection.find_one({"document_id": number})
return str(list(records[name][r_number].values()))
@staticmethod
def number_of_quotes(name, number):
records = collection.find_one({"document_id": number})
size = len(records[name])
return size
@staticmethod
def replace(string):
collection.update({"document_id": 3}, {'$set' : {"plan": string}})
@staticmethod
def getPlan():
record = collection.find_one({"document_id": 3})
return record["plan"]
@staticmethod
def getGame():
record = collection.find_one({"document_id":3})
return record["game"]
@staticmethod
def setGame(string):
collection.update({"document_id": 3},{'$set': {"game": string}})
| 31.516667
| 100
| 0.589635
| 214
| 1,891
| 5.084112
| 0.313084
| 0.082721
| 0.09375
| 0.137868
| 0.398897
| 0.398897
| 0.398897
| 0.292279
| 0.25
| 0.115809
| 0
| 0.008677
| 0.268641
| 1,891
| 59
| 101
| 32.050847
| 0.778019
| 0
| 0
| 0.375
| 0
| 0
| 0.111581
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.0625
| 0
| 0.395833
| 0.041667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6d9276de4eb719b6800f06060463d545ce0e50b7
| 702
|
py
|
Python
|
article/admin.py
|
SeddonShen/TimePill
|
8b2c4dc2c129f440d67e1dba1ab16591057b65f7
|
[
"Apache-2.0"
] | 4
|
2021-12-26T04:39:06.000Z
|
2021-12-29T16:57:36.000Z
|
article/admin.py
|
SeddonShen/TimePill
|
8b2c4dc2c129f440d67e1dba1ab16591057b65f7
|
[
"Apache-2.0"
] | null | null | null |
article/admin.py
|
SeddonShen/TimePill
|
8b2c4dc2c129f440d67e1dba1ab16591057b65f7
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
# Register your models here.
from . import models
from .models import Article, Comment
class ArticleAdmin(admin.ModelAdmin):
fieldsets = [
(None, {'fields': ['id']}),
(None, {'fields': ['title', 'content', 'status']})
]
list_display = (
['expire_time', 'diary_type', 'square_open', 'add_date', 'mod_date', 'id', 'title', 'content', 'status',
'author_id'])
# title = title,
# content = content,
# square_open = square_open,
# expire_time = expire_time,
# status = status,
# author_id_id = user_id,
# diary_type = diary_type,
# admin.site.register(Article, ArticleAdmin)
admin.site.register(Comment)
admin.site.register(Article)
| 25.071429
| 108
| 0.668091
| 84
| 702
| 5.392857
| 0.416667
| 0.07947
| 0.112583
| 0.10596
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.17094
| 702
| 27
| 109
| 26
| 0.778351
| 0.317664
| 0
| 0
| 0
| 0
| 0.232409
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.230769
| 0
| 0.461538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6d961a7fca3206cd16ef0e5d9d5a6b6cd7a06634
| 32,545
|
py
|
Python
|
neurokit2/ecg/ecg_findpeaks.py
|
vansjyo/NeuroKit
|
238cd3d89467f7922c68a3a4c1f44806a8466922
|
[
"MIT"
] | null | null | null |
neurokit2/ecg/ecg_findpeaks.py
|
vansjyo/NeuroKit
|
238cd3d89467f7922c68a3a4c1f44806a8466922
|
[
"MIT"
] | null | null | null |
neurokit2/ecg/ecg_findpeaks.py
|
vansjyo/NeuroKit
|
238cd3d89467f7922c68a3a4c1f44806a8466922
|
[
"MIT"
] | null | null | null |
# - * - coding: utf-8 - * -
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.signal
from ..signal import signal_smooth
from ..signal import signal_zerocrossings
def ecg_findpeaks(ecg_cleaned, sampling_rate=1000, method="neurokit", show=False):
"""Find R-peaks in an ECG signal.
Low-level function used by `ecg_peaks()` to identify R-peaks in an ECG signal using a different set of algorithms. See `ecg_peaks()` for details.
Parameters
----------
ecg_cleaned : list, array or Series
The cleaned ECG channel as returned by `ecg_clean()`.
sampling_rate : int
        The sampling frequency of `ecg_cleaned` (in Hz, i.e., samples/second).
Defaults to 1000.
method : string
The algorithm to be used for R-peak detection. Can be one of 'neurokit' (default),
        'pantompkins1985', 'hamilton2002', 'christov2004', 'gamboa2008', 'elgendi2010', 'engzeemod2012', 'kalidas2017', 'martinez2003' or 'rodrigues2020'.
show : bool
        If True, will return a plot visualizing the thresholds used in the
algorithm. Useful for debugging.
Returns
-------
info : dict
A dictionary containing additional information, in this case the
samples at which R-peaks occur, accessible with the key "ECG_R_Peaks".
See Also
--------
ecg_clean, signal_fixpeaks, ecg_peaks, ecg_rate, ecg_process, ecg_plot
Examples
--------
>>> import neurokit2 as nk
>>>
>>> ecg = nk.ecg_simulate(duration=10, sampling_rate=1000)
>>> cleaned = nk.ecg_clean(ecg, sampling_rate=1000)
>>> info = nk.ecg_findpeaks(cleaned)
>>> nk.events_plot(info["ECG_R_Peaks"], cleaned)
>>>
>>> # Different methods
>>> neurokit = nk.ecg_findpeaks(nk.ecg_clean(ecg, method="neurokit"), method="neurokit")
>>> pantompkins1985 = nk.ecg_findpeaks(nk.ecg_clean(ecg, method="pantompkins1985"), method="pantompkins1985")
>>> hamilton2002 = nk.ecg_findpeaks(nk.ecg_clean(ecg, method="hamilton2002"), method="hamilton2002")
>>> christov2004 = nk.ecg_findpeaks(cleaned, method="christov2004")
>>> gamboa2008 = nk.ecg_findpeaks(nk.ecg_clean(ecg, method="gamboa2008"), method="gamboa2008")
>>> elgendi2010 = nk.ecg_findpeaks(nk.ecg_clean(ecg, method="elgendi2010"), method="elgendi2010")
>>> engzeemod2012 = nk.ecg_findpeaks(nk.ecg_clean(ecg, method="engzeemod2012"), method="engzeemod2012")
>>> kalidas2017 = nk.ecg_findpeaks(nk.ecg_clean(ecg, method="kalidas2017"), method="kalidas2017")
>>> martinez2003 = nk.ecg_findpeaks(cleaned, method="martinez2003")
>>>
>>> # Visualize
>>> nk.events_plot([neurokit["ECG_R_Peaks"],
pantompkins1985["ECG_R_Peaks"],
hamilton2002["ECG_R_Peaks"],
christov2004["ECG_R_Peaks"],
gamboa2008["ECG_R_Peaks"],
elgendi2010["ECG_R_Peaks"],
engzeemod2012["ECG_R_Peaks"],
kalidas2017["ECG_R_Peaks"]],
martinez2003["ECG_R_Peaks"]], cleaned)
References
--------------
- Gamboa, H. (2008). Multi-modal behavioral biometrics based on hci and electrophysiology. PhD ThesisUniversidade.
- W. Zong, T. Heldt, G.B. Moody, and R.G. Mark. An open-source algorithm to detect onset of arterial blood pressure pulses. In Computers in Cardiology, 2003, pages 259–262, 2003.
- Hamilton, Open Source ECG Analysis Software Documentation, E.P.Limited, 2002.
- Jiapu Pan and Willis J. Tompkins. A Real-Time QRS Detection Algorithm. In: IEEE Transactions on Biomedical Engineering BME-32.3 (1985), pp. 230–236.
- C. Zeelenberg, A single scan algorithm for QRS detection and feature extraction, IEEE Comp. in Cardiology, vol. 6, pp. 37-42, 1979
- A. Lourenco, H. Silva, P. Leite, R. Lourenco and A. Fred, "Real Time Electrocardiogram Segmentation for Finger Based ECG Biometrics", BIOSIGNALS 2012, pp. 49-54, 2012.
"""
# Try retrieving right column
if isinstance(ecg_cleaned, pd.DataFrame):
try:
ecg_cleaned = ecg_cleaned["ECG_Clean"]
        except KeyError:
try:
ecg_cleaned = ecg_cleaned["ECG_Raw"]
            except KeyError:
ecg_cleaned = ecg_cleaned["ECG"]
method = method.lower() # remove capitalised letters
# Run peak detection algorithm
if method in ["nk", "nk2", "neurokit", "neurokit2"]:
rpeaks = _ecg_findpeaks_neurokit(ecg_cleaned, sampling_rate,
show=show)
elif method in ["pantompkins", "pantompkins1985"]:
rpeaks = _ecg_findpeaks_pantompkins(ecg_cleaned, sampling_rate)
elif method in ["gamboa2008", "gamboa"]:
rpeaks = _ecg_findpeaks_gamboa(ecg_cleaned, sampling_rate)
elif method in ["ssf", "slopesumfunction", "zong", "zong2003"]:
rpeaks = _ecg_findpeaks_ssf(ecg_cleaned, sampling_rate)
elif method in ["hamilton", "hamilton2002"]:
rpeaks = _ecg_findpeaks_hamilton(ecg_cleaned, sampling_rate)
elif method in ["christov", "christov2004"]:
rpeaks = _ecg_findpeaks_christov(ecg_cleaned, sampling_rate)
elif method in ["engzee", "engzee2012", "engzeemod", "engzeemod2012"]:
rpeaks = _ecg_findpeaks_engzee(ecg_cleaned, sampling_rate)
elif method in ["elgendi", "elgendi2010"]:
rpeaks = _ecg_findpeaks_elgendi(ecg_cleaned, sampling_rate)
elif method in ["kalidas2017", "swt", "kalidas", "kalidastamil", "kalidastamil2017"]:
rpeaks = _ecg_findpeaks_kalidas(ecg_cleaned, sampling_rate)
elif method in ["martinez2003", "martinez"]:
rpeaks = _ecg_findpeaks_WT(ecg_cleaned, sampling_rate)
elif method in ["rodrigues2020", "rodrigues", "asi"]:
rpeaks = _ecg_findpeaks_rodrigues(ecg_cleaned, sampling_rate)
else:
raise ValueError("NeuroKit error: ecg_findpeaks(): 'method' should be "
"one of 'neurokit' or 'pamtompkins'.")
# Prepare output.
info = {"ECG_R_Peaks": rpeaks}
return info
# =============================================================================
# NeuroKit
# =============================================================================
def _ecg_findpeaks_neurokit(signal, sampling_rate=1000, smoothwindow=.1, avgwindow=.75,
gradthreshweight=1.5, minlenweight=0.4, mindelay=0.3,
show=False):
"""
All tune-able parameters are specified as keyword arguments. The `signal`
must be the highpass-filtered raw ECG with a lowcut of .5 Hz.
"""
if show is True:
fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, sharex=True)
# Compute the ECG's gradient as well as the gradient threshold. Run with
# show=True in order to get an idea of the threshold.
grad = np.gradient(signal)
absgrad = np.abs(grad)
smooth_kernel = int(np.rint(smoothwindow * sampling_rate))
avg_kernel = int(np.rint(avgwindow * sampling_rate))
smoothgrad = signal_smooth(absgrad, kernel="boxcar", size=smooth_kernel)
avggrad = signal_smooth(smoothgrad, kernel="boxcar", size=avg_kernel)
gradthreshold = gradthreshweight * avggrad
mindelay = int(np.rint(sampling_rate * mindelay))
if show is True:
ax1.plot(signal)
ax2.plot(smoothgrad)
ax2.plot(gradthreshold)
# Identify start and end of QRS complexes.
qrs = smoothgrad > gradthreshold
beg_qrs = np.where(np.logical_and(np.logical_not(qrs[0:-1]), qrs[1:]))[0]
end_qrs = np.where(np.logical_and(qrs[0:-1], np.logical_not(qrs[1:])))[0]
# Throw out QRS-ends that precede first QRS-start.
end_qrs = end_qrs[end_qrs > beg_qrs[0]]
# Identify R-peaks within QRS (ignore QRS that are too short).
num_qrs = min(beg_qrs.size, end_qrs.size)
min_len = np.mean(end_qrs[:num_qrs] - beg_qrs[:num_qrs]) * minlenweight
peaks = [0]
for i in range(num_qrs):
beg = beg_qrs[i]
end = end_qrs[i]
len_qrs = end - beg
if len_qrs < min_len:
continue
if show is True:
ax2.axvspan(beg, end, facecolor="m", alpha=0.5)
# Find local maxima and their prominence within QRS.
data = signal[beg:end]
locmax, props = scipy.signal.find_peaks(data, prominence=(None, None))
if locmax.size > 0:
# Identify most prominent local maximum.
peak = beg + locmax[np.argmax(props["prominences"])]
# Enforce minimum delay between peaks.
if peak - peaks[-1] > mindelay:
peaks.append(peak)
peaks.pop(0)
if show is True:
ax1.scatter(peaks, signal[peaks], c="r")
peaks = np.asarray(peaks).astype(int) # Convert to int
return peaks
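# --- Illustrative only: the rising/falling-edge trick used above to find QRS
# boundaries works on any boolean array. A minimal sketch, assuming NumPy:
#
#     qrs = np.array([False, True, True, False, True, False])
#     beg = np.where(np.logical_and(np.logical_not(qrs[0:-1]), qrs[1:]))[0]
#     end = np.where(np.logical_and(qrs[0:-1], np.logical_not(qrs[1:])))[0]
#     # beg -> [0, 3]: indices just before the mask turns True
#     # end -> [2, 4]: indices just before it turns False again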
# =============================================================================
# Pan & Tompkins (1985)
# =============================================================================
def _ecg_findpeaks_pantompkins(signal, sampling_rate=1000):
"""
From https://github.com/berndporr/py-ecg-detectors/
- Jiapu Pan and Willis J. Tompkins. A Real-Time QRS Detection Algorithm.
In: IEEE Transactions on Biomedical Engineering BME-32.3 (1985), pp. 230–236.
"""
diff = np.diff(signal)
squared = diff * diff
N = int(0.12 * sampling_rate)
mwa = _ecg_findpeaks_MWA(squared, N)
mwa[:int(0.2 * sampling_rate)] = 0
mwa_peaks = _ecg_findpeaks_peakdetect(mwa, sampling_rate)
mwa_peaks = np.array(mwa_peaks, dtype='int')
return mwa_peaks
# =============================================================================
# Hamilton (2002)
# =============================================================================
def _ecg_findpeaks_hamilton(signal, sampling_rate=1000):
"""
From https://github.com/berndporr/py-ecg-detectors/
- Hamilton, Open Source ECG Analysis Software Documentation, E.P.Limited, 2002.
"""
diff = abs(np.diff(signal))
b = np.ones(int(0.08 * sampling_rate))
b = b/int(0.08 * sampling_rate)
a = [1]
ma = scipy.signal.lfilter(b, a, diff)
ma[0:len(b) * 2] = 0
n_pks = []
n_pks_ave = 0.0
s_pks = []
s_pks_ave = 0.0
QRS = [0]
RR = []
RR_ave = 0.0
th = 0.0
i = 0
idx = []
peaks = []
for i in range(len(ma)):
if i > 0 and i < len(ma) - 1:
if ma[i-1] < ma[i] and ma[i + 1] < ma[i]:
peak = i
peaks.append(i)
if ma[peak] > th and (peak-QRS[-1]) > 0.3 * sampling_rate:
QRS.append(peak)
idx.append(i)
s_pks.append(ma[peak])
if len(n_pks) > 8:
s_pks.pop(0)
s_pks_ave = np.mean(s_pks)
if RR_ave != 0.0:
if QRS[-1]-QRS[-2] > 1.5 * RR_ave:
missed_peaks = peaks[idx[-2] + 1:idx[-1]]
for missed_peak in missed_peaks:
if missed_peak - peaks[idx[-2]] > int(0.360 * sampling_rate) and ma[missed_peak] > 0.5 * th:
QRS.append(missed_peak)
QRS.sort()
break
if len(QRS) > 2:
RR.append(QRS[-1]-QRS[-2])
if len(RR) > 8:
RR.pop(0)
RR_ave = int(np.mean(RR))
else:
n_pks.append(ma[peak])
if len(n_pks) > 8:
n_pks.pop(0)
n_pks_ave = np.mean(n_pks)
th = n_pks_ave + 0.45 * (s_pks_ave-n_pks_ave)
i += 1
QRS.pop(0)
QRS = np.array(QRS, dtype='int')
return QRS
# =============================================================================
# Slope Sum Function (SSF) - Zong et al. (2003)
# =============================================================================
def _ecg_findpeaks_ssf(signal, sampling_rate=1000, threshold=20, before=0.03, after=0.01):
"""
From https://github.com/PIA-Group/BioSPPy/blob/e65da30f6379852ecb98f8e2e0c9b4b5175416c3/biosppy/signals/ecg.py#L448
- W. Zong, T. Heldt, G.B. Moody, and R.G. Mark. An open-source algorithm to detect onset of arterial blood pressure pulses. In Computers in
Cardiology, 2003, pages 259–262, 2003.
"""
    # TODO: Doesn't really seem to work
# convert to samples
winB = int(before * sampling_rate)
winA = int(after * sampling_rate)
Rset = set()
length = len(signal)
# diff
dx = np.diff(signal)
dx[dx >= 0] = 0
dx = dx ** 2
# detection
idx, = np.nonzero(dx > threshold)
idx0 = np.hstack(([0], idx))
didx = np.diff(idx0)
# search
sidx = idx[didx > 1]
for item in sidx:
a = item - winB
if a < 0:
a = 0
b = item + winA
if b > length:
continue
r = np.argmax(signal[a:b]) + a
Rset.add(r)
# output
rpeaks = list(Rset)
rpeaks.sort()
rpeaks = np.array(rpeaks, dtype='int')
return rpeaks
# =============================================================================
# Christov (2004)
# =============================================================================
def _ecg_findpeaks_christov(signal, sampling_rate=1000):
"""
From https://github.com/berndporr/py-ecg-detectors/
- Ivaylo I. Christov, Real time electrocardiogram QRS detection using combined adaptive threshold, BioMedical Engineering OnLine 2004, vol. 3:28, 2004.
"""
total_taps = 0
b = np.ones(int(0.02 * sampling_rate))
b = b/int(0.02 * sampling_rate)
total_taps += len(b)
a = [1]
MA1 = scipy.signal.lfilter(b, a, signal)
b = np.ones(int(0.028 * sampling_rate))
b = b/int(0.028 * sampling_rate)
total_taps += len(b)
a = [1]
MA2 = scipy.signal.lfilter(b, a, MA1)
Y = []
for i in range(1, len(MA2)-1):
diff = abs(MA2[i + 1]-MA2[i-1])
Y.append(diff)
b = np.ones(int(0.040 * sampling_rate))
b = b/int(0.040 * sampling_rate)
total_taps += len(b)
a = [1]
MA3 = scipy.signal.lfilter(b, a, Y)
MA3[0:total_taps] = 0
ms50 = int(0.05 * sampling_rate)
ms200 = int(0.2 * sampling_rate)
ms1200 = int(1.2 * sampling_rate)
ms350 = int(0.35 * sampling_rate)
M = 0
newM5 = 0
M_list = []
MM = []
M_slope = np.linspace(1.0, 0.6, ms1200-ms200)
F = 0
F_list = []
R = 0
RR = []
Rm = 0
R_list = []
MFR = 0
MFR_list = []
QRS = []
for i in range(len(MA3)):
# M
if i < 5 * sampling_rate:
M = 0.6 * np.max(MA3[:i + 1])
MM.append(M)
if len(MM) > 5:
MM.pop(0)
elif QRS and i < QRS[-1] + ms200:
newM5 = 0.6 * np.max(MA3[QRS[-1]:i])
if newM5 > 1.5 * MM[-1]:
newM5 = 1.1 * MM[-1]
elif QRS and i == QRS[-1] + ms200:
if newM5 == 0:
newM5 = MM[-1]
MM.append(newM5)
if len(MM) > 5:
MM.pop(0)
M = np.mean(MM)
elif QRS and i > QRS[-1] + ms200 and i < QRS[-1] + ms1200:
M = np.mean(MM) * M_slope[i-(QRS[-1] + ms200)]
elif QRS and i > QRS[-1] + ms1200:
M = 0.6 * np.mean(MM)
# F
if i > ms350:
F_section = MA3[i-ms350:i]
max_latest = np.max(F_section[-ms50:])
max_earliest = np.max(F_section[:ms50])
F = F + ((max_latest-max_earliest)/150.0)
# R
if QRS and i < QRS[-1] + int((2.0/3.0 * Rm)):
R = 0
elif QRS and i > QRS[-1] + int((2.0/3.0 * Rm)) and i < QRS[-1] + Rm:
dec = (M-np.mean(MM))/1.4
R = 0 + dec
MFR = M + F + R
M_list.append(M)
F_list.append(F)
R_list.append(R)
MFR_list.append(MFR)
if not QRS and MA3[i] > MFR:
QRS.append(i)
elif QRS and i > QRS[-1] + ms200 and MA3[i] > MFR:
QRS.append(i)
if len(QRS) > 2:
RR.append(QRS[-1] - QRS[-2])
if len(RR) > 5:
RR.pop(0)
Rm = int(np.mean(RR))
QRS.pop(0)
QRS = np.array(QRS, dtype='int')
return QRS
# =============================================================================
# Gamboa (2008)
# =============================================================================
def _ecg_findpeaks_gamboa(signal, sampling_rate=1000, tol=0.002):
"""
From https://github.com/PIA-Group/BioSPPy/blob/e65da30f6379852ecb98f8e2e0c9b4b5175416c3/biosppy/signals/ecg.py#L834
- Gamboa, H. (2008). Multi-modal behavioral biometrics based on hci and electrophysiology. PhD ThesisUniversidade.
"""
# convert to samples
v_100ms = int(0.1 * sampling_rate)
v_300ms = int(0.3 * sampling_rate)
hist, edges = np.histogram(signal, 100, density=True)
TH = 0.01
F = np.cumsum(hist)
v0 = edges[np.nonzero(F > TH)[0][0]]
v1 = edges[np.nonzero(F < (1 - TH))[0][-1]]
nrm = max([abs(v0), abs(v1)])
norm_signal = signal / float(nrm)
d2 = np.diff(norm_signal, 2)
b = np.nonzero((np.diff(np.sign(np.diff(-d2)))) == -2)[0] + 2
b = np.intersect1d(b, np.nonzero(-d2 > tol)[0])
if len(b) < 3:
rpeaks = []
else:
b = b.astype('float')
rpeaks = []
previous = b[0]
for i in b[1:]:
if i - previous > v_300ms:
previous = i
rpeaks.append(np.argmax(signal[int(i):int(i + v_100ms)]) + i)
rpeaks = sorted(list(set(rpeaks)))
rpeaks = np.array(rpeaks, dtype='int')
return rpeaks
# =============================================================================
# Engzee Modified (2012)
# =============================================================================
def _ecg_findpeaks_engzee(signal, sampling_rate=1000):
"""
From https://github.com/berndporr/py-ecg-detectors/
- C. Zeelenberg, A single scan algorithm for QRS detection and feature extraction, IEEE Comp. in Cardiology, vol. 6, pp. 37-42, 1979
- A. Lourenco, H. Silva, P. Leite, R. Lourenco and A. Fred, "Real Time Electrocardiogram Segmentation for Finger Based ECG Biometrics", BIOSIGNALS 2012, pp. 49-54, 2012.
"""
engzee_fake_delay = 0
diff = np.zeros(len(signal))
for i in range(4, len(diff)):
diff[i] = signal[i]-signal[i-4]
ci = [1, 4, 6, 4, 1]
low_pass = scipy.signal.lfilter(ci, 1, diff)
low_pass[:int(0.2 * sampling_rate)] = 0
ms200 = int(0.2 * sampling_rate)
ms1200 = int(1.2 * sampling_rate)
ms160 = int(0.16 * sampling_rate)
neg_threshold = int(0.01 * sampling_rate)
M = 0
M_list = []
neg_m = []
MM = []
M_slope = np.linspace(1.0, 0.6, ms1200-ms200)
QRS = []
r_peaks = []
counter = 0
thi_list = []
thi = False
thf_list = []
thf = False
for i in range(len(low_pass)):
# M
if i < 5 * sampling_rate:
M = 0.6 * np.max(low_pass[:i + 1])
MM.append(M)
if len(MM) > 5:
MM.pop(0)
elif QRS and i < QRS[-1] + ms200:
newM5 = 0.6 * np.max(low_pass[QRS[-1]:i])
if newM5 > 1.5 * MM[-1]:
newM5 = 1.1 * MM[-1]
elif QRS and i == QRS[-1] + ms200:
MM.append(newM5)
if len(MM) > 5:
MM.pop(0)
M = np.mean(MM)
elif QRS and i > QRS[-1] + ms200 and i < QRS[-1] + ms1200:
M = np.mean(MM) * M_slope[i-(QRS[-1] + ms200)]
elif QRS and i > QRS[-1] + ms1200:
M = 0.6 * np.mean(MM)
M_list.append(M)
neg_m.append(-M)
if not QRS and low_pass[i] > M:
QRS.append(i)
thi_list.append(i)
thi = True
elif QRS and i > QRS[-1] + ms200 and low_pass[i] > M:
QRS.append(i)
thi_list.append(i)
thi = True
if thi and i < thi_list[-1] + ms160:
if low_pass[i] < -M and low_pass[i-1] > -M:
# thf_list.append(i)
thf = True
if thf and low_pass[i] < -M:
thf_list.append(i)
counter += 1
elif low_pass[i] > -M and thf:
counter = 0
thi = False
thf = False
elif thi and i > thi_list[-1] + ms160:
counter = 0
thi = False
thf = False
if counter > neg_threshold:
unfiltered_section = signal[thi_list[-1] - int(0.01 * sampling_rate):i]
r_peaks.append(engzee_fake_delay + np.argmax(unfiltered_section) + thi_list[-1] - int(0.01 * sampling_rate))
counter = 0
thi = False
thf = False
r_peaks = np.array(r_peaks, dtype='int')
return r_peaks
# =============================================================================
# Stationary Wavelet Transform (SWT) - Kalidas and Tamil (2017)
# =============================================================================
def _ecg_findpeaks_kalidas(signal, sampling_rate=1000):
"""
From https://github.com/berndporr/py-ecg-detectors/
    - Vignesh Kalidas and Lakshman Tamil (2017). Real-time QRS detector using Stationary Wavelet Transform for Automated ECG Analysis. In: 2017 IEEE 17th International Conference on Bioinformatics and Bioengineering (BIBE). Uses the Pan and Tompkins thresholding.
"""
# Try loading pywt
try:
import pywt
except ImportError:
raise ImportError("NeuroKit error: ecg_findpeaks(): the 'PyWavelets' "
"module is required for this method to run. ",
"Please install it first (`pip install PyWavelets`).")
swt_level = 3
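    # The stationary wavelet transform requires the signal length to be
    # divisible by 2 ** swt_level, so find the smallest padding that makes it so.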
padding = -1
for i in range(1000):
if (len(signal) + i) % 2 ** swt_level == 0:
padding = i
break
if padding > 0:
signal = np.pad(signal, (0, padding), 'edge')
elif padding == -1:
print("Padding greater than 1000 required\n")
swt_ecg = pywt.swt(signal, 'db3', level=swt_level)
swt_ecg = np.array(swt_ecg)
swt_ecg = swt_ecg[0, 1, :]
squared = swt_ecg * swt_ecg
f1 = 0.01/sampling_rate
f2 = 10/sampling_rate
b, a = scipy.signal.butter(3, [f1 * 2, f2 * 2], btype='bandpass')
filtered_squared = scipy.signal.lfilter(b, a, squared)
filt_peaks = _ecg_findpeaks_peakdetect(filtered_squared, sampling_rate)
filt_peaks = np.array(filt_peaks, dtype='int')
return filt_peaks
# =============================================================================
# Elgendi et al. (2010)
# =============================================================================
def _ecg_findpeaks_elgendi(signal, sampling_rate=1000):
"""
From https://github.com/berndporr/py-ecg-detectors/
- Elgendi, Mohamed & Jonkman, Mirjam & De Boer, Friso. (2010). Frequency Bands Effects on QRS Detection. The 3rd International Conference on Bio-inspired Systems and Signal Processing (BIOSIGNALS2010). 428-431.
"""
window1 = int(0.12 * sampling_rate)
mwa_qrs = _ecg_findpeaks_MWA(abs(signal), window1)
window2 = int(0.6 * sampling_rate)
mwa_beat = _ecg_findpeaks_MWA(abs(signal), window2)
blocks = np.zeros(len(signal))
block_height = np.max(signal)
for i in range(len(mwa_qrs)):
if mwa_qrs[i] > mwa_beat[i]:
blocks[i] = block_height
else:
blocks[i] = 0
QRS = []
for i in range(1, len(blocks)):
if blocks[i-1] == 0 and blocks[i] == block_height:
start = i
elif blocks[i-1] == block_height and blocks[i] == 0:
end = i-1
if end-start > int(0.08 * sampling_rate):
detection = np.argmax(signal[start:end + 1]) + start
if QRS:
if detection-QRS[-1] > int(0.3 * sampling_rate):
QRS.append(detection)
else:
QRS.append(detection)
QRS = np.array(QRS, dtype='int')
return QRS
# =============================================================================
# Continuous Wavelet Transform (CWT) - Martinez et al. (2003)
# =============================================================================
#
def _ecg_findpeaks_WT(signal, sampling_rate=1000):
# Try loading pywt
try:
import pywt
except ImportError:
raise ImportError("NeuroKit error: ecg_delineator(): the 'PyWavelets' "
"module is required for this method to run. ",
"Please install it first (`pip install PyWavelets`).")
# CWT with 'gaus1' (first derivative of a Gaussian) at dyadic scales
scales = np.array([1, 2, 4, 8, 16])
cwtmatr, freqs = pywt.cwt(signal, scales, 'gaus1', sampling_period=1.0/sampling_rate)
# For wt of scale 2^4
signal_4 = cwtmatr[4, :]
epsilon_4 = np.sqrt(np.mean(np.square(signal_4)))
peaks_4, _ = scipy.signal.find_peaks(np.abs(signal_4), height=epsilon_4)
# For wt of scale 2^3
signal_3 = cwtmatr[3, :]
epsilon_3 = np.sqrt(np.mean(np.square(signal_3)))
peaks_3, _ = scipy.signal.find_peaks(np.abs(signal_3), height=epsilon_3)
# Keep only peaks_3 that are nearest to peaks_4
peaks_3_keep = np.zeros_like(peaks_4)
for i in range(len(peaks_4)):
peaks_distance = abs(peaks_4[i] - peaks_3)
peaks_3_keep[i] = peaks_3[np.argmin(peaks_distance)]
# For wt of scale 2^2
signal_2 = cwtmatr[2, :]
epsilon_2 = np.sqrt(np.mean(np.square(signal_2)))
peaks_2, _ = scipy.signal.find_peaks(np.abs(signal_2), height=epsilon_2)
# Keep only peaks_2 that are nearest to peaks_3
peaks_2_keep = np.zeros_like(peaks_4)
for i in range(len(peaks_4)):
peaks_distance = abs(peaks_3_keep[i] - peaks_2)
peaks_2_keep[i] = peaks_2[np.argmin(peaks_distance)]
# For wt of scale 2^1
signal_1 = cwtmatr[1, :]
epsilon_1 = np.sqrt(np.mean(np.square(signal_1)))
peaks_1, _ = scipy.signal.find_peaks(np.abs(signal_1), height=epsilon_1)
# Keep only peaks_1 that are nearest to peaks_2
peaks_1_keep = np.zeros_like(peaks_4)
for i in range(len(peaks_4)):
peaks_distance = abs(peaks_2_keep[i] - peaks_1)
peaks_1_keep[i] = peaks_1[np.argmin(peaks_distance)]
# Find R peaks
max_R_peak_dist = int(0.1 * sampling_rate)
rpeaks = []
for index_cur, index_next in zip(peaks_1_keep[:-1], peaks_1_keep[1:]):
correct_sign = signal_1[index_cur] < 0 and signal_1[index_next] > 0 # limit 1
near = (index_next - index_cur) < max_R_peak_dist # limit 2
if near and correct_sign:
rpeaks.append(signal_zerocrossings(
signal_1[index_cur:index_next])[0] + index_cur)
rpeaks = np.array(rpeaks, dtype='int')
return rpeaks
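# Hedged usage note: the scale-matching above assumes every coarse-scale peak
# has a neighbour at each finer scale; on very short or flat inputs the
# per-scale peak lists can be empty and np.argmin would fail, so this detector
# should be called on real ECG segments, e.g.
#   rpeaks = _ecg_findpeaks_WT(ecg_signal, sampling_rate=1000)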
# =============================================================================
# ASI (FSM based 2020)
# =============================================================================
def _ecg_findpeaks_rodrigues(signal, sampling_rate=1000):
"""
Segmenter by Tiago Rodrigues, inspired by Gutiérrez-Rivas et al. (2015) and Sadhukhan & Mitra (2012).
References
----------
- Gutiérrez-Rivas, R., García, J. J., Marnane, W. P., & Hernández, A. (2015). Novel real-time low-complexity QRS complex detector based on adaptive thresholding. IEEE Sensors Journal, 15(10), 6036-6043.
- Sadhukhan, D., & Mitra, M. (2012). R-peak detection algorithm for ECG using double difference and RR interval processing. Procedia Technology, 4, 873-877.
"""
N = int(np.round(3 * sampling_rate/128))
Nd = N-1
Pth = (0.7 * sampling_rate) / 128 + 2.7  # Pth = 3, optimal for fs = 250 Hz
Rmin = 0.26
rpeaks = []
i = 1
tf = len(signal)
Ramptotal = 0
# Double derivative squared
diff_ecg = [signal[i] - signal[i - Nd] for i in range(Nd, len(signal))]
ddiff_ecg = [diff_ecg[i] - diff_ecg[i - 1] for i in range(1, len(diff_ecg))]
squar = np.square(ddiff_ecg)
# Integrate moving window
b = np.array(np.ones(N))
a = [1]
processed_ecg = scipy.signal.lfilter(b, a, squar)
# R-peak finder FSM
while i < tf - sampling_rate: # ignore last second of recording
# State 1: looking for maximum
tf1 = np.round(i + Rmin*sampling_rate)
Rpeakamp = 0
while i < tf1:
# Rpeak amplitude and position
if processed_ecg[i] > Rpeakamp:
Rpeakamp = processed_ecg[i]
rpeakpos = i + 1
i += 1
Ramptotal = (19 / 20) * Ramptotal + (1 / 20) * Rpeakamp
rpeaks.append(rpeakpos)
# State 2: waiting state
d = tf1 - rpeakpos
tf2 = i + np.round(0.2*2 - d)
while i <= tf2:
i += 1
# State 3: decreasing threshold
Thr = Ramptotal
while processed_ecg[i] < Thr:
Thr = Thr * np.exp(-Pth / sampling_rate)
i += 1
return rpeaks
# =============================================================================
# Utilities
# =============================================================================
def _ecg_findpeaks_MWA(signal, window_size):
"""
From https://github.com/berndporr/py-ecg-detectors/
"""
mwa = np.zeros(len(signal))
sums = np.cumsum(signal)
def get_mean(begin, end):
if begin == 0:
return sums[end - 1] / end
dif = sums[end - 1] - sums[begin - 1]
return dif / (end - begin)
for i in range(len(signal)):
if i < window_size:
section = signal[0:i]
else:
section = get_mean(i - window_size, i)
if i != 0:
mwa[i] = np.mean(section)
else:
mwa[i] = signal[i]
return mwa
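# A small worked example for the helper above (hypothetical values). Note that
# once i >= window_size the mean covers signal[i - window_size:i], i.e. it
# excludes the current sample: for [0, 1, 2, 3] with window_size=2 the result
# is [0.0, 0.0, 0.5, 1.5].
def _demo_mwa():
    return _ecg_findpeaks_MWA(np.array([0.0, 1.0, 2.0, 3.0]), window_size=2)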
def _ecg_findpeaks_peakdetect(detection, sampling_rate=1000):
"""
From https://github.com/berndporr/py-ecg-detectors/
"""
min_distance = int(0.25 * sampling_rate)
signal_peaks = [0]
noise_peaks = []
SPKI = 0.0
NPKI = 0.0
threshold_I1 = 0.0
threshold_I2 = 0.0
RR_missed = 0
index = 0
indexes = []
missed_peaks = []
peaks = []
for i in range(len(detection)):
if i > 0 and i < len(detection) - 1:
if detection[i-1] < detection[i] and detection[i + 1] < detection[i]:
peak = i
peaks.append(i)
if detection[peak] > threshold_I1 and (peak - signal_peaks[-1]) > 0.3 * sampling_rate:
signal_peaks.append(peak)
indexes.append(index)
SPKI = 0.125 * detection[signal_peaks[-1]] + 0.875 * SPKI
if RR_missed != 0:
if signal_peaks[-1] - signal_peaks[-2] > RR_missed:
missed_section_peaks = peaks[indexes[-2] + 1:indexes[-1]]
missed_section_peaks2 = []
for missed_peak in missed_section_peaks:
if missed_peak - signal_peaks[-2] > min_distance and signal_peaks[-1] - missed_peak > min_distance and detection[missed_peak] > threshold_I2:
missed_section_peaks2.append(missed_peak)
if len(missed_section_peaks2) > 0:
missed_peak = missed_section_peaks2[np.argmax(detection[missed_section_peaks2])]
missed_peaks.append(missed_peak)
signal_peaks.append(signal_peaks[-1])
signal_peaks[-2] = missed_peak
else:
noise_peaks.append(peak)
NPKI = 0.125 * detection[noise_peaks[-1]] + 0.875 * NPKI
threshold_I1 = NPKI + 0.25 * (SPKI - NPKI)
threshold_I2 = 0.5 * threshold_I1
if len(signal_peaks) > 8:
RR = np.diff(signal_peaks[-9:])
RR_ave = int(np.mean(RR))
RR_missed = int(1.66 * RR_ave)
index = index + 1
signal_peaks.pop(0)
return signal_peaks
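# Hedged usage note: this Pan-Tompkins style thresholding expects a rectified,
# integrated detection signal (as produced by the SWT detector above), e.g.
#   peaks = _ecg_findpeaks_peakdetect(filtered_squared, sampling_rate=1000)
# The placeholder 0 that seeds signal_peaks is popped before returning.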
| 32.940283
| 262
| 0.540636
| 4,168
| 32,545
| 4.082534
| 0.15595
| 0.053597
| 0.005994
| 0.010343
| 0.367772
| 0.316232
| 0.276211
| 0.237659
| 0.202692
| 0.195757
| 0
| 0.055108
| 0.286864
| 32,545
| 987
| 263
| 32.973658
| 0.677884
| 0.296144
| 0
| 0.277281
| 0
| 0
| 0.036339
| 0
| 0
| 0
| 0
| 0.001013
| 0
| 1
| 0.026834
| false
| 0.019678
| 0.021467
| 0
| 0.076923
| 0.001789
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6d96c1cfb476a1c31417724d0d6d9bf4095e9439
| 1,157
|
py
|
Python
|
tinynn/converter/operators/base.py
|
www516717402/TinyNeuralNetwork
|
23e7931b4377462fad94a9ab0651b6d9a346252d
|
[
"MIT"
] | 1
|
2022-01-11T06:40:13.000Z
|
2022-01-11T06:40:13.000Z
|
tinynn/converter/operators/base.py
|
kingkie/TinyNeuralNetwork
|
9b4313bbe6fb46d602681b69799e4725eef4d71b
|
[
"MIT"
] | null | null | null |
tinynn/converter/operators/base.py
|
kingkie/TinyNeuralNetwork
|
9b4313bbe6fb46d602681b69799e4725eef4d71b
|
[
"MIT"
] | 1
|
2021-12-20T07:21:37.000Z
|
2021-12-20T07:21:37.000Z
|
import inspect
import sys
from enum import IntEnum
from tflite.ActivationFunctionType import ActivationFunctionType
from tflite.BuiltinOperator import BuiltinOperator
# In Python 3.6, we cannot make ExtendedOperator derive from IntEnum
if sys.version_info >= (3, 7):
bases = (IntEnum, )
else:
bases = ()
class _ExtendedOperatorBase(BuiltinOperator, *bases):
INPUT_NODE = -1
OUTPUT_NODE = -2
CONSTANT_NODE = -3
BATCH_NORM = -10
GENERIC_CONV = -11
GENERIC_DECONV = -12
def type_name(self):
return self.name.replace('_NODE', '')
# In Python 3.6, the elements in the parent class are not collected in IntEnum,
# so we have to do that dynamically.
if sys.version_info >= (3, 7):
ExtendedOperator = _ExtendedOperatorBase
else:
ExtendedOperator = IntEnum('ExtendedOperator', dict(
filter(lambda x: not x[0].startswith('__'), inspect.getmembers(_ExtendedOperatorBase))))
FUSE_ACTIVATION_MAP = {BuiltinOperator.RELU: ActivationFunctionType.RELU,
BuiltinOperator.RELU6: ActivationFunctionType.RELU6,
BuiltinOperator.TANH: ActivationFunctionType.TANH}
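# A hedged usage sketch (assuming Python >= 3.7, where the class above is used
# directly): members defined in the class body behave as IntEnum values, e.g.
#   ExtendedOperator.INPUT_NODE.type_name()  # 'INPUT' ('_NODE' suffix stripped)
#   ExtendedOperator.BATCH_NORM == -10       # True, via IntEnum comparison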
| 28.925
| 96
| 0.713915
| 130
| 1,157
| 6.223077
| 0.546154
| 0.024722
| 0.02225
| 0.024722
| 0.044499
| 0.044499
| 0
| 0
| 0
| 0
| 0
| 0.021598
| 0.199654
| 1,157
| 39
| 97
| 29.666667
| 0.852052
| 0.15471
| 0
| 0.153846
| 0
| 0
| 0.023614
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.192308
| 0.038462
| 0.538462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6d986eb3521f1a36cc7b07b20157b53df24adc51
| 852
|
py
|
Python
|
ihome/apps/homes/urls.py
|
Noah-Smith-wgp/rentinghouse
|
22ba71aa8b3b0c290b8c01cd2f4dd14bca81d3d3
|
[
"MIT"
] | null | null | null |
ihome/apps/homes/urls.py
|
Noah-Smith-wgp/rentinghouse
|
22ba71aa8b3b0c290b8c01cd2f4dd14bca81d3d3
|
[
"MIT"
] | null | null | null |
ihome/apps/homes/urls.py
|
Noah-Smith-wgp/rentinghouse
|
22ba71aa8b3b0c290b8c01cd2f4dd14bca81d3d3
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url
from rest_framework.routers import DefaultRouter
from apps.homes import views
urlpatterns = [
url(r'^areas/$', views.AreaAPIView.as_view()),
# url(r'^houses/$', views.HouseAPIView.as_view()),
# My houses list
url(r'^user/houses/$', views.HouseListView.as_view()),
# Homepage houses module
url(r'^houses/index/$', views.HouseIndexView.as_view()),
# House detail page
url(r'^houses/(?P<house_id>\d+)/$', views.HouseDetailView.as_view()),
]
router = DefaultRouter()
# # Homepage house recommendations
# router.register(r'houses/index', views.HouseIndexViewSet, basename='index')
# urlpatterns += router.urls
# Publish house listings; house data search
router.register(r'houses', views.HouseAPIView, basename='houses')
urlpatterns += router.urls
# Upload house listing images
router.register(r'houses/(?P<house_id>\d+)/images', views.HouseImageView, basename='images')
urlpatterns += router.urls
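# For reference (hypothetical, following DefaultRouter conventions), the
# registrations above are expected to expose routes such as:
#   houses/                          -> HouseAPIView list/create actions
#   houses/{pk}/                     -> HouseAPIView detail actions
#   houses/(?P<house_id>\d+)/images/ -> HouseImageView image upload actions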
| 29.37931
| 92
| 0.70892
| 107
| 852
| 5.570093
| 0.401869
| 0.07047
| 0.050336
| 0.105705
| 0.053691
| 0.053691
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118545
| 852
| 28
| 93
| 30.428571
| 0.793609
| 0.233568
| 0
| 0.142857
| 0
| 0
| 0.176287
| 0.090484
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.214286
| 0
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6d9999a24bad3d878ecc89ba34c9037a6d5b672e
| 646
|
py
|
Python
|
encryption/validation/ssl_client.py
|
TheConner/intl-iot
|
e7f0d7e96392acec900f29eb95cbbf5cb8d8db66
|
[
"Apache-2.0"
] | 46
|
2019-09-19T05:03:56.000Z
|
2022-03-07T05:55:12.000Z
|
encryption/validation/ssl_client.py
|
dng24/intl-iot
|
84d46012afce5c7473d0cc9b82dc9e3aef069bbf
|
[
"Apache-2.0"
] | null | null | null |
encryption/validation/ssl_client.py
|
dng24/intl-iot
|
84d46012afce5c7473d0cc9b82dc9e3aef069bbf
|
[
"Apache-2.0"
] | 23
|
2019-09-18T02:04:59.000Z
|
2022-03-07T05:55:13.000Z
|
import socket
import ssl
import sys
hostname = '127.0.0.1'
if len(sys.argv) < 2:
exit(0)
inputfile = sys.argv[1]
print('\tRead file %s' % inputfile)
# msg = b"HEAD / HTTP /1.0\r\nHost: linuxfr.org\r\n\r\n"
with open(inputfile) as f:
    msg = f.read().encode()
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.load_verify_locations('rootCA.pem')
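# Note: the CA file loaded above must match the certificate presented by the
# test server. A self-signed certificate for local testing can be generated
# with a standard OpenSSL command, e.g.:
#   openssl req -x509 -newkey rsa:2048 -nodes -keyout rootCA.key -out rootCA.pem -days 365 -subj "/CN=127.0.0.1"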
with socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) as sock:
with context.wrap_socket(sock, server_hostname=hostname) as ssock:
ssock.connect((hostname, 8443))
# cert = ssock.getpeercert()
ssock.sendall(msg)
print('\tSent %s .+' % msg[:10])
| 26.916667
| 70
| 0.676471
| 99
| 646
| 4.333333
| 0.575758
| 0.032634
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033395
| 0.165635
| 646
| 23
| 71
| 28.086957
| 0.762523
| 0.125387
| 0
| 0
| 0
| 0
| 0.080071
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.176471
| 0
| 0.176471
| 0.117647
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6d9d4c3a43bd4e12042fd3d32b8a804be12b5ec6
| 429
|
py
|
Python
|
solutions/1209_remove_all_adjacent_duplicates_in_string_ii.py
|
YiqunPeng/leetcode_pro
|
7e6376984f9baec49a5e827d98330fe3d1b656f0
|
[
"MIT"
] | null | null | null |
solutions/1209_remove_all_adjacent_duplicates_in_string_ii.py
|
YiqunPeng/leetcode_pro
|
7e6376984f9baec49a5e827d98330fe3d1b656f0
|
[
"MIT"
] | null | null | null |
solutions/1209_remove_all_adjacent_duplicates_in_string_ii.py
|
YiqunPeng/leetcode_pro
|
7e6376984f9baec49a5e827d98330fe3d1b656f0
|
[
"MIT"
] | null | null | null |
class Solution:
def removeDuplicates(self, s: str, k: int) -> str:
"""Stack.
Running time: O(n) where n is the length of s.
"""
st = [['#', 0]]
for c in s:
if st[-1][0] == c:
st[-1][1] += 1
if st[-1][1] == k:
st.pop()
else:
st.append([c, 1])
return ''.join([i[0] * i[1] for i in st])
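# Sanity checks against the published LeetCode examples:
#   Solution().removeDuplicates("abcd", 2)           == "abcd"
#   Solution().removeDuplicates("deeedbbcccbdaa", 3) == "aa"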
| 26.8125
| 54
| 0.361305
| 59
| 429
| 2.627119
| 0.542373
| 0.058065
| 0.064516
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 0.461538
| 429
| 15
| 55
| 28.6
| 0.623377
| 0.125874
| 0
| 0
| 0
| 0
| 0.002841
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6da36de83dd56e3ca84e1de8b7ae22701073bf6d
| 528
|
py
|
Python
|
parte2/alternativeq2.py
|
ronaldbrito/PDS
|
58c8f9737e4cc5872a27e7b778a43def5e3e11f4
|
[
"MIT"
] | 1
|
2019-03-16T01:49:11.000Z
|
2019-03-16T01:49:11.000Z
|
parte2/alternativeq2.py
|
heliomeiralins/pds
|
58c8f9737e4cc5872a27e7b778a43def5e3e11f4
|
[
"MIT"
] | null | null | null |
parte2/alternativeq2.py
|
heliomeiralins/pds
|
58c8f9737e4cc5872a27e7b778a43def5e3e11f4
|
[
"MIT"
] | null | null | null |
import numpy as np
from scipy.misc import imread, imsave  # removed in SciPy >= 1.2; imageio provides replacements
from scipy import ndimage
img = imread('doc1.bmp')
def f(x):
ret = x * 255 / 150
if ret > 255:
ret = 255
return ret
F = np.vectorize(f)
treated_img = F(img)
imsave('treated_doc.bmp', treated_img)
mask = treated_img < treated_img.mean()
label_im, nb_labels = ndimage.label(mask)
sizes = ndimage.sum(mask, label_im, range(nb_labels + 1))
print(nb_labels)
print(sum(sizes > 1))
print(sum(sizes > 2))
print(sum(sizes > 5))
print(sum(sizes > 10))
| 17.6
| 57
| 0.676136
| 88
| 528
| 3.943182
| 0.431818
| 0.115274
| 0.149856
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044393
| 0.189394
| 528
| 29
| 58
| 18.206897
| 0.766355
| 0
| 0
| 0
| 0
| 0
| 0.043561
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.15
| 0
| 0.25
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6da7a648349b63e6ebd5bddae98e78d24000ce56
| 2,617
|
py
|
Python
|
module2-sql-for-analysis/insert_rpg_thief.py
|
KristineYW/DS-Unit-3-Sprint-2-SQL-and-Databases
|
4a690cd8e651161296d7aec2af86a56c499d6801
|
[
"MIT"
] | null | null | null |
module2-sql-for-analysis/insert_rpg_thief.py
|
KristineYW/DS-Unit-3-Sprint-2-SQL-and-Databases
|
4a690cd8e651161296d7aec2af86a56c499d6801
|
[
"MIT"
] | null | null | null |
module2-sql-for-analysis/insert_rpg_thief.py
|
KristineYW/DS-Unit-3-Sprint-2-SQL-and-Databases
|
4a690cd8e651161296d7aec2af86a56c499d6801
|
[
"MIT"
] | null | null | null |
import os
from dotenv import load_dotenv
import sqlite3
import psycopg2
from psycopg2.extras import execute_values
load_dotenv() # looks inside the .env file for some env vars
# passes env var values to python var
DB_HOST = os.getenv("DB_HOST", default="OOPS")
DB_NAME = os.getenv("DB_NAME", default="OOPS")
DB_USER = os.getenv("DB_USER", default="OOPS")
DB_PASSWORD = os.getenv("DB_PASSWORD", default="OOPS")
# what is the filepath to connect to our sqlite database?
DB_FILEPATH = os.path.join(os.path.dirname(__file__), "..", "module1-introduction-to-sql", "rpg_db.sqlite3")
class SqliteService_thief():
def __init__(self, db_filepath=DB_FILEPATH):
self.connection = sqlite3.connect(db_filepath)
self.cursor = self.connection.cursor()
def fetch_characters_thief(self):
return self.cursor.execute("SELECT * FROM charactercreator_thief;").fetchall()
class ElephantSQLService_thief():
def __init__(self):
self.connection = psycopg2.connect(dbname=DB_NAME, user=DB_USER, password=DB_PASSWORD, host=DB_HOST)
self.cursor = self.connection.cursor()
def create_characters_thief_table(self):
create_query = """
DROP TABLE IF EXISTS characters_thief; -- allows this to be run idempotently, avoids psycopg2.errors.UniqueViolation: duplicate key value violates unique constraint "characters_thief_pkey" DETAIL: Key (character_id)=(1) already exists.
CREATE TABLE IF NOT EXISTS characters_thief (
character_ptr_id INT,
is_sneaking INT,
energy INT
);
"""
print(create_query)
self.cursor.execute(create_query)
self.connection.commit()
def insert_characters_thief(self, characters_thief):
"""
Param characters_thief needs to be a list of tuples, each representing a row to insert (each should have each column)
"""
insertion_query = """
INSERT INTO characters_thief (character_ptr_id, is_sneaking, energy)
VALUES %s
"""
execute_values(self.cursor, insertion_query, characters_thief)
self.connection.commit()
if __name__ == "__main__":
#
# EXTRACT (AND MAYBE TRANSFORM IF NECESSARY)
#
sqlite_service = SqliteService_thief()
characters_thief = sqlite_service.fetch_characters_thief()
print(type(characters_thief), len(characters_thief))
print(type(characters_thief[0]), characters_thief[0])
#
# LOAD
#
pg_service = ElephantSQLService_thief()
pg_service.create_characters_thief_table()
pg_service.insert_characters_thief(characters_thief)
| 42.209677
| 244
| 0.708445
| 328
| 2,617
| 5.375
| 0.365854
| 0.161656
| 0.022689
| 0.018151
| 0.114577
| 0.081679
| 0
| 0
| 0
| 0
| 0
| 0.005231
| 0.196408
| 2,617
| 62
| 245
| 42.209677
| 0.833096
| 0.115781
| 0
| 0.125
| 0
| 0.020833
| 0.286027
| 0.045992
| 0
| 0
| 0
| 0
| 0
| 1
| 0.104167
| false
| 0.041667
| 0.104167
| 0.020833
| 0.270833
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6dabb2d9a3beda1b3745a3582f1367443e6ae076
| 4,626
|
py
|
Python
|
src/data_prep_uci.py
|
akumesi48/hyper-genetic
|
6e1ec16b31bb2259d4a325e08779d5668750a635
|
[
"MIT"
] | null | null | null |
src/data_prep_uci.py
|
akumesi48/hyper-genetic
|
6e1ec16b31bb2259d4a325e08779d5668750a635
|
[
"MIT"
] | null | null | null |
src/data_prep_uci.py
|
akumesi48/hyper-genetic
|
6e1ec16b31bb2259d4a325e08779d5668750a635
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold, KFold
def cv_index(n_fold, feature, label):
skf = KFold(n_fold, shuffle=True, random_state=7840)
index_list = []
for i, j in skf.split(feature, label):
index_list.append((i, j))
return index_list
def data_selector(data_name):
if data_name == 'cmc':
return x_train_cmc, x_test_cmc, y_train_cmc, y_test_cmc, index_cmc
elif data_name == 'setap':
return x_train_setap, x_test_setap, y_train_setap, y_test_setap, index_setap
elif data_name == 'audit':
return x_train_audit, x_test_audit, y_train_audit, y_test_audit, index_audit
elif data_name == 'titanic':
return x_train_tt, x_test_tt, y_train_tt, y_test_tt, index_tt
elif data_name == 'dota':
return x_train_dota, x_test_dota, y_train_dota, y_test_dota, index_dota
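# Hedged usage sketch (assumes the module-level splits below have been built):
#   x_train, x_test, y_train, y_test, folds = data_selector('cmc')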
no_of_folds = 3
# Dataset cmc
data_cmc = pd.read_csv("data/cmc.data", header=None)
data_cmc[9] = np.where(data_cmc[9] == 1, 0, 1)
data_cmc_label = data_cmc.pop(9)
x_train_cmc, x_test_cmc, y_train_cmc, y_test_cmc = train_test_split(data_cmc,
data_cmc_label,
random_state=7840,
test_size=0.25)
index_cmc = cv_index(no_of_folds, x_train_cmc, y_train_cmc)
# Dataset SETAP
data_setap = pd.read_csv("data/setap.csv")
data_setap['label'] = np.where(data_setap['label'] == 'A', 0, 1)
data_setap_label = data_setap.pop('label')
x_train_setap, x_test_setap, y_train_setap, y_test_setap = train_test_split(data_setap,
data_setap_label,
random_state=7840,
test_size=0.25)
index_setap = cv_index(no_of_folds, x_train_setap, y_train_setap)
# Dataset audit
data_audit = pd.read_csv("data/audit_risk.csv")
data_audit['LOCATION_ID'] = pd.to_numeric(data_audit['LOCATION_ID'], errors='coerce')
data_audit['LOCATION_ID'] = data_audit['LOCATION_ID'].fillna(data_audit['LOCATION_ID'].mode()[0])
data_audit['Money_Value'] = data_audit['Money_Value'].fillna(data_audit['Money_Value'].mean())
data_audit_label = data_audit.pop('Risk')
x_train_audit, x_test_audit, y_train_audit, y_test_audit = train_test_split(data_audit,
data_audit_label,
random_state=7840,
test_size=0.25,)
index_audit = cv_index(no_of_folds, x_train_audit, y_train_audit)
# Dataset titanic
data_tt = pd.read_csv("data/titanic_train.csv")
data_tt['Age'] = data_tt['Age'].fillna(data_tt['Age'].mean())
data_tt['Embarked'] = data_tt['Embarked'].fillna(data_tt['Embarked'].mode()[0])
data_tt['Pclass'] = data_tt['Pclass'].apply(str)
for col in data_tt.dtypes[data_tt.dtypes == 'object'].index:
for_dummy = data_tt.pop(col)
data_tt = pd.concat([data_tt, pd.get_dummies(for_dummy, prefix=col)], axis=1)
data_tt_labels = data_tt.pop('Survived')
x_train_tt, x_test_tt, y_train_tt, y_test_tt = train_test_split(data_tt,
data_tt_labels,
random_state=7840,
test_size=0.25)
index_tt = cv_index(no_of_folds, x_train_tt, y_train_tt)
# Dataset DotA2
x_train_dota = pd.read_csv("data/dota2Train.csv", header=None)
x_train_dota[0] = np.where(x_train_dota[0] == 1, 1, 0)
y_train_dota = x_train_dota.pop(0)
x_test_dota = pd.read_csv("data/dota2Test.csv", header=None)
x_test_dota[0] = np.where(x_test_dota[0] == 1, 1, 0)
y_test_dota = x_test_dota.pop(0)
index_dota = cv_index(no_of_folds, x_train_dota, y_train_dota)
# for train_index, test_index in skf.split(x_train, y_train):
# train_feature, test_feature = x_train.iloc[train_index], x_train.iloc[test_index]
# train_label, test_label = y_train.iloc[train_index], y_train.iloc[test_index]
# print(train_gbm(train_feature, train_label, test_feature, test_label))
# skf = KFold(5)
# train_index = []
# test_index = []
# index_list = []
# for i, j in skf.split(x_train_cmc, y_train_cmc):
# index_list.append((i, j))
| 47.690722
| 97
| 0.61284
| 676
| 4,626
| 3.786982
| 0.147929
| 0.051563
| 0.023438
| 0.030469
| 0.317578
| 0.250391
| 0.231641
| 0.188672
| 0.157813
| 0.115625
| 0
| 0.018007
| 0.279723
| 4,626
| 96
| 98
| 48.1875
| 0.7503
| 0.111327
| 0
| 0.102941
| 0
| 0
| 0.073748
| 0.005372
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.058824
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6dad2a388f6001f81a7db3ec98fd61ac8d241fec
| 935
|
py
|
Python
|
ncdoublescrape/__main__.py
|
hancush/ncdoublescrape
|
ea64277514ddff04e634bb464dd5ea6bf05226ae
|
[
"BSD-3-Clause"
] | null | null | null |
ncdoublescrape/__main__.py
|
hancush/ncdoublescrape
|
ea64277514ddff04e634bb464dd5ea6bf05226ae
|
[
"BSD-3-Clause"
] | null | null | null |
ncdoublescrape/__main__.py
|
hancush/ncdoublescrape
|
ea64277514ddff04e634bb464dd5ea6bf05226ae
|
[
"BSD-3-Clause"
] | null | null | null |
import argparse
import importlib
import logging
import sys
logger = logging.getLogger()
COMMAND_MODULES = (
'ncdoublescrape.scrape',
)
def main():
parser = argparse.ArgumentParser('ncds', description='A janky NCAA scraper')
subparsers = parser.add_subparsers(dest='subcommand')
subcommands = {}
for module in COMMAND_MODULES:
try:
command = importlib.import_module(module).Command(subparsers)
except ImportError as e:
logger.error('exception "%s" prevented loading of %s module', e, module)
else:
subcommands[command.name] = command
args, other = parser.parse_known_args()
if not args.subcommand:
parser.print_help()
else:
try:
subcommands[args.subcommand].handle(args, other)
except Exception as e:
logger.critical(str(e))
sys.exit(1)
if __name__ == '__main__':
main()
| 23.375
| 84
| 0.637433
| 102
| 935
| 5.696078
| 0.539216
| 0.051635
| 0.030981
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001447
| 0.260963
| 935
| 40
| 85
| 23.375
| 0.839363
| 0
| 0
| 0.133333
| 0
| 0
| 0.115385
| 0.022436
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0.2
| 0
| 0.233333
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6daefd5d0ce54a9298c815543a7ce9308e437d8f
| 5,037
|
py
|
Python
|
src/lambda_function.py
|
sd11/react-aws-s3-rekognition
|
f37ea4ef0242f8c650380ab0c060e0bddb4ff432
|
[
"Unlicense"
] | null | null | null |
src/lambda_function.py
|
sd11/react-aws-s3-rekognition
|
f37ea4ef0242f8c650380ab0c060e0bddb4ff432
|
[
"Unlicense"
] | null | null | null |
src/lambda_function.py
|
sd11/react-aws-s3-rekognition
|
f37ea4ef0242f8c650380ab0c060e0bddb4ff432
|
[
"Unlicense"
] | null | null | null |
from __future__ import print_function
import boto3
from decimal import Decimal
import json
import urllib
from botocore.vendored import requests
print('Loading function')
rekognition = boto3.client('rekognition')
s3 = boto3.resource("s3")
# --------------- Helper Functions to call Rekognition APIs ------------------
def detect_faces(bucket, key):
response = rekognition.detect_faces(Image={"S3Object": {"Bucket": bucket, "Name": key}})
return response
def detect_labels(bucket, key):
response = rekognition.detect_labels(Image={"S3Object": {"Bucket": bucket, "Name": key}})
# Sample code to write response to DynamoDB table 'MyTable' with 'PK' as Primary Key.
# Note: role used for executing this Lambda function should have write access to the table.
#table = boto3.resource('dynamodb').Table('MyTable')
#labels = [{'Confidence': Decimal(str(label_prediction['Confidence'])), 'Name': label_prediction['Name']} for label_prediction in response['Labels']]
#table.put_item(Item={'PK': key, 'Labels': labels})
return response
def index_faces(bucket, key):
# Note: Collection has to be created upfront. Use CreateCollection API to create a collecion.
#rekognition.create_collection(CollectionId='BLUEPRINT_COLLECTION')
response = rekognition.index_faces(Image={"S3Object": {"Bucket": bucket, "Name": key}}, CollectionId="BLUEPRINT_COLLECTION")
return response
def find_recipes(ingredients):
payload = {
'ingredients': ingredients,
'number': 2,
'ranking': '1',
'apiKey': '8bce36150747496f98b2c81860545458'
}
recipes = requests.get('https://api.spoonacular.com/recipes/findByIngredients', params=payload)
return recipes.json()
# --------------- Main handler ------------------
def lambda_handler(event, context):
'''Demonstrates S3 trigger that uses
Rekognition APIs to detect faces, labels and index faces in S3 Object.
'''
print("Received event: " + json.dumps(event, indent=2))
# Get the object from the event
bucket = event['Records'][0]['s3']['bucket']['name']
key = urllib.unquote_plus(event['Records'][0]['s3']['object']['key'].encode('utf8'))  # Python 2 API; on Python 3 this would be urllib.parse.unquote_plus
try:
# Calls rekognition DetectFaces API to detect faces in S3 object
#response = detect_faces(bucket, key)
# Calls rekognition DetectLabels API to detect labels in S3 object
response = detect_labels(bucket, key)
# Calls rekognition IndexFaces API to detect faces in S3 object and index faces into specified collection
#response = index_faces(bucket, key)
# Print response to console.
print('Detected labels for ' + key)
print()
ingredients = ""
for label in response['Labels']:
encodedName = label['Name'].encode('utf-8')
if len(ingredients):
ingredients = ingredients + ", " + encodedName
else:
ingredients = encodedName
# print ("Label: " + label['Name'])
# print ("Confidence: " + str(label['Confidence']))
# print ("Instances:")
#for instance in label['Instances']:
# print (" Bounding box")
# print (" Top: " + str(instance['BoundingBox']['Top']))
# print (" Left: " + str(instance['BoundingBox']['Left']))
# print (" Width: " + str(instance['BoundingBox']['Width']))
# print (" Height: " + str(instance['BoundingBox']['Height']))
# print (" Confidence: " + str(instance['Confidence']))
# print()
# print ("Parents:")
# for parent in label['Parents']:
# print (" " + parent['Name'])
# print ("----------")
# print ()
recipes = find_recipes(ingredients)
#return recipes
#print(ingredients)
#print(recipes)
recipeResponse = []
for recipe in recipes:
recipeIngredients = []
for usedIngredient in recipe['usedIngredients']:
recipeIngredients.append({
'name': usedIngredient['name'],
'servingSize': str(usedIngredient['amount']) + ' ' + usedIngredient['unit']
})
recipeResponse.append({
'title': recipe['title'],
'image': recipe['image'],
'ingredients': recipeIngredients
})
responseData = { 'ingredients': ingredients, 'recipes': recipeResponse }
if responseData:
print(s3)
obj = s3.Object('groupneuralnetworkrecipebucket1','recipes.json')
obj.put(Body=json.dumps(responseData))
return responseData
except Exception as e:
print(e)
print("Error processing object {} from bucket {}. ".format(key, bucket) +
"Make sure your object and bucket exist and your bucket is in the same region as this function.")
raise e
| 37.036765
| 153
| 0.596188
| 499
| 5,037
| 5.967936
| 0.328657
| 0.022163
| 0.018805
| 0.025185
| 0.08865
| 0.053056
| 0.04231
| 0
| 0
| 0
| 0
| 0.014065
| 0.266031
| 5,037
| 135
| 154
| 37.311111
| 0.791453
| 0.364304
| 0
| 0.074627
| 0
| 0
| 0.186172
| 0.019981
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074627
| false
| 0
| 0.089552
| 0
| 0.238806
| 0.119403
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6dafe2f9f75624f4e6bac8d36ece57fee0f45bc2
| 3,836
|
py
|
Python
|
main_server.py
|
tenzindayoe/example
|
edbee1fd1b6cbb55f6b02f82f972c3da46a4dd89
|
[
"MIT"
] | null | null | null |
main_server.py
|
tenzindayoe/example
|
edbee1fd1b6cbb55f6b02f82f972c3da46a4dd89
|
[
"MIT"
] | null | null | null |
main_server.py
|
tenzindayoe/example
|
edbee1fd1b6cbb55f6b02f82f972c3da46a4dd89
|
[
"MIT"
] | null | null | null |
import socket
import sqlite3
def Main():
port = 4000
s = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
print(socket.gethostbyaddr(socket.gethostname()))
s.bind((socket.gethostname(), port))  # gethostbyaddr() requires an address argument; bind to this host's name instead
print("Server started")
while True:
reg_db = sqlite3.connect("reg_db.db")
reg_db_cur = reg_db.cursor()
unp_db = sqlite3.connect("unp_db.db")
unp_db_cur = unp_db.cursor()
'''first quadrant table : f_q_table
second quadrant table : s_q_table
third quadrant table : t_q_table
fourth quadrant table : fourth_q_table'''
data, addr = s.recvfrom(1024)
data = data.decode('utf-8')
data = eval(data)
if data[0] == "acc":
data.remove(data[0])
username = data[0].lower()
email = data[1].lower()
email = email.replace("@","_")
password = data[2].lower()
success = None
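# NOTE: building SQL via string concatenation as below is vulnerable to SQL
# injection; sqlite3 supports parameterized queries with '?' placeholders.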
unp_db_cur.execute("SELECT email FROM accounts where email = "+"'"+email+"'")
res = unp_db_cur.fetchall()
if len(res) == 0:
try:
unp_db_cur.execute("INSERT INTO accounts values("+"'"+email+"',"+" '"+username+"', "+"'"+password+"')")
unp_db.commit()
unp_db_cur.execute("SELECT * FROM accounts")
print(unp_db_cur.fetchall())
s.sendto("ACCOUNT CREATED".encode("utf-8"),addr)
except:
print("DATABASE REGISTRATION ERROR")
elif len(res) == 1:
print("email associated with account already exist")
elif data[1] == "ch_acc_cred":
email_ch = data[0][0].replace("@","_")
name_ch = data[0][1]
pw_ch = data[0][2]
unp_db_cur.execute("SELECT * FROM accounts WHERE email ="+"'"+email_ch+"'")
ch_res = unp_db_cur.fetchall()
if ch_res == [] :
print("no accounts associated with the email")
s.sendto("error_lia".encode("utf-8"), addr)
elif ch_res[0][2] == pw_ch:
print("Login success")
s.sendto(str(["verified",ch_res[0][1]]).encode("utf-8"),addr)
elif data[0] == "tr_reg":
lat = data[1][0]
lon = data[1][1]
if lon >= 0 and lat >= 0:
print("INSERT INTO f_q_table values('"+str(lon)+"', '"+str(lat)+")")
reg_db_cur.execute("INSERT INTO f_q_table values('"+str(lon)+"', '"+str(lat)+"')")
else:
print("message from : ", addr)
print("message : ",data)
temp = data
# the data from the client is of the form [[lo,lo*],[la,la*]]
lo = temp[0][0]
lo_p = temp[0][1]
la = temp[1][0]
la_p = temp[1][1]
text = ""
if lo >= 0 and la >=0:
text = "FIRST QUADRANT"
print(text)
reg_db_cur.execute("SELECT DISTINCT * FROM f_q_table WHERE lon > %s AND lon < %s AND lat > %s AND lat < %s"%(lo,lo_p,la,la_p))
coordinates = reg_db_cur.fetchall()
coordinates = str(coordinates)
print("sending : ", coordinates)
s.sendto(coordinates.encode('utf-8'), addr)
elif lo_p < 0 and la >= 0:
text = "SECOND QUADRANT"
print(text)
elif lo_p < 0 and la_p < 0:
text = "THIRD QUADRANT"
print(text)
elif lo >= 0 and la_p < 0:
text = "FOURTH QUADRANT"
print(text)
else:
text = "unidentified... but will find tomorrow"
if __name__ == "__main__":
Main()
| 29.282443
| 142
| 0.489572
| 462
| 3,836
| 3.896104
| 0.251082
| 0.033333
| 0.035556
| 0.033333
| 0.236111
| 0.123333
| 0.075556
| 0.038889
| 0.038889
| 0.038889
| 0
| 0.023583
| 0.369917
| 3,836
| 130
| 143
| 29.507692
| 0.721142
| 0.015381
| 0
| 0.071429
| 0
| 0.011905
| 0.186544
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011905
| false
| 0.02381
| 0.02381
| 0
| 0.035714
| 0.178571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6db40aaa8cd6b1e406d9fcd14ef25634a3d1ada0
| 2,274
|
py
|
Python
|
db/division.py
|
leaffan/pynhldb
|
a0cdd56f0c21b866bfe62aa10b3dd205a9ec0ff1
|
[
"MIT"
] | 3
|
2017-02-01T15:37:23.000Z
|
2017-08-31T20:41:46.000Z
|
db/division.py
|
leaffan/pynhldb
|
a0cdd56f0c21b866bfe62aa10b3dd205a9ec0ff1
|
[
"MIT"
] | 41
|
2017-09-13T02:13:21.000Z
|
2018-11-07T03:29:39.000Z
|
db/division.py
|
leaffan/pynhldb
|
a0cdd56f0c21b866bfe62aa10b3dd205a9ec0ff1
|
[
"MIT"
] | 1
|
2017-03-09T14:58:39.000Z
|
2017-03-09T14:58:39.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
from .common import Base, session_scope
from .team import Team
class Division(Base):
__tablename__ = 'divisions'
__autoload__ = True
HUMAN_READABLE = 'division'
def __init__(self, name, season, teams, conference=None):
self.division_name = name
self.season = season
self.teams = list()
for t in teams:
self.teams.append(t.team_id)
self.conference = conference
@classmethod
def get_divisions_and_teams(cls, season=None):
if season is None:
now = datetime.datetime.now()
season = now.year - 1 if now.month <= 6 else now.year
division_dict = dict()
with session_scope() as session:
divs = session.query(Division).filter(
Division.season == season).all()
for d in divs:
teams = list()
for team_id in d.teams:
team = Team.find_by_id(team_id)
teams.append(team)
division_dict[d.division_name] = teams
return division_dict
def __str__(self):
if self.conference:
base_information_str = "%s Division (%s Conference) %s:" % (
self.division_name, self.conference, self.season)
else:
base_information_str = "%s Division %s:" % (
self.division_name, self.season)
team_information_str = "\n\t+ ".join(
sorted([Team.find_by_id(team_id).name for team_id in self.teams]))
return "\n\t+ ".join((base_information_str, team_information_str))
def __gt__(self, other):
if None in (self.conference, other.conference):
return self.division_name > other.division_name
else:
return (
self.conference, self.division_name
) > (
other.conference, other.division_name)
def __lt__(self, other):
if None in (self.conference, other.conference):
return self.division_name < other.division_name
else:
return (
self.conference, self.division_name
) < (
other.conference, other.division_name)
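# A hedged usage sketch (hypothetical season value):
#   for name, teams in Division.get_divisions_and_teams(season=2016).items():
#       print(name, [t.name for t in teams])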
| 30.32
| 78
| 0.575198
| 259
| 2,274
| 4.803089
| 0.266409
| 0.115756
| 0.090032
| 0.067524
| 0.359325
| 0.326367
| 0.252412
| 0.252412
| 0.252412
| 0.252412
| 0
| 0.001963
| 0.328056
| 2,274
| 74
| 79
| 30.72973
| 0.812173
| 0.01847
| 0
| 0.196429
| 0
| 0
| 0.033632
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089286
| false
| 0
| 0.053571
| 0
| 0.321429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6db421aa4d6562b0233d7c1b87bdca893ef23405
| 1,338
|
py
|
Python
|
tests/test_config.py
|
regro/runthis-server
|
26d6551560bd6ddabdb9b360ecd327460dfd779a
|
[
"BSD-3-Clause"
] | 2
|
2019-11-13T23:19:13.000Z
|
2019-11-15T21:01:51.000Z
|
tests/test_config.py
|
regro/runthis-server
|
26d6551560bd6ddabdb9b360ecd327460dfd779a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_config.py
|
regro/runthis-server
|
26d6551560bd6ddabdb9b360ecd327460dfd779a
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
from ruamel import yaml
from runthis.server.config import Config, get_config_from_yaml
@pytest.fixture
def config_obj():
return Config(
tty_server="ttyd",
command="xonsh",
docker=False,
docker_image="myimage",
keyfile="/path/to/privkey.pem",
)
def test_fields(config_obj):
assert config_obj.tty_server == "ttyd"
assert config_obj.command == "xonsh"
assert not config_obj.docker
assert config_obj.docker_image == "myimage"
assert config_obj.keyfile == "/path/to/privkey.pem"
DICT_CONFIG_CONTENT = dict(
tty_server="tty-server",
command="myshell",
docker=True,
docker_image="img",
host="8.8.8.8",
certfile="/path/to/cert.pem",
)
@pytest.mark.parametrize(
"config_content", [DICT_CONFIG_CONTENT, {"runthis": DICT_CONFIG_CONTENT}]
)
def test_populate_config_by_yaml(config_content, tmpdir):
yaml_path = tmpdir.join("TEST.yaml")
yaml_path.write(yaml.dump(config_content))
config_obj = get_config_from_yaml(str(yaml_path))
assert config_obj.tty_server == "tty-server"
assert config_obj.command == "myshell"
assert config_obj.docker
assert config_obj.docker_image == "img"
assert config_obj.host == "8.8.8.8"
assert config_obj.certfile == "/path/to/cert.pem"
| 26.76
| 77
| 0.684604
| 180
| 1,338
| 4.838889
| 0.266667
| 0.144661
| 0.172216
| 0.072331
| 0.268657
| 0.094145
| 0.094145
| 0.094145
| 0
| 0
| 0
| 0.007401
| 0.192078
| 1,338
| 49
| 78
| 27.306122
| 0.798335
| 0
| 0
| 0
| 0
| 0
| 0.142003
| 0
| 0
| 0
| 0
| 0
| 0.282051
| 1
| 0.076923
| false
| 0
| 0.076923
| 0.025641
| 0.179487
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6db4535a87906f105783cb4e0f22471fe703aef0
| 290
|
py
|
Python
|
src/constants.py
|
halilyaman/UlasimdaYapayZekaYarismasi
|
e9f024454470ad6f40653583f3d7f24cdd4f4fd9
|
[
"MIT"
] | 1
|
2021-09-23T22:34:12.000Z
|
2021-09-23T22:34:12.000Z
|
src/constants.py
|
halilyaman/UlasimdaYapayZekaYarismasi
|
e9f024454470ad6f40653583f3d7f24cdd4f4fd9
|
[
"MIT"
] | null | null | null |
src/constants.py
|
halilyaman/UlasimdaYapayZekaYarismasi
|
e9f024454470ad6f40653583f3d7f24cdd4f4fd9
|
[
"MIT"
] | null | null | null |
# DISCLAIMER TO CONTEST TEAMS : DO NOT MAKE CHANGES IN THIS FILE.
classes = {
"Tasit": 0,
"Insan": 1,
"UAP": 2,
"UAI": 3,
}
landing_statuses = {
"Inilebilir": "1",
"Inilemez": "0",
"Inis Alani Degil": "-1"
}
base_url = "http://192.168.1.10:3000"
| 19.333333
| 67
| 0.527586
| 38
| 290
| 3.973684
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097087
| 0.289655
| 290
| 14
| 68
| 20.714286
| 0.635922
| 0.22069
| 0
| 0
| 0
| 0
| 0.371429
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6db6887534c339671321ea2ad6c3cae9fe067123
| 2,345
|
py
|
Python
|
setup.py
|
Ezbob/dgDynamic
|
394de1c138c1517c4cdfead879c43db189752d92
|
[
"MIT"
] | null | null | null |
setup.py
|
Ezbob/dgDynamic
|
394de1c138c1517c4cdfead879c43db189752d92
|
[
"MIT"
] | null | null | null |
setup.py
|
Ezbob/dgDynamic
|
394de1c138c1517c4cdfead879c43db189752d92
|
[
"MIT"
] | null | null | null |
from setuptools import setup
from setuptools.command.install import install
import os
import sys
import atexit
if __name__ == '__main__':
package_name = 'dgdynamic'
excludes = [
'__pycache__',
'StochKit'
]
extras = [
'default_config.ini',
'spim.ocaml',
'stochkit.tar.gz'
]
def find_package_dirs(package_dir_path, excludes):
return [path for path, dirs, files in os.walk(package_dir_path)
if not any(exclude_name in path for exclude_name in excludes)]
def get_requirements():
with open('requirements.txt', mode="r") as file:
return list(map(str.strip, file))
package_dirs = find_package_dirs(package_name, excludes)
internal_python_paths = {
".".join(p_name.split('/')): p_name
for p_name in package_dirs
}
class CustomInstall(install):
def run(self):
def _post_install():
def find_module_path():
for p in sys.path:
if os.path.isdir(p) and package_name in os.listdir(p):
return os.path.join(p, package_name)
install_path = find_module_path()
stochkit2_plugin_path = os.path.join(install_path, "plugins/stochastic/stochkit2/")
stochkit2_tar_path = os.path.join(stochkit2_plugin_path, "stochkit.tar.gz")
stochkit2_installer_path = os.path.join(stochkit2_plugin_path, "StochKit")
os.system("tar xvzf " + stochkit2_tar_path + " -C " + stochkit2_plugin_path)
os.system("cd " + stochkit2_installer_path + " && ./install.sh")
atexit.register(_post_install)
install.run(self)
setup(
cmdclass={'install': CustomInstall},
name=package_name,
version='1.0.0',
description='Dynamic simulation library for the MØD graph transformation framework',
url='https://bitbucket.org/Ezben/dgdynamic',
author='Anders Busch',
author_email='andersbusch@gmail.com',
license='MIT',
package_dir=internal_python_paths,
include_package_data=True,
package_data={'': extras},
packages=list(internal_python_paths.keys()),
install_requires=get_requirements(),
zip_safe=False
)
| 33.028169
| 99
| 0.612367
| 269
| 2,345
| 5.063197
| 0.420074
| 0.040382
| 0.029369
| 0.030837
| 0.060206
| 0.060206
| 0.060206
| 0.060206
| 0
| 0
| 0
| 0.007147
| 0.284009
| 2,345
| 70
| 100
| 33.5
| 0.80405
| 0
| 0
| 0
| 0
| 0
| 0.143345
| 0.021331
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086207
| false
| 0
| 0.086207
| 0.017241
| 0.241379
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6db73ff6f5328cc7f6a179960f5ceb876377c833
| 5,543
|
py
|
Python
|
Robotics/src/otonomsesli.py
|
ahmetakif/Voice-Controlled-Raspberry-Pi-Robot
|
00dcc15dfbb7441d6403fb0467b2144e8750cc0c
|
[
"Apache-2.0"
] | 5
|
2019-08-21T08:08:27.000Z
|
2021-06-14T06:56:50.000Z
|
Robotics/src/otonomsesli.py
|
ahmetakif/Voice-Controlled-Raspberry-Pi-Robot
|
00dcc15dfbb7441d6403fb0467b2144e8750cc0c
|
[
"Apache-2.0"
] | null | null | null |
Robotics/src/otonomsesli.py
|
ahmetakif/Voice-Controlled-Raspberry-Pi-Robot
|
00dcc15dfbb7441d6403fb0467b2144e8750cc0c
|
[
"Apache-2.0"
] | 2
|
2019-08-21T08:16:58.000Z
|
2021-04-07T11:56:11.000Z
|
import os
import RPi.GPIO as gpio
import time
from mesafe import distance
motorhizi = 1
aci2 = aci3 = aci4 = 6
aci = 5.5
in4 = 26
in3 = 4
in2 = 12
in1 = 8
solled = 9
sagled = 11
gpio.setwarnings(False)
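# Translation note for the Turkish identifiers below: ileri=forward,
# geri=backward, sol=left, sag=right, dur=stop, adim=step, ses=sound,
# cizgi=line (line following), mesafe=distance, motorhizi=motor speed, aci=angle.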
def init():
gpio.setwarnings(False)
gpio.setmode(gpio.BCM)
gpio.setup(22,gpio.OUT)
gpio.setup(27,gpio.OUT)
gpio.setup(17,gpio.OUT)
gpio.setup(18,gpio.OUT)
gpio.setup(in4,gpio.OUT)
gpio.setup(in3,gpio.OUT)
gpio.setup(in2,gpio.OUT)
gpio.setup(in1,gpio.OUT)
gpio.setup(21,gpio.OUT)
gpio.setup(solled,gpio.OUT)
gpio.setup(sagled,gpio.OUT)
gpio.setup(23,gpio.IN)
gpio.setup(24,gpio.IN)
gpio.output(22,0)
gpio.output(18,0)
gpio.output(17,0)
gpio.output(27,0)
gpio.output(in4,0)
gpio.output(in3,0)
gpio.output(in2,0)
gpio.output(in1,0)
gpio.output(21,0)
gpio.output(solled,0)
gpio.output(sagled,0)
def ileri(tf,ff):
init()
gpio.output(17,0)
gpio.output(22,0)
ip = gpio.PWM(27,50)
ip2 = gpio.PWM(18,50)
ip.start(ff)
ip2.start(ff)
tf = float(tf)
tf = tf / motorhizi
time.sleep(tf)
gpio.cleanup()
def geri(tf,ff):
init()
gpio.output(18,0)
gpio.output(27,0)
gp = gpio.PWM(22,50)
gp2 = gpio.PWM(17,50)
gp.start(ff)
gp2.start(ff)
tf = float(tf)
tf = tf / motorhizi
time.sleep(tf)
gpio.cleanup()
def sol(tf,ff):
init()
gpio.output(17,0)
gpio.output(27,0)
sp = gpio.PWM(22,50)
sp2 = gpio.PWM(18,50)
sp.start(ff)
sp2.start(ff)
tf = float(tf)
tf = tf / motorhizi
time.sleep(tf)
gpio.cleanup()
def sag(tf,ff):
init()
gpio.output(18,0)
gpio.output(22,0)
sap = gpio.PWM(27,50)
sap2 = gpio.PWM(17,50)
sap.start(ff)
sap2.start(ff)
tf = float(tf)
tf = tf / motorhizi
time.sleep(tf)
gpio.cleanup()
def dur():
init()
gpio.output(22,0)
gpio.output(17,0)
gpio.output(18,0)
gpio.output(27,0)
gpio.cleanup()
def adim1(tf,y):
init()
if (y == 1): # sol
gpio.output(in1,1)
gpio.output(in2,0)
gpio.output(in3,0)
gpio.output(in4,0)
if (y == 0): # sag
gpio.output(in1,0)
gpio.output(in2,0)
gpio.output(in3,0)
gpio.output(in4,1)
time.sleep(tf)
gpio.cleanup()
def adim2(tf,y):
init()
if (y == 1): # sol
gpio.output(in1,0)
gpio.output(in2,1)
gpio.output(in3,0)
gpio.output(in4,0)
if (y == 0): # sag
gpio.output(in1,0)
gpio.output(in2,0)
gpio.output(in3,1)
gpio.output(in4,0)
time.sleep(tf)
gpio.cleanup()
def adim3(tf,y):
init()
if (y == 1): # sol
gpio.output(in1,0)
gpio.output(in2,0)
gpio.output(in3,1)
gpio.output(in4,0)
if (y == 0): # sag
gpio.output(in1,0)
gpio.output(in2,1)
gpio.output(in3,0)
gpio.output(in4,0)
time.sleep(tf)
gpio.cleanup()
def adim4(tf,y):
init()
if (y == 1): # sol
gpio.output(in1,0)
gpio.output(in2,0)
gpio.output(in3,0)
gpio.output(in4,1)
if (y == 0): # sag
gpio.output(in1,1)
gpio.output(in2,0)
gpio.output(in3,0)
gpio.output(in4,0)
time.sleep(tf)
gpio.cleanup()
def stepper(tf,ff,yf):
ff = float(ff)
ff = ff / 1000
if (yf == 0): # sag
for i in range(0,tf):
adim1(ff,0)
adim2(ff,0)
adim3(ff,0)
adim4(ff,0)
if (yf == 1): # sol
for i in range(0,tf):
adim1(ff,1)
adim2(ff,1)
adim3(ff,1)
adim4(ff,1)
def servo(tf):
gpio.setmode(gpio.BCM)
gpio.setup(5,gpio.OUT)
p = gpio.PWM(5,50)
p.start(5.5)
p.ChangeDutyCycle(tf)
time.sleep(0.7)
gpio.cleanup()
def servo2(tf):
gpio.setmode(gpio.BCM)
gpio.setup(6,gpio.OUT)
p2 = gpio.PWM(6,50)
p2.start(6)
p2.ChangeDutyCycle(tf)
time.sleep(0.7)
gpio.cleanup()
def servo3(tf):
gpio.setmode(gpio.BCM)
gpio.setup(20,gpio.OUT)
p3 = gpio.PWM(20,50)
p3.start(6)
p3.ChangeDutyCycle(tf)
time.sleep(0.7)
gpio.cleanup()
def servo4(tf):
gpio.setmode(gpio.BCM)
gpio.setup(16,gpio.OUT)
p3 = gpio.PWM(16,50)
p3.start(6)
p3.ChangeDutyCycle(tf)
time.sleep(0.7)
gpio.cleanup()
def ses(tf,ff):
init()
sp = gpio.PWM(21,ff)
sp.start(70)
time.sleep(tf)
gpio.cleanup()
def led(ff,tf,sf):
init()
sp = gpio.PWM(solled,500)
sap = gpio.PWM(sagled,500)
if (sf == 0):
sp.start(ff)
time.sleep(tf)
gpio.cleanup()
elif (sf == 1):
sap.start(ff)
time.sleep(tf)
gpio.cleanup()
elif (sf == 2):
sp.start(ff)
sap.start(ff)
time.sleep(tf)
gpio.cleanup()
print (" ")
print ("otonomgorev yazilimi google speech api sesli komutlari ile robotun otonom hareket etmesi için yazilmistir")
print (" ")
time.sleep(1)
def cizgi(lf):
os.system("aplay -vv /home/pi/Robotics/PortalTurret/Turret_active.wav &")
for _ in range(1, lf):
init()
if (gpio.input(23) == 0 and gpio.input(24) == 0):
ileri(0.1,100)
elif (gpio.input(23) == 1 and gpio.input(24) == 0):
sol(0.1,100)
elif (gpio.input(23) == 0 and gpio.input(24) == 1):  # pin 23 is a line-sensor input (pin 22 is configured as an output)
sag(0.1,100)
else:
dur()
dur()
aci2 = aci3 = aci4 = 6
aci = 5.5
| 20.378676
| 115
| 0.541043
| 874
| 5,543
| 3.430206
| 0.149886
| 0.183456
| 0.128419
| 0.06004
| 0.605404
| 0.574383
| 0.521348
| 0.445964
| 0.400934
| 0.308205
| 0
| 0.086834
| 0.295688
| 5,543
| 271
| 116
| 20.453875
| 0.681096
| 0.007036
| 0
| 0.576132
| 0
| 0
| 0.033679
| 0.008738
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.016461
| 0
| 0.090535
| 0.012346
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6db82d6e06cc11fb7b83d45d0342ef4c6c52a44f
| 6,093
|
py
|
Python
|
experiments/livecell/validate_model.py
|
JonasHell/torch-em
|
2e008e0cd2f0ea6681581374fce4f9f47b986d55
|
[
"MIT"
] | 13
|
2021-03-09T21:31:09.000Z
|
2022-03-21T05:24:26.000Z
|
experiments/livecell/validate_model.py
|
JonasHell/torch-em
|
2e008e0cd2f0ea6681581374fce4f9f47b986d55
|
[
"MIT"
] | 16
|
2021-03-02T23:19:34.000Z
|
2022-03-25T19:43:41.000Z
|
experiments/livecell/validate_model.py
|
JonasHell/torch-em
|
2e008e0cd2f0ea6681581374fce4f9f47b986d55
|
[
"MIT"
] | 4
|
2021-05-18T08:29:33.000Z
|
2022-02-11T12:16:20.000Z
|
import argparse
import os
from glob import glob
from pathlib import Path
import imageio
import h5py
import pandas as pd
from bioimageio.core import load_resource_description
from bioimageio.core.prediction import predict_with_padding
from bioimageio.core.prediction_pipeline import create_prediction_pipeline
from elf.evaluation import mean_average_precision
from torch_em.util.segmentation import (connected_components_with_boundaries,
mutex_watershed, size_filter)
from tqdm import tqdm
from xarray import DataArray
try:
import napari
except ImportError:
napari = None
def segment(prediction_pipeline, path, out_path, view, offsets=None, strides=None, min_seg_size=50):
image = imageio.imread(path)
assert image.ndim == 2
input_ = DataArray(image[None, None], dims=prediction_pipeline.input_specs[0].axes)
padding = {"x": 16, "y": 16}
prediction = predict_with_padding(prediction_pipeline, input_, padding)[0][0]
foreground, prediction = prediction[0], prediction[1:]
if offsets is None:
assert prediction.shape[0] == 1, f"{prediction.shape}"
prediction = prediction[0]
assert foreground.shape == prediction.shape
seg = connected_components_with_boundaries(foreground, prediction)
else:
assert len(offsets) == prediction.shape[0]
mask = foreground > 0.5
seg = mutex_watershed(prediction, offsets, mask=mask, strides=strides)
seg = size_filter(seg, min_seg_size, hmap=prediction, with_background=True)
# implement more postprocessing?
# - merge noisy foreground prediction (that only have very weak boundary predictions) into the background
if out_path is not None:
with h5py.File(out_path, "w") as f:
f.create_dataset("prediction", data=prediction, compression="gzip")
f.create_dataset("foreground", data=foreground, compression="gzip")
f.create_dataset("segmentation", data=seg, compression="gzip")
if view:
assert napari is not None
v = napari.Viewer()
v.add_image(image)
v.add_image(foreground)
v.add_image(prediction)
v.add_labels(seg)
napari.run()
return seg
def validate(seg, gt_path):
gt = imageio.imread(gt_path)
assert gt.shape == seg.shape
map_, scores = mean_average_precision(seg, gt, return_aps=True)
# map, iou50, iou75, iou90
return [map_, scores[0], scores[5], scores[-1]]
def run_prediction(model_path, input_files, target_files, output_folder, view, min_seg_size, device):
model = load_resource_description(model_path)
offsets, strides = None, None
if "mws" in model.config:
offsets = model.config["mws"]["offsets"]
strides = [4, 4]
if output_folder is not None:
os.makedirs(output_folder, exist_ok=True)
validation_results = []
devices = None if device is None else [device]
with create_prediction_pipeline(bioimageio_model=model, devices=devices) as pp:
for in_path, target_path in tqdm(zip(input_files, target_files), total=len(input_files)):
fname = str(Path(in_path).stem)
out_path = None if output_folder is None else os.path.join(output_folder, f"{fname}.h5")
seg = segment(pp, in_path, out_path, view,
offsets=offsets, strides=strides, min_seg_size=min_seg_size)
if target_path:
val = validate(seg, target_path)
validation_results.append([fname] + val)
if validation_results:
cols = ["name", "mAP", "IoU50", "IoU75", "IoU90"]
validation_results = pd.DataFrame(validation_results, columns=cols)
print("Validation results averaged over all", len(input_files), "images:")
print(validation_results[cols[1:]].mean(axis=0))
return validation_results
# TODO needs update for live-cell data structure
def _load_data(input_folder, ext):
input_data = glob(os.path.join(input_folder, "images", f"*.{ext}"))
input_data.sort()
if os.path.exists(os.path.join(input_folder, "masks")):
input_target = glob(os.path.join(input_folder, "masks", f"*.{ext}"))
input_target.sort()
else:
input_target = [None] * len(input_data)
assert len(input_data) == len(input_target)
return input_data, input_target
def main():
parser = argparse.ArgumentParser(
"Run prediction and segmentation with a bioimagie.io model and save or validate the results."
"If 'output_folder' is passed, the results will be saved as hdf5 files with keys:"
"prediction: the affinity or boundary predictions"
"foreground: the foreground predictions"
"segmentation: the nucleus instance segmentation"
)
parser.add_argument("-m", "--model", required=True, help="Path to the bioimage.io model.")
parser.add_argument("-i", "--input_folder", required=True,
help="The root input folder with subfolders 'images' and (optionally) 'masks'")
parser.add_argument("--ext", default="tif", help="The file extension of the input files.")
parser.add_argument("-o", "--output_folder", default=None, help="Where to save the results.")
parser.add_argument("-v", "--view", default=0,
help="Whether to show segmentation results (needs napari).", type=int)
parser.add_argument("--min_seg_size", default=25, type=int)
parser.add_argument("--device", default=None, help="The device used for inference.")
parser.add_argument("--save_path", "-s", default=None, help="Where to save a csv with the validation results.")
args = parser.parse_args()
input_files, target_files = _load_data(args.input_folder, args.ext)
res = run_prediction(args.model, input_files, target_files, args.output_folder,
view=bool(args.view), min_seg_size=args.min_seg_size, device=args.device)
if args.save_path is not None:
assert res is not None
res.to_csv(args.save_path, index=False)
if __name__ == "__main__":
main()
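# Example invocation (hypothetical paths):
#   python validate_model.py -m model.zip -i data/livecell -o results -s scores.csv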
| 42.02069
| 115
| 0.683243
| 803
| 6,093
| 5.002491
| 0.265255
| 0.015683
| 0.019915
| 0.020911
| 0.070451
| 0.033109
| 0
| 0
| 0
| 0
| 0
| 0.009129
| 0.208928
| 6,093
| 144
| 116
| 42.3125
| 0.824274
| 0.033809
| 0
| 0.017094
| 0
| 0
| 0.150119
| 0
| 0
| 0
| 0
| 0.006944
| 0.068376
| 1
| 0.042735
| false
| 0.008547
| 0.136752
| 0
| 0.213675
| 0.017094
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6dbfcff192ece7ab414ec98a8d97692a952d7bdd
| 7,506
|
py
|
Python
|
main.py
|
qmn/pershing
|
ec3cc87d9bfb7ca0cf1da1449d695c36df548309
|
[
"BSD-2-Clause"
] | 16
|
2017-05-20T05:30:59.000Z
|
2022-02-08T05:41:52.000Z
|
main.py
|
qmn/pershing
|
ec3cc87d9bfb7ca0cf1da1449d695c36df548309
|
[
"BSD-2-Clause"
] | null | null | null |
main.py
|
qmn/pershing
|
ec3cc87d9bfb7ca0cf1da1449d695c36df548309
|
[
"BSD-2-Clause"
] | 3
|
2016-09-18T15:55:37.000Z
|
2020-12-27T15:36:09.000Z
|
#!/usr/bin/env python2.7
from __future__ import print_function
import json
import sys
import numpy as np
import os.path
import time
from math import ceil
import argparse
import nbt
from util import blif, cell, cell_library
from placer import placer
from router import router, extractor, minetime
from vis import png
from inserter import inserter
def underline_print(s):
print()
print(s)
print("-" * len(s))
if __name__ == "__main__":
placements = None
dimensions = None
routing = None
# Create parser
parser = argparse.ArgumentParser(description="An automatic place-and-route tool for Minecraft Redstone circuits.")
parser.add_argument('blif', metavar="<input BLIF file>")
parser.add_argument('-o', '--output_dir', metavar="output_directory", dest="output_dir")
parser.add_argument('--library', metavar="library_file", dest="library_file", default="lib/quan.yaml")
parser.add_argument('--placements', metavar="placements_file", dest="placements_file", help="Use this placements file rather than creating one. Must be previously generated from the supplied BLIF.")
parser.add_argument('--routings', metavar="routings_file", dest="routings_file", help="Use this routings file rather than creating one. Must be previously generated from the supplied BLIF and placements JSON.")
parser.add_argument('--world', metavar="world_folder", dest="world_folder", help="Place the extracted redstone circuit layout in this world.")
args = parser.parse_args()
# Load placements, if provided
if args.placements_file is not None:
print("Using placements file:", args.placements_file)
with open(args.placements_file) as f:
placements = json.loads(f.readline())
dimensions = json.loads(f.readline())
# Load library file
with open(args.library_file) as f:
cell_lib = cell_library.load(f)
# Load BLIF
with open(args.blif) as f:
blif = blif.load(f)
# Result directory
if args.output_dir is not None:
if os.path.isabs(args.output_dir):
result_dir = args.output_dir
else:
result_dir = os.path.abspath(args.output_dir)
else:
result_base, _ = os.path.splitext(args.blif)
result_dir = os.path.abspath(result_base + "_result")
# Try making the directory
if not os.path.exists(result_dir):
try:
os.mkdir(result_dir)
print("Made result dir: ", result_dir)
except OSError as e:
print(e)
pregenerated_cells = cell_library.pregenerate_cells(cell_lib, pad=1)
placer = placer.GridPlacer(blif, pregenerated_cells, grid_spacing=5)
start_time = time.time()
print("Started", time.strftime("%c", time.localtime(start_time)))
# PLACE =============================================================
if placements is None:
underline_print("Performing Initial Placement...")
placements, dimensions = placer.initial_placement()
score = placer.score(placements, dimensions)
print("Initial Placement Penalty:", score)
underline_print("Doing Placement...")
# Place cells
T_0 = 250
iterations = 2000
new_placements = placer.simulated_annealing_placement(placements, dimensions, T_0, iterations)
placements, dimensions = placer.shrink(new_placements)
# Place pins and resize
placements += placer.place_pins(dimensions)
placements, dimensions = placer.shrink(placements)
# print(new_placements)
print("Placed", len(placements), "cells")
with open(os.path.join(result_dir, "placements.json"), "w") as f:
json.dump(placements, f)
f.write("\n")
json.dump(dimensions, f)
# Visualize this layout
layout = placer.placement_to_layout(dimensions, placements)
png.layout_to_png(layout, filename_base=os.path.join(result_dir, "composite"))
print("Dimensions:", dimensions)
# ROUTE =============================================================
underline_print("Doing Routing...")
placements, dimensions = placer.shrink(placements)
layout = placer.placement_to_layout(dimensions, placements)
router = router.Router(blif, pregenerated_cells)
# Load routings, if provided
if args.routings_file is not None:
print("Using routings file:", args.routings_file)
with open(args.routings_file) as f:
routing = router.deserialize_routing(f)
if routing is None:
blocks, data = layout
print("Doing initial routing...")
routing = router.initial_routing(placements, blocks.shape)
print("done.")
routing = router.re_route(routing, layout)
# Preserve routing
with open(os.path.join(result_dir, "routing.json"), "w") as f:
router.serialize_routing(routing, dimensions, f)
print("Routed", len(routing), "nets")
# EXTRACT ===========================================================
underline_print("Doing Extraction...")
extractor = extractor.Extractor(blif, pregenerated_cells)
extracted_routing = extractor.extract_routing(routing)
extracted_layout = extractor.extract_layout(extracted_routing, layout)
with open(os.path.join(result_dir, "extraction.json"), "w") as f:
blocks, data = extracted_layout
json.dump(blocks.tolist(), f)
json.dump(data.tolist(), f)
print("Wrote extraction to extraction.json")
# VISUALIZE =========================================================
underline_print("Doing Visualization...")
# Get the pins
pins = placer.locate_circuit_pins(placements)
# png.nets_to_png(layout, routing)
png_fn = os.path.join(result_dir, "layout.png")
png.layout_to_composite(extracted_layout, pins=pins).save(png_fn)
print("Image written to ", png_fn)
# MINETIME =========================================================
underline_print("Doing Timing Analysis with MineTime...")
mt = minetime.MineTime()
path_delays = mt.compute_combinational_delay(placements, extracted_routing, cell_lib)
print("Path delays:")
for path_delay, path in sorted(path_delays, key=lambda x: x[0], reverse=True):
print(path_delay, " ", " -> ".join(path))
print()
crit_delay, crit_path = max(path_delays, key=lambda x: x[0])
print("Critical path delay: {} ticks".format(crit_delay))
print("Minimum period: {:.2f} s".format(crit_delay * 0.05))
print("Maximum frequency: {:.4f} Hz".format(1./(crit_delay * 0.05)))
underline_print("Design Statistics")
blocks, _ = layout
print("Layout size: {} x {} x {}".format(blocks.shape[0], blocks.shape[1], blocks.shape[2]))
print(" Blocks placed: {}".format(sum(blocks.flat != 0)))
print()
print("Total nets: {}".format(len(extracted_routing)))
print(" Segments routed: {}".format(sum(len(net["segments"]) for net in extracted_routing.itervalues())))
print()
end_time = time.time()
print("Finished", time.strftime("%c", time.localtime(end_time)), "(took", ceil(end_time - start_time), "s)")
# INSERTION ========================================================
if args.world_folder is not None:
underline_print("Inserting Design into Minecraft World...")
world = nbt.world.WorldFolder(args.world_folder)
inserter.insert_extracted_layout(world, extracted_layout, offset=(4, 0, 0))
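# --- Hedged input sketch (not part of the original file) --------------------
# A minimal BLIF netlist to point this flow at. The .names cover below
# defines a 2-input XOR; whether that gate maps onto a cell in the default
# lib/quan.yaml library is an assumption.
#
#   .model xor2
#   .inputs a b
#   .outputs y
#   .names a b y
#   10 1
#   01 1
#   .end
#
# Invocation sketch (positional/flag names from the parser above):
#   python2.7 main.py xor2.blif -o xor2_result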
| 37.158416
| 214
| 0.641886
| 898
| 7,506
| 5.208241
| 0.25167
| 0.023092
| 0.021809
| 0.017105
| 0.142185
| 0.087022
| 0.077186
| 0.029506
| 0.029506
| 0.029506
| 0
| 0.005149
| 0.197842
| 7,506
| 201
| 215
| 37.343284
| 0.771633
| 0.094591
| 0
| 0.075188
| 0
| 0.007519
| 0.185535
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007519
| false
| 0
| 0.105263
| 0
| 0.112782
| 0.293233
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6dc096d4b45dd4acd7b5d28912a696afdc093628
| 296
|
py
|
Python
|
logtest.py
|
jonathanstrong/log-viewer
|
83374de21ce807709217e3fffa87b75265b3edd6
|
[
"MIT"
] | 1
|
2017-03-09T01:18:06.000Z
|
2017-03-09T01:18:06.000Z
|
logtest.py
|
jonathanstrong/log-viewer
|
83374de21ce807709217e3fffa87b75265b3edd6
|
[
"MIT"
] | null | null | null |
logtest.py
|
jonathanstrong/log-viewer
|
83374de21ce807709217e3fffa87b75265b3edd6
|
[
"MIT"
] | null | null | null |
import logging
import logging.handlers
import time
logger = logging.getLogger(__name__)
handler = logging.handlers.SocketHandler('localhost', 9033)
stream = logging.StreamHandler()
logger.addHandler(handler)
logger.addHandler(stream)
while True:
logger.warning('ping')
time.sleep(.001)
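# --- Hedged companion sketch (not part of logtest.py) -----------------------
# SocketHandler ships each LogRecord as a 4-byte big-endian length prefix
# followed by a pickled dict of the record's attributes; a minimal receiver
# for the sender above might look like this (port 9033 is taken from the
# script; the rest is an assumption about how log-viewer consumes the
# stream). NB: unpickling is only safe from trusted senders.
import logging
import pickle
import socketserver
import struct

class LogRecordHandler(socketserver.StreamRequestHandler):
    def handle(self):
        while True:
            header = self.rfile.read(4)            # length prefix
            if len(header) < 4:
                break                              # sender disconnected
            size, = struct.unpack('>L', header)
            payload = self.rfile.read(size)
            record = logging.makeLogRecord(pickle.loads(payload))
            print(record.levelname, record.getMessage())

# socketserver.TCPServer(('localhost', 9033), LogRecordHandler).serve_forever()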
| 22.769231
| 59
| 0.780405
| 34
| 296
| 6.676471
| 0.588235
| 0.114537
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026515
| 0.108108
| 296
| 12
| 60
| 24.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0.043919
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.272727
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6dc161a661b51dac6f78e1e2949123e1dfec52e8
| 5,032
|
py
|
Python
|
text-builder.py
|
guskma/text-builder
|
e9de6178ef5ce71a6f022b7932d40a906200578e
|
[
"MIT"
] | null | null | null |
text-builder.py
|
guskma/text-builder
|
e9de6178ef5ce71a6f022b7932d40a906200578e
|
[
"MIT"
] | null | null | null |
text-builder.py
|
guskma/text-builder
|
e9de6178ef5ce71a6f022b7932d40a906200578e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from argparse import ArgumentParser
from collections import OrderedDict
import jinja2
import csv
import sys
import os.path
import re
def store_keyval(src_dict, key, val):
if key is None:
return val
if type(src_dict) is not OrderedDict:
src_dict = OrderedDict()
matched = re.match(r'^([^\.\[\]]+?)(?:\[([\d]+|@?)\])?(?:\.(.+))?$', key)
if matched is None:
print(f'Invalid key name: {key}')
return src_dict
key_name = matched.group(1)
key_index_str = matched.group(2)
key_dict = matched.group(3)
is_array = key_index_str is not None
is_dict = key_dict is not None
key_exists = key_name in src_dict.keys()
if is_array and not key_exists:
src_dict[key_name] = [None]
elif is_dict and not key_exists:
src_dict[key_name] = OrderedDict()
if is_array:
if key_index_str == '@':
key_index = len(src_dict[key_name]) - 1
elif not key_index_str and src_dict[key_name][0] is None:
key_index = 0
elif not key_index_str:
key_index = len(src_dict[key_name])
else:
key_index = int(key_index_str)
key_len = len(src_dict[key_name])
if key_len < key_index + 1:
src_dict[key_name].extend([None] * (key_index - key_len + 1))
src_dict[key_name][key_index] = store_keyval(src_dict[key_name][key_index], key_dict, val)
elif is_dict:
src_dict[key_name] = store_keyval(src_dict[key_name], key_dict, val)
else:
src_dict[key_name] = val
return src_dict
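# --- Hedged usage sketch for store_keyval (not in the original file) --------
# The bracket/dot mini-syntax builds nested OrderedDicts and lists; the keys
# and values below are made up:
#   d = store_keyval(None, 'host', 'web1')
#     -> OrderedDict([('host', 'web1')])
#   d = store_keyval(d, 'disks[].name', 'sda')
#     -> d['disks'] == [OrderedDict([('name', 'sda')])]
#   d = store_keyval(d, 'disks[@].size', '100G')
#     -> '@' targets the last list element, so d['disks'][0] gains 'size'
#   d = store_keyval(d, 'disks[2].name', 'sdc')
#     -> the list is padded with None up to index 2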
def build_templates(args):
if args.DEBUG:
print('* === text-builder execute. ===')
templateLoader = jinja2.FileSystemLoader(searchpath='.', encoding=args.ENCODING)
    templateEnv = jinja2.Environment(loader=templateLoader)
templateEnv.trim_blocks = True
newline = args.NEWLINE.replace(r'\r', "\r").replace(r'\n', "\n")
if args.DEBUG:
sys.stdout.write('* Loading INVENTORY file ... ')
f = open(args.INVENTORY, 'rt', encoding=args.ENCODING, newline=newline)
if args.DEBUG:
print('Done.')
try:
if args.DEBUG:
print('* Loading header.')
reader = list(csv.reader(f))
header = reader.pop(0)
header_cols = len(header)
parsed_files = 0
for row in reader:
if args.DEBUG:
sys.stdout.write(f'* Building row({parsed_files + 2}): ')
dict_row = OrderedDict()
cols = len(row)
for i in range(cols if cols > header_cols else header_cols):
if header_cols <= i:
continue
elif cols <= i:
col = ""
else:
col = row[i]
dict_row = store_keyval(dict_row, header[i], col)
if args.DEBUG:
print(dict_row)
template = templateEnv.get_template(args.TEMPLATE)
outputText = template.render(dict_row)
output_dir = args.OUTPUTS_DIR
if 'output_dir' in dict_row and dict_row['output_dir'].strip() != '':
output_dir = f"{output_dir}/{dict_row['output_dir'].strip()}"
os.makedirs(output_dir, exist_ok=True)
filename = dict_row['filename'] if 'filename' in dict_row else f"parsed_{parsed_files}.txt"
output_filename = f"{output_dir}/{filename}"
with open(output_filename, 'w', newline=newline, encoding=args.ENCODING) as output_file:
output_file.write(outputText)
print("wrote file: %s" % output_filename)
parsed_files += 1
print(f"\nDone. output {parsed_files} files in \"{output_dir}\" directory.")
finally:
f.close()
def cmd_options():
usage = f"text-builder <TEMPLATE> <INVENTORY> [-ehno]"
argparser = ArgumentParser(usage=usage)
argparser.add_argument(
'TEMPLATE',
type=str,
help='Template text file.')
argparser.add_argument(
'INVENTORY',
type=str,
        help='Parameters CSV file.')
argparser.add_argument(
'-d', '--debug',
dest='DEBUG',
action='store_true',
help='Output debug message.')
argparser.add_argument(
'-e', '--encoding',
type=str,
dest='ENCODING',
default='cp932',
help='Set encoding charset of template and inventory file. (default: "cp932")')
argparser.add_argument(
'-n', '--new-line',
type=str,
dest='NEWLINE',
default="\r\n",
        help='Set newline character code. (default: "\\r\\n")')
argparser.add_argument(
'-o', '--output-path',
type=str,
default='output',
dest='OUTPUTS_DIR',
help='Set output files path.')
args = argparser.parse_args()
return args
if __name__ == "__main__":
args = cmd_options()
build_templates(args)
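# --- Hedged input sketch (file names and contents are made up) ---------------
# inventory.csv:
#   filename,host,disks[].name
#   web1.txt,web1,sda
# template.j2:
#   host {{ host }}{% for d in disks %} disk {{ d.name }}{% endfor %}
# Invocation:
#   python text-builder.py template.j2 inventory.csv -e utf-8
# -> writes output/web1.txt containing "host web1 disk sda"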
| 29.775148
| 103
| 0.584261
| 629
| 5,032
| 4.45787
| 0.235294
| 0.044936
| 0.049929
| 0.064907
| 0.136947
| 0.093795
| 0.066334
| 0.046362
| 0.025678
| 0.025678
| 0
| 0.00615
| 0.289149
| 5,032
| 168
| 104
| 29.952381
| 0.777747
| 0.008347
| 0
| 0.165414
| 0
| 0
| 0.152366
| 0.027666
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022556
| false
| 0
| 0.06015
| 0
| 0.112782
| 0.06015
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6dc1f2e93753dd6196949aaa91494539baeb31b1
| 2,077
|
py
|
Python
|
hard_grasp.py
|
bionicdl-sustech/AmphibiousManipulation
|
397c7dfef6b4dda178a567c36aabfe0f4b05b821
|
[
"MIT"
] | null | null | null |
hard_grasp.py
|
bionicdl-sustech/AmphibiousManipulation
|
397c7dfef6b4dda178a567c36aabfe0f4b05b821
|
[
"MIT"
] | null | null | null |
hard_grasp.py
|
bionicdl-sustech/AmphibiousManipulation
|
397c7dfef6b4dda178a567c36aabfe0f4b05b821
|
[
"MIT"
] | null | null | null |
import yaml
import os
import sys
import time
import numpy as np
import cv2 as cv
from franka.FrankaController import FrankaController
def read_cfg(path):
with open(path, 'r') as stream:
out = yaml.safe_load(stream)
return out
if __name__ == '__main__':
ROOT = os.path.dirname(os.path.abspath(__file__))
sys.path.append(ROOT)
cfg = read_cfg(ROOT + '/config/grasping _colorseg.yaml')
arm = FrankaController(ROOT + '/config/franka.yaml')
# grasping config
initial_pose = cfg['initial_position']
initial_pose[2] -= 0.3
check_position = cfg['check_position']
drop_position = cfg['drop_position']
grasp_pre_offset = cfg['grasp_prepare_offset']
effector_offset = cfg['effector_offset']
check_threshold = cfg['check_threshold']
attmp_num = cfg['attmp_num']
print("Moving to initial position...")
arm.move_p(initial_pose)
print("Moving to initial position... Done")
stored_exception = None
arm.move_p(initial_pose)
current_num = 0
while current_num < attmp_num:
try:
if stored_exception:
break
target_in_base = drop_position.copy()
target_in_base[2] -= 0.37
prepare_pos = [target_in_base[0], target_in_base[1], target_in_base[2] + grasp_pre_offset + effector_offset, 3.14, 0, 0]
arm.move_p(prepare_pos)
arm.gripperOpen()
arm.move_p([target_in_base[0], target_in_base[1], target_in_base[2] + effector_offset, 3.14, 0, 0])
arm.gripperGrasp(width=0.05, force=2)
time.sleep(0.5)
# Move to check position
# arm.move_p(check_position)
arm.move_p(initial_pose)
# Move to drop position and drop object
arm.move_p(drop_position)
arm.gripperOpen()
# Back to initial position
arm.move_p(initial_pose)
current_num += 1
except KeyboardInterrupt:
stored_exception = sys.exc_info()
cv.destroyAllWindows()
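# --- Hedged config sketch (values are made up) -------------------------------
# The keys below are exactly the ones this script reads from the grasping
# YAML; the pose format [x, y, z, rx, ry, rz] is inferred from how move_p is
# called above. Note 'attmp_num' mirrors the (misspelled) key used in code.
#
#   initial_position: [0.4, 0.0, 0.6, 3.14, 0, 0]
#   check_position: [0.4, 0.3, 0.5, 3.14, 0, 0]
#   drop_position: [0.2, -0.3, 0.5, 3.14, 0, 0]
#   grasp_prepare_offset: 0.10
#   effector_offset: 0.12
#   check_threshold: 0.03
#   attmp_num: 5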
| 25.9625
| 132
| 0.629273
| 271
| 2,077
| 4.535055
| 0.328413
| 0.045566
| 0.052075
| 0.052075
| 0.264443
| 0.211554
| 0.189585
| 0.12205
| 0.063466
| 0.063466
| 0
| 0.021136
| 0.271064
| 2,077
| 79
| 133
| 26.291139
| 0.790621
| 0.061627
| 0
| 0.12
| 0
| 0
| 0.115286
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02
| false
| 0
| 0.14
| 0
| 0.18
| 0.04
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6dc710027aba9309fc1e58e6facfd13ec0253ff6
| 731
|
py
|
Python
|
Domain_Knowledge_based/stanfordNER.py
|
Mount428/Hate-Speech-Detection
|
f8644844dda954ebd169aeec54cb4c7361d88a09
|
[
"MIT"
] | null | null | null |
Domain_Knowledge_based/stanfordNER.py
|
Mount428/Hate-Speech-Detection
|
f8644844dda954ebd169aeec54cb4c7361d88a09
|
[
"MIT"
] | null | null | null |
Domain_Knowledge_based/stanfordNER.py
|
Mount428/Hate-Speech-Detection
|
f8644844dda954ebd169aeec54cb4c7361d88a09
|
[
"MIT"
] | null | null | null |
from nltk.tag import StanfordNERTagger
import pandas as pd
from sklearn.metrics import f1_score, confusion_matrix
from loader import Load
train, test = Load('c')
ner = StanfordNERTagger('./stanford-ner-2018-10-16/classifiers/english.all.3class.distsim.crf.ser.gz', './stanford-ner-2018-10-16/stanford-ner.jar')
data = train
data['tweet'] = ner.tag_sents(data['tweet'].str.split(' '))
pred = []
for i, d in data.iterrows():
tweet = d['tweet']
tag = 'IND'
for w in tweet:
if w[1] == 'ORGANIZATION':
tag = 'GRP'
# elif w[1] == 'PEOPLE':
# tag = 'IND'
pred.append(tag)
print(confusion_matrix(data['label'], pred))
print(f1_score(data['label'], pred, average='macro'))
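# --- Hedged note (not in the original file) ----------------------------------
# tag_sents returns one (token, tag) list per tweet, e.g. (made-up data):
#   [[('UN', 'ORGANIZATION'), ('says', 'O'), ...], ...]
# which is why the loop above inspects w[1]. The stock 3-class CRF model tags
# PERSON / LOCATION / ORGANIZATION, so the commented-out 'PEOPLE' branch
# would need to test for 'PERSON' to ever fire.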
| 22.84375
| 148
| 0.641587
| 104
| 731
| 4.461538
| 0.548077
| 0.071121
| 0.064655
| 0.073276
| 0.081897
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035473
| 0.19015
| 731
| 31
| 149
| 23.580645
| 0.748311
| 0.051984
| 0
| 0
| 0
| 0.055556
| 0.24238
| 0.169811
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.222222
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6dc79c5bcc11f79748762758e10ea27d0fe9f70f
| 41,355
|
py
|
Python
|
pyrif/FuXi/Rete/Network.py
|
mpetyx/pyrif
|
2f7ba863030d7337bb39ad502d1e09e26ac950d2
|
[
"MIT"
] | null | null | null |
pyrif/FuXi/Rete/Network.py
|
mpetyx/pyrif
|
2f7ba863030d7337bb39ad502d1e09e26ac950d2
|
[
"MIT"
] | null | null | null |
pyrif/FuXi/Rete/Network.py
|
mpetyx/pyrif
|
2f7ba863030d7337bb39ad502d1e09e26ac950d2
|
[
"MIT"
] | null | null | null |
"""
====================================================================================
A Rete Network Building and 'Evaluation' Implementation for RDFLib Graphs of
Notation 3 rules.
The DLP implementation uses this network to automatically building RETE
decision trees for OWL forms of DLP
Uses Python hashing mechanism to maximize the efficiency of the built
pattern network.
The network :
- compiles an RDFLib N3 rule graph into AlphaNode and BetaNode instances
- takes a fact (or the removal of a fact, perhaps?) and propagates down,
starting from its alpha nodes
- stores inferred triples in provided triple source (an RDFLib graph) or
a temporary IOMemory Graph by default
"""
from itertools import chain
import sys
import time
from pprint import pprint
try:
from functools import reduce
except ImportError:
pass
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
from .BetaNode import (
BetaNode,
LEFT_MEMORY,
RIGHT_MEMORY,
PartialInstantiation,
)
from .AlphaNode import (
AlphaNode,
BuiltInAlphaNode,
ReteToken,
)
from FuXi.Horn import (
ComplementExpansion,
DATALOG_SAFETY_NONE,
DATALOG_SAFETY_STRICT,
DATALOG_SAFETY_LOOSE,
)
from FuXi.Syntax.InfixOWL import Class
from FuXi.Horn.PositiveConditions import (
Exists,
GetUterm,
Or,
SetOperator,
Uniterm,
)
from FuXi.DLP import (
MapDLPtoNetwork,
non_DHL_OWL_Semantics,
)
from FuXi.DLP.ConditionalAxioms import AdditionalRules
from .Util import (
generateTokenSet,
renderNetwork,
xcombine,
)
from rdflib.graph import (
ConjunctiveGraph,
Graph,
ReadOnlyGraphAggregate,
)
from rdflib.namespace import NamespaceManager
from rdflib import (
BNode,
Literal,
Namespace,
RDF,
RDFS,
URIRef,
Variable,
)
from rdflib import py3compat
from rdflib.util import first
from .ReteVocabulary import RETE_NS
from .RuleStore import (
Formula,
N3Builtin,
N3RuleStore,
)
OWL_NS = Namespace("http://www.w3.org/2002/07/owl#")
Any = None
LOG = Namespace("http://www.w3.org/2000/10/swap/log#")
#From itertools recipes
def iteritems(mapping):
return list(zip(iter(mapping.keys()), iter(mapping.values())))
def any(seq, pred=None):
"""Returns True if pred(x) is true for at least one element in the iterable"""
for elem in filter(pred, seq):
return True
return False
class HashablePatternList(object):
"""
    A hashable list of N3 statements which are patterns of a rule. Order is
    disregarded by sorting on the unicode value of the concatenated term
    strings (in both triples and builtin function invocations).
This value is also used for the hash. In this way, patterns with the same terms
but in different order are considered equivalent and share the same Rete nodes
>>> nodes = {}
>>> a = HashablePatternList([(Variable('X'), Literal(1), Literal(2))])
>>> nodes[a] = 1
>>> nodes[HashablePatternList([None]) + a] = 2
>>> b = HashablePatternList([(Variable('Y'), Literal(1), Literal(2))])
>>> b in a #doctest: +SKIP
True
>>> a == b #doctest: +SKIP
True
"""
def __init__(self, items=None, skipBNodes=False):
self.skipBNodes = skipBNodes
if items:
self._l = items
else:
self._l = []
def _hashRulePattern(self, item):
"""
        Generates a unique hash for RDF triples and N3 builtin invocations. The
hash function consists of the hash of the terms concatenated in order
"""
if isinstance(item, tuple):
return reduce(lambda x, y: x + y, [
i for i in item
if not self.skipBNodes or not isinstance(i, BNode)
])
elif isinstance(item, N3Builtin):
return reduce(lambda x, y: x + y, [item.argument, item.result])
def __len__(self):
return len(self._l)
def __getslice__(self, beginIdx, endIdx):
return HashablePatternList(self._l[beginIdx:endIdx])
def __hash__(self):
if self._l:
_concatPattern = [pattern and self._hashRulePattern(pattern) or "None" for pattern in self._l]
#nulify the impact of order in patterns
_concatPattern.sort()
return hash(reduce(lambda x, y: x + y, _concatPattern))
else:
return hash(None)
def __add__(self, other):
assert isinstance(other, HashablePatternList), other
return HashablePatternList(self._l + other._l)
def __repr__(self):
return repr(self._l)
def extend(self, other):
assert isinstance(other, HashablePatternList), other
self._l.extend(other._l)
def append(self, other):
self._l.append(other)
def __iter__(self):
return iter(self._l)
def __eq__(self, other):
return hash(self) == hash(other)
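# --- Hedged usage sketch (not part of the original module) -------------------
# The order-insensitive hash is what lets rules share Rete nodes; the two
# pattern lists below differ only in order and resolve to the same node key:
#   p1 = HashablePatternList([(Variable('S'), RDF.type, OWL_NS.Class),
#                             (Variable('S'), RDFS.label, Variable('L'))])
#   p2 = HashablePatternList([(Variable('S'), RDFS.label, Variable('L')),
#                             (Variable('S'), RDF.type, OWL_NS.Class)])
#   assert hash(p1) == hash(p2) and p1 == p2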
def _mulPatternWithSubstitutions(tokens, consequent, termNode):
"""
Takes a set of tokens and a pattern and returns an iterator over consequent
triples, created by applying all the variable substitutions in the given tokens against the pattern
>>> aNode = AlphaNode((Variable('S'), Variable('P'), Variable('O')))
>>> token1 = ReteToken((URIRef('urn:uuid:alpha'), OWL_NS.differentFrom, URIRef('urn:uuid:beta')))
>>> token2 = ReteToken((URIRef('urn:uuid:beta'), OWL_NS.differentFrom, URIRef('urn:uuid:alpha')))
>>> token1 = token1.bindVariables(aNode)
>>> token2 = token2.bindVariables(aNode)
>>> inst = PartialInstantiation([token1, token2])
"""
# success = False
for binding in tokens.bindings:
tripleVals = []
# if any(consequent,
# lambda term:isinstance(term, Variable) and term not in binding):# not mismatchedTerms:
# return
# else:
for term in consequent:
if isinstance(term, (Variable, BNode)) and term in binding:
#try:
tripleVals.append(binding[term])
#except:
# pass
else:
tripleVals.append(term)
yield tuple(tripleVals), binding
class InferredGoal(Exception):
def __init__(self, msg):
self.msg = msg
def __repr__(self):
return "Goal inferred.: %" % self.msg
class ReteNetwork:
"""
    The Rete network. The constructor takes an N3 rule graph, an identifier (a
    BNode by default), an initial set of Rete tokens that serve as the 'working
    memory', and an rdflib Graph to add inferred triples to (by forward-chaining
    via the Rete evaluation algorithm).
"""
def __init__(self, ruleStore, name=None,
initialWorkingMemory=None,
inferredTarget=None,
                 nsMap=None,
graphVizOutFile=None,
dontFinalize=False,
goal=None):
self.leanCheck = {}
self.goal = goal
        nsMap = {} if nsMap is None else nsMap  # avoid mutating a shared default dict
        self.nsMap = nsMap
self.name = name and name or BNode()
self.nodes = {}
self.alphaPatternHash = {}
self.ruleSet = set()
for alphaPattern in xcombine(('1', '0'), ('1', '0'), ('1', '0')):
self.alphaPatternHash[tuple(alphaPattern)] = {}
if inferredTarget is None:
self.inferredFacts = Graph()
namespace_manager = NamespaceManager(self.inferredFacts)
for k, v in list(nsMap.items()):
namespace_manager.bind(k, v)
self.inferredFacts.namespace_manager = namespace_manager
else:
self.inferredFacts = inferredTarget
self.workingMemory = initialWorkingMemory and initialWorkingMemory or set()
self.proofTracers = {}
self.terminalNodes = set()
self.instantiations = {}
start = time.time()
self.ruleStore = ruleStore
self.justifications = {}
self.dischargedBindings = {}
if not dontFinalize:
self.ruleStore._finalize()
self.filteredFacts = Graph()
#'Universal truths' for a rule set are rules where the LHS is empty.
# Rather than automatically adding them to the working set, alpha nodes are 'notified'
# of them, so they can be checked for while performing inter element tests.
self.universalTruths = []
from FuXi.Horn.HornRules import Ruleset
self.rules = set()
self.negRules = set()
for rule in Ruleset(n3Rules=self.ruleStore.rules, nsMapping=self.nsMap):
import warnings
warnings.warn(
"Rules in a network should be built *after* construction via " +
" self.buildNetworkClause(HornFromN3(n3graph)) for instance",
DeprecationWarning, 2)
self.buildNetworkFromClause(rule)
self.alphaNodes = [node for node in list(self.nodes.values()) if isinstance(node, AlphaNode)]
self.alphaBuiltInNodes = [node for node in list(self.nodes.values()) if isinstance(node, BuiltInAlphaNode)]
self._setupDefaultRules()
if initialWorkingMemory:
start = time.time()
self.feedFactsToAdd(initialWorkingMemory)
print("Time to calculate closure on working memory: %s m seconds" % (
(time.time() - start) * 1000))
if graphVizOutFile:
print("Writing out RETE network to ", graphVizOutFile)
renderNetwork(self, nsMap=nsMap).write(graphVizOutFile)
def getNsBindings(self, nsMgr):
for prefix, Uri in nsMgr.namespaces():
self.nsMap[prefix] = Uri
def buildFilterNetworkFromClause(self, rule):
lhs = BNode()
rhs = BNode()
builtins = []
for term in rule.formula.body:
if isinstance(term, N3Builtin):
#We want to move builtins to the 'end' of the body
#so they only apply to the terminal nodes of
#the corresponding network
builtins.append(term)
else:
self.ruleStore.formulae.setdefault(lhs, Formula(lhs)).append(term.toRDFTuple())
for builtin in builtins:
self.ruleStore.formulae.setdefault(lhs, Formula(lhs)).append(builtin.toRDFTuple())
nonEmptyHead = False
for term in rule.formula.head:
nonEmptyHead = True
assert not hasattr(term, 'next')
assert isinstance(term, Uniterm)
self.ruleStore.formulae.setdefault(rhs, Formula(rhs)).append(term.toRDFTuple())
assert nonEmptyHead, "Filters must conclude something."
self.ruleStore.rules.append((self.ruleStore.formulae[lhs], self.ruleStore.formulae[rhs]))
tNode = self.buildNetwork(iter(self.ruleStore.formulae[lhs]),
iter(self.ruleStore.formulae[rhs]),
rule,
aFilter=True)
self.alphaNodes = [node for node in list(self.nodes.values()) if isinstance(node, AlphaNode)]
self.rules.add(rule)
return tNode
def buildNetworkFromClause(self, rule):
lhs = BNode()
rhs = BNode()
builtins = []
for term in rule.formula.body:
if isinstance(term, N3Builtin):
#We want to move builtins to the 'end' of the body
#so they only apply to the terminal nodes of
#the corresponding network
builtins.append(term)
else:
self.ruleStore.formulae.setdefault(lhs, Formula(lhs)).append(term.toRDFTuple())
for builtin in builtins:
self.ruleStore.formulae.setdefault(lhs, Formula(lhs)).append(builtin.toRDFTuple())
nonEmptyHead = False
for term in rule.formula.head:
nonEmptyHead = True
assert not hasattr(term, 'next')
assert isinstance(term, Uniterm)
self.ruleStore.formulae.setdefault(rhs, Formula(rhs)).append(term.toRDFTuple())
if not nonEmptyHead:
import warnings
warnings.warn(
"Integrity constraints (rules with empty heads) are not supported: %s" % rule,
SyntaxWarning, 2)
return
self.ruleStore.rules.append((self.ruleStore.formulae[lhs], self.ruleStore.formulae[rhs]))
tNode = self.buildNetwork(iter(self.ruleStore.formulae[lhs]),
iter(self.ruleStore.formulae[rhs]),
rule)
self.alphaNodes = [node for node in list(self.nodes.values()) if isinstance(node, AlphaNode)]
self.rules.add(rule)
return tNode
def calculateStratifiedModel(self, database):
"""
Stratified Negation Semantics for DLP using SPARQL to handle the negation
"""
if not self.negRules:
return
from FuXi.DLP.Negation import StratifiedSPARQL
# from FuXi.Rete.Magic import PrettyPrintRule
import copy
noNegFacts = 0
for i in self.negRules:
#Evaluate the Graph pattern, and instanciate the head of the rule with
#the solutions returned
nsMapping = dict([(v, k) for k, v in list(self.nsMap.items())])
sel, compiler = StratifiedSPARQL(i, nsMapping)
query = compiler.compile(sel)
i.stratifiedQuery = query
vars = sel.projection
unionClosureG = self.closureGraph(database)
for rt in unionClosureG.query(query):
solutions = {}
if isinstance(rt, tuple):
solutions.update(dict([(vars[idx], i) for idx, i in enumerate(rt)]))
else:
solutions[vars[0]] = rt
i.solutions = solutions
head = copy.deepcopy(i.formula.head)
head.ground(solutions)
fact = head.toRDFTuple()
self.inferredFacts.add(fact)
self.feedFactsToAdd(generateTokenSet([fact]))
noNegFacts += 1
#Now we need to clear assertions that cross the individual, concept, relation divide
# toRemove = []
for s, p, o in self.inferredFacts.triples((None, RDF.type, None)):
if s in unionClosureG.predicates() or\
s in [_s for _s, _p, _o in
unionClosureG.triples_choices(
(None,
RDF.type,
[OWL_NS.Class,
OWL_NS.Restriction]))]:
self.inferredFacts.remove((s, p, o))
return noNegFacts
def setupDescriptionLogicProgramming(self,
owlN3Graph,
expanded=[],
addPDSemantics=True,
classifyTBox=False,
constructNetwork=True,
derivedPreds=[],
ignoreNegativeStratus=False,
safety=DATALOG_SAFETY_NONE):
rt = [rule
for rule in MapDLPtoNetwork(self,
owlN3Graph,
complementExpansions=expanded,
constructNetwork=constructNetwork,
derivedPreds=derivedPreds,
ignoreNegativeStratus=ignoreNegativeStratus,
safety=safety)]
if ignoreNegativeStratus:
rules, negRules = rt
rules = set(rules)
self.negRules = set(negRules)
else:
rules = set(rt)
if constructNetwork:
self.rules.update(rules)
additionalRules = set(AdditionalRules(owlN3Graph))
if addPDSemantics:
from FuXi.Horn.HornRules import HornFromN3
additionalRules.update(HornFromN3(StringIO(non_DHL_OWL_Semantics)))
if constructNetwork:
for rule in additionalRules:
self.buildNetwork(iter(rule.formula.body),
iter(rule.formula.head),
rule)
self.rules.add(rule)
else:
rules.update(additionalRules)
if constructNetwork:
rules = self.rules
# noRules = len(rules)
if classifyTBox:
self.feedFactsToAdd(generateTokenSet(owlN3Graph))
# print("##### DLP rules fired against OWL/RDF TBOX", self)
return rules
def reportSize(self, tokenSizeThreshold=1200, stream=sys.stdout):
for pattern, node in list(self.nodes.items()):
if isinstance(node, BetaNode):
for largeMem in [i for i in iter(node.memories.values()) if len(i) > tokenSizeThreshold]:
if largeMem:
print("Large apha node memory extent: ")
pprint(pattern)
print(len(largeMem))
def reportConflictSet(self, closureSummary=False, stream=sys.stdout):
tNodeOrder = [tNode
for tNode in self.terminalNodes
if self.instantiations.get(tNode, 0)]
tNodeOrder.sort(key=lambda x: self.instantiations[x], reverse=True)
for termNode in tNodeOrder:
print(termNode)
print("\t", termNode.clauseRepresentation())
print("\t\t%s instantiations" % self.instantiations[termNode])
if closureSummary:
print(self.inferredFacts.serialize(
destination=stream, format='turtle'))
def parseN3Logic(self, src):
store = N3RuleStore(additionalBuiltins=self.ruleStore.filters)
Graph(store).parse(src, format='n3')
store._finalize()
assert len(store.rules), "There are no rules passed in."
from FuXi.Horn.HornRules import Ruleset
for rule in Ruleset(n3Rules=store.rules,
nsMapping=self.nsMap):
self.buildNetwork(iter(rule.formula.body),
iter(rule.formula.head),
rule)
self.rules.add(rule)
self.alphaNodes = [node for node in list(self.nodes.values()) if isinstance(node, AlphaNode)]
self.alphaBuiltInNodes = [node for node in list(self.nodes.values()) if isinstance(node, BuiltInAlphaNode)]
def __repr__(self):
total = 0
for node in list(self.nodes.values()):
if isinstance(node, BetaNode):
total += len(node.memories[LEFT_MEMORY])
total += len(node.memories[RIGHT_MEMORY])
return "<Network: %s rules, %s nodes, %s tokens in working memory, %s inferred tokens>" % (
len(self.terminalNodes), len(self.nodes), total, len(self.inferredFacts))
def closureGraph(self, sourceGraph, readOnly=True, store=None):
if readOnly:
if store is None and not sourceGraph:
store = Graph().store
store = store is None and sourceGraph.store or store
roGraph = ReadOnlyGraphAggregate([sourceGraph, self.inferredFacts],
store=store)
roGraph.namespace_manager = NamespaceManager(roGraph)
for srcGraph in [sourceGraph, self.inferredFacts]:
for prefix, uri in srcGraph.namespaces():
roGraph.namespace_manager.bind(prefix, uri)
return roGraph
else:
cg = ConjunctiveGraph()
cg += sourceGraph
cg += self.inferredFacts
return cg
def _setupDefaultRules(self):
"""
Checks every alpha node to see if it may match against a 'universal truth' (one w/out a LHS)
"""
for node in list(self.nodes.values()):
if isinstance(node, AlphaNode):
node.checkDefaultRule(self.universalTruths)
def clear(self):
self.nodes = {}
self.alphaPatternHash = {}
self.rules = set()
for alphaPattern in xcombine(('1', '0'), ('1', '0'), ('1', '0')):
self.alphaPatternHash[tuple(alphaPattern)] = {}
self.proofTracers = {}
self.terminalNodes = set()
self.justifications = {}
self._resetinstantiationStats()
self.workingMemory = set()
self.dischargedBindings = {}
def reset(self, newinferredFacts=None):
"Reset the network by emptying the memory associated with all Beta Nodes nodes"
for node in list(self.nodes.values()):
if isinstance(node, BetaNode):
node.memories[LEFT_MEMORY].reset()
node.memories[RIGHT_MEMORY].reset()
self.justifications = {}
self.proofTracers = {}
self.inferredFacts = newinferredFacts if newinferredFacts is not None else Graph()
self.workingMemory = set()
self._resetinstantiationStats()
def fireConsequent(self, tokens, termNode, debug=False):
"""
"In general, a p-node also contains a specifcation of what production it corresponds to | the
name of the production, its right-hand-side actions, etc. A p-node may also contain information
about the names of the variables that occur in the production. Note that variable names
are not mentioned in any of the Rete node data structures we describe in this chapter. This is
intentional |it enables nodes to be shared when two productions have conditions with the same
basic form, but with different variable names."
Takes a set of tokens and the terminal Beta node they came from
and fires the inferred statements using the patterns associated
with the terminal node. Statements that have been previously inferred
or already exist in the working memory are not asserted
"""
if debug:
print("%s from %s" % (tokens, termNode))
# newTokens = []
termNode.instanciatingTokens.add(tokens)
def iterCondition(condition):
if isinstance(condition, Exists):
return condition.formula
return isinstance(condition, SetOperator) and condition or iter([condition])
def extractVariables(term, existential=True):
if isinstance(term, existential and BNode or Variable):
yield term
elif isinstance(term, Uniterm):
for t in term.toRDFTuple():
if isinstance(t, existential and BNode or Variable):
yield t
#replace existentials in the head with new BNodes!
BNodeReplacement = {}
for rule in termNode.rules:
if isinstance(rule.formula.head, Exists):
for bN in rule.formula.head.declare:
if not isinstance(rule.formula.body, Exists) or \
bN not in rule.formula.body.declare:
BNodeReplacement[bN] = BNode()
for rhsTriple in termNode.consequent:
if BNodeReplacement:
rhsTriple = tuple([BNodeReplacement.get(term, term) for term in rhsTriple])
if debug:
if not tokens.bindings:
tokens._generateBindings()
key = tuple([None if isinstance(item, BNode) else item for item in rhsTriple])
override, executeFn = termNode.executeActions.get(key, (None, None))
if override:
#There is an execute action associated with this production
#that is attaced to the given consequent triple and
#is meant to perform all of the production duties
#(bypassing the inference of triples, etc.)
executeFn(termNode, None, tokens, None, debug)
else:
for inferredTriple, binding in _mulPatternWithSubstitutions(tokens, rhsTriple, termNode):
if [term for term in inferredTriple if isinstance(term, Variable)]:
#Unfullfilled bindings (skip non-ground head literals)
if executeFn:
#The indicated execute action is supposed to be triggered
                            #when the indicated RHS triple is inferred
#(even if it is not ground)
executeFn(termNode, inferredTriple, tokens, binding, debug)
continue
# if rhsTriple[1].find('subClassOf_derived')+1:import pdb;pdb.set_trace()
inferredToken = ReteToken(inferredTriple)
self.proofTracers.setdefault(inferredTriple, []).append(binding)
self.justifications.setdefault(inferredTriple, set()).add(termNode)
if termNode.filter and inferredTriple not in self.filteredFacts:
self.filteredFacts.add(inferredTriple)
if inferredTriple not in self.inferredFacts and inferredToken not in self.workingMemory:
# if (rhsTriple == (Variable('A'), RDFS.RDFSNS['subClassOf_derived'], Variable('B'))):
# import pdb;pdb.set_trace()
if debug:
print("Inferred triple: ", inferredTriple, " from ", termNode.clauseRepresentation())
inferredToken.debug = True
self.inferredFacts.add(inferredTriple)
self.addWME(inferredToken)
currIdx = self.instantiations.get(termNode, 0)
currIdx += 1
self.instantiations[termNode] = currIdx
if executeFn:
#The indicated execute action is supposed to be triggered
                                #when the indicated RHS triple is inferred for the
#first time
executeFn(termNode, inferredTriple, tokens, binding, debug)
if self.goal is not None and self.goal in self.inferredFacts:
raise InferredGoal("Proved goal " + repr(self.goal))
else:
if debug:
print("Inferred triple skipped: ", inferredTriple)
if executeFn:
#The indicated execute action is supposed to be triggered
                                #when the indicated RHS triple is inferred for the
#first time
executeFn(termNode, inferredTriple, tokens, binding, debug)
def addWME(self, wme):
"""
        procedure add-wme (w: WME) exhaustive hash table version
let v1, v2, and v3 be the symbols in the three fields of w
alpha-mem = lookup-in-hash-table (v1, v2, v3)
if alpha-mem then alpha-memory-activation (alpha-mem, w)
alpha-mem = lookup-in-hash-table (v1, v2, *)
if alpha-mem then alpha-memory-activation (alpha-mem, w)
alpha-mem = lookup-in-hash-table (v1, *, v3)
if alpha-mem then alpha-memory-activation (alpha-mem, w)
...
alpha-mem = lookup-in-hash-table (*, *, *)
if alpha-mem then alpha-memory-activation (alpha-mem, w)
end
"""
# print(wme.asTuple())
for termComb, termDict in iteritems(self.alphaPatternHash):
for alphaNode in termDict.get(wme.alphaNetworkHash(termComb), []):
# print("\t## Activated AlphaNode ##")
# print("\t\t", termComb, wme.alphaNetworkHash(termComb))
# print("\t\t", alphaNode)
alphaNode.activate(wme.unboundCopy())
def feedFactsToAdd(self, tokenIterator):
"""
Feeds the network an iterator of facts / tokens which are fed to the alpha nodes
which propagate the matching process through the network
"""
for token in tokenIterator:
self.workingMemory.add(token)
# print(token.unboundCopy().bindingDict)
self.addWME(token)
def _findPatterns(self, patternList):
rt = []
for betaNodePattern, alphaNodePatterns in \
[(patternList.__getslice__(0, -i), patternList.__getslice__(-i, len(patternList))) for i in range(1, len(patternList))]:
# [(patternList[:-i], patternList[-i:]) for i in xrange(1, len(patternList))]:
assert isinstance(betaNodePattern, HashablePatternList)
assert isinstance(alphaNodePatterns, HashablePatternList)
if betaNodePattern in self.nodes:
rt.append(betaNodePattern)
rt.extend([HashablePatternList([aPattern]) for aPattern in alphaNodePatterns])
return rt
for alphaNodePattern in patternList:
rt.append(HashablePatternList([alphaNodePattern]))
return rt
def createAlphaNode(self, currentPattern):
"""
"""
if isinstance(currentPattern, N3Builtin):
node = BuiltInAlphaNode(currentPattern)
else:
node = AlphaNode(currentPattern, self.ruleStore.filters)
self.alphaPatternHash[node.alphaNetworkHash()].setdefault(node.alphaNetworkHash(groundTermHash=True), []).append(node)
if not isinstance(node, BuiltInAlphaNode) and node.builtin:
s, p, o = currentPattern
node = BuiltInAlphaNode(N3Builtin(p, self.ruleStore.filters[p](s, o), s, o))
return node
def _resetinstantiationStats(self):
self.instantiations = dict([(tNode, 0) for tNode in self.terminalNodes])
def checkDuplicateRules(self):
checkedClauses = {}
for tNode in self.terminalNodes:
for rule in tNode.rules:
collision = checkedClauses.get(rule.formula)
assert collision is None, "%s collides with %s" % (
tNode, checkedClauses[rule.formula])
checkedClauses.setdefault(tNode.rule.formula, []).append(tNode)
def registerReteAction(self, headTriple, override, executeFn):
"""
Register the given execute function for any rule with the
given head using the override argument to determine whether or
not the action completely handles the firing of the rule.
The signature of the execute action is as follows:
def someExecuteAction(tNode, inferredTriple, token, binding):
.. pass ..
"""
for tNode in self.terminalNodes:
for rule in tNode.rules:
if not isinstance(rule.formula.head, (Exists, Uniterm)):
continue
headTriple = GetUterm(rule.formula.head).toRDFTuple()
headTriple = tuple(
[None if isinstance(item, BNode) else item for item in headTriple])
tNode.executeActions[headTriple] = (override, executeFn)
def buildNetwork(self, lhsIterator, rhsIterator, rule, aFilter=False):
"""
Takes an iterator of triples in the LHS of an N3 rule and an iterator of the RHS and extends
the Rete network, building / reusing Alpha
and Beta nodes along the way (via a dictionary mapping of patterns to the built nodes)
"""
matchedPatterns = HashablePatternList()
attachedPatterns = []
# hasBuiltin = False
LHS = []
while True:
try:
currentPattern = next(lhsIterator) if py3compat.PY3 else lhsIterator.next()
#The LHS isn't done yet, stow away the current pattern
#We need to convert the Uniterm into a triple
if isinstance(currentPattern, Uniterm):
currentPattern = currentPattern.toRDFTuple()
LHS.append(currentPattern)
except StopIteration:
#The LHS is done, need to initiate second pass to recursively build join / beta
#nodes towards a terminal node
#We need to convert the Uniterm into a triple
consequents = [isinstance(fact, Uniterm) and fact.toRDFTuple() or fact for fact in rhsIterator]
if matchedPatterns and matchedPatterns in self.nodes:
attachedPatterns.append(matchedPatterns)
elif matchedPatterns:
rt = self._findPatterns(matchedPatterns)
attachedPatterns.extend(rt)
if len(attachedPatterns) == 1:
node = self.nodes[attachedPatterns[0]]
if isinstance(node, BetaNode):
terminalNode = node
else:
paddedLHSPattern = HashablePatternList([None]) + attachedPatterns[0]
terminalNode = self.nodes.get(paddedLHSPattern)
if terminalNode is None:
#New terminal node
terminalNode = BetaNode(None, node, aPassThru=True)
self.nodes[paddedLHSPattern] = terminalNode
node.connectToBetaNode(terminalNode, RIGHT_MEMORY)
terminalNode.consequent.update(consequents)
terminalNode.rules.add(rule)
terminalNode.antecedent = rule.formula.body
terminalNode.network = self
terminalNode.headAtoms.update(rule.formula.head)
terminalNode.filter = aFilter
self.terminalNodes.add(terminalNode)
else:
moveToEnd = []
# endIdx = len(attachedPatterns) - 1
finalPatternList = []
for idx, pattern in enumerate(attachedPatterns):
assert isinstance(pattern, HashablePatternList), repr(pattern)
currNode = self.nodes[pattern]
if (isinstance(currNode, BuiltInAlphaNode) or
isinstance(currNode, BetaNode) and currNode.fedByBuiltin):
moveToEnd.append(pattern)
else:
finalPatternList.append(pattern)
terminalNode = self.attachBetaNodes(chain(finalPatternList, moveToEnd))
terminalNode.consequent.update(consequents)
terminalNode.rules.add(rule)
terminalNode.antecedent = rule.formula.body
terminalNode.network = self
terminalNode.headAtoms.update(rule.formula.head)
terminalNode.filter = aFilter
self.terminalNodes.add(terminalNode)
self._resetinstantiationStats()
#self.checkDuplicateRules()
return terminalNode
if HashablePatternList([currentPattern]) in self.nodes:
#Current pattern matches an existing alpha node
matchedPatterns.append(currentPattern)
elif matchedPatterns in self.nodes:
#preceding patterns match an existing join/beta node
newNode = self.createAlphaNode(currentPattern)
if len(matchedPatterns) == 1 \
and HashablePatternList([None]) + matchedPatterns in self.nodes:
existingNode = self.nodes[HashablePatternList([None]) + matchedPatterns]
newBetaNode = BetaNode(existingNode, newNode)
self.nodes[HashablePatternList([None]) + \
matchedPatterns + \
HashablePatternList([currentPattern])] = newBetaNode
matchedPatterns = HashablePatternList([None]) + \
matchedPatterns + \
HashablePatternList([currentPattern])
else:
existingNode = self.nodes[matchedPatterns]
newBetaNode = BetaNode(existingNode, newNode)
self.nodes[matchedPatterns + \
HashablePatternList([currentPattern])] = newBetaNode
matchedPatterns.append(currentPattern)
self.nodes[HashablePatternList([currentPattern])] = newNode
newBetaNode.connectIncomingNodes(existingNode, newNode)
#Extend the match list with the current pattern and add it
#to the list of attached patterns for the second pass
attachedPatterns.append(matchedPatterns)
matchedPatterns = HashablePatternList()
else:
#The current pattern is not in the network and the match list isn't
#either. Add an alpha node
newNode = self.createAlphaNode(currentPattern)
self.nodes[HashablePatternList([currentPattern])] = newNode
#Add to list of attached patterns for the second pass
attachedPatterns.append(HashablePatternList([currentPattern]))
def attachBetaNodes(self, patternIterator, lastBetaNodePattern=None):
"""
The second 'pass' in the Rete network compilation algorithm:
Attaches Beta nodes to the alpha nodes associated with all the patterns
in a rule's LHS recursively towards a 'root' Beta node - the terminal node
for the rule. This root / terminal node is returned
"""
try:
nextPattern = next(patternIterator) if py3compat.PY3 else patternIterator.next()
except StopIteration:
assert lastBetaNodePattern
if lastBetaNodePattern:
return self.nodes[lastBetaNodePattern]
else:
assert len(self.universalTruths), "should be empty LHSs"
terminalNode = BetaNode(None, None, aPassThru=True)
self.nodes[HashablePatternList([None])] = terminalNode
return terminalNode # raise Exception("Ehh. Why are we here?")
if lastBetaNodePattern:
firstNode = self.nodes[lastBetaNodePattern]
secondNode = self.nodes[nextPattern]
newBNodePattern = lastBetaNodePattern + nextPattern
newBetaNode = BetaNode(firstNode, secondNode)
self.nodes[newBNodePattern] = newBetaNode
else:
firstNode = self.nodes[nextPattern]
oldAnchor = self.nodes.get(HashablePatternList([None]) + nextPattern)
if not oldAnchor:
if isinstance(firstNode, AlphaNode):
newfirstNode = BetaNode(None, firstNode, aPassThru=True)
newfirstNode.connectIncomingNodes(None, firstNode)
self.nodes[HashablePatternList([None]) + nextPattern] = newfirstNode
else:
newfirstNode = firstNode
else:
newfirstNode = oldAnchor
firstNode = newfirstNode
secondPattern = next(patternIterator) if py3compat.PY3 else patternIterator.next()
secondNode = self.nodes[secondPattern]
newBetaNode = BetaNode(firstNode, secondNode)
newBNodePattern = HashablePatternList([None]) + nextPattern + secondPattern
self.nodes[newBNodePattern] = newBetaNode
newBetaNode.connectIncomingNodes(firstNode, secondNode)
return self.attachBetaNodes(patternIterator, newBNodePattern)
def ComplementExpand(tBoxGraph, complementAnnotation):
complementExpanded = []
for negativeClass in tBoxGraph.subjects(predicate=OWL_NS.complementOf):
containingList = first(tBoxGraph.subjects(RDF.first, negativeClass))
prevLink = None
while containingList:
prevLink = containingList
containingList = first(tBoxGraph.subjects(RDF.rest, containingList))
if prevLink:
for s, p, o in tBoxGraph.triples_choices((None,
[OWL_NS.intersectionOf,
OWL_NS.unionOf],
prevLink)):
if (s, complementAnnotation, None) in tBoxGraph:
continue
_class = Class(s)
complementExpanded.append(s)
print("Added %s to complement expansion" % _class)
ComplementExpansion(_class)
def test():
import doctest
doctest.testmod()
if __name__ == '__main__':
test()
# from FuXi.Rete.Network import iteritems
# from FuXi.Rete.Network import any
# from FuXi.Rete.Network import ComplementExpand
# from FuXi.Rete.Network import HashablePatternList
# from FuXi.Rete.Network import InferredGoal
# from FuXi.Rete.Network import ReteNetwork
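# --- Hedged end-to-end sketch (API names are taken from this module; the
# file names are made up) -----------------------------------------------------
# ruleStore = N3RuleStore()
# Graph(ruleStore).parse('rules.n3', format='n3')
# network = ReteNetwork(ruleStore)
# network.feedFactsToAdd(generateTokenSet(factsGraph))  # factsGraph: rdflib Graph
# print(network)          # "<Network: ... tokens in working memory, ...>"
# for triple in network.inferredFacts:
#     print(triple)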
| 44.182692
| 132
| 0.58363
| 4,019
| 41,355
| 5.969644
| 0.181637
| 0.014255
| 0.012254
| 0.005835
| 0.244498
| 0.197524
| 0.176309
| 0.164805
| 0.160137
| 0.155802
| 0
| 0.004033
| 0.334421
| 41,355
| 935
| 133
| 44.229947
| 0.867611
| 0.194487
| 0
| 0.289121
| 0
| 0.00149
| 0.024595
| 0.001346
| 0
| 0
| 0
| 0
| 0.020864
| 1
| 0.065574
| false
| 0.007452
| 0.04769
| 0.010432
| 0.165425
| 0.020864
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6dcbed133eb3a7b3cdb7874d0063d3eca6ce4f69
| 792
|
py
|
Python
|
flask_sqlalchemy/app.py
|
andreeaionescu/graphql-example
|
ceeff3888ea87312d4df138093d7f6fcaa1ae973
|
[
"MIT"
] | null | null | null |
flask_sqlalchemy/app.py
|
andreeaionescu/graphql-example
|
ceeff3888ea87312d4df138093d7f6fcaa1ae973
|
[
"MIT"
] | null | null | null |
flask_sqlalchemy/app.py
|
andreeaionescu/graphql-example
|
ceeff3888ea87312d4df138093d7f6fcaa1ae973
|
[
"MIT"
] | null | null | null |
'''
Unlike a RESTful API, there is only a single URL from which GraphQL is accessed.
We are going to use Flask to create a server that exposes the GraphQL schema under /graphql and an interface for querying
it easily: GraphiQL (also under /graphql when accessed by a browser).
'''
from flask import Flask
from flask_graphql import GraphQLView
from flask_sqlalchemy.models import db_session
from flask_sqlalchemy.schema import schema, Department
app = Flask(__name__)
app.debug = True
app.add_url_rule(
'/graphql',
view_func=GraphQLView.as_view(
'graphql',
schema=schema,
graphiql=True # for having the GraphiQL interface
)
)
@app.teardown_appcontext
def shutdown_session(exception=None):
db_session.remove()
if __name__ == '__main__':
app.run()
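# --- Hedged client sketch (not part of app.py; needs the third-party
# 'requests' package) ----------------------------------------------------------
# Any GraphQL endpoint answers the standard introspection query, so this works
# without knowing the schema's field names; the port assumes Flask's default
# app.run() settings.
#
#   import requests
#   r = requests.post('http://localhost:5000/graphql',
#                     json={'query': '{ __schema { queryType { name } } }'})
#   print(r.json())  # e.g. {'data': {'__schema': {'queryType': {'name': 'Query'}}}}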
| 26.4
| 119
| 0.744949
| 113
| 792
| 5.017699
| 0.566372
| 0.063492
| 0.067019
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183081
| 792
| 30
| 120
| 26.4
| 0.876352
| 0.386364
| 0
| 0
| 0
| 0
| 0.048017
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.210526
| 0
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6dcf6d8a2796f5c3d07f258930f33ef3e7528467
| 4,021
|
py
|
Python
|
src/skansensor/datacollector.py
|
fadykuzman/Rodeo-App
|
2972b371ed38fad4f93e6afcb699b51cec865510
|
[
"BSD-3-Clause"
] | null | null | null |
src/skansensor/datacollector.py
|
fadykuzman/Rodeo-App
|
2972b371ed38fad4f93e6afcb699b51cec865510
|
[
"BSD-3-Clause"
] | null | null | null |
src/skansensor/datacollector.py
|
fadykuzman/Rodeo-App
|
2972b371ed38fad4f93e6afcb699b51cec865510
|
[
"BSD-3-Clause"
] | null | null | null |
"""
This class searches for data in a hierarchy of folders and sorts
them in a list.
Attributes to the data are:
path: path to the raw data file
type: whether the file came from Picarro or DropSense sensors.
data: the data set after reading with the modules:
read_dropsense
read_picarro
Please refer to the documentation of the above data
reading modules to know the data structure of the
resultant datasets
"""
import os
from collections import namedtuple
import skansensor.skansensor as ss
class DataCollector:
    def __init__(self):
        # NB: keep dir_list per-instance; a class-level list would be shared
        # (and keep growing) across DataCollector instances.
        self.dir_list = []
def get_files(self, path = '.'):
"""
Loops through a folder hierarchy from a given path.
If no path is given, it searches through the current
directory.
        The method returns a list of namedtuples of:
        path, kind of file/folder, and, if a file, the extension
Parameters:
-------------
path: path to the parent directory to loop through.
Default is current directory
returns:
-------------
dir_list: a list of all files or folders as a namedtuple.
Attributes of the namedtuple are:
path: path to file or folder
whatis: dir or data
whatext: what extension the data file has.
(only dropsense and picarro)
"""
        # Opens an instance of a directory
        Cat = namedtuple('Cat', ['path', 'whatis', 'whatext'])
        with os.scandir(path) as it:
            # loop through all items in a directory; record dirs (and recurse
            # into them) and any data file with a recognized extension
            for entry in it:
                if entry.is_dir():
                    self.dir_list.append(Cat(entry.path, 'dir', ''))
                    self.get_files(entry.path)
                else:
                    _, fileext = os.path.splitext(entry.path)
                    # .mta/.mtc/.mtzc come from DropSense, .dat from Picarro
                    if fileext in ('.mta', '.mtc', '.mtzc', '.dat'):
                        self.dir_list.append(Cat(entry.path, 'data', fileext))
return self.dir_list
def collect(self, dir_list):
"""
Parameters:
------------
dir_list: the list of files and folders.
Expected the result of the method get_files()
returns:
------------
data_list: a list of dictionaries that contain data read
from data files.
Refer to 'skansensor' module for the data structure.
"""
datalist = []
for a in dir_list:
if a.whatis == 'data':
if (a.whatext == '.mta') or (a.whatext == '.mtc') or (a.whatext == '.mtzc'):
d = {
'path' : a.path,
'type' : 'dropsense',
'data' : ss.read_dropsense(a.path)
}
elif a.whatext == '.dat':
d = {
'path' : a.path,
'type' : 'picarro',
'data' : ss.read_picarro(a.path)
}
if d not in datalist:
datalist.append(d)
return datalist
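# --- Hedged usage sketch (the path is made up) --------------------------------
# dc = DataCollector()
# entries = dc.get_files('/data/experiments')   # walks the tree recursively
# datasets = dc.collect(entries)
# for ds in datasets:
#     print(ds['type'], ds['path'])             # 'dropsense' or 'picarro'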
| 36.225225
| 92
| 0.437453
| 391
| 4,021
| 4.442455
| 0.278772
| 0.036269
| 0.031664
| 0.027634
| 0.11802
| 0.090386
| 0.075993
| 0.075993
| 0.075993
| 0.075993
| 0
| 0
| 0.48197
| 4,021
| 110
| 93
| 36.554545
| 0.833893
| 0.361104
| 0
| 0.333333
| 0
| 0
| 0.049887
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0.037037
| 0.055556
| 0
| 0.185185
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6dd2c58e17cfa913515f063e288c4ff6d601590c
| 875
|
py
|
Python
|
pybuildtool/core/context.py
|
dozymoe/PyBuildTool
|
d938a8d6335b801e102159e82a6e0002dfaa1b1a
|
[
"MIT"
] | 5
|
2017-02-10T07:54:49.000Z
|
2017-07-11T09:14:26.000Z
|
pybuildtool/core/context.py
|
dozymoe/PyBuildTool
|
d938a8d6335b801e102159e82a6e0002dfaa1b1a
|
[
"MIT"
] | null | null | null |
pybuildtool/core/context.py
|
dozymoe/PyBuildTool
|
d938a8d6335b801e102159e82a6e0002dfaa1b1a
|
[
"MIT"
] | 1
|
2017-05-21T20:35:10.000Z
|
2017-05-21T20:35:10.000Z
|
import os
from waflib import Context, Errors # pylint:disable=import-error
class WatchContext(Context.Context):
cmd = 'watch'
fun = 'watch'
variant = ''
def __init__(self, **kw):
super().__init__(**kw)
self.top_dir = kw.get('top_dir', Context.top_dir)
self.out_dir = kw.get('out_dir', Context.out_dir)
if not(os.path.isabs(self.top_dir) and os.path.isabs(self.out_dir)):
raise Errors.WafError('The project was not configured: ' +\
'run "waf configure" first!')
self.path = self.srcnode = self.root.find_dir(self.top_dir)
self.bldnode = self.root.make_node(self.variant_dir)
def get_variant_dir(self):
if not self.variant:
return self.out_dir
return os.path.join(self.out_dir, self.variant)
variant_dir = property(get_variant_dir, None)
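# --- Hedged usage note (not part of the original file) ------------------------
# waf registers Context subclasses on import, so loading this module adds the
# command; fun = 'watch' means the wscript needs a matching function:
#
#   def watch(ctx):
#       ...   # ctx is a WatchContext; top/out dirs are already resolved
#
# and the command is then invoked as: ./waf watch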
| 31.25
| 76
| 0.637714
| 123
| 875
| 4.317073
| 0.398374
| 0.067797
| 0.07533
| 0.056497
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.238857
| 875
| 27
| 77
| 32.407407
| 0.797297
| 0.030857
| 0
| 0
| 0
| 0
| 0.096927
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.55
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6dd5f7a4941e24796fe51eb9276d1b79188a16ea
| 15,499
|
py
|
Python
|
au/fixtures/dataset.py
|
pwais/au2018
|
edd224e5fb649b9f0095ffad39b94f72f73e4853
|
[
"Apache-2.0"
] | null | null | null |
au/fixtures/dataset.py
|
pwais/au2018
|
edd224e5fb649b9f0095ffad39b94f72f73e4853
|
[
"Apache-2.0"
] | 3
|
2019-01-05T22:43:37.000Z
|
2019-01-26T05:45:01.000Z
|
au/fixtures/dataset.py
|
pwais/au2018
|
edd224e5fb649b9f0095ffad39b94f72f73e4853
|
[
"Apache-2.0"
] | 1
|
2020-05-03T21:10:03.000Z
|
2020-05-03T21:10:03.000Z
|
import io
import os
from collections import OrderedDict
import imageio
import numpy as np
from au import conf
from au.util import create_log
from au import util
##
## Images
##
class ImageRow(object):
"""For expected usage, see `test_imagerow_demo`"""
# NB: While pyspark uses cloudpickle for *user code*, it uses normal
# pickle for *data*, so the contents of ImageRow instances must be
# pickle-able. I.e. attributes cannot be free functions, since only
# cloudpickle can serialize functions. FMI:
# http://apache-spark-user-list.1001560.n3.nabble.com/pyspark-serializer-can-t-handle-functions-td7650.html
# https://github.com/apache/spark/blob/c3c45cbd76d91d591d98cf8411fcfd30079f5969/python/pyspark/worker.py#L50
# https://github.com/apache/spark/blob/c3c45cbd76d91d591d98cf8411fcfd30079f5969/python/pyspark/worker.py#L359
__slots__ = (
'dataset',
'split',
'uri',
'_image_bytes', # NB: see property image_bytes
'_cached_image_arr', # TODO: use __ for privates .. idk
'_cached_image_fobj',
'_arr_factory', # NB: must be a callable *object*; see above
'label',
'attrs',
# '_label_bytes', # NB: see property label and label_bytes
# '_cached_label',
# '_cached_label_arr',
# '_cached_label_fobj',
)
DEFAULT_PQ_PARTITION_COLS = ['dataset', 'split']
# NB: must be a list and not a tuple due to pyarrow c++ api
# Old pickle API requires __{get,set}state__ for classes that define
  # __slots__. Some part of Spark uses this API for serialization, so we
# provide an impl.
def __getstate__(self):
return {'as_tuple': self.astuple()}
def __setstate__(self, d):
for k, v in zip(self.__slots__, d['as_tuple']):
setattr(self, k, v)
# self._image_bytes = d.get('image_bytes', self._image_bytes)
def __init__(self, **kwargs):
for k in self.__slots__:
setattr(self, k, kwargs.get(k, ''))
if ('_image_bytes' not in kwargs and
kwargs.get('image_bytes', '') is not ''):
self._image_bytes = kwargs['image_bytes']
# if ('_label_bytes' not in kwargs and
# kwargs.get('label_bytes', '') is not ''):
# self._label_bytes = kwargs['label_bytes']
def astuple(self):
return tuple(getattr(self, k) for k in self.__slots__)
def __lt__(self, other):
# Important! Otherwise Python might break ties in unexpected ways
return self.astuple() < other.astuple()
@staticmethod
def from_np_img_labels(np_img, label='', **kwargs):
row = ImageRow(**kwargs)
row._cached_image_arr = np_img
row.label = label
return row
@staticmethod
def wrap_factory(np_img_factory, **kwargs):
row = ImageRow(**kwargs)
row._arr_factory = np_img_factory
return row
@staticmethod
def from_path(path, **kwargs):
# NB: The ImageRow instance will be a flyweight for the image data
row = ImageRow(uri=path, **kwargs)
row._cached_image_fobj = open(path, 'rb')
return row
def to_dict(self):
attrs = []
for k in self.__slots__:
if not k.startswith('_'):
# pyarrow + python 2.7 -> str gets interpreted as binary
# https://stackoverflow.com/a/49507268
# Can skip for python3 ...
v = getattr(self, k)
if str is bytes and isinstance(v, basestring):  # Python 2 only
v = unicode(v.encode('utf-8'))
attrs.append((k, v))
elif k == '_image_bytes':
attrs.append(('image_bytes', bytearray(self.image_bytes)))
# NB: must be bytearray to support parquet / pyspark type inference
# elif k == '_label_bytes':
# attrs.append(('label_bytes', self.label_bytes))
return OrderedDict(attrs)
def as_numpy(self):
if _unset(self._cached_image_arr):
if not _unset(self._arr_factory):
self._cached_image_arr = self._arr_factory()
else:
image_bytes = self.image_bytes
if _unset(image_bytes):
# Can't make an array
return np.array([])
self._cached_image_arr = imageio.imread(io.BytesIO(image_bytes))
return self._cached_image_arr
@property
def image_bytes(self):
if _unset(self._image_bytes):
# Read lazily
if not _unset(self._arr_factory) and _unset(self._cached_image_arr):
self._cached_image_arr = self._arr_factory()
if not _unset(self._cached_image_arr):
buf = io.BytesIO()
imageio.imwrite(buf, self._cached_image_arr, format='png')
self._image_bytes = buf.getvalue()
elif not _unset(self._cached_image_fobj):
self._image_bytes = self._cached_image_fobj.read()
self._cached_image_fobj = ''
return self._image_bytes
# @property
# def label_bytes(self):
# if self._label_bytes is '':
# # Read lazily
# if self._cached_label_arr is not '':
# buf = io.BytesIO()
# imageio.imwrite(buf, self._cached_image_arr, format='png')
# self._image_bytes = buf.getvalue()
# elif self._cached_image_fobj is not '':
# self._image_bytes = self._cached_image_fobj.read()
# self._cached_image_fobj = ''
# return self._image_bytes
#
# @property
# def label(self):
# if self._cached_label is '':
# if self.label_encoding == 'json':
#
#
# if self._label is '':
# # Read lazily
# if self._cached_label_arr is not '':
# buf = io.BytesIO()
# imageio.imwrite(buf, self._cached_label_arr, format='png')
# self._label_bytes = buf.getvalue()
# elif self._cached_label_fobj is not '':
# self._label_bytes = self._cached_label_fobj.read()
# self._cached_label_fobj = ''
# return self._label_bytes
def fname(self):
has_fnamable_label = (
not _unset(self.label) and
isinstance(self.label, (basestring, int, float)))
toks = (
self.dataset,
self.split,
'label_%s' % str(self.label).replace(' ', '-') if has_fnamable_label else '',
self.uri.split('/')[-1] if self.uri else '',
)
fname = '-'.join(str(tok) for tok in toks if tok) + '.png'
return fname
def to_debug(self, fname=''):
"""Convenience for dumping an image to a place on disk where the user can
view locally (e.g. using Apple Finder file preview, Ubuntu
image browser, an nginx instance pointed at the folder, etc).
FMI see conf.AU_CACHE_TMP
"""
if _unset(self.image_bytes):
return None
dest = os.path.join(conf.AU_CACHE_TMP, self.fname())
util.mkdir(conf.AU_CACHE_TMP)
with open(dest, 'wb') as f:
f.write(self.image_bytes)
return dest
@staticmethod
def rows_from_images_dir(img_dir, pattern='*', **kwargs):
import pathlib2 as pathlib
log = create_log()
log.info("Reading images from dir %s ..." % img_dir)
paths = pathlib.Path(img_dir).glob(pattern)
n = 0
for path in paths:
path = str(path) # pathlib uses PosixPath thingies ...
yield ImageRow.from_path(path, **kwargs)
n += 1
if (n % 100) == 0:
log.info("... read %s paths ..." % n)
log.info("... read %s total paths." % n)
@staticmethod
def from_pandas(df, **kwargs):
for row in df.to_dict(orient='records'):
row.update(**kwargs)
yield ImageRow(**row)
@staticmethod
def write_to_parquet(
rows,
dest_dir,
rows_per_file=-1,
partition_cols=DEFAULT_PQ_PARTITION_COLS,
compression='lz4',
spark=None):
is_rdd, is_pyspark_df = False, False
try:
import pyspark.rdd
import pyspark.sql
is_rdd = isinstance(rows, pyspark.rdd.RDD)
is_pyspark_df = isinstance(rows, pyspark.sql.dataframe.DataFrame)
if is_pyspark_df:
df = rows
except ImportError:
pass
if is_rdd:
assert spark is not None
from pyspark.sql import Row
# RDD[ImageRow] -> DataFrame[ImageRow]
rows_rdd = rows.map(lambda r: Row(**r.to_dict()))
df = spark.createDataFrame(rows_rdd)
is_pyspark_df = True
if is_pyspark_df:
util.log.info("Writing parquet to %s ..." % dest_dir)
df.printSchema() # NB: can't .show() b/c of binary data
df.write.parquet(
dest_dir,
mode='append',
partitionBy=partition_cols,
compression=compression)
util.log.info("... done! Wrote to %s ." % dest_dir)
else:
# Use Pyarrow to write Parquet in this process
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
log = create_log()
if rows_per_file >= 1:
irows = util.ichunked(rows, rows_per_file)
else:
rows = list(rows)
if not rows:
return
irows = iter([rows])
util.log.info("Writing parquet to %s ..." % dest_dir)
for row_chunk in irows:
r = row_chunk[0]
# Pandas wants dicts
if isinstance(r, ImageRow):
row_chunk = [r.to_dict() for r in row_chunk]
df = pd.DataFrame(row_chunk)
table = pa.Table.from_pandas(df)
util.mkdir(dest_dir)
pq.write_to_dataset(
table,
dest_dir,
partition_cols=partition_cols,
preserve_index=False, # Don't care about pandas index
compression='snappy',
# NB: deliberately ignores the `compression` arg here; pyarrow's lz4 is
# broken, see https://github.com/apache/arrow/issues/3491
flavor='spark')
util.log.info("... wrote %s rows ..." % len(row_chunk))
util.log.info("... done writing to %s ." % dest_dir)
@staticmethod
def write_to_pngs(rows, dest_root=None):
dest_root = dest_root or conf.AU_DATA_CACHE
util.log.info("Writing PNGs to %s ..." % dest_root)
n = 0
for row in rows:
dest_dir = os.path.join(
dest_root,
row.dataset or 'default_dataset',
row.split or 'default_split')
util.mkdir(dest_dir)
fname = row.fname()
dest = os.path.join(dest_dir, fname)
with open(dest, 'wb') as f:
f.write(row.image_bytes)
n += 1
if n % 100 == 0:
util.log.info("... write %s PNGs ..." % n)
util.log.info("... wrote %s total PNGs to %s ." % (n, dest_root))
## Ops & Utils
import cv2
def _make_have_target_chan(img, nchan):
shape = img.shape
if len(shape) == 2:
img = np.expand_dims(img, axis=-1)
elif len(shape) != 3:
raise ValueError("Unexpected input image shape: %s" % (shape,))
shape = img.shape
if shape[-1] == nchan:
return img
elif nchan == 1:
if len(shape) == 3 and shape[-1] == 3:
# Make the image greyscale
img2 = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
return np.expand_dims(img2, axis=-1)
else:
raise ValueError("TODO input image has != 3 chan %s" % (shape,))
elif nchan == 3:
if len(shape) == 3 and shape[-1] == 1:
# Repeat the grey channel to create an RGB image
# (or BGR, depending on the source convention)
img = np.squeeze(img, axis=-1)
return np.stack([img, img, img], axis=-1)
else:
raise ValueError("TODO input image has != 1 chan %s" % (shape,))
else:
raise ValueError("TODO idk yet %s %s" % (nchan, shape,))
class FillNormalized(object):
def __init__(self, target_hw=None, target_nchan=None, norm_func=None):
self.norm_func = norm_func
self.target_hw = target_hw
self.target_nchan = target_nchan
self.thruput = util.ThruputObserver(
name='FillNormalized',
log_on_del=True)
def __call__(self, row):
self.thruput.start_block()
normalized = row.as_numpy()
bytes_in = normalized.nbytes
if self.target_hw is not None:
h, w = self.target_hw
normalized = cv2.resize(normalized, (w, h)) # Sneaky, opencv!
if self.target_nchan is not None:
normalized = _make_have_target_chan(normalized, self.target_nchan)
if self.norm_func is not None:
normalized = self.norm_func(normalized)
row.attrs = row.attrs or {}
row.attrs.update({
'normalized': normalized,
})
self.thruput.stop_block(n=1, num_bytes=bytes_in)
return row
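# A minimal sketch of FillNormalized (assumption: cv2 importable; shapes are
# hypothetical). Not invoked anywhere; illustrative only.
def _demo_fill_normalized():
  arr = np.ones((64, 48, 3), dtype=np.uint8)
  fill = FillNormalized(
    target_hw=(32, 32),
    target_nchan=1,
    norm_func=lambda a: a.astype('float32') / 255.)
  row = fill(ImageRow.from_np_img_labels(arr))
  assert row.attrs['normalized'].shape == (32, 32, 1)
  return row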
##
## Tables of images
##
class ImageTable(object):
"""A (partitioned Parquet) table of images (perhaps use one table
per dataset / label type)."""
TABLE_NAME = 'default'
ROWS_PER_FILE = 100
@classmethod
def setup(cls, spark=None):
"""Subclasses should override to create a dataset from scratch
(e.g. download images, create a table, etc). The base class
is just a bunch of images from ImageNet.
"""
if os.path.exists(cls.table_root()):
util.log.info(
"Skipping setup for %s, %s exists." % (
cls.TABLE_NAME, cls.table_root()))
return
rows = ImageRow.rows_from_images_dir(
conf.AU_IMAGENET_SAMPLE_IMGS_DIR,
dataset=cls.TABLE_NAME,
split='__default')
rows = list(rows)
import json
with open(conf.AU_IMAGENET_SAMPLE_LABELS_PATH, 'rb') as f:
fname_to_label = json.load(f)
for row in rows:
fname = row.uri.split('/')[-1]
row.label = fname_to_label[fname]
cls.save_to_image_table(rows)
@classmethod
def table_root(cls):
return os.path.join(conf.AU_TABLE_CACHE, cls.TABLE_NAME)
@classmethod
def save_to_image_table(cls, rows):
dest = os.path.join(conf.AU_TABLE_CACHE, cls.TABLE_NAME)
if not os.path.exists(dest):
return ImageRow.write_to_parquet(
rows,
cls.table_root(),
rows_per_file=cls.ROWS_PER_FILE)
@classmethod
def get_rows_by_uris(cls, uris):
import pandas as pd
import pyarrow.parquet as pq
pa_table = pq.read_table(cls.table_root())
df = pa_table.to_pandas()
matching = df[df.uri.isin(uris)]
return list(ImageRow.from_pandas(matching))
@classmethod
def iter_all_rows(cls):
"""Convenience method (mainly for testing) using Pandas"""
import pandas as pd
import pyarrow.parquet as pq
pa_table = pq.read_table(cls.table_root())
df = pa_table.to_pandas()
for row in ImageRow.from_pandas(df):
yield row
@classmethod
def as_imagerow_rdd(cls, spark):
df = spark.read.parquet(cls.table_root())
row_rdd = df.rdd.map(lambda row: ImageRow(**row.asDict()))
return row_rdd
# @classmethod
# def show_stats(cls, spark=None):
#
# @staticmethod
# def write_tf_dataset_to_parquet(
# dataset,
# dest_dir,
#
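# A minimal subclass sketch of ImageTable (assumption: conf.AU_TABLE_CACHE is a
# writable directory; names hypothetical). Not invoked anywhere; illustrative only.
def _demo_image_table():
  class TinyTable(ImageTable):
    TABLE_NAME = 'tiny_demo'
  rows = [
    ImageRow.from_np_img_labels(
      np.zeros((2, 2, 3), dtype=np.uint8),
      dataset=TinyTable.TABLE_NAME, split='train', uri='mem://%s' % i)
    for i in range(3)]
  TinyTable.save_to_image_table(rows)
  return list(TinyTable.iter_all_rows())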
"""
make a dataset for 1-channel mnist things
make a dataset for our handful of images
try to coerce dataset from mscoco
make one for bdd100k
record activations for mnist
then for mobilenet on bdd100k / mscoco
take note of deeplab inference: https://colab.research.google.com/github/tensorflow/models/blob/master/research/deeplab/deeplab_demo.ipynb#scrollTo=edGukUHXyymr
and we'll wanna add maskrcnn mebbe ?
SPARK_LOCAL_IP=127.0.0.1 $SPARK_HOME/bin/pyspark --packages databricks:tensorframes:0.5.0-s_2.11 --packages databricks:spark-deep-learning:1.2.0-spark2.3-s_2.11
class DatasetFactoryBase(object):
class ParamsBase(object):
def __init__(self):
self.BASE_DIR = ''
@classmethod
def create_dataset(cls):
pass
@classmethod
def get_ctx_for_entry(cls, entry_id):
pass
"""
| 28.595941
| 160
| 0.623653
| 2,107
| 15,499
| 4.375415
| 0.21215
| 0.029287
| 0.024406
| 0.017572
| 0.194707
| 0.164335
| 0.142206
| 0.124851
| 0.119861
| 0.103373
| 0
| 0.013811
| 0.261888
| 15,499
| 542
| 161
| 28.595941
| 0.792045
| 0.24079
| 0
| 0.213592
| 0
| 0
| 0.066036
| 0
| 0
| 0
| 0
| 0.001845
| 0.003236
| 1
| 0.084142
| false
| 0.003236
| 0.071197
| 0.012945
| 0.252427
| 0.003236
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6dd85933e9edf9c201a35fe12dd563f0c97ddb8b
| 417
|
py
|
Python
|
Python Fundamentals/Regular Expressions/More Exercises/Task05.py
|
DonikaChervenkova/SoftUni
|
bff579c037ec48f39ed193b34bc3502a32e90732
|
[
"MIT"
] | 1
|
2022-03-16T10:23:04.000Z
|
2022-03-16T10:23:04.000Z
|
Python Fundamentals/Regular Expressions/More Exercise/Task05.py
|
IvanTodorovBG/SoftUni
|
7b667f6905d9f695ab1484efbb02b6715f6d569e
|
[
"MIT"
] | null | null | null |
Python Fundamentals/Regular Expressions/More Exercise/Task05.py
|
IvanTodorovBG/SoftUni
|
7b667f6905d9f695ab1484efbb02b6715f6d569e
|
[
"MIT"
] | 1
|
2021-12-04T12:30:57.000Z
|
2021-12-04T12:30:57.000Z
|
import re
title_regex = r'<title>([^<>]*)<\/title>'
info = input()
title = re.findall(title_regex, info)
title = ''.join(title)
print(f"Title: {title}")
body_regex = r'<body>.*<\/body>'
body = re.findall(body_regex, info)
body = ''.join(body)
content_regex = r">([^><]*)<"
content = re.findall(content_regex, body)
content = ''.join(content)
content = content.replace('\\n', '')
print(f'Content: {content}')
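# Worked example (one-line input):
#   <html><head><title>SoftUni</title></head><body><h1>Hello\nWorld</h1></body></html>
# prints:
#   Title: SoftUni
#   Content: HelloWorld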
| 17.375
| 41
| 0.630695
| 56
| 417
| 4.589286
| 0.267857
| 0.070039
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1247
| 417
| 24
| 42
| 17.375
| 0.70411
| 0
| 0
| 0
| 0
| 0
| 0.203349
| 0.057416
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.071429
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ddb9acc38ebe942d14b28143c5d4ded77045159
| 320
|
py
|
Python
|
code_references/graph.py
|
nathanShepherd/Intelligent-Interface
|
4ab8a223ef6dfaed7cf5ebf61b24ec355d00b593
|
[
"MIT"
] | 3
|
2018-03-26T21:08:45.000Z
|
2018-11-16T21:16:57.000Z
|
code_references/graph.py
|
nathanShepherd/Intelligent-Interface
|
4ab8a223ef6dfaed7cf5ebf61b24ec355d00b593
|
[
"MIT"
] | null | null | null |
code_references/graph.py
|
nathanShepherd/Intelligent-Interface
|
4ab8a223ef6dfaed7cf5ebf61b24ec355d00b593
|
[
"MIT"
] | 2
|
2018-03-26T21:08:51.000Z
|
2020-05-06T09:22:52.000Z
|
# Testing various methods to graph with matplotlib
# Developed by Nathan Shepherd
import numpy as np
import matplotlib.pyplot as plt
n = 100
y = [round(np.random.normal(scale=n/10)) for _ in range(n)]
x = list(range(-n, n))
_y = []
for i in range(-n, n):
_y.append(y.count(i))
plt.plot(x, _y)
plt.show()
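# Equivalent sketch using numpy's histogram (same one-bin-per-integer layout as
# the counting loop above):
#   counts, _ = np.histogram(y, bins=np.arange(-n, n + 1))
#   plt.plot(np.arange(-n, n), counts)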
| 18.823529
| 59
| 0.671875
| 60
| 320
| 3.516667
| 0.566667
| 0.099526
| 0.113744
| 0.104265
| 0.132701
| 0.132701
| 0.132701
| 0
| 0
| 0
| 0
| 0.019231
| 0.1875
| 320
| 16
| 60
| 20
| 0.792308
| 0.240625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ddc0994a3a96b2f0830b2f0c227911b21552e3f
| 1,741
|
py
|
Python
|
scripts/taxonomy_frequency.py
|
STRIDES-Codes/Exploring-the-Microbiome-
|
bd29c8c74d8f40a58b63db28815acb4081f20d6b
|
[
"MIT"
] | null | null | null |
scripts/taxonomy_frequency.py
|
STRIDES-Codes/Exploring-the-Microbiome-
|
bd29c8c74d8f40a58b63db28815acb4081f20d6b
|
[
"MIT"
] | null | null | null |
scripts/taxonomy_frequency.py
|
STRIDES-Codes/Exploring-the-Microbiome-
|
bd29c8c74d8f40a58b63db28815acb4081f20d6b
|
[
"MIT"
] | 2
|
2021-06-05T07:40:20.000Z
|
2021-06-05T08:02:58.000Z
|
import sys
from Bio import Entrez
from collections import Counter
import pandas as pd
###########################################
def get_tax_id(species):
species = species.replace(" ", "+").strip()
search = Entrez.esearch(term = species, db = "taxonomy", retmode = "xml")
record = Entrez.read(search)
return record['IdList'][0]
###############################################
def get_tax_data(taxid):
search = Entrez.efetch(id = taxid, db = "taxonomy", retmode = "xml")
return Entrez.read(search)
###############################################
def tax_to_freq(in_file,out_file):
Entrez.email = 'idrissi.azami.abdellah@gmail.com'
print('Reading input file .....')
with open(in_file, 'r', encoding='utf-8') as inpt:
sps = []
content = inpt.readlines()
print('Extracting nodes ...')
for i in content:
if '# Model Data:' in i:
sp = i.split('|')[1].replace('_', ' ')
sps.append(sp)
print('Counting nodes....')
counter = dict(Counter(sps))
total = 0
for i in counter:
try:
# Only count taxa that resolve in the NCBI taxonomy
taxid = get_tax_id(i)
taxdata = get_tax_data(taxid)
total = total + counter[i]
except Exception:
pass
print('Retrieving taxonomy ...')
tax = []
for i in counter:
try:
taxid = get_tax_id(i)
taxdata = get_tax_data(taxid)
lineage = {d['Rank']:d['ScientificName'] for d in
taxdata[0]['LineageEx'] if d['Rank'] in ['superkingdom','phylum','class','order', 'family','genus','species']}
lineage['Strain']=i
lineage['#Hits']=counter[i]
lineage['Frequency (%)']=(counter[i]/total)*100
tax.append(lineage)
except Exception:
pass
df = pd.DataFrame(tax)
df.to_csv(out_file,sep='\t',index=False)
return df
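# A minimal CLI sketch (assumption: invoked as
# `python taxonomy_frequency.py <in_file> <out_file>`; needs network access to NCBI):
if __name__ == '__main__':
    tax_to_freq(sys.argv[1], sys.argv[2])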
| 31.654545
| 118
| 0.564618
| 217
| 1,741
| 4.437788
| 0.442396
| 0.037383
| 0.024922
| 0.046729
| 0.107996
| 0.107996
| 0.107996
| 0.107996
| 0.107996
| 0.107996
| 0
| 0.006466
| 0.20046
| 1,741
| 54
| 119
| 32.240741
| 0.685345
| 0
| 0
| 0.24
| 0
| 0
| 0.169682
| 0.019963
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06
| false
| 0.04
| 0.08
| 0
| 0.2
| 0.08
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6dddfed29dba919369dee25697fc63339866499f
| 12,754
|
py
|
Python
|
gipf/GipfLogic.py
|
callix2/alphaZero-gipf
|
fd8dac7606611126d2d14beca0333b53bd5ee995
|
[
"MIT"
] | null | null | null |
gipf/GipfLogic.py
|
callix2/alphaZero-gipf
|
fd8dac7606611126d2d14beca0333b53bd5ee995
|
[
"MIT"
] | null | null | null |
gipf/GipfLogic.py
|
callix2/alphaZero-gipf
|
fd8dac7606611126d2d14beca0333b53bd5ee995
|
[
"MIT"
] | null | null | null |
'''
Author: Eric P. Nichols
Date: Feb 8, 2008.
Board class.
Board data:
1=white, -1=black, 0=empty
first dim is column, 2nd is row:
pieces[1][7] is the square in column 2,
at the opposite end of the board in row 8.
Squares are stored and manipulated as (x,y) tuples.
x is the column, y is the row.
'''
import numpy as np
class Board():
# list of all 6 directions on the board, as (x,y) offsets
__directions = [(2,0),(-2,0),(1,1),(1,-1),(-1,1),(-1,-1)]
# list of all entries of the matrix, which are actually spots on the board
actBoard = [(2,3),(3,2),(3,4),(4,1),(4,3),(4,5),(5,2),(5,4),(6,1),(6,3),(6,5),(7,2),(7,4),(8,1),(8,3),(8,5),(9,2),(9,4),(10,3)]
# list of all starting Points on the board
startingPoints = [(0,3),(1,2),(1,4),(2,1),(2,5),(3,0),(3,6),(5,0),(5,6),(7,0),(7,6),(9,0),(9,6),(10,1),(10,5),(11,2),(11,4),(12,3)]
# dictionary for the translation of the spot names into the entries of the matrix (as tuple)
move_dict = {"a1": (9,0), "a2": (7,0), "a3": (5,0), "a4": (3,0), "b1": (10,1), "b2": (8,1), "b3": (6,1), "b4": (4,1), "b5": (2,1), "c1": (11,2),
"c2": (9,2), "c5": (3,2), "c6": (1,2), "d1": (12,3), "d2": (10,3), "d6": (2,3), "d7": (0,3), "e1": (11,4), "e2": (9,4), "e5": (3,4),
"e6": (1,4), "f1": (10,5), "f2": (8,5), "f3": (6,5), "f4": (4,5), "f5": (2,5), "g1": (9,6), "g2": (7,6), "g3": (5,6), "g4": (3,6)}
def __init__(self, n):
"Set up initial board configuration."
self.n = n
# Create the empty board array.
self.pieces = [None]*self.n # rows: mini: 13, normal: 17
for i in range(self.n):
self.pieces[i] = [0]*(int(self.n//(1.8))) # columns: mini: 13//1.8=7 normal: 17//1.8=9
#Set up reserve in board corner
self.pieces[0][0] = 5
self.pieces[0][2] = 5
# Set up the initial 6 pieces.
self.pieces[4][1] = 1
self.pieces[4][5] = 1
self.pieces[10][3] = 1
self.pieces[8][1] = -1
self.pieces[8][5] = -1
self.pieces[2][3] = -1
"""
# Test case Sym
self.pieces[8][1] = 1
self.pieces[10][3] = 1
self.pieces[4][5] = 1
self.pieces[2][3] = -1
self.pieces[7][4] = -1
self.pieces[8][5] = -1
# Test case A
self.pieces[8][1] = -1
self.pieces[7][2] = -1
self.pieces[4][3] = -1
self.pieces[10][3] = 1
self.pieces[8][3] = 1
self.pieces[4][5] = 1
self.pieces[5][4] = 1
# Test case B
self.pieces[7][2] = 1
self.pieces[6][1] = 1
self.pieces[10][3] = 1
self.pieces[8][3] = -1
self.pieces[4][3] = -1
self.pieces[2][3] = -1
# Test case C
self.pieces[4][1] = 1
self.pieces[5][2] = -1
self.pieces[10][3] = 1
self.pieces[4][3] = -1
self.pieces[2][3] = -1
# Test case D
self.pieces[6][1] = -1
self.pieces[7][2] = -1
self.pieces[9][4] = 1
self.pieces[10][3] = -1
self.pieces[6][3] = -1
self.pieces[4][3] = -1
self.pieces[2][3] = 1
"""
# add [][] indexer syntax to the Board
def __getitem__(self, index):
return self.pieces[index]
def __setitem__(self, index, color):
self.pieces[index] = color
def get_actBoard(self):
if self.n == 13:
return self.actBoard
else:
pass # return actBoard + ext
def get_startingPoints(self):
if self.n == 13:
return self.startingPoints
else:
pass # return actBoard + ext
@staticmethod
def translate_move(move):
"""Returns a tuple of the spot names as a tuple of the matrix
"""
try:
move_new = (Board.move_dict[move[0]],Board.move_dict[move[1]])
return move_new
except KeyError:
return None  # invalid field name
def get_legal_moves(self):
"""Returns all the legal moves
"""
moves = set() # stores the legal moves.
# discover the possible moves for every starting point
for start in self.startingPoints:
newmoves = self.get_moves_for_dot(start)[0]  # index 0 = legal_moves
moves.update(newmoves)
return list(moves)
def get_legal_moves_binary(self):
"""Returns all the legal moves
"""
moves = [] # stores the legal moves.
# discover the possible moves for every starting point
for start in self.startingPoints:
newmoves = self.get_moves_for_dot(start)[2]
moves.extend(newmoves)
return moves
def get_all_moves(self):
"""Returns all the legal moves
"""
moves = [] # stores the legal moves.
# discover the possible moves for every starting point
for start in self.startingPoints:
newmoves = self.get_moves_for_dot(start)[1]
moves.extend(newmoves)
return moves
def get_moves_for_dot(self, dot):
"""Returns all the legal moves that use the given dot as a base.
"""
# search all possible directions.
legal_moves = []
all_moves = []
all_moves_binary = []
for direction in self.__directions:
target = tuple(np.add(dot, direction))
if target in self.actBoard:
move = (dot, target)
all_moves.append(move)
if self.check_move(target, direction):
legal_moves.append(move)
all_moves_binary.extend([1])
else:
all_moves_binary.extend([0])
# return the generated move list
return legal_moves, all_moves, all_moves_binary
def check_move(self, target, direction):
"""Returns True if there is a free field along the given direction
otherwise returns False because the move is not valid
"""
s = target
while s in self.actBoard:
if self[s] == 0:
return True
s = tuple(np.add(s, direction))
return False
def execute_move(self, action, curPlayer):
"""Performs the given move on the board; does not remove pieces!
color gives the color of the piece to play (1=white,-1=black)
"""
all_moves = self.get_all_moves()
move = all_moves[action]
start=move[0]
target=move[1]
direction = tuple(np.subtract(target, start))
s=target
# Runs up to a gap and places the piece there
while s in self.actBoard:
if self[s] == 0:
break
s = tuple(np.add(s, direction))
self[start]=curPlayer
# Runs in opposite direction and moves the pieces
while s in self.actBoard:
s_prev = tuple(np.subtract(s, direction))
s_prev_color = self[s_prev]
self[s]= s_prev_color
s = tuple(np.subtract(s, direction))
self[s]=0
# Decreases reserve
#players[color+1].dec_reserve()
def remove_lines(self, curPlayer):
"""Checks for each field whether a row of four results.
If so, removes the entire line
"""
# Check for multiple rows of 4; if so, remove the moving player's colour first; if still several, the one that captures the most first
rows = []
add_reserve = [0, None, 0]
for spot in self.actBoard:
new_row = self.discover_row_of_4(spot)
if new_row and new_row not in rows:
rows.append(new_row)
while len(rows)>1:
# multiple rows
rows_of_color = []  # all rows of the current colour (these take priority)
index_max = None
for row in rows:
row_color = self[list(row)[0]]
if row_color == curPlayer:
rows_of_color.append(row)
if len(rows_of_color)>1:
# multiple rows of the active colour
# check which one captures the most pieces
c = [None]*len(rows_of_color)
for index, row in enumerate(rows_of_color):
c[index] = self.get_hit_count(row)
index_max = np.argmax(c)
add_reserve = np.add(add_reserve, self.remove_line(rows_of_color[index_max]), where=[1,0,1])
elif len(rows_of_color)>0:
# only one row of the active colour
add_reserve = np.add(add_reserve, self.remove_line(rows_of_color[0]), where=[1,0,1])
else:
# several rows of the other colour and none of the active one
# check which one captures the most pieces
c = [None]*len(rows)
for index, row in enumerate(rows):
c[index] = self.get_hit_count(row)
index_max = np.argmax(c)
add_reserve = np.add(add_reserve, self.remove_line(rows[index_max]), where=[1,0,1])
# check whether the remaining rows are still intact
rows = self.check_rows(rows)
if len(rows)>0:
# only one row left (either colour)
add_reserve = np.add(add_reserve, self.remove_line(rows[0]), where=[1,0,1])
return add_reserve
def check_rows(self, rows):
rows_new = rows.copy()
for row in rows:
for spot in row:
if self[spot] == 0:
rows_new.remove(row)
break
return rows_new
def get_hit_count(self, row):
count = 0
row = list(row)
color_of_row = self[row[0]]
direction = tuple(np.subtract(row[0], row[1]))
s = row[0]
# Runs from the first of the 4 in one direction of the line
while s in self.actBoard:
if self[s] == 0:
break
else:
color = self[s]
if color != color_of_row:
count += 1
#self[s] = 0
s = tuple(np.add(s, direction))
# Runs in the opposite direction
s = tuple(np.subtract(row[0], direction))
while s in self.actBoard:
if self[s] == 0:
break
else:
color = self[s]
if color != color_of_row:
count += 1
#self[s] = 0
s = tuple(np.subtract(s, direction))
return count
def discover_row_of_4(self, spot):
"""Examines all directions for the given spot to see if a row of four exists
If found returns a array of the four, otherwise returns False
"""
color = self[spot]
for direction in self.__directions:
row_of_4 = [] #set() #weil unorderd
#row_of_4.update([spot])
row_of_4.append(spot)
s = tuple(np.add(spot, direction))
while s in self.actBoard:
if self[s] == 0 or self[s] != color:
break
else:
#row_of_4.update([s])
row_of_4.append(s)
s = tuple(np.add(s, direction))
if len(row_of_4)>2: #GipfMini: 3; Normal: 4
row_of_4.sort()
return row_of_4
# after checking every direction without success:
return False
def remove_line(self, row_of_4):
"""Removes the 4 pieces and the pieces that form a direct extension of these 4
The pieces with the color of the 4 return to his reserve
"""
add_reserve = [0, None, 0]
row_of_4 = list(row_of_4)
color_of_4 = self[row_of_4[0]]
direction = tuple(np.subtract(row_of_4[0], row_of_4[1]))
s = row_of_4[0]
# Runs from the first of the 4 in one direction of the line
while s in self.actBoard:
if self[s] == 0:
break
else:
color = self[s]
if color == color_of_4:
add_reserve[color+1]+=1
#players[color+1].inc_reserve()
self[s] = 0
s = tuple(np.add(s, direction))
# Runs in the opposite direction
s = tuple(np.subtract(row_of_4[0], direction))
while s in self.actBoard:
if self[s] == 0:
break
else:
color = self[s]
if color == color_of_4:
add_reserve[color+1]+=1
#players[color+1].inc_reserve()
self[s] = 0
s = tuple(np.subtract(s, direction))
return add_reserve
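# A minimal sketch (mini 13-row board; spot names per move_dict above).
# Not invoked anywhere; illustrative only.
def _demo_board():
    b = Board(13)
    move = Board.translate_move(('a1', 'b1'))  # -> ((9, 0), (10, 1))
    n_legal = sum(b.get_legal_moves_binary())  # count of 1s in the binary mask
    return move, n_legal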
| 36.130312
| 148
| 0.509644
| 1,779
| 12,754
| 3.550871
| 0.156268
| 0.06807
| 0.053981
| 0.026595
| 0.436758
| 0.40019
| 0.364413
| 0.309799
| 0.296818
| 0.257084
| 0
| 0.054965
| 0.365219
| 12,754
| 352
| 149
| 36.232955
| 0.725296
| 0.225576
| 0
| 0.38835
| 0
| 0
| 0.01252
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082524
| false
| 0.009709
| 0.004854
| 0.004854
| 0.184466
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6dde1212ba406b7fb0a629963b918fa2448f2579
| 2,533
|
py
|
Python
|
pcwg/reporting/colour.py
|
lcameron05/PCWG
|
8ae8ea7d644aa5bec0d1651101d83d8f17994f4b
|
[
"MIT"
] | 14
|
2015-01-15T12:40:51.000Z
|
2019-06-14T16:10:08.000Z
|
pcwg/reporting/colour.py
|
lzhiwen3090/PCWG
|
795e3ea267c7b87187dce04721c91a9d9c7999a7
|
[
"MIT"
] | 121
|
2015-01-06T11:31:25.000Z
|
2018-05-29T21:13:23.000Z
|
pcwg/reporting/colour.py
|
lzhiwen3090/PCWG
|
795e3ea267c7b87187dce04721c91a9d9c7999a7
|
[
"MIT"
] | 26
|
2015-01-15T12:41:09.000Z
|
2019-04-11T14:45:32.000Z
|
import xlwt
class ColourGradient:
def __init__(self, minimum, maximum, interval, book):
self.levels = {}
self.minimum = minimum
self.maximum = maximum
dataRange = maximum - minimum
steps = int(dataRange / interval) + 1
if steps >= 4:
steps_4 = steps // 4  # integer division, as under Python 2
else:
steps_4 = 1
for i in range(steps):
if (i <= steps_4):
red = 255
elif (i > steps_4 and i <= steps_4 * 2):
red = 255 - (255 / steps_4) * (i - steps_4)
elif (i > steps_4 * 2 and i <= steps_4 * 3):
red = (255 / 2 / steps_4) * (i - steps_4 * 2)
elif i < steps:
red = (255 / 2) - (255 / 2 / steps_4) * (i - steps_4 * 3)
else:
red = 0
if (i <= steps_4):
green = (255 / steps_4) * i
elif (i > steps_4 and i <= steps_4 * 2):
green = 255 - (255 / steps_4) * (i - steps_4)
elif (i > steps_4 * 2 and i <= steps_4 * 3):
green = (255 / steps_4) * (i - steps_4 * 2)
else:
green = 255
if (i <= steps_4):
blue = 0
elif (i > steps_4 and i <= steps_4 * 2):
blue = 0 + (255 / steps_4) * (i - steps_4)
elif i < steps:
blue = 255 - (255 / steps_4 / 2) * (i - steps_4 * 2)
else:
blue = 0
red = int(abs(red))
green = int(abs(green))
blue = int(abs(blue))
if (red > 255): red = 255
if (green > 255): green = 255
if (blue > 255): blue = 255
value = self.roundValue(minimum + i * interval)
excelIndex = 8 + i
colourName = "custom_colour_%d" % excelIndex
xlwt.add_palette_colour(colourName, excelIndex)
book.set_colour_RGB(excelIndex, red, green, blue)
style = xlwt.easyxf('pattern: pattern solid, fore_colour %s' % colourName, num_format_str='0%')
self.levels[value] = (red, green, blue, value, excelIndex, colourName, style)
def roundValue(self, value):
return round(value, 2)
def getStyle(self, value):
value = max(self.minimum, value)
value = min(self.maximum, value)
return self.levels[self.roundValue(value)][6]
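# A minimal usage sketch (assumption: values in [0, 1] at a 0.05 interval; the
# palette indices 8+i must stay within xlwt's custom-colour range):
#   book = xlwt.Workbook()
#   gradient = ColourGradient(0.0, 1.0, 0.05, book)
#   style = gradient.getStyle(0.45)  # style for the level rounded to 0.45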
| 32.474359
| 107
| 0.446901
| 293
| 2,533
| 3.716724
| 0.204778
| 0.176309
| 0.128558
| 0.05877
| 0.242424
| 0.209366
| 0.192837
| 0.161616
| 0.161616
| 0.077135
| 0
| 0.082979
| 0.443348
| 2,533
| 77
| 108
| 32.896104
| 0.689362
| 0
| 0
| 0.275862
| 0
| 0
| 0.022108
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051724
| false
| 0
| 0.017241
| 0.017241
| 0.12069
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ddf36ca544a3c8ccbf3d16260d57ec9db94a87c
| 2,021
|
py
|
Python
|
amap_distance_matrix/schemas/amap.py
|
Euraxluo/distance_matrix
|
680e3147c263ea5f1abb26998aeb0b1985442a4b
|
[
"MIT"
] | 1
|
2022-03-15T06:47:36.000Z
|
2022-03-15T06:47:36.000Z
|
amap_distance_matrix/schemas/amap.py
|
Euraxluo/distance_matrix
|
680e3147c263ea5f1abb26998aeb0b1985442a4b
|
[
"MIT"
] | null | null | null |
amap_distance_matrix/schemas/amap.py
|
Euraxluo/distance_matrix
|
680e3147c263ea5f1abb26998aeb0b1985442a4b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Time: 2022-03-01 15:43
# Copyright (c) 2022
# author: Euraxluo
from typing import *
from amap_distance_matrix.helper import haversine, format_loc
class AMapDefaultResultRouteStep(object):
def __init__(self, start: str, end: str):
self.polyline: str
self.instruction = "到达途经地"
self.orientation = "北"
self.road = "road"
self.distance = haversine(format_loc(start),format_loc(end))*1.5
self.tolls = "0"
self.toll_distance = "0"
self.toll_road = []
self.duration = self.distance / (25000 / 60 / 60)  # seconds at an assumed 25 km/h
self.action = []
self.assistant_action = "到达途经地"
self.tmcs: List
self.polyline = start + ";" + end
self.tmcs = [
{
"lcode": [],
"distance": "0",
"status": "畅通",
"polyline": self.polyline
}
]
class AMapDefaultResultPath(object):
def __init__(self, steps: List[AMapDefaultResultRouteStep]):
self.distance = "0"
self.duration = "0"
self.strategy = "速度最快"
self.tolls = "0"
self.toll_distance = "0"
self.steps = [i.__dict__ for i in steps]
self.restriction = "0"
self.traffic_lights = "0"
class AMapDefaultResultRoute(object):
def __init__(self, paths: AMapDefaultResultPath):
self.origin = "0"
self.destination = "0"
self.taxi_cost = "0"
self.paths = [paths.__dict__]
class AMapDefaultResult(object):
def __init__(self, points: List[str]):
self.status = "1"
self.info = "OK"
self.infocode = "10000"
self.count = "1"
self.route: AMapDefaultResultRoute
steps = []
for i, point in enumerate(points):
if i == 0:
continue
steps.append(AMapDefaultResultRouteStep(start=points[i - 1], end=point))
self.route = AMapDefaultResultRoute(paths=AMapDefaultResultPath(steps=steps)).__dict__
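# A minimal sketch (hypothetical "lon,lat" strings; route geometry is the rough
# haversine-based default above). Not invoked anywhere; illustrative only.
def _demo_default_result():
    points = ["116.481028,39.989643", "116.465302,40.004717", "116.434446,39.908160"]
    result = AMapDefaultResult(points)
    return result.route['paths'][0]['steps']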
| 29.720588
| 94
| 0.573973
| 213
| 2,021
| 5.262911
| 0.389671
| 0.044603
| 0.046387
| 0.06066
| 0.055308
| 0.055308
| 0.055308
| 0.055308
| 0
| 0
| 0
| 0.034801
| 0.303315
| 2,021
| 67
| 95
| 30.164179
| 0.761364
| 0.040079
| 0
| 0.074074
| 0
| 0
| 0.036176
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.037037
| 0
| 0.185185
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6de2331479d616c60c982b16b354a172879db20e
| 393
|
py
|
Python
|
at_learner_core/at_learner_core/models/init_model.py
|
hieuvecto/CASIA-SURF_CeFA
|
71dfd846ce968b3ed26974392a6e0c9b40aa12ae
|
[
"MIT"
] | 133
|
2020-03-03T03:58:04.000Z
|
2022-03-28T21:42:36.000Z
|
at_learner_core/at_learner_core/models/init_model.py
|
lucaslu1987/CASIA-SURF_CeFA
|
205d3d976523ed0c15d1e709ed7f21d50d7cf19b
|
[
"MIT"
] | 24
|
2020-03-13T09:30:09.000Z
|
2022-03-22T07:47:15.000Z
|
at_learner_core/at_learner_core/models/init_model.py
|
lucaslu1987/CASIA-SURF_CeFA
|
205d3d976523ed0c15d1e709ed7f21d50d7cf19b
|
[
"MIT"
] | 29
|
2020-03-10T06:46:45.000Z
|
2022-01-29T15:35:21.000Z
|
from .wrappers import SimpleClassifierWrapper
def get_wrapper(config, wrapper_func=None):
if wrapper_func is not None:
wrapper = wrapper_func(config)
elif config.wrapper_config.wrapper_name == 'SimpleClassifierWrapper':
wrapper = SimpleClassifierWrapper(config.wrapper_config)
else:
raise Exception('Unknown wrapper architecture type')
return wrapper
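# A minimal sketch of the wrapper_func hook (the config object is hypothetical):
#   def my_wrapper(config):
#       return SimpleClassifierWrapper(config.wrapper_config)
#   wrapper = get_wrapper(config, wrapper_func=my_wrapper)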
| 32.75
| 73
| 0.750636
| 42
| 393
| 6.857143
| 0.52381
| 0.180556
| 0.138889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183206
| 393
| 11
| 74
| 35.727273
| 0.897196
| 0
| 0
| 0
| 0
| 0
| 0.142494
| 0.058524
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6de2eb54f1f884015cd25862ba629bbde92b8312
| 11,739
|
py
|
Python
|
register.py
|
khvmaths/Register_UM_Crawl
|
2741bfe9267e9ad068b438b27141cfc664f140f2
|
[
"MIT"
] | null | null | null |
register.py
|
khvmaths/Register_UM_Crawl
|
2741bfe9267e9ad068b438b27141cfc664f140f2
|
[
"MIT"
] | null | null | null |
register.py
|
khvmaths/Register_UM_Crawl
|
2741bfe9267e9ad068b438b27141cfc664f140f2
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup
from urllib.request import Request,urlopen
from urllib.error import HTTPError
from PyQt5 import QtCore, QtGui, QtWidgets, Qt
import sys
import threading
import datetime
import win32con
import os
import struct
import time
import pyttsx3
from win32api import *
from win32gui import *
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(1236, 996)
self.groupBox = QtWidgets.QGroupBox(Form)
self.groupBox.setGeometry(QtCore.QRect(10, 10, 361, 831))
self.groupBox.setObjectName("groupBox")
self.tableWidget = QtWidgets.QTableWidget(self.groupBox)
self.tableWidget.setGeometry(QtCore.QRect(10, 30, 341, 791))
self.tableWidget.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents)
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(0)
self.tableWidget.setRowCount(0)
self.tableWidget.horizontalHeader().setCascadingSectionResizes(True)
self.groupBox_2 = QtWidgets.QGroupBox(Form)
self.groupBox_2.setGeometry(QtCore.QRect(380, 10, 681, 831))
self.groupBox_2.setObjectName("groupBox_2")
self.tableWidget_2 = QtWidgets.QTableWidget(self.groupBox_2)
self.tableWidget_2.setGeometry(QtCore.QRect(10, 30, 651, 791))
self.tableWidget_2.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents)
self.tableWidget_2.setObjectName("tableWidget_2")
self.tableWidget_2.setColumnCount(0)
self.tableWidget_2.setRowCount(0)
self.tableWidget_2.horizontalHeader().setCascadingSectionResizes(True)
self.groupBox_3 = QtWidgets.QGroupBox(Form)
self.groupBox_3.setGeometry(QtCore.QRect(1070, 10, 141, 80))
self.groupBox_3.setObjectName("groupBox_3")
self.pushButton = QtWidgets.QPushButton(self.groupBox_3)
self.pushButton.setGeometry(QtCore.QRect(10, 50, 111, 28))
self.pushButton.setObjectName("pushButton")
self.lineEdit = QtWidgets.QLineEdit(self.groupBox_3)
self.lineEdit.setGeometry(QtCore.QRect(10, 20, 113, 22))
self.lineEdit.setObjectName("lineEdit")
self.groupBox_4 = QtWidgets.QGroupBox(Form)
self.groupBox_4.setGeometry(QtCore.QRect(1070, 100, 141, 61))
self.groupBox_4.setObjectName("groupBox_4")
self.lineEdit_2 = QtWidgets.QLineEdit(self.groupBox_4)
self.lineEdit_2.setGeometry(QtCore.QRect(10, 20, 113, 22))
self.lineEdit_2.setText("")
self.lineEdit_2.setObjectName("lineEdit_2")
self.groupBox_5 = QtWidgets.QGroupBox(Form)
self.groupBox_5.setGeometry(QtCore.QRect(1070, 170, 141, 51))
self.groupBox_5.setObjectName("groupBox_5")
self.lineEdit_3 = QtWidgets.QLineEdit(self.groupBox_5)
self.lineEdit_3.setGeometry(QtCore.QRect(10, 20, 113, 22))
self.lineEdit_3.setText("")
self.lineEdit_3.setObjectName("lineEdit_3")
self.groupBox_6 = QtWidgets.QGroupBox(Form)
self.groupBox_6.setGeometry(QtCore.QRect(1070, 230, 141, 51))
self.groupBox_6.setObjectName("groupBox_6")
self.lineEdit_4 = QtWidgets.QLineEdit(self.groupBox_6)
self.lineEdit_4.setGeometry(QtCore.QRect(10, 20, 113, 22))
self.lineEdit_4.setText("")
self.lineEdit_4.setObjectName("lineEdit_4")
self.groupBox_7 = QtWidgets.QGroupBox(Form)
self.groupBox_7.setGeometry(QtCore.QRect(10, 840, 1051, 151))
self.groupBox_7.setObjectName("groupBox_7")
self.listWidget = QtWidgets.QListWidget(self.groupBox_7)
self.listWidget.setGeometry(QtCore.QRect(10, 20, 1021, 121))
self.listWidget.setObjectName("listWidget")
self.label = QtWidgets.QLabel(Form)
self.label.setGeometry(QtCore.QRect(1090, 930, 131, 41))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName("label")
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "REGISTER UM"))
self.groupBox.setTitle(_translate("Form", "Elective"))
self.tableWidget.setSortingEnabled(True)
self.groupBox_2.setTitle(_translate("Form", "KoK"))
self.tableWidget_2.setSortingEnabled(True)
self.groupBox_3.setTitle(_translate("Form", "Set Timer (sec)"))
self.pushButton.setText(_translate("Form", "START!!"))
self.lineEdit.setText(_translate("Form", "5"))
self.groupBox_4.setTitle(_translate("Form", "Targeted Elective 1"))
self.groupBox_5.setTitle(_translate("Form", "Targeted Elective 2"))
self.groupBox_6.setTitle(_translate("Form", "Targeted KoK 1"))
self.groupBox_7.setTitle(_translate("Form", "Command"))
self.label.setText(_translate("Form", "A program by\n"
"hongvin"))
class WindowsBalloonTip:
def __init__(self):
message_map = {
win32con.WM_DESTROY: self.OnDestroy,
}
# Register the Window class.
wc = WNDCLASS()
self.hinst = wc.hInstance = GetModuleHandle(None)
wc.lpszClassName = "PythonTaskbar"
wc.lpfnWndProc = message_map # could also specify a wndproc.
self.classAtom = RegisterClass(wc)
def ShowWindow(self,title, msg):
# Create the Window.
style = win32con.WS_OVERLAPPED | win32con.WS_SYSMENU
self.hwnd = CreateWindow( self.classAtom, "Taskbar", style, \
0, 0, win32con.CW_USEDEFAULT, win32con.CW_USEDEFAULT, \
0, 0, self.hinst, None)
UpdateWindow(self.hwnd)
#iconPathName = os.path.abspath(os.path.join( sys.path[0], "favicon.ico" ))
icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE
hicon = LoadIcon(0, win32con.IDI_APPLICATION)
flags = NIF_ICON | NIF_MESSAGE | NIF_TIP
nid = (self.hwnd, 0, flags, win32con.WM_USER+20, hicon, "tooltip")
Shell_NotifyIcon(NIM_ADD, nid)
Shell_NotifyIcon(NIM_MODIFY, \
(self.hwnd, 0, NIF_INFO, win32con.WM_USER+20,\
hicon, "Balloon tooltip",msg,200,title))
# self.show_balloon(title, msg)
DestroyWindow(self.hwnd)
def OnDestroy(self, hwnd, msg, wparam, lparam):
nid = (self.hwnd, 0)
Shell_NotifyIcon(NIM_DELETE, nid)
PostQuitMessage(0) # Terminate the app.
w=WindowsBalloonTip()
engine = pyttsx3.init()
def TTS(text,grp=""):
split=" ".join(text)
if grp=="":
engine.say('Found!'+split)
else:
engine.say('Found!' + split+'Group '+str(grp))
engine.runAndWait()
class App(QtWidgets.QMainWindow,Ui_Form):
def __init__(self):
super(self.__class__,self).__init__()
self.setupUi(self)
self.pushButton.clicked.connect(self.startengine)
def startengine(self):
self.listWidget.scrollToBottom()
self.lineEdit.setEnabled(False)
timeout=float(self.lineEdit.text())
time_now=time.time()
e1=self.lineEdit_2.text()
e2=self.lineEdit_3.text()
k1=self.lineEdit_4.text()
e1b=False
e2b=False
k1b=False
courses=['','','']
url = 'http://register.um.edu.my/el_kosong_bi.asp'
request = Request(url)
try:
self.tableWidget.setEnabled(True)
json = urlopen(request).read().decode()
soup = BeautifulSoup(json,"html.parser")
a = soup.find_all('div')
self.tableWidget.setColumnCount(3)
self.tableWidget.setRowCount((len(a)-1)/3)
self.tableWidget.setHorizontalHeaderLabels(["Subject Code","Group","Vacant"])
j=-1
k=0
for i in range(0,len(a)):
if i%3==0:
j+=1
k=0
self.tableWidget.setItem(j,k,QtWidgets.QTableWidgetItem(a[i].text))
if e1==a[i].text:
self.listWidget.addItem('['+str(datetime.datetime.now().time())+']: Match found for targeted elective '+a[i].text+' (Group '+a[i+1].text+')')
e1b=True
courses[0]=(a[i].text+'(G'+a[i+1].text+')')
TTS(a[i].text,a[i+1].text)
if e2==a[i].text:
self.listWidget.addItem('['+str(datetime.datetime.now().time())+']: Match found for targeted elective '+a[i].text+' (Group '+a[i+1].text+')')
e2b=True
courses[1]=(a[i].text+'(G'+a[i+1].text+')')
TTS(a[i].text,a[i+1].text)
k+=1
except Exception as e:
print("Error",e)
self.tableWidget.setEnabled(False)
self.listWidget.addItem('['+str(datetime.datetime.now().time())+']: Error occurred at Elective')
self.lineEdit.setEnabled(True)
url = 'http://register.um.edu.my/kok_kosong_bi.asp'
request = Request(url)
try:
self.tableWidget_2.setEnabled(True)
json = urlopen(request).read().decode()
soup = BeautifulSoup(json,"html.parser")
a = soup.find_all('td')
self.tableWidget_2.setColumnCount(3)
self.tableWidget_2.setRowCount((len(a)-10)/4)
self.tableWidget_2.setHorizontalHeaderLabels(["Subject Code","Course Name","Vacant"])
j=-1
k=0
m=1
for i in range(9,len(a)-1):
if (i-m)%4==0:
j+=1
k=0
continue
if a[i].text=='Bil' or a[i].text=='Code' or a[i].text=='Description' or a[i].text=='Vacant':
m=2
if a[i].text=='Vacant':
j-=1
self.tableWidget_2.setRowCount(((len(a)-10)/4)-1)
continue
if k1==a[i].text:
self.listWidget.addItem('['+str(datetime.datetime.now().time())+']: Match found for targeted KoK '+a[i].text)
k1b=True
courses[2]=(a[i].text)
TTS(a[i].text)
self.tableWidget_2.setItem(j,k,QtWidgets.QTableWidgetItem(a[i].text))
k+=1
except Exception as e:
print("Error",e)
self.tableWidget.setEnabled(False)
self.listWidget.addItem('['+str(datetime.datetime.now().time())+']: Error occurred at KoK')
self.lineEdit.setEnabled(True)
self.tableWidget.resizeColumnsToContents()
self.tableWidget_2.resizeColumnsToContents()
self.listWidget.scrollToBottom()
self.listWidget.addItem('['+str(datetime.datetime.now().time())+']: List refreshed')
if e1b==True or e2b==True or k1b==True:
w.ShowWindow("Matching course found!","Course Code: {0} {1} {2}".format(*courses))
next_time=time_now+timeout
t=threading.Timer(next_time-time.time(),self.startengine)
t.daemon=True
t.start()
def main():
app=QtWidgets.QApplication(sys.argv)
form=App()
form.show()
app.exec_()
if __name__ == '__main__':
main()
| 43.639405
| 164
| 0.597155
| 1,316
| 11,739
| 5.217325
| 0.224164
| 0.062919
| 0.016604
| 0.034955
| 0.31838
| 0.205797
| 0.176668
| 0.176668
| 0.148849
| 0.110399
| 0
| 0.042606
| 0.274214
| 11,739
| 268
| 165
| 43.802239
| 0.763263
| 0.016867
| 0
| 0.149378
| 0
| 0
| 0.076158
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037344
| false
| 0
| 0.058091
| 0
| 0.107884
| 0.008299
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6de329be760fa541cce9f8961d309f42264a1df3
| 1,604
|
py
|
Python
|
tests/utils.py
|
ofek/hatch-containers
|
dd57acc812db8e62994f2b00160a05292d5f35c1
|
[
"MIT"
] | 3
|
2021-12-29T06:44:41.000Z
|
2022-02-28T09:27:20.000Z
|
tests/utils.py
|
ofek/hatch-containers
|
dd57acc812db8e62994f2b00160a05292d5f35c1
|
[
"MIT"
] | null | null | null |
tests/utils.py
|
ofek/hatch-containers
|
dd57acc812db8e62994f2b00160a05292d5f35c1
|
[
"MIT"
] | null | null | null |
# SPDX-FileCopyrightText: 2021-present Ofek Lev <oss@ofek.dev>
#
# SPDX-License-Identifier: MIT
import subprocess
from textwrap import dedent as _dedent
import tomli
import tomli_w
def dedent(text):
return _dedent(text[1:])
def check_container_output(container_name, command):
return subprocess.check_output(['docker', 'exec', container_name, *command]).decode('utf-8')
def container_exists(container_name):
output = (
subprocess.check_output(['docker', 'ps', '-a', '--format', '{{.Names}}', '--filter', f'name={container_name}'])
.strip()
.decode('utf-8')
)
return any(line.strip() == container_name for line in output.splitlines())
def container_running(container_name):
output = (
subprocess.check_output(['docker', 'ps', '--format', '{{.Names}}', '--filter', f'name={container_name}'])
.strip()
.decode('utf-8')
)
return any(line.strip() == container_name for line in output.splitlines())
def update_project_environment(project, name, config):
project_file = project.root / 'pyproject.toml'
with open(str(project_file), 'r', encoding='utf-8') as f:
raw_config = tomli.loads(f.read())
env_config = raw_config.setdefault('tool', {}).setdefault('hatch', {}).setdefault('envs', {}).setdefault(name, {})
env_config.update(config)
project.config.envs[name] = project.config.envs.get(name, project.config.envs['default']).copy()
project.config.envs[name].update(env_config)
with open(str(project_file), 'w', encoding='utf-8') as f:
f.write(tomli_w.dumps(raw_config))
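# A minimal usage sketch (hypothetical container name; needs a running Docker daemon):
#   if container_running('hatch-test'):
#       print(check_container_output('hatch-test', ['python', '--version']))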
| 30.846154
| 119
| 0.667082
| 204
| 1,604
| 5.098039
| 0.357843
| 0.1
| 0.065385
| 0.077885
| 0.373077
| 0.301923
| 0.301923
| 0.301923
| 0.209615
| 0.209615
| 0
| 0.007424
| 0.160224
| 1,604
| 51
| 120
| 31.45098
| 0.764662
| 0.055486
| 0
| 0.25
| 0
| 0
| 0.121112
| 0.027796
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15625
| false
| 0
| 0.125
| 0.0625
| 0.40625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6de4785de957dcd93698e538204a1309d3d31d03
| 881
|
py
|
Python
|
Question Set 3 - (Functions)/Version 1/main.py
|
Randula98/Python-For-Beginners
|
e41a6014be882f01c6ccdcbe2167e2b581646eee
|
[
"MIT"
] | 6
|
2021-12-14T17:52:11.000Z
|
2021-12-19T20:22:44.000Z
|
Question Set 3 - (Functions)/Version 1/main.py
|
GIHAA/Python-For-Beginners
|
e41a6014be882f01c6ccdcbe2167e2b581646eee
|
[
"MIT"
] | null | null | null |
Question Set 3 - (Functions)/Version 1/main.py
|
GIHAA/Python-For-Beginners
|
e41a6014be882f01c6ccdcbe2167e2b581646eee
|
[
"MIT"
] | 2
|
2021-12-19T18:50:30.000Z
|
2022-01-01T23:05:18.000Z
|
#define calcIncrement function
def calcIncrement(salary , noOfYearsWorked):
if(noOfYearsWorked > 2):
increment = (salary * 10 / 100)
else:
increment = 0
return increment
#define calcTotalSalary function
def calcTotalSalary(salary , increment):
total = salary + increment
return total
#get user inputs for salary
salary = input("Enter Salary : ")
salary = float(salary)
#get user inputs for number of years worked
years = input("Enter no of years worked : ")
years = int(years)
#calculate the increment by passing the given values to the function
increment = calcIncrement(salary , years)
#calculate the total salary by passing the given values to the function
totalSalary = calcTotalSalary(salary , increment)
#display the increment and the total salary
print("Increment : " + str(increment))
print("Total Salary : " + str(totalSalary))
| 26.69697
| 71
| 0.732123
| 108
| 881
| 5.972222
| 0.37963
| 0.068217
| 0.093023
| 0.049612
| 0.111628
| 0.111628
| 0.111628
| 0.111628
| 0
| 0
| 0
| 0.00979
| 0.188422
| 881
| 32
| 72
| 27.53125
| 0.892308
| 0.348468
| 0
| 0
| 0
| 0
| 0.121908
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0
| 0
| 0.235294
| 0.117647
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6de56b62032d39a0f8c492c5d736fc6926aeb427
| 2,576
|
py
|
Python
|
setup.py
|
vomaufgang/publish
|
6e610c055118f9761d49962a12d9095cf2936386
|
[
"MIT"
] | 1
|
2019-08-19T01:45:29.000Z
|
2019-08-19T01:45:29.000Z
|
setup.py
|
vomaufgang/publish
|
6e610c055118f9761d49962a12d9095cf2936386
|
[
"MIT"
] | 11
|
2019-08-18T09:31:10.000Z
|
2021-01-27T19:02:53.000Z
|
setup.py
|
vomaufgang/publish
|
6e610c055118f9761d49962a12d9095cf2936386
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# anited. publish - Python package with cli to turn markdown files into ebooks
# Copyright (c) 2014 Christopher Knörndel
#
# Distributed under the MIT License
# (license terms are at http://opensource.org/licenses/MIT).
"""Setup script for easy_install and pip."""
import sys
import codecs
import os.path
MIN_SUPPORTED_PYTHON_VERSION = (3, 6)
if sys.version_info < MIN_SUPPORTED_PYTHON_VERSION:
sys.exit('Sorry, Python < {} is not supported.'.format(
'.'.join(map(str, MIN_SUPPORTED_PYTHON_VERSION))
))
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def read(rel_path):
"""Reads the contents of the file atthe relative path `rel_path`.
"""
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, rel_path), 'r') as file_:
return file_.read()
def get_version(rel_path):
"""Gets the version number declared in the `__version__` constant of
the Python file at `rel_path`.
"""
for line in read(rel_path).splitlines():
if line.startswith('__version__'):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
raise RuntimeError("Unable to find version string.")
README = open('README.md').read()
VERSION = get_version('publish/__init__.py')
REQUIREMENTS = open('requirements.txt').readlines()
DEV_REQUIREMENTS = open('dev-requirements.txt').readlines()[1:]
setup(
name='anited-publish',
version=VERSION,
description='Python package with command line interface to turn markdown '
'files into ebooks.',
long_description=README,
long_description_content_type='text/markdown',
author='Christopher Knörndel',
author_email='cknoerndel@anited.de',
url='https://gitlab.com/anited/publish',
packages=[
'publish',
],
package_data={
'publish': ['template.jinja', 'VERSION']
},
entry_points={
'console_scripts': [
'publish = publish.cli:main'
]
},
python_requires=">=3.6",
install_requires=REQUIREMENTS,
tests_require=DEV_REQUIREMENTS,
extras_require={
'dev': DEV_REQUIREMENTS
},
license="MIT",
zip_safe=False,
keywords='publish',
classifiers=[
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
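# Sketch of the version lookup above: get_version('publish/__init__.py') scans for
# a line like __version__ = "1.2.3" and returns "1.2.3" without importing the
# package (an import could fail before its dependencies are installed).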
| 28
| 78
| 0.65295
| 309
| 2,576
| 5.271845
| 0.511327
| 0.025783
| 0.033149
| 0.046041
| 0.035605
| 0.035605
| 0
| 0
| 0
| 0
| 0
| 0.007921
| 0.215839
| 2,576
| 91
| 79
| 28.307692
| 0.798515
| 0.177795
| 0
| 0.03125
| 0
| 0
| 0.285235
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.09375
| 0
| 0.15625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6de7ad1350d5b902468a609df5d16498912264b6
| 1,347
|
py
|
Python
|
examples/DGL/alagnn.py
|
dongzizhu/GraphGallery
|
c65eab42daeb52de5019609fe7b368e30863b4ae
|
[
"MIT"
] | 1
|
2020-07-29T08:00:32.000Z
|
2020-07-29T08:00:32.000Z
|
examples/DGL/alagnn.py
|
dongzizhu/GraphGallery
|
c65eab42daeb52de5019609fe7b368e30863b4ae
|
[
"MIT"
] | null | null | null |
examples/DGL/alagnn.py
|
dongzizhu/GraphGallery
|
c65eab42daeb52de5019609fe7b368e30863b4ae
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
import random
import math
import torch
import dgl
import graphgallery
from graphgallery.datasets import Planetoid
print("GraphGallery version: ", graphgallery.__version__)
print("PyTorch version: ", torch.__version__)
print("DGL version: ", dgl.__version__)
'''
Load Datasets
- cora/citeseer/pubmed
'''
data = Planetoid('cora', root="~/GraphData/datasets/", verbose=False)
graph = data.graph
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# splits = data.split_nodes()
graphgallery.set_backend("dgl")
# experimental setup in
# `When Do GNNs Work: Understanding and Improving Neighborhood Aggregation
# <https://www.ijcai.org/Proceedings/2020/0181.pdf>`
random.seed(2020)
split = 0.01
n_nodes = graph.num_nodes
sample_size = math.ceil(n_nodes * split)
train_idx = random.sample(range(n_nodes - 1000), sample_size)
train_nodes = [idx if idx < 500 else idx + 1000 for idx in train_idx]  # shift past the held-out block (nodes 500-1499)
test_nodes = list(range(500, 1500))
from graphgallery.gallery.nodeclas import ALaGCN, ALaGAT
# trainer = ALaGAT(device=device, seed=123).setup_graph(graph).build()
trainer = ALaGCN(device=device, seed=123).setup_graph(graph).build()
trainer.fit(train_nodes, verbose=1)
results = trainer.evaluate(test_nodes)
print(f'Test loss {results.loss:.5}, Test accuracy {results.accuracy:.2%}')
| 32.071429
| 83
| 0.76095
| 192
| 1,347
| 5.1875
| 0.489583
| 0.018072
| 0.032129
| 0.038153
| 0.092369
| 0.092369
| 0.092369
| 0.092369
| 0.092369
| 0
| 0
| 0.035626
| 0.103935
| 1,347
| 41
| 84
| 32.853659
| 0.789561
| 0.205642
| 0
| 0
| 0
| 0
| 0.149166
| 0.042198
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.28
| 0
| 0.28
| 0.16
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6dea6c9087b914af198d695841147682ba1f18e7
| 1,453
|
py
|
Python
|
garden_test/setup.py
|
jad-b/garden
|
44169c57fdaa08e0edd751d7459da99334e97323
|
[
"MIT"
] | null | null | null |
garden_test/setup.py
|
jad-b/garden
|
44169c57fdaa08e0edd751d7459da99334e97323
|
[
"MIT"
] | null | null | null |
garden_test/setup.py
|
jad-b/garden
|
44169c57fdaa08e0edd751d7459da99334e97323
|
[
"MIT"
] | null | null | null |
import subprocess
import os
from setuptools import setup, find_packages
def readme():
with open('README.md') as _file:
return _file.read()
def requirements():
reqs_file = 'reqs.txt'
if os.path.isfile(reqs_file):
with open('reqs.txt') as reqs:
return [line.strip() for line in reqs
if line and not line.startswith('#')]
return []
def latest_git_tag():
try:
tag = subprocess.check_output(
['git', 'describe', '--abbrev=0', '--tags']
).decode().rstrip()
except subprocess.CalledProcessError:
return '0.0.0'
return tag
setup(
name='garden_test',
version=latest_git_tag(),
long_description=readme(),
description='Python package for testing garden',
author='Jeremy Dobbins-Bucklad',
author_email='j.american.db@gmail.com',
url='https://github.com/jad-b/garden',
install_requires=requirements(),
packages=find_packages(),
package_dir={'garden': 'garden_test'},
py_modules=['testfile'],
entry_points={
'garden.bump': ['garden_test = garden_test.bump:Bumper.bump'],
},
zip_safe=False,
include_package_data=True,
classifiers=(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5'
),
)
| 26.907407
| 70
| 0.618032
| 166
| 1,453
| 5.26506
| 0.590361
| 0.045767
| 0.02746
| 0.059497
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007266
| 0.242257
| 1,453
| 53
| 71
| 27.415094
| 0.786558
| 0
| 0
| 0
| 0
| 0
| 0.295251
| 0.0351
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065217
| false
| 0
| 0.065217
| 0
| 0.23913
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6debe89876c11c370db73006de84c2358493d8ef
| 19,992
|
py
|
Python
|
test/coco_save.py
|
ZCDu/CenternessNet
|
03f5d01999a4e1595eaceef9f62b4450ed017843
|
[
"MIT"
] | null | null | null |
test/coco_save.py
|
ZCDu/CenternessNet
|
03f5d01999a4e1595eaceef9f62b4450ed017843
|
[
"MIT"
] | null | null | null |
test/coco_save.py
|
ZCDu/CenternessNet
|
03f5d01999a4e1595eaceef9f62b4450ed017843
|
[
"MIT"
] | null | null | null |
import os
import cv2
import pdb
import json
import copy
import numpy as np
import torch
from PIL import Image, ImageDraw, ImageFont
import matplotlib.pyplot as plt
import matplotlib
import math
from tqdm import tqdm
from config import system_configs
from utils import crop_image, normalize_
from external.nms import soft_nms, soft_nms_merge
colours = np.random.rand(80, 3)
def _rescale_dets(detections, ratios, borders, sizes):
xs, ys = detections[..., 0:4:2], detections[..., 1:4:2]
xs /= ratios[:, 1][:, None, None]
ys /= ratios[:, 0][:, None, None]
xs -= borders[:, 2][:, None, None]
ys -= borders[:, 0][:, None, None]
tx_inds = xs[:, :, 0] <= -5
bx_inds = xs[:, :, 1] >= sizes[0, 1] + 5
ty_inds = ys[:, :, 0] <= -5
by_inds = ys[:, :, 1] >= sizes[0, 0] + 5
np.clip(xs, 0, sizes[:, 1][:, None, None], out=xs)
np.clip(ys, 0, sizes[:, 0][:, None, None], out=ys)
detections[:, tx_inds[0, :], 4] = -1
detections[:, bx_inds[0, :], 4] = -1
detections[:, ty_inds[0, :], 4] = -1
detections[:, by_inds[0, :], 4] = -1
def save_image(data, fn):
sizes = np.shape(data)
height = float(sizes[0])
width = float(sizes[1])
fig = plt.figure()
fig.set_size_inches(width / height, 1, forward=False)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
ax.imshow(data)
plt.savefig(fn, dpi=height)
plt.close()
def kp_decode(nnet, images, K, ae_threshold=0.5, kernel=3):
detections, center = nnet.test([images],
ae_threshold=ae_threshold,
K=K,
kernel=kernel)
detections = detections.data.cpu().numpy()
center = center.data.cpu().numpy()
return detections, center
def kp_detection(db, nnet, result_dir, debug=False, decode_func=kp_decode):
debug_dir = os.path.join(result_dir, "debug")
if not os.path.exists(debug_dir):
os.makedirs(debug_dir)
if db.split != "trainval":
db_inds = db.db_inds[:100] if debug else db.db_inds
else:
db_inds = db.db_inds[:100] if debug else db.db_inds[:5000]
num_images = db_inds.size
K = db.configs["top_k"]
ae_threshold = db.configs["ae_threshold"]
nms_kernel = db.configs["nms_kernel"]
scales = db.configs["test_scales"]
weight_exp = db.configs["weight_exp"]
merge_bbox = db.configs["merge_bbox"]
categories = db.configs["categories"]
nms_threshold = db.configs["nms_threshold"]
max_per_image = db.configs["max_per_image"]
nms_algorithm = {
"nms": 0,
"linear_soft_nms": 1,
"exp_soft_nms": 2
}[db.configs["nms_algorithm"]]
top_bboxes = {}
num_images = 1
for root, dirs, files in os.walk(
"/media/dl/train_disk/zcdu/work/CenterNet/pic"):
for f in files:
#db_ind = db_inds[ind]
#image_id = db.image_ids(db_ind)
#image_file = db.image_file(db_ind)
#name = os.path.join(root, f)
            #print('name:', name)
#image_file = os.path.join('/media/dl/train_disk/zcdu/work/CenterNet',
# name)
image_file = os.path.join(root, f)
print("image:", image_file)
image = cv2.imread(image_file)
height, width = image.shape[0:2]
detections = []
center_points = []
for scale in scales:
new_height = int(height * scale)
new_width = int(width * scale)
new_center = np.array([new_height // 2, new_width // 2])
                inp_height = new_height | 127  # pad so (inp_height + 1) is a multiple of 128
                inp_width = new_width | 127
images = np.zeros((1, 3, inp_height, inp_width),
dtype=np.float32)
ratios = np.zeros((1, 2), dtype=np.float32)
borders = np.zeros((1, 4), dtype=np.float32)
sizes = np.zeros((1, 2), dtype=np.float32)
out_height, out_width = (inp_height + 1) // 4, (inp_width +
1) // 4
height_ratio = out_height / inp_height
width_ratio = out_width / inp_width
resized_image = cv2.resize(image, (new_width, new_height))
resized_image, border, offset = crop_image(
resized_image, new_center, [inp_height, inp_width])
resized_image = resized_image / 255.
normalize_(resized_image, db.mean, db.std)
images[0] = resized_image.transpose((2, 0, 1))
borders[0] = border
sizes[0] = [int(height * scale), int(width * scale)]
ratios[0] = [height_ratio, width_ratio]
images = np.concatenate((images, images[:, :, :, ::-1]),
axis=0)
images = torch.from_numpy(images)
dets, center = decode_func(nnet,
images,
K,
ae_threshold=ae_threshold,
kernel=nms_kernel)
dets = dets.reshape(2, -1, 8)
center = center.reshape(2, -1, 4)
dets[1, :, [0, 2]] = out_width - dets[1, :, [2, 0]]
center[1, :, [0]] = out_width - center[1, :, [0]]
dets = dets.reshape(1, -1, 8)
center = center.reshape(1, -1, 4)
_rescale_dets(dets, ratios, borders, sizes)
center[..., [0]] /= ratios[:, 1][:, None, None]
center[..., [1]] /= ratios[:, 0][:, None, None]
center[..., [0]] -= borders[:, 2][:, None, None]
center[..., [1]] -= borders[:, 0][:, None, None]
np.clip(center[..., [0]],
0,
sizes[:, 1][:, None, None],
out=center[..., [0]])
np.clip(center[..., [1]],
0,
sizes[:, 0][:, None, None],
out=center[..., [1]])
dets[:, :, 0:4] /= scale
center[:, :, 0:2] /= scale
if scale == 1:
center_points.append(center)
detections.append(dets)
detections = np.concatenate(detections, axis=1)
center_points = np.concatenate(center_points, axis=1)
classes = detections[..., -1]
classes = classes[0]
detections = detections[0]
center_points = center_points[0]
valid_ind = detections[:, 4] > -1
valid_detections = detections[valid_ind]
box_width = valid_detections[:, 2] - valid_detections[:, 0]
box_height = valid_detections[:, 3] - valid_detections[:, 1]
s_ind = (box_width * box_height <= 22500)
l_ind = (box_width * box_height > 22500)
s_detections = valid_detections[s_ind]
l_detections = valid_detections[l_ind]
s_left_x = (2 * s_detections[:, 0] + s_detections[:, 2]) / 3
s_right_x = (s_detections[:, 0] + 2 * s_detections[:, 2]) / 3
s_top_y = (2 * s_detections[:, 1] + s_detections[:, 3]) / 3
s_bottom_y = (s_detections[:, 1] + 2 * s_detections[:, 3]) / 3
s_temp_score = copy.copy(s_detections[:, 4])
s_detections[:, 4] = -1
center_x = center_points[:, 0][:, np.newaxis]
center_y = center_points[:, 1][:, np.newaxis]
s_left_x = s_left_x[np.newaxis, :]
s_right_x = s_right_x[np.newaxis, :]
s_top_y = s_top_y[np.newaxis, :]
s_bottom_y = s_bottom_y[np.newaxis, :]
ind_lx = (center_x - s_left_x) > 0
ind_rx = (center_x - s_right_x) < 0
ind_ty = (center_y - s_top_y) > 0
ind_by = (center_y - s_bottom_y) < 0
ind_cls = (center_points[:, 2][:, np.newaxis] -
s_detections[:, -1][np.newaxis, :]) == 0
ind_s_new_score = np.max(
((ind_lx + 0) & (ind_rx + 0) & (ind_ty + 0) & (ind_by + 0) &
(ind_cls + 0)),
axis=0) == 1
index_s_new_score = np.argmax(
((ind_lx + 0) & (ind_rx + 0) & (ind_ty + 0) & (ind_by + 0) &
(ind_cls + 0))[:, ind_s_new_score],
axis=0)
s_detections[:, 4][ind_s_new_score] = (
s_temp_score[ind_s_new_score] * 2 +
center_points[index_s_new_score, 3]) / 3
l_left_x = (3 * l_detections[:, 0] + 2 * l_detections[:, 2]) / 5
l_right_x = (2 * l_detections[:, 0] + 3 * l_detections[:, 2]) / 5
l_top_y = (3 * l_detections[:, 1] + 2 * l_detections[:, 3]) / 5
l_bottom_y = (2 * l_detections[:, 1] + 3 * l_detections[:, 3]) / 5
l_temp_score = copy.copy(l_detections[:, 4])
l_detections[:, 4] = -1
center_x = center_points[:, 0][:, np.newaxis]
center_y = center_points[:, 1][:, np.newaxis]
l_left_x = l_left_x[np.newaxis, :]
l_right_x = l_right_x[np.newaxis, :]
l_top_y = l_top_y[np.newaxis, :]
l_bottom_y = l_bottom_y[np.newaxis, :]
ind_lx = (center_x - l_left_x) > 0
ind_rx = (center_x - l_right_x) < 0
ind_ty = (center_y - l_top_y) > 0
ind_by = (center_y - l_bottom_y) < 0
ind_cls = (center_points[:, 2][:, np.newaxis] -
l_detections[:, -1][np.newaxis, :]) == 0
ind_l_new_score = np.max(
((ind_lx + 0) & (ind_rx + 0) & (ind_ty + 0) & (ind_by + 0) &
(ind_cls + 0)),
axis=0) == 1
index_l_new_score = np.argmax(
((ind_lx + 0) & (ind_rx + 0) & (ind_ty + 0) & (ind_by + 0) &
(ind_cls + 0))[:, ind_l_new_score],
axis=0)
l_detections[:, 4][ind_l_new_score] = (
l_temp_score[ind_l_new_score] * 2 +
center_points[index_l_new_score, 3]) / 3
detections = np.concatenate([l_detections, s_detections], axis=0)
detections = detections[np.argsort(-detections[:, 4])]
classes = detections[..., -1]
#for i in range(detections.shape[0]):
# box_width = detections[i,2]-detections[i,0]
# box_height = detections[i,3]-detections[i,1]
# if box_width*box_height<=22500 and detections[i,4]!=-1:
# left_x = (2*detections[i,0]+1*detections[i,2])/3
# right_x = (1*detections[i,0]+2*detections[i,2])/3
# top_y = (2*detections[i,1]+1*detections[i,3])/3
# bottom_y = (1*detections[i,1]+2*detections[i,3])/3
# temp_score = copy.copy(detections[i,4])
# detections[i,4] = -1
# for j in range(center_points.shape[0]):
# if (classes[i] == center_points[j,2])and \
# (center_points[j,0]>left_x and center_points[j,0]< right_x) and \
# ((center_points[j,1]>top_y and center_points[j,1]< bottom_y)):
# detections[i,4] = (temp_score*2 + center_points[j,3])/3
# break
# elif box_width*box_height > 22500 and detections[i,4]!=-1:
# left_x = (3*detections[i,0]+2*detections[i,2])/5
# right_x = (2*detections[i,0]+3*detections[i,2])/5
# top_y = (3*detections[i,1]+2*detections[i,3])/5
# bottom_y = (2*detections[i,1]+3*detections[i,3])/5
# temp_score = copy.copy(detections[i,4])
# detections[i,4] = -1
# for j in range(center_points.shape[0]):
# if (classes[i] == center_points[j,2])and \
# (center_points[j,0]>left_x and center_points[j,0]< right_x) and \
# ((center_points[j,1]>top_y and center_points[j,1]< bottom_y)):
# detections[i,4] = (temp_score*2 + center_points[j,3])/3
# break
# reject detections with negative scores
keep_inds = (detections[:, 4] > -1)
detections = detections[keep_inds]
classes = classes[keep_inds]
image_id = 0
top_bboxes[image_id] = {}
for j in range(categories):
keep_inds = (classes == j)
top_bboxes[image_id][j +
1] = detections[keep_inds][:, 0:7].astype(
np.float32)
if merge_bbox:
soft_nms_merge(top_bboxes[image_id][j + 1],
Nt=nms_threshold,
method=nms_algorithm,
weight_exp=weight_exp)
else:
soft_nms(top_bboxes[image_id][j + 1],
Nt=nms_threshold,
method=nms_algorithm)
top_bboxes[image_id][j + 1] = top_bboxes[image_id][j + 1][:,
0:5]
scores = np.hstack([
top_bboxes[image_id][j][:, -1]
for j in range(1, categories + 1)
])
if len(scores) > max_per_image:
kth = len(scores) - max_per_image
thresh = np.partition(scores, kth)[kth]
for j in range(1, categories + 1):
keep_inds = (top_bboxes[image_id][j][:, -1] >= thresh)
top_bboxes[image_id][j] = top_bboxes[image_id][j][
keep_inds]
if debug:
#image_file = db.image_file(db_ind)
#image_file = os.path.join(
# "/media/dl/train_disk/zcdu/work/CenterNet", name)
image = cv2.imread(image_file)
im = image[:, :, (2, 1, 0)]
fig, ax = plt.subplots(figsize=(12, 12))
fig = ax.imshow(im, aspect='equal')
plt.axis('off')
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
#bboxes = {}
for j in range(1, categories + 1):
keep_inds = (top_bboxes[image_id][j][:, -1] >= 0.4)
cat_name = db.class_name(j)
bboxes = top_bboxes[image_id][j][keep_inds]
if len(bboxes) > 3:
bboxes = select_box(bboxes)
#print('test select_box bboxes later:', bboxes.shape)
if len(bboxes) == 0:
continue
p1 = 0
for bbox in bboxes:
p1 += 1
bbox = bbox[0:4].astype(np.int32)
xmin = bbox[0]
ymin = bbox[1]
xmax = bbox[2]
ymax = bbox[3]
#if (xmax - xmin) * (ymax - ymin) > 5184:
ax.add_patch(
plt.Rectangle((xmin, ymin),
xmax - xmin,
ymax - ymin,
fill=False,
edgecolor=colours[j - 1],
linewidth=4.0))
ax.text(xmin + 1,
ymin - 3,
'{:s}'.format(cat_name),
bbox=dict(facecolor=colours[j - 1],
ec='black',
lw=2,
alpha=0.5),
fontsize=15,
color='white',
weight='bold')
print("count:!!!!!!!!", p1)
out_name = f.replace('jpg', 'pdf')
debug_file1 = os.path.join(
"/media/dl/train_disk/zcdu/work/CenterNet", "result",
"centernet_lite", "{}".format(out_name))
debug_file2 = os.path.join(
"/media/dl/train_disk/zcdu/work/CenterNet", "result",
"centernet_lite", f)
plt.savefig(debug_file1)
plt.savefig(debug_file2)
plt.close()
debug_file = os.path.join(
"/media/dl/train_disk/zcdu/work/CenterNet", "result",
"centernet_lite", '{}'.format(f))
cv2.imwrite(debug_file, image,
[int(cv2.IMWRITE_JPEG_QUALITY), 100])
#result_json = os.path.join(result_dir, "results.json")
#detections = db.convert_to_coco(top_bboxes)
#with open(result_json, "w") as f:
# json.dump(detections, f)
#cls_ids = list(range(1, categories + 1))
#image_ids = [db.image_ids(ind) for ind in db_inds]
#db.evaluate(result_json, cls_ids, image_ids)
print('successful!!!!')
return 0
def select_box(boxes):
length = len(boxes)
if length > 3:
print('test coco_Save boxes:', boxes)
areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
#print('test coco_Save areas:', areas)
#print('test coco_Save areas:', areas.shape)
max_index = np.argsort(-areas)[0]
max_area = areas[max_index]
print('select_box max_index:', boxes[max_index])
count_m = 0
for i in range(length):
if i == max_index:
continue
else:
if (int(boxes[i][0]) >= int(boxes[max_index][0])
and int(boxes[i][1]) >= int(boxes[max_index][1])
and int(boxes[i][2]) <= int(boxes[max_index][2])
and int(boxes[i][3]) <= int(boxes[max_index][3])):
print('test inside max_area:', boxes[i])
count_m = 1
break
#for m in range(length):
# if (math.isclose(boxes[m][0],
# boxes[max_index][0],
# abs_tol=0.00001)
# and math.isclose(boxes[m][1],
# boxes[max_index][1],
# abs_tol=0.00001)):
# count_m = 1
# break
# elif (math.isclose(boxes[m][2],
# boxes[max_index][2],
# abs_tol=0.00001)
# and math.isclose(boxes[m][3],
# boxes[max_index][3],
# abs_tol=0.00001)):
# count_m = 1
# break
# else:
# continue
if (count_m == 1):
print('test coco delete!!!')
boxes = np.delete(boxes, max_index, axis=0)
count_m = 0
return boxes
def testing(db, nnet, result_dir, debug=False):
return globals()[system_configs.sampling_function](db,
nnet,
result_dir,
debug=debug)
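# Usage sketch for select_box() above: the largest box is dropped when it
# fully contains another box. Coordinates/scores are made up, and running
# this standalone still requires the module's imports (config, external.nms,
# ...) to resolve.
if __name__ == '__main__':
    demo_boxes = np.array([[0., 0., 100., 100., 0.9],   # largest; contains the next box
                           [10., 10., 50., 50., 0.8],
                           [200., 200., 240., 260., 0.7],
                           [300., 300., 320., 330., 0.6]])
    print(select_box(demo_boxes))  # the 100x100 box is removed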
| 42.626866
| 89
| 0.456633
| 2,322
| 19,992
| 3.71404
| 0.121016
| 0.038961
| 0.018089
| 0.022263
| 0.355635
| 0.300093
| 0.250232
| 0.206169
| 0.187848
| 0.180427
| 0
| 0.041217
| 0.411415
| 19,992
| 468
| 90
| 42.717949
| 0.69168
| 0.146609
| 0
| 0.126844
| 0
| 0
| 0.030959
| 0.009653
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017699
| false
| 0
| 0.047198
| 0.00295
| 0.076696
| 0.020649
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6dee89b3a35ae4ccdd9553002d458259723951b4
| 31,184
|
py
|
Python
|
Apps/polls/Views/CourseArrangement.py
|
shadowofgost/WebEngineering
|
693af827e3458806cdace959262cf393d29f6504
|
[
"Apache-2.0"
] | 1
|
2021-04-05T05:40:17.000Z
|
2021-04-05T05:40:17.000Z
|
Apps/polls/Views/CourseArrangement.py
|
shadowofgost/WebEngineering
|
693af827e3458806cdace959262cf393d29f6504
|
[
"Apache-2.0"
] | null | null | null |
Apps/polls/Views/CourseArrangement.py
|
shadowofgost/WebEngineering
|
693af827e3458806cdace959262cf393d29f6504
|
[
"Apache-2.0"
] | null | null | null |
from django.http import HttpResponse
from django.db.models import Q
from drf_yasg.utils import swagger_auto_schema
from drf_yasg.openapi import Parameter, Schema, Response, TYPE_INTEGER, TYPE_OBJECT, TYPE_STRING, IN_QUERY
from json import dumps
from .. import models
from .Public import responses_success, responses_fail, get_request_args, data_page_response, content_type_tmp, post_search, put_success, put_error, post_error, data_base_error_specific, patch_success, patch_error, id_error, delete_schema
from rest_framework.views import APIView
from django.views.decorators.csrf import csrf_exempt
class CourseArrangement(APIView):
'''
list
list all information about Equipment
'''
data_schema = {
'id':
Schema(
title='课程id',
description='课程id,其中课程id是唯一的标识',
type=TYPE_INTEGER,
format='int32',
enum=None,
),
'id_curricula__name':
Schema(
title='课程名称',
description='课程名称 是课程安排表对应的课程课程名称',
type=TYPE_STRING,
format='string',
enum=None,
),
'timebegin':
Schema(
title='课程开时间 ',
description=' 项目开始时间记录最后更新时间;(2000-1-1 0:0:0 经过的秒),必须有值 ',
type=TYPE_INTEGER,
format='int32',
enum=None,
),
'timeend':
Schema(
title='课程结束时间',
description='项目结束时间记录最后更新时间;(2000-1-1 0:0:0 经过的秒),必须有值 ',
type=TYPE_INTEGER,
format='int32',
enum=None,
),
'id_location__name':
Schema(
title=' 课程所在教室的地点的名称 ',
description='课程所在教室的地点的名称 ',
type=TYPE_STRING,
format='string',
enum=None,
),
'id_speaker__name':
Schema(
title='主讲人',
description='主讲人也就是课程老师的姓名',
type=TYPE_STRING,
format='string',
enum=None,
),
'attr':
Schema(
title='课程属性',
description='1代表实验类型、2代表普通上课类型、3讲座考勤类型,必须有值',
type=TYPE_INTEGER,
format='int32',
enum=[1, 2, 3],
),
'charge':
Schema(
title=' 是否收费的字段 ',
description=' 免费0、收费1、开放2,必须有值 ',
type=TYPE_INTEGER,
format='int32',
enum=[0, 1, 2],
),
'pwaccess':
Schema(
title='派位',
description='不派位0、刷卡派位1(派位指用户刷卡时系统指定座位),必须有值',
type=TYPE_INTEGER,
format='int32',
enum=[0, 1],
),
'pwcontinuous':
Schema(
title='派位连续性',
description='连续派位0、随机派位1,必须有值',
type=TYPE_INTEGER,
format='int32',
enum=[0, 1],
),
'pwdirection':
Schema(
title='排位顺序',
description='顺序派位0、逆序派位1(当设置为随机派位时本功能无效),必须有值',
type=TYPE_INTEGER,
format='int32',
enum=[0, 1],
),
'dooropen':
Schema(
title='是否开门',
description='匹配的用户刷卡是否开门,0开门,1不开门',
type=TYPE_INTEGER,
format='int32',
enum=[0, 1],
),
'timebegincheckbegin':
Schema(
title='最早开始考勤的最早时间',
description=' 安排考勤开始的最早时间(单位为分钟,0代表无效),必须有值 ',
type=TYPE_INTEGER,
format='int32',
enum=None,
),
'timebegincheckend':
Schema(
title='最早签到结束时间 ',
description=' 安排考勤开始的最迟时间(单位为分钟,0代表无效),必须有值 ',
type=TYPE_INTEGER,
format='int32',
enum=None,
),
'timeendcheckbegin':
Schema(
title='考勤结束的最早时间(签退) ',
description=' 安排考勤结束的最早时间(单位为分钟,0代表无效),必须有值 ',
type=TYPE_INTEGER,
format='int32',
enum=None,
),
'timeendcheckend':
Schema(
title='考勤结束的最迟时间(签退)',
description=' 安排考勤结束的最迟时间(单位为分钟,0代表无效),必须有值',
type=TYPE_INTEGER,
format='int32',
enum=None,
),
'listdepts':
Schema(
title=' 参加本安排的学生部门列表 ',
description=' 参加本安排的学生部门列表 ',
type=TYPE_STRING,
format='string',
enum=None,
),
'rangeusers':
Schema(
title='参加本安排的学生学号列表(与RangeUser为相加的关系)',
description='参加本安排的学生学号列表(与RangeUser为相加的关系)',
type=TYPE_STRING,
format='string',
enum=None,
),
'rangeequs':
Schema(
title=' 座位表 ',
description=' 课程使用的座位范围列表 ',
type=TYPE_STRING,
format='string',
enum=None,
),
'listplaces':
Schema(
title=' 课程使用的地点 ',
description=' 课程使用的地点列表(与课程使用的座位范围列表为相加的关系)',
type=TYPE_STRING,
format='string',
enum=None,
),
'mapuser2equ':
Schema(
title='学生和座位对应表',
description='学生和座位对应表',
type=TYPE_STRING,
format='string',
enum=None,
),
'aboutspeaker':
Schema(
title='本课程主讲人介绍',
description=' 本课程主讲人也就是上课老师的介绍',
type=TYPE_STRING,
format='string',
enum=None,
),
'rem':
Schema(
title='课程介绍',
description='课程内容的介绍',
type=TYPE_STRING,
format='string',
enum=None,
),
'timeupdate':
Schema(
title='update time ',
description=' 记录最后更新时间;(2000-1-1 0:0:0 经过的秒),必须有值 ',
type=TYPE_INTEGER,
format='int32',
enum=None,
),
'idmanager__name':
Schema(
title=' 更新信息的管理员的姓名 ',
description=' 更新信息的管理员的姓名 ',
type=TYPE_STRING,
format='string',
enum=None,
)
}
data_schema_present = Schema(
title='查询成功的返回',
description='查询成功返回的函数值',
        type=TYPE_OBJECT,  # type
properties=data_schema
)
get_responses_success = Schema(
title='成功获取查询数据',
description='这个接口用于展示成功获取全部数据的格式',
type=TYPE_OBJECT,
properties={
'page': Schema(
title='页码',
description='用于表示展示的页码数',
type=TYPE_INTEGER,
format='int32',
),
'limits': Schema(
title='页码',
description='用于表示每页展示的行数',
type=TYPE_INTEGER,
format='int32',
),
'error_code': Schema(
title='是否有报错数据',
description='用于传达是否有报错数据,0表示没有报错数据,1表示有报错数据',
type=TYPE_INTEGER,
format='int32',
),
'data': Schema(
title='数据',
description='用于传递查询到的全部数据',
type=TYPE_OBJECT,
properties=[data_schema_present, data_schema_present]
),
}
)
CourseInformation_get_responses_success = Response(
description='查询课程信息成功的响应',
schema=get_responses_success,
examples=None,
)
CourseInformation_get_responses_fail = Response(
description='查询课程信息失败的响应',
schema=responses_fail,
examples={
'error_code': 1,
'message': '查询课程信息失败'
}
)
    page_get_parameter = Parameter(
name='page',
in_=IN_QUERY,
description='查询时设定的页码数',
required=True,
type=TYPE_INTEGER,
format='int32',
)
    limits_get_parameter = Parameter(
name='limits',
in_=IN_QUERY,
description='查询时设定的每页行数',
required=True,
type=TYPE_INTEGER,
format='int32',
)
@swagger_auto_schema(
request_body=None,
manual_parameters=[
            page_get_parameter, limits_get_parameter],
operation_id=None,
operation_description='这个端口用于查询课程信息',
operation_summary=None,
security=None,
responses={
200: CourseInformation_get_responses_success, 401: CourseInformation_get_responses_fail
},
tags=None)
@get_request_args
@csrf_exempt
def get(self, request, args, session):
is_login = request.COOKIES.get('is_login')
if not request.session.get(is_login, None):
return HttpResponse(dumps({'code': 0}), content_type=content_type_tmp, charset='utf-8')
pages = int(args.get('page', 1))
limits = int(args.get('limits', 20))
        data_equipment = models.TCyplan.objects.all().values(
            'id', 'id_curricula__name', 'timebegin', 'timeend',
            'id_location__name', 'id_speaker__name', 'attr', 'charge',
            'pwaccess', 'pwcontinuous', 'pwdirection', 'dooropen',
            'timebegincheckbegin', 'timebegincheckend', 'timeendcheckbegin',
            'timeendcheckend', 'rangeusers', 'listdepts', 'rangeequs',
            'timeupdate', 'listplaces', 'idmanager__name', 'mapuser2equ',
            'aboutspeaker', 'rem').distinct().order_by('id')
return data_page_response(data_equipment, pages, limits)
    '''
    post
    search course arrangement records matching an input string
    '''
    CourseArrangement_post_request_body = Schema(
        title='查询课程安排所需要的信息',  # title
        description=' 输入查询字符串用于查询课程安排信息 ',  # endpoint description
        type=TYPE_OBJECT,  # one of "object", "string", "number", "integer", "boolean", "array", "file"
        format=None,  # e.g. date, date-time, password, binary, bytes, float, double, int32, int64, email, ipv4, ipv6, uri, uuid, slug, decimal
        enum=None,  # list of allowed request values
        pattern=None,  # only meaningful when format is string
        # when type is object, a dict such as {'str1': Schema, 'str2': SchemaRef}
        properties=post_search,
        required=['input_string', 'page', 'limits'],  # list of required properties
        items=None,  # set when type is array
)
CourseArrangement_post_responses_success = Response(
description='查询课程安排表成功的响应',
schema=get_responses_success,
examples={
'error_code': 0,
'message': '查询成功'
})
CourseArrangement_post_responses_fail = Response(
description='查询课程安排表失败的响应',
schema=responses_fail,
examples={
'error_code': 1,
'message': post_error
})
@swagger_auto_schema(
request_body=CourseArrangement_post_request_body,
manual_parameters=None,
operation_id=None,
operation_description='这个端口用于查询课程安排表(某些条件下的课程安排表)',
operation_summary=None,
security=None,
responses={
201: CourseArrangement_post_responses_success,
400: CourseArrangement_post_responses_fail
},
tags=None)
@get_request_args
@csrf_exempt
def post(self, request, args, session):
is_login = request.COOKIES.get('is_login')
if not request.session.get(is_login, None):
return HttpResponse(dumps({'code': 0}), content_type=content_type_tmp, charset='utf-8')
input_string = args.get('input_string', None)
pages = int(args.get('page', 1))
limits = int(args.get('limits', 20))
        if input_string is None:
data_equipment = models.TCyplan.objects.all().values(
'id',
'id_curricula__name',
'timebegin',
'timeend',
'id_location__name',
'id_speaker__name',
'attr',
'charge',
'pwaccess',
'pwcontinuous',
'pwdirection',
'dooropen',
'timebegincheckbegin',
'timebegincheckend',
'timeendcheckbegin',
'timeendcheckend',
'rangeusers',
'listdepts',
'rangeequs',
'timeupdate',
'listplaces',
'idmanager__name',
'mapuser2equ',
'aboutspeaker',
'rem'
).distinct().order_by('id')
else:
input_string = input_string.strip()
            # classify the input without eval(); eval on raw user input is unsafe
            try:
                test_input = int(input_string)
            except ValueError:
                test_input = input_string
if isinstance(test_input, int):
data_equipment = models.TCyplan.objects.filter(
Q(id=test_input)
| Q(id_curricula=test_input)
| Q(timebegin=test_input)
| Q(timeend=test_input)
| Q(id_location=test_input)
| Q(id_speaker=test_input)
| Q(attr=test_input)
| Q(charge=test_input)
| Q(pwaccess=test_input)
| Q(pwcontinuous=test_input)
| Q(pwdirection=test_input)
| Q(dooropen=test_input)
| Q(timebegincheckbegin=test_input)
| Q(timebegincheckend=test_input)
| Q(timeendcheckbegin=test_input)
| Q(timeendcheckend=test_input)
| Q(timeupdate=test_input)
| Q(idmanager=test_input)).values(
'id',
'id_curricula',
'timebegin',
'timeend',
'id_location',
'id_speaker',
'attr',
'charge',
'pwaccess',
'pwcontinuous',
'pwdirection',
'dooropen',
'timebegincheckbegin',
'timebegincheckend',
'timeendcheckbegin',
'timeendcheckend',
'rangeusers',
'listdepts',
'rangeequs',
'timeupdate',
'listplaces',
'idmanager',
'mapuser2equ',
'aboutspeaker',
'rem'
).distinct().order_by('id')
else:
data_equipment = models.TCyplan.objects.filter(
Q(rem__icontains=test_input)
| Q(rangeequs__icontains=test_input)
| Q(listdepts__icontains=test_input)
| Q(listplaces__icontains=test_input)
| Q(mapuser2equ__icontains=test_input)
| Q(aboutspeaker__icontains=test_input)
| Q(idmanager__name__icontains=test_input)
| Q(id_location__name__icontains=test_input)
| Q(id_speaker__name__icontains=test_input)).values(
'id',
'id_curricula',
'timebegin',
'timeend',
'id_location__name',
'id_speaker__name',
'attr',
'charge',
'pwaccess',
'pwcontinuous',
'pwdirection',
'dooropen',
'timebegincheckbegin',
'timebegincheckend',
'timeendcheckbegin',
'timeendcheckend',
'rangeusers',
'listdepts',
'rangeequs',
'timeupdate',
'listplaces',
'idmanager__name',
'mapuser2equ',
'aboutspeaker',
'rem',
).distinct().order_by('id')
return data_page_response(data_equipment, pages, limits)
    '''
    put
    add a course arrangement record to the database
    '''
    CourseArrangement_put_request_body = Schema(
        title=' 增加课程安排表需要的数据 ',  # title
        description='向数据库增加课程安排表需要的数据和字段',  # endpoint description
        type=TYPE_OBJECT,  # one of "object", "string", "number", "integer", "boolean", "array", "file"
properties=data_schema,
required=[
'id', 'id_curricula__name', 'timebegin', 'timeend', 'id_location__name', 'id_speaker__name',
'attr', 'charge', 'pwaccess', 'pwcontinuous',
'pwdirection', 'dooropen', 'timebegincheckbegin',
'timebegincheckend', 'timeendcheckbegin', 'timeendcheckend',
'rangeusers', 'listdepts', 'rangeequs', 'timeupdate', 'listplaces',
'idmanager__name', 'mapuser2equ', 'aboutspeaker', 'rem']
)
CourseArrangement_put_responses_success = Response(
description='增加课程安排表数据成功的响应',
schema=responses_success,
examples={
'error_code': 0,
'message': put_success
})
CourseArrangement_put_responses_fail = Response(
description='增加课程安排表数据失败的响应',
schema=responses_fail,
examples={
'error_code': 1,
'message': put_error
})
@swagger_auto_schema(
request_body=CourseArrangement_put_request_body,
manual_parameters=None,
operation_id=None,
operation_description='这个端口用于向数据库增加课程安排表的数据',
operation_summary=None,
security=None,
responses={
201: CourseArrangement_put_responses_success,
400: CourseArrangement_put_responses_fail
},
tags=None)
@get_request_args
@csrf_exempt
def put(self, request, args, session):
field_name = [
'id', 'id_curricula__name', 'timebegin', 'timeend', 'id_location__name', 'id_speaker__name', 'timeupdate',
'idmanager__name', 'aboutspeaker', 'rem'
]
is_login = request.COOKIES.get('is_login')
if not request.session.get(is_login, None):
return HttpResponse(dumps({'code': 0}), content_type=content_type_tmp, charset='utf-8')
        # collect the request fields into a plain dict (avoid mutating locals())
        variable_name = {}
        for i in field_name:
            variable_name[i] = args.get(i, 0)
user_id = request.COOKIES.get('user_id')
user_id = request.session.get(user_id)
variable_name['idmanager'] = user_id
del variable_name['idmanager__name']
if isinstance(variable_name['id_location__name'], int) and isinstance(variable_name['id_speaker__name'], int) and isinstance(variable_name['id_curricula__name'], int):
variable_name['id_location'] = variable_name['id_location__name']
variable_name['id_speaker'] = variable_name['id_speaker__name']
variable_name['id_curricula'] = variable_name['id_curricula__name']
else:
return HttpResponse(dumps(
{'error_code': 1, 'message': '请确保所填的id类数据是数字'}),
content_type=content_type_tmp,
charset='utf-8')
        # resolve the related objects, then create the new plan row
try:
curricula_object = models.TCycurricula.objects.get(
id=variable_name.get('id_curricula'))
location_object = models.TCylocation.objects.get(
id=variable_name.get('id_location'))
speaker_object = models.TCyuser.objects.get(
id=variable_name.get('id_speaker'))
idmanager_object = models.TCyuser.objects.get(
id=variable_name.get('idmanager'))
ueses_tmp = models.TCyplan.objects.create(
id=variable_name.get('id'),
id_curricula=curricula_object,
id_location=location_object,
id_speaker=speaker_object,
timebegin=variable_name.get('timebegin'),
timeend=variable_name.get('timeend'),
attr=variable_name.get('attr'),
charge=variable_name.get('charge'),
pwaccess=variable_name.get('pwaccess'),
pwcontinuous=variable_name.get('pwcontinuous'),
pwdirection=variable_name.get('pwdirection'),
dooropen=variable_name.get('dooropen'),
timebegincheckbegin=variable_name.get('timebegincheckbegin'),
timebegincheckend=variable_name.get('timebegincheckend'),
timeendcheckbegin=variable_name.get('timeendcheckbegin'),
timeendcheckend=variable_name.get('timeendcheckend'),
rangeusers=variable_name.get('rangeusers'),
listdepts=variable_name.get('listdepts'),
rangeequs=variable_name.get('rangeequs'),
timeupdate=variable_name.get('timeupdate'),
listplaces=variable_name.get('listplaces'),
idmanager=idmanager_object,
mapuser2equ=variable_name.get('mapuser2equ'),
aboutspeaker=variable_name.get('aboutspeaker'),
rem=variable_name.get('rem'))
return HttpResponse(dumps({'error_code': 0, 'message': put_success}),
content_type=content_type_tmp,
charset='utf-8')
except Exception as error:
return HttpResponse(dumps(
{'error_code': 1, 'message': data_base_error_specific + str(error)}),
content_type=content_type_tmp,
charset='utf-8')
    '''
    patch
    modify an existing course arrangement record
    '''
    CourseArrangement_patch_request_body = Schema(
        title=' 修改课程安排表所需要的数据 ',  # title
        description=' 修改课程安排表 ',  # endpoint description
        type=TYPE_OBJECT,  # one of "object", "string", "number", "integer", "boolean", "array", "file"
        format=None,  # e.g. date, date-time, password, binary, bytes, float, double, int32, int64, email, ipv4, ipv6, uri, uuid, slug, decimal
        enum=None,  # list of allowed request values
        pattern=None,  # only meaningful when format is string
        # when type is object, a dict such as {'str1': Schema, 'str2': SchemaRef}
        properties=data_schema,
        required=['id'],  # list of required properties
        items=None,  # set when type is array
)
CourseArrangement_patch_responses_success = Response(
description='修改课程安排表成功的响应',
schema=responses_success,
examples={
'error_code': 0,
'message': patch_success
})
CourseArrangement_patch_responses_fail = Response(
description='修改课程安排表失败的响应',
schema=responses_fail,
examples={
'error_code': 1,
'message': patch_error
})
@swagger_auto_schema(request_body=CourseArrangement_patch_request_body,
manual_parameters=None,
operation_id=None,
operation_description='这个端口用于修改课程安排表的数据',
operation_summary=None,
security=None,
responses={
201: CourseArrangement_patch_responses_success,
400: CourseArrangement_patch_responses_fail
},
tags=None)
@get_request_args
@csrf_exempt
def patch(self, request, args, session):
is_login = request.COOKIES.get('is_login')
if not request.session.get(is_login, None):
return HttpResponse(dumps({'code': 0}), content_type=content_type_tmp, charset='utf-8')
id_equipment = args.get('id')
data_equipment_initial = list(
models.TCyplan.objects.filter(id=id_equipment).values(
'id',
'id_curricula',
'timebegin',
'timeend',
'id_location',
'id_speaker',
'attr',
'charge',
'pwaccess',
'pwcontinuous',
'pwdirection',
'dooropen',
'timebegincheckbegin',
'timebegincheckend',
'timeendcheckbegin',
'timeendcheckend',
'rangeusers',
'listdepts',
'rangeequs',
'timeupdate',
'listplaces',
'idmanager',
'mapuser2equ',
'aboutspeaker',
'rem'))
if data_equipment_initial == []:
return HttpResponse(dumps({'error_code': 1, 'message': id_error}),
content_type=content_type_tmp,
charset='utf-8')
data_equipment = data_equipment_initial[0]
field_name = [
'id', 'id_curricula__name', 'timebegin', 'timeend', 'id_location__name', 'id_speaker__name',
'attr', 'charge', 'pwaccess', 'pwcontinuous',
'pwdirection', 'dooropen', 'timebegincheckbegin',
'timebegincheckend', 'timeendcheckbegin', 'timeendcheckend',
'rangeusers', 'listdepts', 'rangeequs', 'timeupdate', 'listplaces',
'idmanager__name', 'mapuser2equ', 'aboutspeaker', 'rem'
]
args['id_curricula'] = args.get(
'id_curricula__name', data_equipment['id_curricula'])
args['id_location'] = args.get(
'id_location__name', data_equipment['id_location'])
args['id_speaker'] = args.get(
'id_speaker__name', data_equipment['id_speaker'])
args['idmanager'] = args.get(
'idmanager__name', data_equipment['idmanager'])
if isinstance(args['id_location__name'], int) and isinstance(args['id_speaker__name'], int) and isinstance(args['id_curricula__name'], int) and isinstance(args['idmanager__name'], int):
args['id_location'] = args['id_location__name']
args['id_speaker'] = args['id_speaker__name']
args['id_curricula'] = args['id_curricula__name']
args['idmanager'] = args['idmanager__name']
else:
return HttpResponse(dumps(
{'error_code': 1, 'message': '请确保所填的id类数据是数字'}),
content_type=content_type_tmp,
charset='utf-8')
        # collect fields into a plain dict (avoid mutating locals()); fields the
        # request leaves unset (or sends as 0) fall back to the stored values
        variable_name = {}
        for i in field_name:
            if args.get(i, 0) == 0:
                variable_name[i] = data_equipment.get(i, 0)
            else:
                variable_name[i] = args.get(i, data_equipment.get(i, 0))
user_id = request.COOKIES.get('user_id')
user_id = request.session.get(user_id)
variable_name['idmanager'] = user_id
try:
models.TCyplan.objects.filter(id=id_equipment).update(
id=variable_name.get('id'),
id_curricula=variable_name.get('id_curricula'),
id_location=variable_name.get('id_location'),
id_speaker=variable_name.get('id_speaker'),
timebegin=variable_name.get('timebegin'),
timeend=variable_name.get('timeend'),
attr=variable_name.get('attr'),
charge=variable_name.get('charge'),
pwaccess=variable_name.get('pwaccess'),
pwcontinuous=variable_name.get('pwcontinuous'),
pwdirection=variable_name.get('pwdirection'),
dooropen=variable_name.get('dooropen'),
timebegincheckbegin=variable_name.get('timebegincheckbegin'),
timebegincheckend=variable_name.get('timebegincheckend'),
timeendcheckbegin=variable_name.get('timeendcheckbegin'),
timeendcheckend=variable_name.get('timeendcheckend'),
rangeusers=variable_name.get('rangeusers'),
listdepts=variable_name.get('listdepts'),
rangeequs=variable_name.get('rangeequs'),
timeupdate=variable_name.get('timeupdate'),
listplaces=variable_name.get('listplaces'),
idmanager=variable_name.get('idmanager'),
mapuser2equ=variable_name.get('mapuser2equ'),
aboutspeaker=variable_name.get('aboutspeaker'),
rem=variable_name.get('rem'))
return HttpResponse(dumps({'message': '修改课程安排表成功'}), content_type=content_type_tmp, charset='utf-8')
except Exception as error:
return HttpResponse(dumps(
{'error_code': 1, 'message': data_base_error_specific + str(error)}),
content_type=content_type_tmp,
charset='utf-8')
    APIView_delete_request_body = Schema(
        title=' 删除数据库中的信息 ',  # title
        description='删除数据库中具体的id名称',  # endpoint description
        type=TYPE_OBJECT,  # one of "object", "string", "number", "integer", "boolean", "array", "file"
        format=None,  # e.g. date, date-time, password, binary, bytes, float, double, int32, int64, email, ipv4, ipv6, uri, uuid, slug, decimal
        enum=None,  # list of allowed request values
        pattern=None,  # only meaningful when format is string
        # when type is object, a dict such as {'str1': Schema, 'str2': SchemaRef}
        properties=delete_schema,
        required=['ids'],  # list of required properties
        items=None,  # set when type is array
)
APIView_delete_responses_success = Response(
description='APIView_delete_responses is success',
schema=responses_success,
examples={
'error_code': 0,
'message': '删除成功'
}
)
APIView_delete_responses_fail = Response(
description='APIView_delete_responses is failure',
schema=responses_fail,
examples={
'error_code': 1,
'message': '删除失败,请输入正确的id'
}
)
    @swagger_auto_schema(
request_body=APIView_delete_request_body,
manual_parameters=None,
operation_id=None,
operation_description='api是用来删除数据库中的给定字段',
operation_summary=None,
security=None,
responses={
204: APIView_delete_request_body,
500: APIView_delete_request_body
},
tags=None)
    @get_request_args
def delete(self, request, args, session):
is_login = request.COOKIES.get('is_login')
if not request.session.get(is_login, None):
return HttpResponse(dumps({'code': 0}), content_type=content_type_tmp, charset='utf-8')
        variable_name = {}  # collect the ids to delete (avoid mutating locals())
delete_data = args.get('ids')
numbers_id = len(delete_data)
for i in range(numbers_id):
variable_name['id_'+str(i)] = delete_data[i].get('data_id')
try:
for i in range(numbers_id):
models.TCyplan.objects.filter(
                    id=variable_name.get('id_' + str(i))).delete()
return HttpResponse(dumps({'error_code': 0, 'message': '数据删除成功'}), content_type=content_type_tmp, charset='utf-8')
except Exception as error:
return HttpResponse(dumps({'error_code': 1, 'message': data_base_error_specific + str(error)}), content_type=content_type_tmp, charset='utf-8')
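# Illustrative: how post() above classifies the search input once the unsafe
# eval() is replaced by int() -- a purely numeric string hits the id/number
# filters, anything else falls through to the icontains text search.
if __name__ == '__main__':
    for raw in ('42', 'math'):
        try:
            parsed = int(raw)
        except ValueError:
            parsed = raw
        print(raw, '->', 'numeric filter' if isinstance(parsed, int) else 'text filter')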
| 38.594059
| 455
| 0.536942
| 2,703
| 31,184
| 5.938217
| 0.118017
| 0.052333
| 0.047661
| 0.024858
| 0.654663
| 0.616846
| 0.580961
| 0.508006
| 0.468195
| 0.446265
| 0
| 0.010524
| 0.353996
| 31,184
| 807
| 456
| 38.641884
| 0.78625
| 0.035178
| 0
| 0.598965
| 0
| 0
| 0.179586
| 0.015074
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006468
| false
| 0
| 0.011643
| 0
| 0.064683
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6defdfc013df6a621f25fd5ffba934ad58dd3acd
| 3,312
|
py
|
Python
|
lights.py
|
team-7108/computer-vision-tutorials
|
cfb7e455b5d8bba8779c440907344d9763573f57
|
[
"MIT"
] | 3
|
2018-09-12T02:56:46.000Z
|
2020-11-13T13:48:44.000Z
|
lights.py
|
team-7108/computer-vision-tutorials
|
cfb7e455b5d8bba8779c440907344d9763573f57
|
[
"MIT"
] | null | null | null |
lights.py
|
team-7108/computer-vision-tutorials
|
cfb7e455b5d8bba8779c440907344d9763573f57
|
[
"MIT"
] | 1
|
2020-11-13T13:48:45.000Z
|
2020-11-13T13:48:45.000Z
|
# Import OpenCV module
import cv2
# Import numpy for array operations
import numpy as np
image = cv2.imread('images/five_cubes.jpeg')
# Show the image
cv2.imshow('Image',image)
# Resize the image if it is too big, also helps to speed up the processing
image = cv2.resize(image, (600, 600))
cv2.imshow('Resized Image',image)
# Equalizing histograms, we try to reduce the effect of light here
image = cv2.cvtColor(image,cv2.COLOR_BGR2YUV)
channel = cv2.split(image)
cv2.equalizeHist(channel[0], channel[0])
cv2.merge(channel,image)
image = cv2.cvtColor(image,cv2.COLOR_YUV2BGR)
cv2.imshow('Normalized Image',image)
# This is a dummy function needed for creating trackbars
def nothing(x):
pass
# Create a window named 'Colorbars'
cv2.namedWindow('Colorbars')
# Assign strings for ease of coding
bh='Blue High'
bl='Blue Low'
gh='Green High'
gl='Green Low'
rh='Red High'
rl='Red Low'
wnd = 'Colorbars'
# Begin Creating trackbars for each BGR value
cv2.createTrackbar(bl, wnd, 0, 255, nothing)
cv2.createTrackbar(bh, wnd, 149, 255, nothing)
cv2.createTrackbar(gl, wnd, 156, 255, nothing)
cv2.createTrackbar(gh, wnd, 255, 255, nothing)
cv2.createTrackbar(rl, wnd, 199, 255, nothing)
cv2.createTrackbar(rh, wnd, 255, 255, nothing)
while True:
mergedImage = np.zeros((600,150,3), np.uint8)
# Split image into four pieces and merge again
for i in range(0,4):
resizedImage = image[0:600, i*150:(i+1)*150]
cv2.imshow("cropped", resizedImage)
bLow = cv2.getTrackbarPos(bl, wnd)
bHigh = cv2.getTrackbarPos(bh, wnd)
gLow = cv2.getTrackbarPos(gl, wnd)
gHigh = cv2.getTrackbarPos(gh, wnd)
rLow = cv2.getTrackbarPos(rl, wnd)
rHigh = cv2.getTrackbarPos(rh, wnd)
rgbLow=np.array([bLow,gLow,rLow])
rgbHigh=np.array([bHigh,gHigh,rHigh])
maskedImage = cv2.inRange(resizedImage, rgbLow, rgbHigh)
cv2.imshow('Masked Image', maskedImage)
kernel = np.ones((15,15),np.uint8)
# the first morphological transformation is called opening, it will sweep out extra lone pixels around the image
openedImage = cv2.morphologyEx(maskedImage, cv2.MORPH_OPEN, kernel)
cv2.imshow("Open Image", openedImage)
outImage = resizedImage.copy()
        try:
            # OpenCV 3.x returns (image, contours, hierarchy); OpenCV 4.x
            # returns (contours, hierarchy). The except below also covers
            # frames with no contours at all.
            contourImage, contours, hierarchy = cv2.findContours(openedImage.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            cnt = contours[0]
            print(cnt)  # contours are the points on the outline of the shape
            # the bounding rectangle is the smallest axis-aligned rectangle
            # that encloses all the contour points
            x, y, w, h = cv2.boundingRect(cnt)
            # mark that bounding rectangle in blue (BGR order: 255, 0, 0)
            cv2.rectangle(outImage, (x, y), (x + w, y + h), (255, 0, 0), 4)
        except (ValueError, IndexError):
            pass
cv2.imshow("Bboxed",outImage)
mergedImage = np.concatenate((mergedImage,outImage), axis=1)
mergedImage = mergedImage[0:600, 150:750]
cv2.imshow("Merged",mergedImage)
keyPressed = cv2.waitKey(1) # Look for keys to be pressed
if keyPressed == 27: # if the key is ESC, check the ASCII table, 27 = ESC
break # Exit the loop
cv2.destroyAllWindows() # Destroy the windows and close the program
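# Illustrative variant (assumes OpenCV 4.x, where findContours returns only
# (contours, hierarchy)): pick the largest contour by area instead of the
# first one returned, which is usually the more robust choice.
#
#     contours, hierarchy = cv2.findContours(openedImage, cv2.RETR_EXTERNAL,
#                                            cv2.CHAIN_APPROX_SIMPLE)
#     if contours:
#         cnt = max(contours, key=cv2.contourArea)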
| 35.234043
| 128
| 0.679348
| 459
| 3,312
| 4.88671
| 0.433551
| 0.028533
| 0.028979
| 0.060187
| 0.025858
| 0.025858
| 0
| 0
| 0
| 0
| 0
| 0.051361
| 0.212258
| 3,312
| 93
| 129
| 35.612903
| 0.808356
| 0.267512
| 0
| 0.031746
| 0
| 0
| 0.068994
| 0.009144
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015873
| false
| 0.031746
| 0.031746
| 0
| 0.047619
| 0.015873
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6df0403cfe638d0fa7c9fc0942bb17cdd38113df
| 569
|
py
|
Python
|
exastics/publish_github_api_releases.py
|
exastro-suite/exastics
|
de6193159943319333abc2688f543e7424810823
|
[
"Apache-2.0"
] | null | null | null |
exastics/publish_github_api_releases.py
|
exastro-suite/exastics
|
de6193159943319333abc2688f543e7424810823
|
[
"Apache-2.0"
] | 1
|
2020-10-25T08:30:59.000Z
|
2020-10-25T08:30:59.000Z
|
exastics/publish_github_api_releases.py
|
exastro-suite/exastics
|
de6193159943319333abc2688f543e7424810823
|
[
"Apache-2.0"
] | 8
|
2020-10-09T13:11:08.000Z
|
2021-11-04T06:26:27.000Z
|
import exastics.collect
import pathlib
import sys
import urllib.parse
if __name__ == '__main__':
github_account = sys.argv[1]
github_repository = sys.argv[2]
url_parts = (
'https',
'api.github.com',
urllib.parse.quote(f'/repos/{github_account}/{github_repository}/releases'),
'',
'',
''
)
headers = {
'Accept': 'application/vnd.github.v3+json'
}
output_dir = pathlib.PurePath(github_repository, 'github-releases')
exastics.collect.publish_api(url_parts, headers, output_dir)
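# Sanity sketch (illustrative): url_parts follows the (scheme, netloc, path,
# params, query, fragment) layout of urllib.parse.urlunparse, which is
# presumably how exastics.collect.publish_api assembles the request URL.
# With the repo's own names as the two arguments:
#
#     >>> urllib.parse.urlunparse(url_parts)
#     'https://api.github.com/repos/exastro-suite/exastics/releases'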
| 21.074074
| 84
| 0.630931
| 63
| 569
| 5.412698
| 0.555556
| 0.140762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006912
| 0.237258
| 569
| 26
| 85
| 21.884615
| 0.778802
| 0
| 0
| 0.1
| 0
| 0
| 0.228471
| 0.144112
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6df0ee5285eb665d18e287fcf75e62d896c148dd
| 1,471
|
py
|
Python
|
cohesity_management_sdk/models/rpo_schedule.py
|
sachinthakare-cohesity/management-sdk-python
|
c95f67b7d387d5bab8392be43190e598280ae7b5
|
[
"MIT"
] | null | null | null |
cohesity_management_sdk/models/rpo_schedule.py
|
sachinthakare-cohesity/management-sdk-python
|
c95f67b7d387d5bab8392be43190e598280ae7b5
|
[
"MIT"
] | null | null | null |
cohesity_management_sdk/models/rpo_schedule.py
|
sachinthakare-cohesity/management-sdk-python
|
c95f67b7d387d5bab8392be43190e598280ae7b5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
class RPOSchedule(object):
"""Implementation of the 'RPO Schedule.' model.
Specifies an RPO Schedule.
Attributes:
rpo_inteval_minutes (long|int): If this field is set, then at any
point, a recovery point should be available not older than the
given interval minutes.
"""
# Create a mapping from Model property names to API property names
_names = {
"rpo_inteval_minutes":'rpoIntevalMinutes'
}
def __init__(self,
rpo_inteval_minutes=None):
"""Constructor for the RPOSchedule class"""
# Initialize members of the class
self.rpo_inteval_minutes = rpo_inteval_minutes
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
rpo_inteval_minutes = dictionary.get('rpoIntevalMinutes')
# Return an object of this model
return cls(rpo_inteval_minutes)
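# Round-trip sketch for the model above; the camelCase key (including the
# API's own 'Inteval' spelling) must match the server's field name exactly.
if __name__ == '__main__':
    schedule = RPOSchedule.from_dictionary({'rpoIntevalMinutes': 60})
    print(schedule.rpo_inteval_minutes)  # 60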
| 26.745455
| 81
| 0.633583
| 168
| 1,471
| 5.428571
| 0.488095
| 0.076754
| 0.130482
| 0.046053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004902
| 0.306594
| 1,471
| 54
| 82
| 27.240741
| 0.889216
| 0.566281
| 0
| 0
| 0
| 0
| 0.099812
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6df1040ee952e6ce1e567165568234bfbe1f725c
| 5,834
|
py
|
Python
|
Gh compilation files/text.py
|
ibois-epfl/Manis-timber-plate-joinery-solver
|
fecdb1dfe23348de261f034f85baf24ac396e8cc
|
[
"MIT"
] | 3
|
2021-10-19T11:55:59.000Z
|
2022-02-04T15:29:04.000Z
|
Gh compilation files/text.py
|
ibois-epfl/Manis-timber-plate-joinery-solver
|
fecdb1dfe23348de261f034f85baf24ac396e8cc
|
[
"MIT"
] | null | null | null |
Gh compilation files/text.py
|
ibois-epfl/Manis-timber-plate-joinery-solver
|
fecdb1dfe23348de261f034f85baf24ac396e8cc
|
[
"MIT"
] | null | null | null |
"""Export a text file."""
from ghpythonlib.componentbase import dotnetcompiledcomponent as component
import Grasshopper, GhPython
import System
import os
import datetime
__author__ = "Nicolas Rogeau"
__laboratory__ = "IBOIS, Laboratory for Timber Construction"
__university__ = "EPFL, Ecole Polytechnique Federale de Lausanne"
__funding__ = "NCCR Digital Fabrication, ETH Zurich"
__version__ = "2021.09"
class MyComponent(component):
def __new__(cls):
instance = Grasshopper.Kernel.GH_Component.__new__(cls,
"Export Text File", "TextOut", """Export a text file.""", "Manis", "Utility")
return instance
def get_ComponentGuid(self):
return System.Guid("02ba4a11-7b1c-48b3-8376-55637e7a1ed2")
def SetUpParam(self, p, name, nickname, description):
p.Name = name
p.NickName = nickname
p.Description = description
p.Optional = True
def RegisterInputParams(self, pManager):
p = Grasshopper.Kernel.Parameters.Param_Boolean()
self.SetUpParam(p, "run", "run", "Export file if True.")
p.Access = Grasshopper.Kernel.GH_ParamAccess.item
self.Params.Input.Add(p)
p = Grasshopper.Kernel.Parameters.Param_String()
self.SetUpParam(p, "text", "text", "Text to export.")
p.Access = Grasshopper.Kernel.GH_ParamAccess.list
self.Params.Input.Add(p)
p = Grasshopper.Kernel.Parameters.Param_String()
self.SetUpParam(p, "folder", "folder", "Folder path.")
p.Access = Grasshopper.Kernel.GH_ParamAccess.item
self.Params.Input.Add(p)
p = Grasshopper.Kernel.Parameters.Param_String()
self.SetUpParam(p, "name", "name", "File name.")
p.Access = Grasshopper.Kernel.GH_ParamAccess.item
self.Params.Input.Add(p)
p = Grasshopper.Kernel.Parameters.Param_String()
self.SetUpParam(p, "extension", "extension", "(Optional) Custom file extension.")
p.Access = Grasshopper.Kernel.GH_ParamAccess.item
self.Params.Input.Add(p)
p = Grasshopper.Kernel.Parameters.Param_Boolean()
self.SetUpParam(p, "date", "date", "(Optional) Add the date of today to the file name.")
p.Access = Grasshopper.Kernel.GH_ParamAccess.item
self.Params.Input.Add(p)
p = Grasshopper.Kernel.Parameters.Param_Boolean()
self.SetUpParam(p, "x", "incremental", "(Optional) Check for existing file with the same name and increment if necessary.")
p.Access = Grasshopper.Kernel.GH_ParamAccess.item
self.Params.Input.Add(p)
def RegisterOutputParams(self, pManager):
pass
def SolveInstance(self, DA):
p0 = self.marshal.GetInput(DA, 0)
p1 = self.marshal.GetInput(DA, 1)
p2 = self.marshal.GetInput(DA, 2)
p3 = self.marshal.GetInput(DA, 3)
p4 = self.marshal.GetInput(DA, 4)
p5 = self.marshal.GetInput(DA, 5)
p6 = self.marshal.GetInput(DA, 6)
result = self.RunScript(p0, p1, p2, p3, p4, p5, p6)
def get_Internal_Icon_24x24(self):
o = "iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAAJOgAACToAYJjBRwAAACKSURBVEhL7c3RCoQwDETR/v9Pq7dOSoygFDrggweGrhM2aT+3Ta8NB6xH4oDtyAZeZbl+APxWbvJwOlnqLzReg33KUAcj0We5rwmp61Sf6jeie5pV9Mr7Acz2YHbk/UB0T7OKXrn+Od4w+w06pVO9BvuUIZfTyVK/jFZ7lsO6HNblsC6HdfkXtLYDh4phuyx2L58AAAAASUVORK5CYII="
return System.Drawing.Bitmap(System.IO.MemoryStream(System.Convert.FromBase64String(o)))
def RunScript(self, run, text, folder, name, extension, date, incremental):
inc = incremental
ext = extension
        if run is True:
            # Grasshopper document path
            ghP = self.LocalScope.ghdoc.Path
            # folder and file name
            if name is None: name = 'this_script_has_no_name'
            if folder is None: folder = os.path.dirname(os.path.realpath(ghP))
            outputName = os.path.join(folder, str(name))
            # optionally append today's date to the file name
            if date is True:
                date = datetime.datetime.today()
                outputName += '_' + str(date.year) + '_' + str(date.month) + '_' + str(date.day)
            # default extension
            if ext is None: ext = '.txt'
            # avoid overwriting an existing file: append an incrementing suffix
            if inc is True:
                i = 0
                candidate = outputName + '_' + str(i)
                while os.path.exists(candidate + str(ext)) and i < 100:  # safety cap
                    i += 1
                    candidate = outputName + '_' + str(i)
                outputName = candidate
            outputName += str(ext)
            # write the lines separated by newlines; the context manager closes
            # the file even if a write fails
            with open(outputName, 'w') as myFile:
                if text is not None:
                    for i in range(len(text)):
                        myFile.write(str(text[i]))
                        if i != len(text) - 1:
                            myFile.write('\n')
# confirm file write
if os.stat(outputName).st_size > 0:
print('File successfully written as ' + outputName)
else:
print('output file is empty - check your values')
return
class AssemblyInfo(GhPython.Assemblies.PythonAssemblyInfo):
def get_AssemblyName(self):
return "Text File Output"
def get_AssemblyDescription(self):
return """"""
def get_AssemblyVersion(self):
return "0.1"
def get_AuthorName(self):
return "Nicolas Rogeau"
def get_Id(self):
return System.Guid("bc9186be-9321-4eb3-ba5e-58a615f66a50")
| 38.130719
| 343
| 0.594961
| 610
| 5,834
| 5.588525
| 0.334426
| 0.074802
| 0.044588
| 0.057495
| 0.238486
| 0.238486
| 0.227633
| 0.227633
| 0.227633
| 0.211499
| 0
| 0.030935
| 0.301851
| 5,834
| 153
| 344
| 38.130719
| 0.80604
| 0.026054
| 0
| 0.203704
| 0
| 0
| 0.18673
| 0.076686
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12037
| false
| 0.009259
| 0.046296
| 0.055556
| 0.268519
| 0.018519
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6df14ec0665b31a613e368f74d43196adfd0df56
| 877
|
py
|
Python
|
setup.py
|
anthonytw/dutyroll
|
489dd452ba614a2214756eba0831b33111187225
|
[
"MIT"
] | 2
|
2019-01-22T20:44:03.000Z
|
2019-11-30T07:59:32.000Z
|
setup.py
|
anthonytw/dutyroll
|
489dd452ba614a2214756eba0831b33111187225
|
[
"MIT"
] | null | null | null |
setup.py
|
anthonytw/dutyroll
|
489dd452ba614a2214756eba0831b33111187225
|
[
"MIT"
] | null | null | null |
import sys
from packaging.version import LegacyVersion
from skbuild.exceptions import SKBuildError
from skbuild.cmaker import get_cmake_version
from skbuild import setup
setup_requires = []
# Require pytest-runner only when running tests.
if any(arg in sys.argv for arg in ('pytest', 'test')):
setup_requires.append('pytest-runner>=2.0')
# Add CMake as a build requirement if cmake is not installed or is too low a version.
try:
if LegacyVersion(get_cmake_version()) < LegacyVersion('3.10'):
setup_requires.append('cmake')
except SKBuildError:
setup_requires.append('cmake')
setup(
name='dutyroll',
version='1.0.1',
description='Parallel implementation of rolling window duty cycle.',
author='"Anthony Wertz"<awertz@cmu.edu>',
license='MIT',
packages=['dutyroll'],
tests_require=['pytest'],
setup_requires=setup_requires
)
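# Why LegacyVersion above: plain string comparison ranks '3.9.1' after '3.10'
# lexicographically, while LegacyVersion compares the numeric parts
# (illustrative; note LegacyVersion was removed in packaging 22+, so this
# assumes the older packaging release the script already depends on):
#
#     '3.9.1' < '3.10'                                # False (string compare)
#     LegacyVersion('3.9.1') < LegacyVersion('3.10')  # True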
| 28.290323
| 85
| 0.733181
| 118
| 877
| 5.355932
| 0.559322
| 0.123418
| 0.09019
| 0.075949
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010796
| 0.155074
| 877
| 30
| 86
| 29.233333
| 0.842105
| 0.148233
| 0
| 0.086957
| 0
| 0
| 0.209677
| 0.02957
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.217391
| 0
| 0.217391
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6df2b3c71a785d6478f03f2023bb542307a17b8f
| 1,195
|
py
|
Python
|
crits/locations/forms.py
|
dutrow/crits
|
6b357daa5c3060cf622d3a3b0c7b41a9ca69c049
|
[
"MIT"
] | 738
|
2015-01-02T12:39:55.000Z
|
2022-03-23T11:05:51.000Z
|
crits/locations/forms.py
|
deadbits/crits
|
154097a1892e9d3960d6faaed4bd2e912a196a47
|
[
"MIT"
] | 605
|
2015-01-01T01:03:39.000Z
|
2021-11-17T18:51:07.000Z
|
crits/locations/forms.py
|
deadbits/crits
|
154097a1892e9d3960d6faaed4bd2e912a196a47
|
[
"MIT"
] | 316
|
2015-01-07T12:35:01.000Z
|
2022-03-30T04:44:30.000Z
|
from django import forms
from crits.locations.location import Location
from crits.core.handlers import get_item_names
class AddLocationForm(forms.Form):
"""
Django form for adding a location to a TLO.
The list of names comes from :func:`get_item_names`.
"""
error_css_class = 'error'
required_css_class = 'required'
location_type = forms.ChoiceField(widget=forms.Select, required=True)
country = forms.ChoiceField(widget=forms.Select, required=True)
description = forms.CharField(
widget=forms.TextInput(attrs={'size': '50'}),
required=False)
latitude = forms.CharField(
widget=forms.TextInput(attrs={'size': '50'}),
required=False)
longitude = forms.CharField(
widget=forms.TextInput(attrs={'size': '50'}),
required=False)
def __init__(self, *args, **kwargs):
super(AddLocationForm, self).__init__(*args, **kwargs)
self.fields['location_type'].choices = [
('Originated From', 'Originated From'),
('Destined For', 'Destined For'),
]
self.fields['country'].choices = [
(c.name, c.name) for c in get_item_names(Location, True)]
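# Illustrative use (hypothetical values; requires a configured Django/CRITs
# environment, and 'country' must match an item returned by get_item_names):
#
#     form = AddLocationForm(data={
#         'location_type': 'Originated From',
#         'country': 'United States',
#         'description': 'example location',
#     })
#     form.is_valid(), dict(form.errors)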
| 34.142857
| 73
| 0.650209
| 139
| 1,195
| 5.446043
| 0.402878
| 0.072655
| 0.047556
| 0.099075
| 0.348745
| 0.348745
| 0.348745
| 0.229855
| 0.229855
| 0.229855
| 0
| 0.006431
| 0.219247
| 1,195
| 34
| 74
| 35.147059
| 0.80493
| 0.081172
| 0
| 0.24
| 0
| 0
| 0.097403
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.12
| 0
| 0.48
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6df4ba8add8eb7e8c911008f72f03e4dab32f5ab
| 3,641
|
py
|
Python
|
utils/utils_preprocess_v3.py
|
microsoft/normalized_trend_filtering
|
eb73f124243dfc3dc610abba35a3ad1a6303a227
|
[
"MIT"
] | 2
|
2021-09-06T14:04:17.000Z
|
2021-11-09T11:55:10.000Z
|
utils/utils_preprocess_v3.py
|
microsoft/normalized_trend_filtering
|
eb73f124243dfc3dc610abba35a3ad1a6303a227
|
[
"MIT"
] | null | null | null |
utils/utils_preprocess_v3.py
|
microsoft/normalized_trend_filtering
|
eb73f124243dfc3dc610abba35a3ad1a6303a227
|
[
"MIT"
] | 1
|
2021-11-10T11:44:36.000Z
|
2021-11-10T11:44:36.000Z
|
import pandas as pd
import numpy as np
import sys
import os
import itertools
from tqdm import tqdm_notebook, tnrange
import networkx as nx
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.optimize import minimize
import scipy
from sklearn import linear_model
from sklearn.preprocessing import StandardScaler
import cvxpy as cp
from scipy.sparse import csr_matrix, vstack, hstack
from copy import deepcopy
module_path = os.path.abspath(os.path.join('..'))
def getReducedGraph(sample_nodes, graph_nodes,
interactome):
'''
Reduce graph with only intersection nodes from sample and
interactome.
'''
#find intersection between sample nodes and graph nodes
sample_nodes = set(sample_nodes)
graph_nodes = set(graph_nodes)
intersection_nodes = sample_nodes.intersection(graph_nodes)
print('Number of Intersection Nodes : ', len(intersection_nodes))
g = []
for line in tqdm_notebook(range(len(interactome))):
if (interactome.iloc[line]['node1'] in intersection_nodes
and interactome.iloc[line]['node2'] in intersection_nodes):
g.append(interactome.iloc[line])
return pd.DataFrame(g)
def getNodeCharacterization(g, sample_nodes):
'''
Characterizes nodes based on if node is connected or orphan
'''
connected_nodes = set(g.nodes())
orphan_nodes = set(sample_nodes) - connected_nodes
return connected_nodes, orphan_nodes
def getDataSorting(connected_nodes, sample_df):
'''
Sorts covariant matrix such that connected nodes are first
followed by orphan nodes and nodes not in interactome
'''
sample_df_sorted = deepcopy(sample_df)
sample_df_sorted['IN_INTERACTOME'] = sample_df["node"].isin(list(connected_nodes)).tolist()
sample_df_sorted = sample_df_sorted.sort_values(by="IN_INTERACTOME", ascending=False).reset_index(drop=True)
#get dictionary to map node to number
num_to_node = {}
for i,nod in enumerate(sample_df_sorted['node'].tolist()):
num_to_node[i] = nod
#get ordered list of nodes in interactome
ordered_nodelist = sample_df_sorted.loc[sample_df_sorted['IN_INTERACTOME'] == True]['node'].tolist()
#delete 'IN_INTERACTOME' column
sample_df_sorted = sample_df_sorted.drop(columns = ['IN_INTERACTOME', 'node'])
return sample_df_sorted, ordered_nodelist, num_to_node
def getLaplacian(g, ordered_nodelist, orphan_nodes):
'''
    Calculates the Laplacian matrix with respect to the ordering of the
    covariate matrix
'''
L_norm = nx.normalized_laplacian_matrix(g, nodelist = ordered_nodelist, weight = 'confidence')
L = nx.laplacian_matrix(g, nodelist = ordered_nodelist, weight = 'confidence')
return csr_matrix(scipy.linalg.block_diag(L.todense(),np.eye(len(orphan_nodes)))), \
csr_matrix(scipy.linalg.block_diag(L_norm.todense(),np.eye(len(orphan_nodes))))
class Preprocessing():
def __init__(self):
self.g = None
self.connected_nodes = None
self.orphan_nodes = None
self.sorted_X = None
self.ordered_nodelist = None
self.num_to_node = None
self.L = None
self.L_norm = None
def transform(self,X_nodes, graph_nodes, graph, X, save_location, load_graph = False):
if load_graph == False:
self.g = getReducedGraph(X_nodes, graph_nodes, graph)
self.g.to_csv(save_location, header=None, index=None, sep='\t')
self.g = nx.read_edgelist(save_location,
data=(('confidence',float),))
self.connected_nodes, self.orphan_nodes = \
getNodeCharacterization(self.g, X_nodes)
self.sorted_X, self.ordered_nodelist, self.num_to_node = \
getDataSorting(self.connected_nodes,X)
        self.L, self.L_norm = getLaplacian(self.g, self.ordered_nodelist, self.orphan_nodes)
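# A minimal, hedged sketch of driving Preprocessing end to end; every input
# below (the toy interactome, the covariate table, the save path) is hypothetical:
# interactome = pd.DataFrame({'node1': ['A', 'B'], 'node2': ['B', 'C'],
#                             'confidence': [0.9, 0.8]})
# X = pd.DataFrame({'node': ['A', 'B', 'C', 'D'], 'sample1': [1.0, 0.5, 0.2, 0.7]})
# prep = Preprocessing()
# prep.transform(X['node'], set(interactome['node1']) | set(interactome['node2']),
#                interactome, X, 'reduced_graph.tsv')
# prep.L and prep.L_norm then hold the block-diagonal Laplacians.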
| 31.119658
| 109
| 0.762703
| 521
| 3,641
| 5.115163
| 0.285988
| 0.039024
| 0.052533
| 0.012008
| 0.170356
| 0.104315
| 0.06379
| 0.041276
| 0
| 0
| 0
| 0.000636
| 0.136226
| 3,641
| 117
| 110
| 31.119658
| 0.846741
| 0.131557
| 0
| 0.083333
| 0
| 0
| 0.047115
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.263889
| 0
| 0.416667
| 0.013889
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6df5916ec657908f3c7be4eae54758a97075100c
| 791
|
py
|
Python
|
server.py
|
marwano/remoterobot
|
80409bde8e20de2b9fe97a8f214295aa5290decd
|
[
"BSD-3-Clause"
] | 1
|
2019-05-26T10:41:07.000Z
|
2019-05-26T10:41:07.000Z
|
server.py
|
marwano/remoterobot
|
80409bde8e20de2b9fe97a8f214295aa5290decd
|
[
"BSD-3-Clause"
] | 1
|
2018-02-28T23:47:23.000Z
|
2018-02-28T23:47:23.000Z
|
server.py
|
marwano/remoterobot
|
80409bde8e20de2b9fe97a8f214295aa5290decd
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
import tornado.ioloop
import tornado.web
import json
import logging
from uf.wrapper.swift_api import SwiftAPI
class MainHandler(tornado.web.RequestHandler):
def initialize(self, swift):
self.swift = swift
def post(self):
data = json.loads(self.request.body.decode())
logging.info(repr(data))
func = getattr(self.swift, data['action'])
results = func(**data['kwargs'])
self.write(json.dumps(dict(results=results)))
def main():
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
swift = SwiftAPI()
app = tornado.web.Application([('/', MainHandler, dict(swift=swift))])
app.listen(8000)
tornado.ioloop.IOLoop.current().start()
if __name__ == '__main__':
main()
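# A hedged client-side sketch of the JSON protocol MainHandler expects;
# the action name and kwargs below are hypothetical and assume the server
# above is running locally:
# import requests
# resp = requests.post('http://localhost:8000/',
#                      json={'action': 'set_position', 'kwargs': {'x': 150, 'y': 0, 'z': 100}})
# print(resp.json()['results'])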
| 25.516129
| 77
| 0.667509
| 98
| 791
| 5.295918
| 0.540816
| 0.057803
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007716
| 0.180784
| 791
| 30
| 78
| 26.366667
| 0.79321
| 0.026549
| 0
| 0
| 0
| 0
| 0.057217
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136364
| false
| 0
| 0.227273
| 0
| 0.409091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6df62870aa4daf08157f0c702682542e2f8979fe
| 2,765
|
py
|
Python
|
open/core/betterself/serializers/daily_productivity_log_serializers.py
|
lawrendran/open
|
d136f694bafab647722c78be6f39ec79d589f774
|
[
"MIT"
] | 105
|
2019-06-01T08:34:47.000Z
|
2022-03-15T11:48:36.000Z
|
open/core/betterself/serializers/daily_productivity_log_serializers.py
|
lawrendran/open
|
d136f694bafab647722c78be6f39ec79d589f774
|
[
"MIT"
] | 111
|
2019-06-04T15:34:14.000Z
|
2022-03-12T21:03:20.000Z
|
open/core/betterself/serializers/daily_productivity_log_serializers.py
|
lawrendran/open
|
d136f694bafab647722c78be6f39ec79d589f774
|
[
"MIT"
] | 26
|
2019-09-04T06:06:12.000Z
|
2022-01-03T03:40:11.000Z
|
from rest_framework.exceptions import ValidationError
from rest_framework.fields import DateField, ChoiceField, CharField
from open.core.betterself.constants import (
BETTERSELF_LOG_INPUT_SOURCES,
WEB_INPUT_SOURCE,
)
from open.core.betterself.models.daily_productivity_log import DailyProductivityLog
from open.core.betterself.serializers.mixins import (
BaseCreateUpdateSerializer,
BaseModelReadSerializer,
)
from open.core.betterself.serializers.validators import ModelValidatorsMixin
from open.utilities.date_and_time import (
format_datetime_to_human_readable,
yyyy_mm_dd_format_1,
)
class DailyProductivityLogReadSerializer(BaseModelReadSerializer):
class Meta:
model = DailyProductivityLog
fields = (
"uuid",
"source",
"date",
"very_productive_time_minutes",
"productive_time_minutes",
"neutral_time_minutes",
"distracting_time_minutes",
"very_distracting_time_minutes",
"notes",
"mistakes",
"created",
"modified",
"display_name",
"pomodoro_count",
)
def get_display_name(self, instance):
model = self.Meta.model
model_name = model._meta.verbose_name
time_label = instance.date
serialized_time = format_datetime_to_human_readable(
time_label, yyyy_mm_dd_format_1
)
display_name = f"{model_name} | Date: {serialized_time}"
return display_name
class DailyProductivityLogCreateUpdateSerializer(
BaseCreateUpdateSerializer, ModelValidatorsMixin
):
    # allow a regular ISO-format value (even one carrying milliseconds) to be passed as well
date = DateField(input_formats=["iso-8601"])
source = ChoiceField(choices=BETTERSELF_LOG_INPUT_SOURCES, default=WEB_INPUT_SOURCE)
mistakes = CharField(trim_whitespace=True, default="", allow_blank=True)
class Meta:
model = DailyProductivityLog
fields = (
"source",
"date",
"very_productive_time_minutes",
"productive_time_minutes",
"neutral_time_minutes",
"distracting_time_minutes",
"very_distracting_time_minutes",
"pomodoro_count",
"notes",
"mistakes",
"user",
)
def validate(self, validated_data):
user = self.context["request"].user
is_creating_instance = not self.instance
if is_creating_instance:
if self.Meta.model.objects.filter(
user=user, date=validated_data["date"],
).exists():
                raise ValidationError("Fields user and date need to be unique!")
return validated_data
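# A hedged sketch of driving the create/update serializer from inside a DRF
# view; `request` is assumed to be the incoming rest_framework request:
# serializer = DailyProductivityLogCreateUpdateSerializer(
#     data=request.data, context={"request": request})
# serializer.is_valid(raise_exception=True)
# serializer.save(user=request.user)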
| 31.420455
| 88
| 0.654973
| 267
| 2,765
| 6.479401
| 0.389513
| 0.063584
| 0.027746
| 0.050867
| 0.276301
| 0.14104
| 0.14104
| 0.14104
| 0.14104
| 0.14104
| 0
| 0.002964
| 0.267993
| 2,765
| 87
| 89
| 31.781609
| 0.851779
| 0.020615
| 0
| 0.351351
| 0
| 0
| 0.167406
| 0.076866
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0
| 0.094595
| 0
| 0.243243
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6df927163bf069ad2144fb5439fa950c5da79469
| 1,409
|
py
|
Python
|
Sources/Mavsdk/proto/pb_plugins/setup.py
|
obe711/MAVSDK-Swift
|
3ed35bbb57754824f8235f9acf828c73cc10b72b
|
[
"BSD-3-Clause"
] | null | null | null |
Sources/Mavsdk/proto/pb_plugins/setup.py
|
obe711/MAVSDK-Swift
|
3ed35bbb57754824f8235f9acf828c73cc10b72b
|
[
"BSD-3-Clause"
] | null | null | null |
Sources/Mavsdk/proto/pb_plugins/setup.py
|
obe711/MAVSDK-Swift
|
3ed35bbb57754824f8235f9acf828c73cc10b72b
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import subprocess
import sys
from distutils.command.build import build
from distutils.spawn import find_executable
from setuptools import setup
def parse_requirements(filename):
"""
Helper which parses requirement_?.*.txt files
:param filename: relative path, e.g. `./requirements.txt`
:returns: List of requirements
"""
# Get absolute filepath
filepath = os.path.join(os.getcwd(), filename)
# Check if file exists
if not os.path.exists(filepath):
print("[!] File {} not found".format(filename))
return []
# Parse install requirements
with open(filepath, encoding="utf-8") as f:
return [requires.strip() for requires in f.readlines()]
setup(
name="protoc-gen-mavsdk",
version="1.0.1",
description="Protoc plugin used to generate MAVSDK bindings",
url="https://github.com/mavlink/MAVSDK-Proto",
maintainer="Jonas Vautherin, Julian Oes",
maintainer_email="jonas.vautherin@gmail.com, julian@oes.ch",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
],
packages=["protoc_gen_mavsdk"],
install_requires=parse_requirements("requirements.txt"),
entry_points={
"console_scripts": [
"protoc-gen-mavsdk= protoc_gen_mavsdk.__main__:main"
]
}
)
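# A hedged sketch of what parse_requirements returns; the file contents below
# are hypothetical, not the project's actual requirements:
# with open('requirements.txt', 'w') as f:
#     f.write('grpcio\nprotobuf\n')
# parse_requirements('requirements.txt')  # -> ['grpcio', 'protobuf']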
| 27.627451
| 65
| 0.66785
| 164
| 1,409
| 5.640244
| 0.634146
| 0.038919
| 0.064865
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0045
| 0.211498
| 1,409
| 50
| 66
| 28.18
| 0.828083
| 0.146203
| 0
| 0
| 0
| 0
| 0.348343
| 0.048428
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030303
| false
| 0
| 0.181818
| 0
| 0.272727
| 0.030303
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6dfb865d03b79b2e933642d474e469577e44cc93
| 690
|
py
|
Python
|
count.py
|
sunray97/countrows_excel
|
7a95e0f6901051942615c6c16d15fee8e6fd4ded
|
[
"MIT"
] | null | null | null |
count.py
|
sunray97/countrows_excel
|
7a95e0f6901051942615c6c16d15fee8e6fd4ded
|
[
"MIT"
] | null | null | null |
count.py
|
sunray97/countrows_excel
|
7a95e0f6901051942615c6c16d15fee8e6fd4ded
|
[
"MIT"
] | null | null | null |
import xlrd
import os
import sys
# rootdir = 'D:/工作/code/electric/'
rootdir = sys.argv[1]
xlrd.Book.encoding = "gbk"
sumnum=0
filenum = 0
entries = os.listdir(rootdir)  # list all directories and files under the folder
for i in range(0, len(entries)):
    path = os.path.join(rootdir, entries[i])
if os.path.isfile(path):
        print('Processing: ' + path)
data = xlrd.open_workbook(path)
table = data.sheet_by_index(0)
# table = data.sheet_by_name(u'Sheet1')
nrows = table.nrows
data.release_resources()
        sumnum += nrows
        filenum += 1
print('-------------------------------------------------------------------------')
print('Total files: %d' % filenum)
print('Total rows of records: %d' % sumnum)
| 28.75
| 90
| 0.571014
| 89
| 690
| 4.359551
| 0.52809
| 0.030928
| 0.072165
| 0.082474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012567
| 0.192754
| 690
| 23
| 91
| 30
| 0.684022
| 0.121739
| 0
| 0
| 0
| 0
| 0.157807
| 0.121262
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0.190476
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6dfbe53d06b44a62a35e17a43905a5b258b2a411
| 1,442
|
py
|
Python
|
0_mesh2html/preprocess_segments.py
|
ygCoconut/volume2stl
|
bd95fc39620afd21ce08c8c805ac213583d9daaa
|
[
"MIT"
] | null | null | null |
0_mesh2html/preprocess_segments.py
|
ygCoconut/volume2stl
|
bd95fc39620afd21ce08c8c805ac213583d9daaa
|
[
"MIT"
] | null | null | null |
0_mesh2html/preprocess_segments.py
|
ygCoconut/volume2stl
|
bd95fc39620afd21ce08c8c805ac213583d9daaa
|
[
"MIT"
] | null | null | null |
'''
0 Preprocess segments:
-
- specify segments you want to process
- dilate slightly the segments
- create mask for dilation.
- np.unique(my_masked_id) --> select only part with biggest uc
- eliminates outliers too disconnected/far from the main structure
'''
import numpy as np
import h5py
from scipy.ndimage import binary_dilation, label
from tqdm import tqdm
def writeh5_file(file, filename=None):
hf = h5py.File(filename, 'w')
hf.create_dataset('main', data=file)
hf.close()
if __name__=='__main__':
print('start')
segpath = '/n/pfister_lab2/Lab/donglai/mito/db/30um_human/seg_64nm.h5'
savepath = '/n/pfister_lab2/Lab/nils/snowproject/seg_64nm_maindendrite.h5'
seg = h5py.File(segpath, 'r')
seg = np.array(seg['main'], np.uint32) # x y z
dendrite_ids = np.loadtxt('seg_spiny_v2.txt', int)
for i, did in enumerate(tqdm(dendrite_ids)):
# dil = binary_dilation(seg==did)*did
# find all components of the dendrite, tolerate tiny gaps
s = np.ones((3, 3, 3), int)
dil, nf = label((seg==did)*did, structure=s)
# find main component
ui, uc = np.unique(dil, return_counts=True)
uc = uc[ui>0]; ui = ui[ui>0]
max_id = ui[np.argmax(uc)]
# remove non-main components from segmentation
seg[seg==did] = 0
seg[dil==max_id] = did
writeh5_file(seg, savepath)
    print('done')
| 27.730769
| 78
| 0.640777
| 209
| 1,442
| 4.287081
| 0.54067
| 0.020089
| 0.026786
| 0.033482
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022665
| 0.23509
| 1,442
| 51
| 79
| 28.27451
| 0.789665
| 0.291262
| 0
| 0.08
| 0
| 0
| 0.161386
| 0.117822
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.16
| 0
| 0.2
| 0.08
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6dfdee78a36f76a22a8222a5f71ca90b9c824b58
| 2,665
|
py
|
Python
|
branch/runner.py
|
sahibsin/Pruning
|
acc1db31c19c8b23599950cec4fe6399513ed306
|
[
"MIT"
] | null | null | null |
branch/runner.py
|
sahibsin/Pruning
|
acc1db31c19c8b23599950cec4fe6399513ed306
|
[
"MIT"
] | null | null | null |
branch/runner.py
|
sahibsin/Pruning
|
acc1db31c19c8b23599950cec4fe6399513ed306
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from dataclasses import dataclass
import sys
from cli import arg_utils
from foundations.runner import Runner
from branch import registry
@dataclass
class BranchRunner(Runner):
"""A meta-runner that calls the branch-specific runner."""
runner: Runner
@staticmethod
def description():
return "Run a branch."
@staticmethod
def add_args(parser):
# Produce help text for selecting the branch.
helptext = '='*82 + '\nOpenLTH: A Library for Research on Lottery Tickets and Beyond\n' + '-'*82
runner_name = arg_utils.maybe_get_arg('runner', positional=True, position=1)
# If the runner name is not present.
if runner_name is None or runner_name not in registry.registered_runners():
            helptext += '\nChoose a runner on which to branch:\n'
helptext += '\n'.join([f' * {sys.argv[0]} branch {runner}' for runner in registry.registered_runners()])
helptext += '\n' + '='*82
print(helptext)
sys.exit(1)
# If the branch name is not present.
branch_names = registry.registered_branches(runner_name)
branch_name = arg_utils.maybe_get_arg('branch', positional=True, position=2)
if branch_name is None or branch_name not in branch_names:
helptext += '\nChoose a branch to run:'
for bn in branch_names:
helptext += "\n * {} {} {} [...] => {}".format(
sys.argv[0], sys.argv[1], bn,
registry.get(runner_name, bn).description())
helptext += '\n' + '='*82
print(helptext)
sys.exit(1)
# Add the arguments for the branch.
parser.add_argument('runner_name', type=str)
parser.add_argument('branch_name', type=str)
registry.get(runner_name, branch_name).add_args(parser)
@staticmethod
def create_from_args(args: argparse.Namespace):
runner_name = arg_utils.maybe_get_arg('runner', positional=True, position=1)
branch_name = arg_utils.maybe_get_arg('branch', positional=True, position=2)
return BranchRunner(registry.get(runner_name, branch_name).create_from_args(args))
def display_output_location(self):
self.runner.display_output_location()
def run(self) -> None:
self.runner.run()
class LotteryBranch(BranchRunner):
@staticmethod
def description():
return "Run a lottery branch."
| 36.013514
| 119
| 0.643152
| 337
| 2,665
| 4.946588
| 0.311573
| 0.059988
| 0.028794
| 0.040792
| 0.295141
| 0.257948
| 0.177564
| 0.177564
| 0.139172
| 0.139172
| 0
| 0.008543
| 0.253283
| 2,665
| 73
| 120
| 36.506849
| 0.829146
| 0.138837
| 0
| 0.326531
| 0
| 0
| 0.123522
| 0
| 0.061224
| 0
| 0
| 0
| 0
| 1
| 0.122449
| false
| 0
| 0.122449
| 0.040816
| 0.367347
| 0.040816
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6dff005711decae58a77ac5da887759206c11424
| 946
|
py
|
Python
|
wulinfeng/L3/WordDic/AQICity.py
|
qsyPython/Python_play_now
|
278b6d5d30082f8f93b26902c854737c4919405a
|
[
"MIT"
] | 2
|
2018-03-29T08:26:17.000Z
|
2019-06-17T10:56:19.000Z
|
wulinfeng/L3/WordDic/AQICity.py
|
qsyPython/Python_play_now
|
278b6d5d30082f8f93b26902c854737c4919405a
|
[
"MIT"
] | 1
|
2022-03-22T20:26:08.000Z
|
2022-03-22T20:26:08.000Z
|
wulinfeng/L3/WordDic/AQICity.py
|
qsyPython/Python_play_now
|
278b6d5d30082f8f93b26902c854737c4919405a
|
[
"MIT"
] | 1
|
2019-02-18T10:44:20.000Z
|
2019-02-18T10:44:20.000Z
|
import requests  # import the requests library
from bs4 import BeautifulSoup
import urllib.error
import re
class AQICityClass(object):
    def cityAQI(self, url, cityName, header=None):
        try:
            urlName = url + cityName + '.html'
            r = requests.get(urlName, headers=header or {})
        except urllib.error.URLError as e:
            print("Error requesting the air-quality data")
            return None
        except Exception as e:
            print('Exception in the air-quality data function')
            return None
        resp = BeautifulSoup(r.text, 'html.parser')
        all_div = []
        for tag in resp.find_all('div', class_='span12 data'):
            all_div = tag.findAll('div')
        for div in all_div:
            value = div.find('div', class_='value')
            if value is not None:
                title = value.text.strip()  # take the text of the first matching value
                print(title.replace("\n", ""))
                return title.replace("\n", "")
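# A hedged usage sketch; the base URL and city slug below are hypothetical:
# AQICityClass().cityAQI('http://www.pm25.in/', 'beijing')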
| 32.62069
| 64
| 0.524313
| 101
| 946
| 4.841584
| 0.564356
| 0.04908
| 0.03272
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004951
| 0.359408
| 946
| 29
| 65
| 32.62069
| 0.80198
| 0.02537
| 0
| 0
| 0
| 0
| 0.077174
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.16
| 0
| 0.28
| 0.12
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6dff00eda6e7e13b33088c5ae46ed97a3a4cc3ce
| 1,464
|
py
|
Python
|
setup.py
|
hiradyazdan/nginx-amplify-agent-health-check
|
7aa0fa2aba082491b1b47c2b6189a9266245f647
|
[
"MIT"
] | 2
|
2018-05-23T17:34:28.000Z
|
2018-07-09T21:55:53.000Z
|
setup.py
|
hiradyazdan/nginx-amplify-agent-health-check
|
7aa0fa2aba082491b1b47c2b6189a9266245f647
|
[
"MIT"
] | null | null | null |
setup.py
|
hiradyazdan/nginx-amplify-agent-health-check
|
7aa0fa2aba082491b1b47c2b6189a9266245f647
|
[
"MIT"
] | null | null | null |
from setuptools import setup
classifiers = [
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX'
] + [
('Programming Language :: Python :: %s' % x)
for x in '2.7'.split()
]
test_requirements = [
'pytest',
'pytest-cov',
'coveralls',
'mock',
'numpy',
# Only their Exceptions
'setuptools',
'psutil',
'requests'
]
with open('README.rst', 'r') as f:
long_description = f.read()
setup(
name='nginx-amplify-agent-health-check',
version='0.1.6',
description='Static and Dynamic Analysis for nginx-amplify-agent Health Status',
long_description=long_description,
url='https://github.com/hiradyazdan/nginx-amplify-agent-health-check',
author='Hirad Yazdanpanah',
author_email='hirad.y@gmail.com',
license='MIT',
platforms=["linux"],
packages=['amplifyhealthcheck'],
entry_points={
'console_scripts': [
'amphc=amplifyhealthcheck.cli:init_cli'
]
},
classifiers=classifiers,
keywords="nginx amplify nginx-amplify nginx-configuration health-check metrics",
install_requires=[
'psutil',
'setuptools',
'ntplib',
'crossplane',
'requests'
],
setup_requires=['pytest-runner'],
tests_require=test_requirements,
extras_require={
'test': test_requirements,
},
python_requires='==2.7.*',
zip_safe=False
)
| 24
| 84
| 0.623634
| 151
| 1,464
| 5.940397
| 0.649007
| 0.06689
| 0.056856
| 0.076923
| 0.06243
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006222
| 0.231557
| 1,464
| 60
| 85
| 24.4
| 0.791111
| 0.014344
| 0
| 0.111111
| 0
| 0
| 0.424011
| 0.047883
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.018519
| 0
| 0.018519
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6dffaf2548225e608c4b2975db6390a9dca03d10
| 2,849
|
py
|
Python
|
sherlock_scripts/pythonhops/sherlock_combine_restarts.py
|
apoletayev/anomalous_ion_conduction
|
badb91e971e4a5263a433cfa9fcbf914d53ed2a1
|
[
"MIT"
] | 2
|
2021-05-20T03:49:51.000Z
|
2021-06-21T08:41:10.000Z
|
sherlock_scripts/pythonhops/sherlock_combine_restarts.py
|
apoletayev/anomalous_ion_conduction
|
badb91e971e4a5263a433cfa9fcbf914d53ed2a1
|
[
"MIT"
] | null | null | null |
sherlock_scripts/pythonhops/sherlock_combine_restarts.py
|
apoletayev/anomalous_ion_conduction
|
badb91e971e4a5263a433cfa9fcbf914d53ed2a1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 3 01:41:52 2020
Combines LAMMPS output files coming from a series of restarts with a * wildcard.
This works on expanded (mode scalar) fixes from LAMMPS where each line is a time.
The overlapping values of times due to restarts are averaged, but they should be identical.
Required command-line args : filenames= ,
Optional command-line args : file_out= ,
@author: andreypoletaev
"""
# =============================================================================
# %% Imports and constants
# =============================================================================
import pandas as pd
import sys
from glob import glob
# =============================================================================
# %% parse input and combine
# =============================================================================
## Parse inputs. Format: key=value
options = dict([ (x.split('=')[0],x.split('=')[1]) for x in sys.argv[1:] ])
keys = list(options.keys())
# print(options)
assert 'filenames' in keys, 'please pass filenames=... [path] as command-line option'
# template = f'/*_vacf_{int(options["duration"])}ps.csv' if 'template' not in keys else options['template']
file_out = options['filenames'].replace('*','') if 'file_out' not in keys else options['file_out']
print('looking for files that look like this: '+options['filenames'], flush=True)
output = pd.DataFrame()
counter = 0
files_to_combine = sorted(glob(options['filenames']))
assert len(files_to_combine) > 1, 'Fewer than two files match the pattern; nothing to combine.'
print(files_to_combine, flush=True)
for fin in files_to_combine:
try:
## read the header for column names
fp = open(fin, 'r')
line1 = fp.readline()
line2 = fp.readline()
fp.close()
colnames = line2[:-1].split(' ')[1:]
## read the actual numbers
df = pd.read_csv(fin, skiprows=1, sep=' ')
# colnames = df.iloc[0,1:-1].tolist()
df = df.iloc[:, :-1]
df.columns = colnames
df = df.apply(pd.to_numeric)
# print(df.columns)
# print(df.head(5))
# print(df.dtypes)
# print(df.head())
if len(df) > 0:
output = output.append(df, ignore_index=True)
counter += 1
print(f'appended data from file #{counter} : {fin}', flush=True)
    except Exception: print(f'could not load / add {fin}', flush=True)
## ensemble-average in all cases - but not always the first thing
output = output.groupby('TimeStep').agg('mean').reset_index().rename(columns={'TimeStep':line1[:-1]+'\n# '+'TimeStep'})
# output.TimeStep = output.TimeStep.astype(int)
## write file normally
output.to_csv(file_out, index=False, float_format='%.6g', sep=' ')
| 32.011236
| 119
| 0.562654
| 355
| 2,849
| 4.456338
| 0.495775
| 0.022124
| 0.035398
| 0.016435
| 0.025284
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014899
| 0.199017
| 2,849
| 89
| 120
| 32.011236
| 0.678352
| 0.440154
| 0
| 0
| 0
| 0
| 0.195138
| 0
| 0
| 0
| 0
| 0
| 0.064516
| 1
| 0
| false
| 0.032258
| 0.096774
| 0
| 0.096774
| 0.129032
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3009f5a1a8c11a46a1920015fba53e4cf3ae345
| 1,875
|
py
|
Python
|
app.py
|
zorro1992/task-app-devops
|
48312e53ce5711ce0d9508b481e73f78df411dd2
|
[
"MIT"
] | 1
|
2021-08-19T11:54:08.000Z
|
2021-08-19T11:54:08.000Z
|
app.py
|
zorro1992/task-app-devops
|
48312e53ce5711ce0d9508b481e73f78df411dd2
|
[
"MIT"
] | null | null | null |
app.py
|
zorro1992/task-app-devops
|
48312e53ce5711ce0d9508b481e73f78df411dd2
|
[
"MIT"
] | null | null | null |
"""
app
"""
from flask import Flask, render_template, request, redirect, url_for
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# /// = relative path, //// = absolute path
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
class Todo(db.Model):
"""A dummy docstring."""
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(100))
complete = db.Column(db.Boolean)
def pub1(self):
"""A dummy docstring."""
print("")
def pub2(self):
"""A dummy docstring."""
print("")
# Edit endpoint
@app.route("/edit")
def home1():
"""A dummy docstring."""
todo_list = Todo.query.all()
return render_template("base.html", todo_list=todo_list)
# Default home endpoint
@app.route("/")
def list1():
"""A dummy docstring."""
todo_list = Todo.query.all()
return render_template("list.html", todo_list=todo_list)
# Add endpoint
@app.route("/add", methods=["POST"])
def add():
"""A dummy docstring."""
title = request.form.get("title")
new_todo = Todo(title=title, complete=False)
db.session.add(new_todo)
db.session.commit()
return redirect(url_for("home1"))
# Update endpoint
@app.route("/update/<int:todo_id>")
def update(todo_id):
"""A dummy docstring."""
todo = Todo.query.filter_by(id=todo_id).first()
todo.complete = not todo.complete
db.session.commit()
return redirect(url_for("home1"))
# Delete endpoint
@app.route("/delete/<int:todo_id>")
def delete(todo_id):
"""A dummy docstring."""
todo = Todo.query.filter_by(id=todo_id).first()
db.session.delete(todo)
db.session.commit()
return redirect(url_for("home1"))
# Main function
if __name__ == "__main__":
db.create_all()
app.run(host="0.0.0.0", debug=True)
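# Hedged smoke-test commands against the running app (port 5000 is Flask's default):
# curl http://localhost:5000/
# curl -X POST -d "title=write docs" http://localhost:5000/add
# curl http://localhost:5000/update/1
# curl http://localhost:5000/delete/1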
| 25
| 68
| 0.6544
| 255
| 1,875
| 4.647059
| 0.317647
| 0.040506
| 0.101266
| 0.064135
| 0.367932
| 0.293671
| 0.293671
| 0.293671
| 0.259916
| 0.185654
| 0
| 0.009026
| 0.1728
| 1,875
| 74
| 69
| 25.337838
| 0.754997
| 0.156267
| 0
| 0.272727
| 0
| 0
| 0.118456
| 0.062173
| 0
| 0
| 0
| 0
| 0
| 1
| 0.159091
| false
| 0
| 0.045455
| 0
| 0.409091
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a304245df7598c6937f92e93f9b38b346d5b4c9a
| 2,009
|
py
|
Python
|
app/models/version.py
|
akashtalole/python-flask-restful-api
|
475d8fd7be1724183716a197aac4257f8fbbeac4
|
[
"MIT"
] | 3
|
2019-09-05T05:28:49.000Z
|
2020-06-10T09:03:37.000Z
|
app/models/version.py
|
akashtalole/python-flask-restful-api
|
475d8fd7be1724183716a197aac4257f8fbbeac4
|
[
"MIT"
] | null | null | null |
app/models/version.py
|
akashtalole/python-flask-restful-api
|
475d8fd7be1724183716a197aac4257f8fbbeac4
|
[
"MIT"
] | null | null | null |
from sqlalchemy.orm import backref
from app.models import db
class Version(db.Model):
"""Version model class"""
__tablename__ = 'versions'
id = db.Column(db.Integer, primary_key=True)
event_id = db.Column(db.Integer, db.ForeignKey('events.id', ondelete='CASCADE'))
events = db.relationship("Event", backref=backref('version', uselist=False))
event_ver = db.Column(db.Integer, nullable=False, default=0)
sessions_ver = db.Column(db.Integer, nullable=False, default=0)
speakers_ver = db.Column(db.Integer, nullable=False, default=0)
tracks_ver = db.Column(db.Integer, nullable=False, default=0)
sponsors_ver = db.Column(db.Integer, nullable=False, default=0)
microlocations_ver = db.Column(db.Integer, nullable=False, default=0)
def __init__(self,
event_id=None,
event_ver=None,
sessions_ver=None,
speakers_ver=None,
tracks_ver=None,
sponsors_ver=None,
microlocations_ver=None):
self.event_id = event_id
self.event_ver = event_ver
self.sessions_ver = sessions_ver
self.speakers_ver = speakers_ver
self.tracks_ver = tracks_ver
self.sponsors_ver = sponsors_ver
self.microlocations_ver = microlocations_ver
def __repr__(self):
return '<Version %r>' % self.id
def __str__(self):
return self.__repr__()
@property
def serialize(self):
"""Return object data in easily serializable format"""
return {
'version': [
{'id': self.id,
'event_id': self.event_id,
'event_ver': self.event_ver,
'sessions_ver': self.sessions_ver,
'speakers_ver': self.speakers_ver,
'tracks_ver': self.tracks_ver,
'sponsors_ver': self.sponsors_ver,
'microlocations_ver': self.microlocations_ver}
]
}
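# A hedged sketch of reading the serialized payload (all field values are hypothetical):
# v = Version(event_id=1, event_ver=2, sessions_ver=0, speakers_ver=0,
#             tracks_ver=1, sponsors_ver=0, microlocations_ver=0)
# v.serialize['version'][0]['event_ver']  # -> 2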
| 35.245614
| 84
| 0.60677
| 232
| 2,009
| 4.982759
| 0.224138
| 0.066609
| 0.069204
| 0.117647
| 0.276817
| 0.212803
| 0.212803
| 0.212803
| 0.212803
| 0
| 0
| 0.004196
| 0.288203
| 2,009
| 56
| 85
| 35.875
| 0.804196
| 0.033848
| 0
| 0
| 0
| 0
| 0.071503
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.043478
| 0.043478
| 0.434783
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a304eeaa7c9f4ed5704a6d6deba75d5ddfdbb3d1
| 346
|
py
|
Python
|
code-tk/scrollbar.py
|
shilpasayura/bk
|
2b0a1aa9300da80e201264bcf80226b3c5ff4ad6
|
[
"MIT"
] | 4
|
2018-09-08T10:30:27.000Z
|
2021-07-23T07:59:24.000Z
|
code-tk/scrollbar.py
|
shilpasayura/bk
|
2b0a1aa9300da80e201264bcf80226b3c5ff4ad6
|
[
"MIT"
] | null | null | null |
code-tk/scrollbar.py
|
shilpasayura/bk
|
2b0a1aa9300da80e201264bcf80226b3c5ff4ad6
|
[
"MIT"
] | 6
|
2018-09-07T05:54:17.000Z
|
2021-07-23T07:59:25.000Z
|
from tkinter import *
root = Tk()
scrollbar = Scrollbar(root)
scrollbar.pack( side = RIGHT, fill=Y )
mylist = Listbox(root, yscrollcommand = scrollbar.set )
for line in range(100):
mylist.insert(END, "Line number : " + str(line))
mylist.pack( side = LEFT, fill = BOTH )
scrollbar.config( command = mylist.yview )
mainloop()
| 21.625
| 55
| 0.705202
| 46
| 346
| 5.304348
| 0.652174
| 0.065574
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010381
| 0.16474
| 346
| 15
| 56
| 23.066667
| 0.83391
| 0
| 0
| 0
| 0
| 0
| 0.040462
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.181818
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3052e2c0e4e4d32b495f5d940bc6dff09090dc4
| 1,742
|
py
|
Python
|
Solutions/2021/13.py
|
Azurealistic/Winter
|
4ef5d1fde10f9ba769c33597e1269f161068f18b
|
[
"Unlicense"
] | 1
|
2021-12-18T20:02:57.000Z
|
2021-12-18T20:02:57.000Z
|
Solutions/2021/13.py
|
Azurealistic/Winter
|
4ef5d1fde10f9ba769c33597e1269f161068f18b
|
[
"Unlicense"
] | null | null | null |
Solutions/2021/13.py
|
Azurealistic/Winter
|
4ef5d1fde10f9ba769c33597e1269f161068f18b
|
[
"Unlicense"
] | null | null | null |
# Advent of Code 2021 - Day: 13
# Imports (Always imports data based on the folder and file name)
from aocd import data, submit
def solve(data):
# Parse input
# Split the input into two lists, based on where the empty line is
# Find the index of the line that is '', and use that to split the list
# Return the two lists
coordinates, instructions = data.strip().split("\n\n")
coordinates = [[int(x) for x in ln.split(",")] for ln in coordinates.strip().split("\n")]
instructions = [ln.split() for ln in instructions.strip().split("\n")]
for iteration, fold in enumerate(instructions):
direction, location = fold[-1].split('=')
location = int(location)
points = set()
        # Place the point based on the current fold.
for (x, y) in coordinates:
if direction == 'y':
if y < location:
points.add((x, y))
else:
points.add((x, location - (y - location)))
elif direction == 'x':
if x < location:
points.add((x, y))
else:
points.add((location - (x - location), y))
coordinates = points
if iteration == 0:
print("Star 1:", len(coordinates))
submit(len(coordinates), part="a", day=13, year=2021)
grid = []
for n in range(10):
grid.append(list(" " * 80))
for (x, y) in coordinates:
grid[y][x] = '█'
    # Print the grid: turn each row into a string, join the rows with newlines, and include only rows that contain a filled ('█') cell
print("Star 2:")
print("\n".join(["".join(row) for row in grid if '█' in row]))
# This has to be manually submitted, because it's a visual representation of the grid.
submit("RHALRCRA", part="b", day=13, year=2021)
# Solution
def main():
solve(data)
# Call the main function.
if __name__ == '__main__':
main()
| 29.525424
| 150
| 0.647532
| 272
| 1,742
| 4.125
| 0.411765
| 0.00713
| 0.029412
| 0.02139
| 0.114082
| 0.057041
| 0.057041
| 0.057041
| 0
| 0
| 0
| 0.018786
| 0.205511
| 1,742
| 59
| 151
| 29.525424
| 0.790462
| 0.326636
| 0
| 0.166667
| 0
| 0
| 0.042241
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.027778
| 0
| 0.083333
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a308348c02f7d05a6bdcec5e102eab0f328f25f9
| 1,329
|
py
|
Python
|
biosimulators_utils/archive/utils.py
|
virtualcell/Biosimulators_utils
|
1b34e1e0a9ace706d245e9d515d0fae1e55a248d
|
[
"MIT"
] | 2
|
2021-06-02T13:26:34.000Z
|
2021-12-27T23:12:47.000Z
|
biosimulators_utils/archive/utils.py
|
virtualcell/Biosimulators_utils
|
1b34e1e0a9ace706d245e9d515d0fae1e55a248d
|
[
"MIT"
] | 102
|
2020-12-06T19:47:43.000Z
|
2022-03-31T12:56:17.000Z
|
biosimulators_utils/archive/utils.py
|
virtualcell/Biosimulators_utils
|
1b34e1e0a9ace706d245e9d515d0fae1e55a248d
|
[
"MIT"
] | 4
|
2021-01-27T19:56:34.000Z
|
2022-02-03T21:08:20.000Z
|
""" Utilities for creating archives
:Author: Jonathan Karr <karr@mssm.edu>
:Date: 2020-12-06
:Copyright: 2020, Center for Reproducible Biomedical Modeling
:License: MIT
"""
from .data_model import Archive, ArchiveFile
import glob
import os
__all__ = ['build_archive_from_paths']
def build_archive_from_paths(path_patterns, rel_path=None, recursive=True):
""" Build an archive from a list of glob path patterns
Args:
path_patterns (:obj:`list` of :obj:`str`): glob path patterns for files to bundle into an archive
rel_path (:obj:`str`, optional): if provided, set the archive file names to their path relative to this path
recursive (:obj:`bool`, optional): if :obj:`True`, match the path patterns recursively
Returns:
:obj:`Archive`: archive
"""
archive = Archive()
for path_pattern in path_patterns:
for local_path in glob.glob(path_pattern, recursive=recursive):
if os.path.isfile(local_path):
if rel_path:
archive_path = os.path.relpath(local_path, rel_path)
else:
archive_path = local_path
archive.files.append(ArchiveFile(
local_path=local_path,
archive_path=archive_path,
))
return archive
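# A hedged usage sketch; the glob pattern and rel_path below are hypothetical:
# archive = build_archive_from_paths(['outputs/**/*.csv'], rel_path='outputs')
# for f in archive.files:
#     print(f.local_path, '->', f.archive_path)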
| 32.414634
| 116
| 0.645598
| 168
| 1,329
| 4.928571
| 0.422619
| 0.086957
| 0.054348
| 0.050725
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012333
| 0.267871
| 1,329
| 40
| 117
| 33.225
| 0.838643
| 0.426637
| 0
| 0
| 0
| 0
| 0.03338
| 0.03338
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.166667
| 0
| 0.277778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
096461150c75c546d91d335a2584ba96fe70e040
| 845
|
py
|
Python
|
v1/Commit.py
|
gzc/gitstats
|
d6e41c4f7ad5c3d754ef872fa9e615b88df0ccb8
|
[
"MIT"
] | 26
|
2017-06-11T05:44:25.000Z
|
2021-02-20T12:21:22.000Z
|
v1/Commit.py
|
gzc/gitstats
|
d6e41c4f7ad5c3d754ef872fa9e615b88df0ccb8
|
[
"MIT"
] | 1
|
2020-04-22T15:48:19.000Z
|
2020-04-22T15:52:51.000Z
|
v1/Commit.py
|
gzc/gitstats
|
d6e41c4f7ad5c3d754ef872fa9e615b88df0ccb8
|
[
"MIT"
] | 1
|
2020-10-20T04:46:11.000Z
|
2020-10-20T04:46:11.000Z
|
"""
This class represents the info of one commit
"""
from Change import *

class Commit:
    def __init__(self, hash, author, authorEmail, date, commitMessage):
        self.hash = hash
        self.author = author
        self.authorEmail = authorEmail
        self.date = date
        self.commitMessage = commitMessage
        self.changes = None
        self.linesAdded = 0
        self.linesDeleted = 0
        self.filesAdded = 0
        self.filesDeleted = 0
def __str__(self):
return ('commit hash {0}\ncommit author {1}\ncommit author email {2}\n'
'commit date {3}\n{4} lines added, {5} lines deleted\n'
'{6} files added, {7} files deleted\n'). \
format(self.hash, self.author, self.authorEmail, self.date,
self.linesAdded, self.linesDeleted, self.filesAdded, self.filesDeleted)
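# A hedged construction sketch (all field values below are hypothetical):
# c = Commit('abc123', 'Jane Doe', 'jane@example.com', '2017-06-11', 'initial commit')
# c.linesAdded, c.linesDeleted = 10, 2
# print(c)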
| 31.296296
| 79
| 0.622485
| 101
| 845
| 5.128713
| 0.425743
| 0.046332
| 0.054054
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019169
| 0.259172
| 845
| 26
| 80
| 32.5
| 0.808307
| 0.052071
| 0
| 0
| 0
| 0
| 0.189155
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.052632
| 0.052632
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0966490b7f876064ed7777de569aec9aeed5aa61
| 3,758
|
py
|
Python
|
htdocs/plotting/auto/scripts100/p172.py
|
trentford/iem
|
7264d24f2d79a3cd69251a09758e6531233a732f
|
[
"MIT"
] | null | null | null |
htdocs/plotting/auto/scripts100/p172.py
|
trentford/iem
|
7264d24f2d79a3cd69251a09758e6531233a732f
|
[
"MIT"
] | null | null | null |
htdocs/plotting/auto/scripts100/p172.py
|
trentford/iem
|
7264d24f2d79a3cd69251a09758e6531233a732f
|
[
"MIT"
] | null | null | null |
"""YTD precip"""
import calendar
import datetime
from pandas.io.sql import read_sql
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.plot.use_agg import plt
from pyiem.network import Table as NetworkTable
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc['data'] = True
desc['description'] = """This chart presents year to date accumulated
precipitation for a station of your choice. The year with the highest and
    lowest accumulation is shown along with the envelope of observations and
long term average. You can optionally plot up to three additional years
of your choice.
"""
thisyear = datetime.date.today().year
desc['arguments'] = [
dict(type='station', name='station', default='IA2203',
label='Select Station:', network='IACLIMATE'),
dict(type='year', name='year1', default=thisyear,
label='Additional Year to Plot:'),
dict(type='year', name='year2', optional=True, default=(thisyear - 1),
label='Additional Year to Plot: (optional)'),
dict(type='year', name='year3', optional=True, default=(thisyear - 2),
label='Additional Year to Plot: (optional)'),
]
return desc
def plotter(fdict):
""" Go """
pgconn = get_dbconn('coop')
ctx = get_autoplot_context(fdict, get_description())
station = ctx['station']
network = ctx['network']
year1 = ctx.get('year1')
year2 = ctx.get('year2')
year3 = ctx.get('year3')
nt = NetworkTable(network)
table = "alldata_%s" % (station[:2],)
df = read_sql("""
WITH years as (SELECT distinct year from """ + table + """
WHERE station = %s and sday = '0101')
SELECT day, sday, year, precip,
sum(precip) OVER (PARTITION by year ORDER by day ASC) as accum from
""" + table + """ WHERE station = %s and year in (select year from years)
ORDER by day ASC
""", pgconn, params=(station, station), index_col='day')
if df.empty:
raise ValueError("No data found!")
(fig, ax) = plt.subplots(1, 1)
# Average
jday = df[['sday', 'accum']].groupby('sday').mean()
ax.plot(range(1, len(jday.index)+1), jday['accum'], lw=2, zorder=5,
color='k', label='Average - %.2f' % (jday['accum'].iloc[-1],))
# Min and Max
jmin = df[['sday', 'accum']].groupby('sday').min()
jmax = df[['sday', 'accum']].groupby('sday').max()
ax.fill_between(range(1, len(jday.index)+1), jmin['accum'],
jmax['accum'], zorder=2, color='tan')
# find max year
plotted = []
for year, color in zip([df['accum'].idxmax().year,
df[df['sday'] == '1231']['accum'].idxmin().year,
year1, year2, year3],
['b', 'brown', 'r', 'g', 'purple']):
if year is None or year in plotted:
continue
plotted.append(year)
df2 = df[df['year'] == year]
ax.plot(range(1, len(df2.index)+1), df2['accum'],
label='%s - %.2f' % (year, df2['accum'].iloc[-1]),
color=color, lw=2)
ax.set_title(("Year to Date Accumulated Precipitation\n"
"[%s] %s (%s-%s)"
) % (station, nt.sts[station]['name'],
nt.sts[station]['archive_begin'].year,
datetime.date.today().year))
ax.set_ylabel("Precipitation [inch]")
ax.grid(True)
ax.legend(loc=2)
ax.set_xlim(1, 366)
ax.set_xticks((1, 32, 60, 91, 121, 152, 182, 213, 244, 274,
305, 335, 365))
ax.set_xticklabels(calendar.month_abbr[1:])
return fig, df
if __name__ == '__main__':
plotter(dict())
| 37.207921
| 78
| 0.575838
| 486
| 3,758
| 4.393004
| 0.380658
| 0.014052
| 0.016862
| 0.022482
| 0.15644
| 0.072131
| 0
| 0
| 0
| 0
| 0
| 0.031408
| 0.262906
| 3,758
| 100
| 79
| 37.58
| 0.73935
| 0.02661
| 0
| 0.02439
| 0
| 0
| 0.293843
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02439
| false
| 0
| 0.073171
| 0
| 0.121951
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
096a5172854a6f7ee1cbbe59f19ac4a86d87ac0c
| 1,684
|
py
|
Python
|
Steganalysis-CNN/dataload.py
|
1129ljc/video-interpolation-detection
|
eb2931269b2ac19af28de750f0b719fb0d66aaef
|
[
"Apache-2.0"
] | 2
|
2022-03-29T06:46:21.000Z
|
2022-03-30T09:13:10.000Z
|
Steganalysis-CNN/dataload.py
|
1129ljc/video-interpolation-detection
|
eb2931269b2ac19af28de750f0b719fb0d66aaef
|
[
"Apache-2.0"
] | null | null | null |
Steganalysis-CNN/dataload.py
|
1129ljc/video-interpolation-detection
|
eb2931269b2ac19af28de750f0b719fb0d66aaef
|
[
"Apache-2.0"
] | null | null | null |
'''
@Time : 2021/9/3 9:42
@Author : ljc
@FileName: dataload.py
@Software: PyCharm
'''
import os
import json
import cv2
import numpy as np
import torch
from PIL import Image
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
transform = transforms.Compose(
[
# transforms.Resize(size=(224, 224)),
transforms.ToTensor(),
# transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
]
)
class DataSet(Dataset):
def __init__(self, data_file_path):
super(Dataset, self).__init__()
self.json_file_path = data_file_path
        assert os.path.isfile(data_file_path), 'The dataset json file cannot be read'
        with open(data_file_path, 'r', encoding='utf8') as fp:
data = fp.readlines()
self.image_path_list = []
self.image_label_list = []
for i in range(len(data)):
line = data[i].split(' ')
self.image_path_list.append(line[0])
self.image_label_list.append(int(line[1][0:-1]))
self.image_num = len(self.image_path_list)
def __len__(self):
return self.image_num
def __getitem__(self, item):
image_file = self.image_path_list[item]
label = self.image_label_list[item]
label_torch = torch.tensor(label)
# image_torch = transform(Image.open(image_file).convert('RGB'))
image_torch = torch.from_numpy(np.array(Image.open(image_file).convert('RGB')))
image_torch = image_torch.permute(2, 0, 1).float()
image_torch = torch.unsqueeze(image_torch[0, :, :], dim=0)
return image_file, image_torch, label_torch
| 30.618182
| 92
| 0.647862
| 236
| 1,684
| 4.385593
| 0.355932
| 0.078261
| 0.014493
| 0.019324
| 0.131401
| 0.085024
| 0.085024
| 0.085024
| 0.011594
| 0.011594
| 0
| 0.029163
| 0.226247
| 1,684
| 54
| 93
| 31.185185
| 0.765157
| 0.140143
| 0
| 0
| 0
| 0
| 0.031293
| 0
| 0
| 0
| 0
| 0
| 0.026316
| 1
| 0.078947
| false
| 0
| 0.236842
| 0.026316
| 0.394737
| 0.026316
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
096b3b878c08f6ba21432355cfef1328654cf1dc
| 23,998
|
py
|
Python
|
run.py
|
kampta/PatchVAE
|
816f4b49fd8b836641d7e1068c1e802ae0453742
|
[
"MIT"
] | 9
|
2020-10-29T11:56:53.000Z
|
2021-11-21T14:34:38.000Z
|
run.py
|
kampta/PatchVAE
|
816f4b49fd8b836641d7e1068c1e802ae0453742
|
[
"MIT"
] | null | null | null |
run.py
|
kampta/PatchVAE
|
816f4b49fd8b836641d7e1068c1e802ae0453742
|
[
"MIT"
] | 2
|
2020-10-29T03:40:31.000Z
|
2021-01-31T20:04:49.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" run.py
Code to run the PatchVAE on different datasets
Usage:
# Run with default arguments on mnist
python run.py
Basic VAE borrowed from
https://github.com/pytorch/examples/tree/master/vae
"""
__author__ = "Kamal Gupta"
__email__ = "kampta@cs.umd.edu"
__version__ = "0.1"
import sys
from collections import OrderedDict
import shutil
import numpy as np
import torch
import torch.nn as nn
from torchvision.utils import make_grid
from utils import Timer
from utils.torchsummary import summary
from utils.commons import data_loaders, load_vae_model, count_parameters, EdgeWeights
from loss import BetaVaeLoss, VaeConcreteLoss, BetaVaeConcreteLoss,\
BetaVaeConcretePartsLoss, BetaVaeConcretePartsEntropyLoss, DiscLoss
from model import Discriminator
import utils.commons as commons
from torch.utils.tensorboard import SummaryWriter
def train_vaegan(data_loader, model_d, model_v, opt_d, opt_v, d_loss_fn, v_loss_fn, writer):
model_v.train()
model_d.train()
fwd_clock = Timer()
bwd_clock = Timer()
num_batches = args.img_per_epoch // args.batch_size
data_iterator = iter(data_loader)
overall_losses = OrderedDict()
# for batch_idx, (x, _) in enumerate(data_loader):
for batch_idx in range(num_batches):
batch_losses = OrderedDict()
try:
x, _ = next(data_iterator)
except StopIteration:
data_iterator = iter(data_loader)
continue
x = x.to(args.device)
########################################################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
#######################################################
# train with real
model_d.zero_grad()
real_x = x
real_y = torch.ones(x.size(0)).cuda()
outputs = model_d(real_x)
err_d_real = d_loss_fn(outputs.squeeze(), real_y.squeeze())
err_d_real.backward()
batch_losses['err_d_real'] = err_d_real.item()
batch_losses['d_x'] = outputs.data.mean()
# train with fake
fake_y = torch.zeros(x.size(0)).cuda()
x_tilde, z_app_mean, z_app_var, z_vis_mean = model_v(x, args.temp)
# recon_x, _ = x_tilde
outputs = model_d(x_tilde.detach())
err_d_fake = d_loss_fn(outputs.squeeze(), fake_y.squeeze())
err_d_fake.backward()
batch_losses['err_d_fake'] = err_d_fake.item()
batch_losses['d_v1'] = outputs.data.mean()
opt_d.step()
###########################
# (2) Update G network: VAE
###########################
model_v.zero_grad()
loss, loss_dict = v_loss_fn(
x_tilde, x, z_app_mean, z_app_var, z_vis_mean,
categorical=args.categorical, py=args.py, beta_p=args.beta_p,
beta_a=args.beta_a, beta_v=args.beta_v,
beta_ea=args.beta_ea, beta_ew=args.beta_ew
)
loss.backward()
for loss_key, loss_value in loss_dict.items():
batch_losses[loss_key] = loss_value.item()
opt_v.step()
############################
# (3) Update G network: maximize log(D(G(z)))
###########################
x_tilde, z_app_mean, z_app_var, z_vis_mean = model_v(x, args.temp)
# recon_x, _ = x_tilde
outputs = model_d(x_tilde)
real_y.fill_(1)
err_g = d_loss_fn(outputs.squeeze(), real_y.squeeze())
err_g.backward()
batch_losses['err_g'] = err_g.item()
batch_losses['d_v2'] = outputs.data.mean()
opt_v.step()
# Logs
for loss_key, loss_value in batch_losses.items():
writer.add_scalar('loss/train/' + loss_key, loss_value, args.steps)
overall_losses[loss_key] = overall_losses[loss_key] + loss_value \
if loss_key in overall_losses else loss_value
args.steps += 1
if args.steps % 1000 == 1:
args.temp = max(args.temp * np.exp(-args.anneal * args.steps),
args.min_temp)
if batch_idx % args.log_interval != 0:
continue
logstr = '\t'.join(['{}: {:0.4f}'.format(k, v) for k, v in batch_losses.items()])
print('[{}/{} ({:0.0f}%)]\t{}'.format(batch_idx, num_batches,
100. * batch_idx / num_batches, logstr))
overall_losses = OrderedDict([(k, v / num_batches) for k, v in overall_losses.items()])
logstr = '\t'.join(['{}: {:0.4f}'.format(k, v) for k, v in overall_losses.items()])
print('[End of train epoch]\t# steps: {}\t# images: {}, temp: {:0.2f}'.format(
args.steps, num_batches * args.batch_size, args.temp))
print(logstr)
print('[End of train epoch]\t# calls: {}, Fwd: {:.3f} ms\tBwd: {:.3f} ms'.format(
fwd_clock.calls, 1000 * fwd_clock.average_time, 1000 * bwd_clock.average_time))
return overall_losses
def train(data_loader, model, optimizer, loss_function, writer):
model.train()
fwd_clock = Timer()
bwd_clock = Timer()
losses = OrderedDict()
losses['loss'] = 0
num_batches = args.img_per_epoch // args.batch_size
data_iterator = iter(data_loader)
for batch_idx in range(num_batches):
try:
x, _ = next(data_iterator)
x = x.to(args.device)
optimizer.zero_grad()
# Forward Pass
fwd_clock.tic()
x_tilde, z_app_mean, z_app_var, z_vis_mean = model(x, args.temp)
# Compute Loss
loss, loss_dict = loss_function(
x_tilde, x, z_app_mean, z_app_var, z_vis_mean,
categorical=args.categorical, py=args.py, beta_p=args.beta_p,
beta_a=args.beta_a, beta_v=args.beta_v,
beta_ea=args.beta_ea, beta_ew=args.beta_ew
)
fwd_clock.toc()
# Backprop
bwd_clock.tic()
loss.backward()
bwd_clock.toc()
# Update Adam
optimizer.step()
# Logs
losses['loss'] += loss.item()
writer.add_scalar('loss/train/loss', loss.item(), args.steps)
for loss_key, loss_value in loss_dict.items():
writer.add_scalar('loss/train/' + loss_key, loss_value.item(), args.steps)
losses[loss_key] = losses[loss_key] + loss_value.item() \
if loss_key in losses else loss_value.item()
args.steps += 1
if args.steps % 1000 == 1:
args.temp = max(args.temp * np.exp(-args.anneal * args.steps),
args.min_temp)
if batch_idx % args.log_interval != 0:
continue
logstr = '\t'.join(['{}: {:0.4f}'.format(k, v.item()) for k, v in loss_dict.items()])
print('[{}/{} ({:0.0f}%)]\t{}'.format(batch_idx, num_batches,
100. * batch_idx / num_batches, logstr))
except StopIteration:
data_iterator = iter(data_loader)
losses = OrderedDict([(k, v / num_batches) for k, v in losses.items()])
logstr = '\t'.join(['{}: {:0.4f}'.format(k, v) for k, v in losses.items()])
print('[End of train epoch]\t# steps: {}\t# images: {}, temp: {:0.2f}'.format(
args.steps, num_batches * args.batch_size, args.temp))
print(logstr)
print('[End of train epoch]\t# calls: {}, Fwd: {:.3f} ms\tBwd: {:.3f} ms'.format(
fwd_clock.calls, 1000 * fwd_clock.average_time, 1000 * bwd_clock.average_time))
return losses['loss']
def test(data_loader, model, loss_function, writer):
model.eval()
losses = OrderedDict()
losses['loss'] = 0
data_iterator = iter(data_loader)
with torch.no_grad():
for batch_idx, (x, _) in enumerate(data_iterator):
x = x.to(args.device)
x_tilde, z_app_mean, z_app_var, z_vis_mean = model(x, args.temp)
loss, loss_dict = loss_function(
x_tilde, x, z_app_mean, z_app_var, z_vis_mean,
categorical=args.categorical, py=args.py, beta_p=args.beta_p,
beta_a=args.beta_a, beta_v=args.beta_v,
beta_ea=args.beta_ea, beta_ew=args.beta_ew
)
losses['loss'] += loss.item()
for loss_key, loss_value in loss_dict.items():
losses[loss_key] = losses[loss_key] + loss_value.item() \
if loss_key in losses else loss_value.item()
losses = OrderedDict([(k, v / (batch_idx+1)) for k, v in losses.items()])
logstr = '\t'.join(['{}: {:0.4f}'.format(k, v) for k, v in losses.items()])
print('[End of test epoch]')
print(logstr)
# Logs
for loss_key, loss_value in losses.items():
writer.add_scalar('loss/test/' + loss_key, loss_value, args.steps)
return losses['loss']
def plot_graph(height, width, channels, model, writer):
fake = torch.from_numpy(np.random.randn(args.batch_size,
channels, height, width).astype(np.float32))
fake = fake.to(args.device)
writer.add_graph(model, fake)
def main():
np.random.seed(args.seed)
torch.manual_seed(args.seed)
args.steps = 0
writer = SummaryWriter(args.log_dir)
save_filename = args.model_dir
train_loader, test_loader, (channels, height, width), num_classes, _ = \
data_loaders(args.dataset, data_folder=args.data_folder,
classify=False, size=args.size, inet=args.inet,
batch_size=args.batch_size, num_workers=args.workers)
# Fixed images for Tensorboard
fixed_images, _ = next(iter(test_loader))
fixed_images = fixed_images.to(args.device)
fixed_grid = make_grid(commons.unnorm(fixed_images).cpu().data, nrow=32, pad_value=1)
writer.add_image('original', fixed_grid, 0)
# build a VAE model
vae_model, _ = load_vae_model((channels, height, width),
args.arch,
encoder_arch=args.encoder_arch,
decoder_arch=args.decoder_arch,
hidden_size=args.hidden_size,
num_parts=args.num_parts,
base_depth=args.ngf,
independent=args.independent,
hard=args.hard,
categorical=args.categorical,
scale=args.scale,
device=args.device)
args.py = 1 / args.num_parts if args.py is None else args.py
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
# dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
vae_model = nn.DataParallel(vae_model)
vae_model.to(args.device)
if args.pretrained is not None:
print("Loading pretrained model from %s" % args.pretrained)
pretrained_dict = torch.load(args.pretrained, map_location=args.device)
if type(pretrained_dict) == OrderedDict:
vae_model.load_state_dict(pretrained_dict)
elif 'vae_dict' in pretrained_dict:
vae_model.load_state_dict(pretrained_dict['vae_dict'])
else:
print('debug')
sys.exit(0)
# Generate samples only, no training
if args.evaluate:
with torch.no_grad():
# Reconstructions after current epoch
if torch.cuda.device_count() > 1:
reconstructions = vae_model.module.get_reconstructions(
fixed_images, temp=args.temp)
else:
reconstructions = vae_model.get_reconstructions(
fixed_images, temp=args.temp)
for key in reconstructions:
grid = make_grid(reconstructions[key].cpu(), nrow=32, pad_value=1)
writer.add_image(key, grid, 0)
# Random samples after current epoch
if torch.cuda.device_count() > 1:
random_samples = vae_model.module.get_random_samples(py=args.py)
else:
random_samples = vae_model.get_random_samples(py=args.py)
for key in random_samples:
grid = make_grid(random_samples[key].cpu(), nrow=32, pad_value=1)
writer.add_image(key, grid, 0)
sys.exit(0)
opt_v = torch.optim.Adam(vae_model.parameters(), lr=args.lr, betas=(0.5, 0.999))
recon_mask = None
if args.recon_mask == 'edge':
recon_mask = EdgeWeights(nc=channels, scale=args.scale)
if args.arch == 'vae':
loss_function = BetaVaeLoss(beta=args.beta_a, mask_nn=recon_mask)
elif args.arch == 'convvae':
loss_function = VaeConcreteLoss(
beta_v=args.beta_v,
py=args.py,
categorical=args.categorical,
mask_nn=recon_mask
)
elif args.arch == 'patchy':
if args.beta_p == 0. and args.beta_ea == 0. and args.beta_ew == 0.:
loss_function = BetaVaeConcreteLoss(
beta_a=args.beta_a,
beta_v=args.beta_v,
py=args.py,
categorical=args.categorical,
mask_nn=recon_mask
)
elif args.beta_ea == 0. and args.beta_ew == 0.:
loss_function = BetaVaeConcretePartsLoss(
beta_a=args.beta_a,
beta_v=args.beta_v,
beta_p=args.beta_p,
py=args.py,
categorical=args.categorical,
)
else:
loss_function = BetaVaeConcretePartsEntropyLoss(
beta_a=args.beta_a,
beta_v=args.beta_v,
beta_p=args.beta_p,
beta_ea=args.beta_ea,
beta_ew=args.beta_ew,
py=args.py,
categorical=args.categorical,
)
else:
print('Unknown model architecture: %s' % args.arch)
sys.exit(0)
if args.gan:
gan_model = Discriminator(height, nc=channels, ndf=args.ndf, scale=args.scale).to(args.device)
opt_d = torch.optim.Adam(gan_model.parameters(), lr=args.lr, betas=(0.5, 0.999))
d_loss_fn = DiscLoss(args.beta_g)
# test after seeing approx. every 50000 images
# num_epochs = (args.num_epochs * len(train_loader.dataset)) // 50000
for epoch in range(1, args.num_epochs + 1):
print("================== Epoch: {} ==================".format(epoch))
if args.gan:
train_loss = train_vaegan(train_loader, gan_model, vae_model, opt_d, opt_v, d_loss_fn, loss_function, writer)
else:
train_loss = train(train_loader, vae_model, opt_v, loss_function, writer)
test_loss = test(test_loader, vae_model, loss_function, writer)
if epoch == 1:
best_loss = test_loss
if epoch % args.save_interval != 0:
continue
# Save model
with torch.no_grad():
# Reconstructions after current epoch
if torch.cuda.device_count() > 1:
reconstructions = vae_model.module.get_reconstructions(
fixed_images, temp=args.temp)
else:
reconstructions = vae_model.get_reconstructions(
fixed_images, temp=args.temp)
for key in reconstructions:
grid = make_grid(reconstructions[key].cpu(), nrow=32, pad_value=1, normalize=True)
writer.add_image(key, grid, epoch)
# Random samples after current epoch
if torch.cuda.device_count() > 1:
random_samples = vae_model.module.get_random_samples(py=args.py)
else:
random_samples = vae_model.get_random_samples(py=args.py)
for key in random_samples:
grid = make_grid(random_samples[key].cpu(), nrow=32, pad_value=1, normalize=True)
writer.add_image(key, grid, epoch)
f = '{0}/model_{1}.pt'.format(save_filename, epoch)
save_state = {
'args': args,
'vae_dict': vae_model.state_dict(),
'loss': train_loss,
}
if args.gan:
save_state['disc_dict'] = gan_model.state_dict()
torch.save(save_state, f)
if test_loss < best_loss:
best_loss = test_loss
shutil.copyfile(f, '{0}/best.pt'.format(save_filename))
print("Model saved at: {0}/best.pt".format(save_filename))
print("# Parameters: {}".format(count_parameters(vae_model)))
if torch.cuda.device_count() > 1:
summary(vae_model.module, (channels, height, width))
else:
summary(vae_model, (channels, height, width))
if __name__ == '__main__':
import argparse
import os
parser = argparse.ArgumentParser(description='Patchy VAE')
# Dataset
parser.add_argument('--dataset', type=str, default='cifar100',
help='name of the dataset (default: cifar100)')
parser.add_argument('--data-folder', type=str, default='./data',
help='name of the data folder (default: ./data)')
parser.add_argument('--workers', type=int, default=4,
help='number of threads (default: 4)')
parser.add_argument('--pretrained', default=None,
help='path of pre-trained model')
parser.add_argument('--evaluate', action='store_true', default=False,
                        help='just sample, no training (default: False)')
parser.add_argument('--size', type=int, default=64,
help='size of image (default: 64)')
parser.add_argument('--inet', default=False, action='store_true',
help='Whether or not to do imagenet normalization')
# Model
parser.add_argument('--arch', type=str, default='patchy',
help='model architecture (default: patchy)')
parser.add_argument('--encoder-arch', type=str, default='resnet',
help='encoder architecture (default: resnet)')
parser.add_argument('--decoder-arch', type=str, default='pyramid',
help='decoder architecture (default: pyramid)')
parser.add_argument('--independent', action='store_true', default=False,
help='independent decoders (default: False)')
parser.add_argument('--ngf', type=int, default=64,
help='depth of first layer of encoder (default: 64)')
# Optimization
parser.add_argument('--recon-mask', type=str, default=None,
help="Use 'edge' mask for improved reconstruction (default: None.)")
parser.add_argument('--batch-size', type=int, default=128,
help='batch size (default: 128)')
parser.add_argument('--img-per-epoch', type=int, default=50000,
help='images per epoch (default: 50000)')
parser.add_argument('--num-epochs', type=int, default=30,
help='number of epochs (default: 30)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='enables CUDA training (default: False)')
parser.add_argument('--lr', type=float, default=1e-4,
help='learning rate for Adam optimizer (default: 1e-4)')
parser.add_argument('--beta-a', type=float, default=1.0,
help='contribution of KLD App loss (default: 1.0)')
parser.add_argument('--beta-v', type=float, default=10.,
help='contribution of KLD Vis loss (default: 10.)')
parser.add_argument('--beta-p', type=float, default=0.,
help='contribution of MSE Parts loss (default: 0.)')
parser.add_argument('--beta-ea', type=float, default=0.,
help='contribution of Entropy Across loss (default: 0.)')
parser.add_argument('--beta-ew', type=float, default=0.,
help='contribution of Entropy Within loss (default: 0.)')
# GAN
parser.add_argument('--gan', action='store_true', default=False,
help='enable gan (default: False)')
parser.add_argument('--ndf', type=int, default=64,
                        help='depth of first layer of discriminator (default: 64)')
    parser.add_argument('--beta-g', type=float, default=1.0,
                        help='contribution of GAN loss (default: 1.0)')
# Latent space
parser.add_argument('--scale', type=int, default=8,
help='scale down by (default: 8)')
parser.add_argument('--num-parts', type=int, default=16,
help='number of parts (default: 16)')
parser.add_argument('--hidden-size', type=int, default=6,
help='size of the latent vectors (default: 6)')
parser.add_argument('--py', type=float, default=None,
help='part visibility prior (default: 1 / num_parts)')
parser.add_argument('--categorical', action='store_true', default=False,
help='take only 1 part per location (default: False)')
# Annealing
parser.add_argument('--hard', action='store_true', default=False,
help='hard samples from bernoulli (default: False)')
parser.add_argument('--temp', type=float, default=1.0,
help='Initial temperature (default: 1.0)')
    parser.add_argument('--anneal', type=float, default=0.00003,
                        help='anneal rate (default: 0.00003)')
parser.add_argument('--min-temp', type=float, default=0.1,
help='minimum temperature')
# Miscellaneous
parser.add_argument('--debug-grad', action='store_true', default=False,
help='debug gradients (default: False)')
parser.add_argument('--output-folder', type=str, default='./scratch',
help='name of the output folder (default: ./scratch)')
parser.add_argument('--seed', type=int, default=1,
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=50,
help='how many batches to wait before logging training status')
    parser.add_argument('--save-interval', type=int, default=1,
                        help='how many epochs to wait before saving the model')
args = parser.parse_args()
print("All arguments")
print(args)
print("PID: ", os.getpid())
args.cuda = not args.no_cuda and torch.cuda.is_available()
    args.device = torch.device("cuda:0" if args.cuda else "cpu")
# Slurm
if 'SLURM_JOB_NAME' in os.environ and 'SLURM_JOB_ID' in os.environ:
# running with sbatch and not srun
if os.environ['SLURM_JOB_NAME'] != 'bash':
args.output_folder = os.path.join(args.output_folder,
os.environ['SLURM_JOB_ID'])
print("SLURM_JOB_ID: ", os.environ['SLURM_JOB_ID'])
else:
args.output_folder = os.path.join(args.output_folder, str(os.getpid()))
else:
args.output_folder = os.path.join(args.output_folder, str(os.getpid()))
# Create logs and models folder if they don't exist
if not os.path.exists(args.output_folder):
print("Creating output directory: %s" % args.output_folder)
os.makedirs(args.output_folder)
log_dir = os.path.join(args.output_folder, 'logs')
if not os.path.exists(log_dir):
print("Creating log directory: %s" % log_dir)
os.makedirs(log_dir)
model_dir = os.path.join(args.output_folder, 'models')
if not os.path.exists(model_dir):
print("Creating model directory: %s" % model_dir)
os.makedirs(model_dir)
args.log_dir = log_dir
args.model_dir = model_dir
main()
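
# Example invocation (hypothetical script name; flag values are illustrative,
# the flags themselves are defined in the argument parser above):
#   python patchy_vae_train.py --dataset cifar100 --arch patchy --num-parts 16 --beta-v 10. --gan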
# ===== membership/management/commands/csvbills.py | repo: guaq/sikteeri | license: MIT | 11,602 bytes =====
# encoding: UTF-8
from __future__ import with_statement
import logging
import codecs
import csv
import os
from datetime import datetime, timedelta
from decimal import Decimal
from django.db.models import Q, Sum
from django.core.management.base import BaseCommand
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext as _
from django.contrib.auth.models import User
from membership.models import Bill, BillingCycle, Payment
from membership.utils import log_change
from optparse import make_option
logger = logging.getLogger("membership.csvbills")
class UTF8Recoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
<http://docs.python.org/library/csv.html#examples>
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def next(self):
return self.reader.next().encode("utf-8")
class UnicodeReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
<http://docs.python.org/library/csv.html#examples>
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def next(self):
row = self.reader.next()
return [unicode(s, "utf-8") for s in row]
def __iter__(self):
return self
class UnicodeDictReader(UnicodeReader):
"""A CSV reader which stores the headers from the first line
"""
def __init__(self, *args, **kw):
UnicodeReader.__init__(self, *args, **kw)
# Read headers from first line
self.headers = map(lambda x: x.strip(), UnicodeReader.next(self))
def next(self):
row = UnicodeReader.next(self)
return dict(zip(self.headers, row))
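
# A minimal usage sketch (hypothetical file name; assumes a semicolon-delimited,
# Latin-1 encoded CSV like the bank exports handled below):
#
#     with open('payments.csv') as f:
#         for row in UnicodeDictReader(f, delimiter=';', encoding='iso8859-1'):
#             print row['date']   # each row is a {header: value} dict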
class RequiredFieldNotFoundException(Exception): pass
class DuplicateColumnException(Exception): pass
class PaymentFromFutureException(Exception): pass
class BillDictReader(UnicodeDictReader):
REQUIRED_COLUMNS = ['date', 'amount', 'transaction']
CSV_TRANSLATION = {}
def __init__(self, f, delimiter=';', encoding="iso8859-1", *args, **kw):
UnicodeDictReader.__init__(self, f, delimiter=delimiter,
encoding=encoding, *args, **kw)
# Translate headers
h = self.headers
for i in xrange(0, len(h)):
self.headers[i] = self._get_translation(h[i])
# Check that all required columns exist in the header
for name in self.REQUIRED_COLUMNS:
if name not in self.headers:
error = "CSV format is invalid: missing field '%s'." % name
raise RequiredFieldNotFoundException(error)
# Check that each field is unique
for name in self.headers:
if self.headers.count(name) != 1:
error = "The field '%s' occurs multiple times in the header"
raise DuplicateColumnException(error)
def _get_translation(self, h):
"""
Function for custom translations
"""
return self.CSV_TRANSLATION.get(h, h)
def _get_row(self, row):
"""
Function for custom data processing
"""
return row
def next(self):
row = self._get_row(UnicodeDictReader.next(self))
if len(row) == 0:
return None
row['amount'] = Decimal(row['amount'].replace(",", "."))
row['date'] = datetime.strptime(row['date'], "%d.%m.%Y")
row['reference'] = row['reference'].replace(' ', '').lstrip('0')
row['transaction'] = row['transaction'].replace(' ', '').replace('/', '')
if row.has_key('value_date'):
row['value_date'] = datetime.strptime(row['value_date'], "%d.%m.%Y")
return row
class OpDictReader(BillDictReader):
'''Reader for Osuuspankki CSV file format
The module converts Osuuspankki CSV format data into a more usable form.'''
# If these fields are not found on the first line, an exception is raised
REQUIRED_COLUMNS = ['date', 'amount', 'transaction']
# Translation table from Osuuspankki CSV format to short names
OP_CSV_TRANSLATION = {u'Kirjauspäivä' : 'date',
u'Arvopäivä' : 'value_date',
u'Tap.pv' : 'date', # old format
u'Määrä EUROA' : 'amount',
u'Määrä' : 'amount',
u'Tapahtumalajikoodi' : 'event_type_code',
u'Selitys' : 'event_type_description',
u'Saaja/Maksaja' : 'fromto',
u'Saajan tilinumero' : 'account', # old format
u'Saajan tilinumero ja pankin BIC' : 'account',
u'Viite' : 'reference',
u'Viesti' : 'message',
u'Arkistotunnus' : 'transaction', # old format
u'Arkistointitunnus' : 'transaction'}
def _get_translation(self, h):
# Quick and dirty, OP changes this field name too often!
if h.startswith(u"Määrä"):
return "amount"
return self.OP_CSV_TRANSLATION.get(h, h)
class ProcountorDictReader(BillDictReader):
REQUIRED_COLUMNS = ['date', 'amount', 'transaction']
CSV_TRANSLATION = {u'Kirjauspäivä' : 'date',
u'Arvopäivä' : 'value_date',
u'Maksupäivä' : 'date',
u'Maksu' : 'amount',
u'Summa' : 'amount',
u'Kirjausselite' : 'event_type_description',
u'Maksaja' : 'fromto',
u'Nimi' : 'fromto',
u'Tilinro' : 'account',
u'Viesti' : 'message',
u'Viitenumero' : 'reference',
u'Arkistointitunnus' : 'transaction',
u'Oikea viite' : 'real_reference',
}
def _get_row(self, row):
if 'real_reference' in row:
row['reference'] = row['real_reference']
return row
def row_to_payment(row):
try:
p = Payment.objects.get(transaction_id__exact=row['transaction'])
return p
except Payment.DoesNotExist:
p = Payment(payment_day=min(datetime.now(), row['date']),
amount=row['amount'],
type=row['event_type_description'],
payer_name=row['fromto'],
reference_number=row['reference'],
message=row['message'],
transaction_id=row['transaction'])
return p
def attach_payment_to_cycle(payment, user=None):
"""
Outside of this module, this function is mainly used by
generate_test_data.py.
"""
    if payment.ignore or payment.billingcycle is not None:
        raise Exception("Unexpected function call. This shouldn't happen.")
reference = payment.reference_number
cycle = BillingCycle.objects.get(reference_number=reference)
    if not cycle.is_paid or cycle.amount_paid() < cycle.sum:
payment.attach_to_cycle(cycle, user=user)
else:
# Don't attach a payment to a cycle with enough payments
payment.comment = _('duplicate payment')
payment.duplicate = True
log_user = User.objects.get(id=1)
log_change(payment, log_user, change_message="Payment not attached due to duplicate payment")
payment.save()
return None
return cycle
def process_payments(reader, user=None):
"""
Actual CSV file processing logic
"""
return_messages = []
num_attached = num_notattached = 0
sum_attached = sum_notattached = 0
for row in reader:
        if row is None:
continue
if row['amount'] < 0: # Transaction is paid by us, ignored
continue
# Payment in future more than 1 day is a fatal error
if row['date'] > datetime.now() + timedelta(days=1):
raise PaymentFromFutureException("Payment date in future")
payment = row_to_payment(row)
# Do nothing if this payment has already been assigned or ignored
if payment.billingcycle or payment.ignore:
continue
try:
cycle = attach_payment_to_cycle(payment, user=user)
if cycle:
msg = _("Attached payment %(payment)s to cycle %(cycle)s") % {
'payment': unicode(payment), 'cycle': unicode(cycle)}
logger.info(msg)
return_messages.append((None, None, msg))
num_attached = num_attached + 1
sum_attached = sum_attached + payment.amount
else:
# Payment not attached to cycle because enough payments were attached
msg = _("Billing cycle already paid for %s. Payment not attached.") % payment
return_messages.append((None, None, msg))
logger.info(msg)
num_notattached = num_notattached + 1
sum_notattached = sum_notattached + payment.amount
except BillingCycle.DoesNotExist:
# Failed to find cycle for this reference number
if not payment.id:
payment.save() # Only save if object not in database yet
logger.warning("No billing cycle found for %s" % payment.reference_number)
return_messages.append((None, payment.id, _("No billing cycle found for %s") % payment))
num_notattached = num_notattached + 1
sum_notattached = sum_notattached + payment.amount
log_message ="Processed %s payments total %.2f EUR. Unidentified payments: %s (%.2f EUR)" % \
(num_attached + num_notattached, sum_attached + sum_notattached, num_notattached, \
sum_notattached)
logger.info(log_message)
return_messages.append((None, None, log_message))
return return_messages
def process_op_csv(file_handle, user=None):
logger.info("Starting OP payment CSV processing...")
reader = OpDictReader(file_handle)
    return process_payments(reader, user=user)
def process_procountor_csv(file_handle, user=None):
logger.info("Starting procountor payment CSV processing...")
reader = ProcountorDictReader(file_handle)
    return process_payments(reader, user=user)
class Command(BaseCommand):
args = '<csvfile> [<csvfile> ...]'
help = 'Read a CSV list of payment transactions'
option_list = BaseCommand.option_list + (
make_option('--procountor',
dest='procountor',
default=None,
action="store_true",
help='Use procountor import csv format'),
)
def handle(self, *args, **options):
for csvfile in args:
logger.info("Starting the processing of file %s." %
os.path.abspath(csvfile))
# Exceptions of process_csv are fatal in command line run
with open(csvfile, 'r') as file_handle:
if options['procountor']:
process_procountor_csv(file_handle)
else:
process_op_csv(file_handle)
logger.info("Done processing file %s." % os.path.abspath(csvfile))
# ===== apps/site/api/serializers/dataset_serializer.py | repo: LocalGround/localground | license: Apache-2.0 | 1,962 bytes | 9 stars =====
from localground.apps.site.api.serializers.base_serializer import \
BaseSerializer, NamedSerializerMixin, ProjectSerializerMixin
from localground.apps.site.api.serializers.field_serializer import \
FieldSerializer
from django.conf import settings
from rest_framework import serializers
from localground.apps.site import models
class DatasetSerializerList(
NamedSerializerMixin, ProjectSerializerMixin, BaseSerializer):
data_url = serializers.SerializerMethodField()
fields_url = serializers.SerializerMethodField()
def create(self, validated_data):
# Call the Dataset's custom create method, which creates
# 2 fields "for free": Name and Description:
description = serializers.CharField(
source='description', required=False, allow_null=True, label='description',
style={'base_template': 'textarea.html', 'rows': 5}, allow_blank=True
)
validated_data.update(self.get_presave_create_dictionary())
self.instance = models.Dataset.create(**validated_data)
return self.instance
class Meta:
model = models.Dataset
fields = BaseSerializer.field_list + \
('id', 'name', 'description', 'tags', 'url') + \
ProjectSerializerMixin.field_list + ('data_url', 'fields_url')
depth = 0
def get_data_url(self, obj):
return '%s/api/0/datasets/%s/data/' % (settings.SERVER_URL, obj.pk)
def get_fields_url(self, obj):
return '%s/api/0/datasets/%s/fields/' % (settings.SERVER_URL, obj.pk)
class DatasetSerializerDetail(DatasetSerializerList):
fields = serializers.SerializerMethodField('get_dataset_fields')
class Meta:
model = models.Dataset
fields = DatasetSerializerList.Meta.fields + ('fields',)
depth = 0
def get_dataset_fields(self, obj):
return FieldSerializer(
obj.fields, many=True,
context={'request': {}}).data
# ===== example/0_Basic_usage_of_the_library/python_pyppeteer/7_PageClass_Cookie.py | repo: RecluseXU/learning_spider | license: MIT | 1,447 bytes | 38 stars =====
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : 7_PageClass_Cookie.py
@Time : 2020-8-23 01:33:25
@Author : Recluse Xu
@Version : 1.0
@Contact : 444640050@qq.com
@Desc    :  The Page class
            Official docs: https://miyakogi.github.io/pyppeteer/reference.html#pyppeteer.page.Page.target
            The Page class provides methods for interacting with a browser tab; one browser can own several Page objects.
'''
# here put the import lib
import asyncio
from pyppeteer import launch
async def main():
browser = await launch({
'headless': False,
        'ignoreHTTPSErrors': True,
'viewport': {'width': 1280, 'height': 800},
'autoClose': True,
})
page = await browser.newPage()
await page.goto('http://www.baidu.com')
# Page.cookies(*urls) → dict
    # Get cookies; if URLs are given, return cookies for those URLs,
    # otherwise return the cookies of the current page
c = await page.cookies()
print(c)
# Page.deleteCookie(*cookies)
    # Delete cookies
    # Accepted cookie fields:
    # name (str): required
# url (str)
# domain (str)
# path (str)
# secure (bool)
await page.deleteCookie({'name': 'BAIDUID'})
# Page.setCookie(*cookies) → None[source]
    # Set cookies
    # Optional cookie fields:
# name (str): required
# value (str): required
# url (str)
# domain (str)
# path (str)
# expires (number): Unix time in seconds
# httpOnly (bool)
# secure (bool)
# sameSite (str): 'Strict' or 'Lax'
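    # A minimal sketch with hypothetical cookie values:
    await page.setCookie({'name': 'example_cookie', 'value': '1', 'url': 'http://www.baidu.com'})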
asyncio.get_event_loop().run_until_complete(main())
# ===== ML/Computer Vision/Lab7_face_detection_real_time.py | repo: richeyphu/ITE-425 | license: MIT | 736 bytes =====
import cv2
faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
#capture = cv2.VideoCapture(0)
capture = cv2.VideoCapture('Elon Musk 320.mp4')
while True:
    ret, frame = capture.read()
    if not ret:  # stop when the video ends or a frame cannot be read
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#faces = faceCascade.detectMultiScale(gray, 1.1, 4)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
#minSize=(30, 30),
flags=cv2.CASCADE_SCALE_IMAGE
)
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
cv2.imshow('Image', frame)
    keyboard = cv2.waitKey(30) & 0xff  # mask the return value, not the delay
    if keyboard == 27:  # Esc key
        break
capture.release()
cv2.destroyAllWindows()
# ===== setup.py | repo: tabac/cprofilev | license: MIT | 1,110 bytes =====
from setuptools import setup
import sys
if sys.version_info < (2,5):
raise NotImplementedError(
"Sorry, you need at least Python 2.5 to use cprofilev.")
VERSION = '1.0.4'
__doc__ = """\
An easier way to use cProfile.
Outputs a simpler html view of profiled stats.
Able to show stats while the code is still running!
"""
setup(
name='CProfileV',
version=VERSION,
url='https://github.com/ymichael/cprofilev',
author='Michael Yong',
author_email='wrong92@gmail.com',
py_modules=['cprofilev'],
entry_points="""
[console_scripts]
cprofilev = cprofilev:main
""",
install_requires=["bottle"],
license='MIT',
description='An easier way to use cProfile',
long_description=__doc__,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Environment :: Web Environment',
'Framework :: Bottle',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Topic :: Software Development :: Testing',
]
)
# ===== classification.py | repo: Sigmoid-Frontsquat-LLC/classification-model-backend | license: MIT | 5,947 bytes =====
import sys # this is for extracting command line arguments.
def parse_activator(flag, value):
    if flag[1] == 'a':
        return (True, value)
    return (False, None)

def parse_optimizer(flag, value):
    if flag[1] == 'o':
        return (True, value)
    return (False, None)

def parse_source(flag, value):
    if flag[1] == 's':
        return (True, value)
    return (False, None)
activator = ''
optimizer = ''
source = ''
if len(sys.argv) == 1 or (len(sys.argv) - 1) % 2 != 0:
raise ValueError("Usage: [-s image] [-a activator] [-o optimizer]")
else:
# could this be done better?
# sure, but this works for now...
    for i in range(1, len(sys.argv) - 1, 2):  # step over (flag, value) pairs
flag = sys.argv[i]
value = sys.argv[i + 1]
isActivator, act = parse_activator(flag, value)
if isActivator:
if act != '-o':
activator = act
continue
isOptimizer, opt = parse_optimizer(flag, value)
if isOptimizer:
optimizer = opt
continue
isSource, so = parse_source(flag, value)
if isSource:
source = so
continue
pass
pass
# naive check to ensure no argument is left unfilled
if len(activator) == 0 or len(optimizer) == 0 or len(source) == 0:
raise ValueError("Usage: [-s image] [-a activator] [-o optimizer]")
# exit(0)
############# Classification Logic ##################
import pandas as pd
import io
import requests
import numpy as np
import os
import logging
import json
import shutil
from sklearn.model_selection import train_test_split
from sklearn import metrics
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Dropout, Flatten
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.applications.vgg16 import VGG16
from PIL import Image, ImageFile, ImageEnhance
from matplotlib.pyplot import imshow
from io import BytesIO
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
####### warning messages not printed #######
logging.disable(logging.WARNING)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# class labels are as follows for the cifar10
# airplane : 0
# automobile : 1
# bird : 2
# cat : 3
# deer : 4
# dog : 5
# frog : 6
# horse : 7
# ship : 8
# truck : 9
class_labels = ['airplane','automobile','bird','cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = 10
# Image preprocessing
img = Image.open(source)
img = img.resize((32,32))
enhancer = ImageEnhance.Sharpness(img)
enhanced_im = enhancer.enhance(10.0)
enhanced_im.save('resized.jpg')
img_array = np.asarray(enhanced_im)
img_array = img_array / 255
input_shape = (32,32,3)
# reshape for model
# original model was trained with (32,32,3)
img_array = img_array.reshape((1,32,32,3))
modelo = Sequential()
modelo.add(Conv2D(32, (3, 3), activation=activator, padding='same', input_shape=input_shape))
modelo.add(Conv2D(32, (3, 3), activation=activator, padding='same'))
modelo.add(Conv2D(32, (3, 3), activation=activator, padding='same'))
modelo.add(MaxPooling2D((3, 3)))
modelo.add(Dropout(0.2))
modelo.add(Conv2D(64, (3, 3), activation=activator, padding='same'))
modelo.add(Conv2D(64, (3, 3), activation=activator, padding='same'))
modelo.add(Conv2D(64, (3, 3), activation=activator, padding='same'))
modelo.add(MaxPooling2D((3, 3)))
modelo.add(Dropout(0.2))
modelo.add(Conv2D(128, (3, 3), activation=activator, padding='same'))
modelo.add(Conv2D(128, (3, 3), activation=activator, padding='same'))
modelo.add(MaxPooling2D((3, 3)))
modelo.add(Flatten())
modelo.add(Dense(128, activation=activator))
modelo.add(Dropout(0.2))
modelo.add(Dense(10, activation='softmax'))
modelo.compile(loss='categorical_crossentropy',optimizer=optimizer)
# validate the 'activator'
pass
# validate the 'optimizer'
pass
# Load weights based on activator and optimizer
# probably not needed as we are already passing the optimizer as a variable
if optimizer == 'adam':
# compile with adam
modelo.compile(loss='categorical_crossentropy',optimizer=optimizer)
# activator
if activator == 'relu':
# load adam-relu
modelo.load_weights('dnn/relu-adam2.hdf5')
elif activator == 'sigmoid':
# load sigmoid-adam
modelo.load_weights('dnn/sigmoid-adam2.hdf5')
elif activator == 'tanh':
# load tanh-adam
modelo.load_weights('dnn/tanh-adam2.hdf5')
else:
print('error')
elif optimizer == 'sgd':
# compile with sgd
modelo.compile(loss='categorical_crossentropy',optimizer=optimizer)
if activator == 'relu':
# load relu-sgd
modelo.load_weights('dnn/relu-sgd2.hdf5')
elif activator == 'sigmoid':
# load sigmoid-sgd
modelo.load_weights('dnn/sigmoid-sgd2.hdf5')
elif activator == 'tanh':
# load tanh-sgd
modelo.load_weights('dnn/tanh-sgd2.hdf5')
else:
print('error')
# Get the classification
############# classification ##############
pred = modelo.predict(img_array)
pred = pred[0]
pred_class = class_labels[np.argmax(pred)]
############# JSON ###############
# classification = {k:v for k,v in zip(class_labels,pred)}
classification = [{label: prob} for label, prob in zip(class_labels, pred)]
########## output ################
print(classification)
# ===== wizard/gui/destination_manager.py | repo: Wizard-collab/wizard_2 | license: MIT | 7,658 bytes | 1 star =====
# coding: utf-8
# Author: Leo BRUNEL
# Contact: contact@leobrunel.com
# Python modules
from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtCore import pyqtSignal
import logging
# Wizard modules
from wizard.core import assets
from wizard.core import project
from wizard.vars import ressources
# Wizard gui modules
from wizard.gui import gui_utils
from wizard.gui import gui_server
logger = logging.getLogger(__name__)
class destination_manager(QtWidgets.QWidget):
def __init__(self, export_id, parent=None):
super(destination_manager, self).__init__(parent)
self.setWindowIcon(QtGui.QIcon(ressources._wizard_ico_))
        self.setWindowTitle("Wizard - Destination manager")
self.references_ids = dict()
self.export_id = export_id
self.fill_thread = fill_thread(self)
self.build_ui()
self.connect_functions()
self.refresh()
def build_ui(self):
self.setMinimumSize(QtCore.QSize(800,500))
self.main_layout = QtWidgets.QVBoxLayout()
self.main_layout.setContentsMargins(0,0,0,0)
self.main_layout.setSpacing(0)
self.setLayout(self.main_layout)
self.header = QtWidgets.QWidget()
self.header.setObjectName('transparent_widget')
self.header_layout = QtWidgets.QHBoxLayout()
self.header_layout.setSpacing(6)
self.header.setLayout(self.header_layout)
self.main_layout.addWidget(self.header)
self.header_label = QtWidgets.QLabel()
self.header_layout.addWidget(self.header_label)
self.content_widget = QtWidgets.QWidget()
self.content_widget.setObjectName('dark_widget')
self.content_layout = QtWidgets.QVBoxLayout()
self.content_layout.setSpacing(6)
self.content_widget.setLayout(self.content_layout)
self.main_layout.addWidget(self.content_widget)
self.list_view = QtWidgets.QTreeWidget()
self.list_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.list_view.setObjectName('tree_as_list_widget')
self.list_view.setColumnCount(2)
self.list_view.setHeaderLabels(['Destination', 'Referenced version'])
self.list_view.header().resizeSection(0, 450)
self.list_view.setIndentation(0)
self.list_view.setAlternatingRowColors(True)
self.list_view.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.content_layout.addWidget(self.list_view)
self.buttons_widget = QtWidgets.QWidget()
self.buttons_widget.setObjectName('transparent_widget')
self.buttons_layout = QtWidgets.QHBoxLayout()
self.buttons_layout.setContentsMargins(0,0,0,0)
self.buttons_layout.setSpacing(6)
self.buttons_widget.setLayout(self.buttons_layout)
self.content_layout.addWidget(self.buttons_widget)
self.buttons_layout.addSpacerItem(QtWidgets.QSpacerItem(0,0, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed))
self.remove_selection_button = QtWidgets.QPushButton()
gui_utils.application_tooltip(self.remove_selection_button, "Remove selected references")
self.remove_selection_button.setFixedSize(35,35)
self.remove_selection_button.setIconSize(QtCore.QSize(25,25))
self.remove_selection_button.setIcon(QtGui.QIcon(ressources._tool_archive_))
self.buttons_layout.addWidget(self.remove_selection_button)
self.update_button = QtWidgets.QPushButton()
gui_utils.application_tooltip(self.update_button, "Update selected references")
self.update_button.setFixedSize(35,35)
self.update_button.setIconSize(QtCore.QSize(25,25))
self.update_button.setIcon(QtGui.QIcon(ressources._tool_update_))
self.buttons_layout.addWidget(self.update_button)
def connect_functions(self):
self.fill_thread.data_signal.connect(self.update_reference)
self.remove_selection_button.clicked.connect(self.remove_selection)
self.update_button.clicked.connect(self.update_selection)
def refresh(self):
self.header_label.setText(assets.instance_to_string(('export', self.export_id)))
reference_rows = project.get_references_by_export(self.export_id)
project_references_id = []
for reference_row in reference_rows:
project_references_id.append(reference_row['id'])
if reference_row['id'] not in self.references_ids.keys():
target_item = custom_target_item(reference_row, self.list_view.invisibleRootItem())
self.references_ids[reference_row['id']] = target_item
references_list_ids = list(self.references_ids.keys())
for reference_id in references_list_ids:
if reference_id not in project_references_id:
self.remove_reference_item(reference_id)
self.fill_thread.update_reference_rows(self.export_id, reference_rows)
def remove_reference_item(self, reference_id):
if reference_id in self.references_ids.keys():
item = self.references_ids[reference_id]
self.list_view.invisibleRootItem().removeChild(item)
del self.references_ids[reference_id]
def remove_selection(self):
selected_items = self.list_view.selectedItems()
for selected_item in selected_items:
project.remove_reference(selected_item.reference_row['id'])
gui_server.refresh_team_ui()
def update_selection(self):
selected_items = self.list_view.selectedItems()
for selected_item in selected_items:
reference_id = selected_item.reference_row['id']
assets.set_reference_last_version(reference_id)
gui_server.refresh_team_ui()
def update_reference(self, data_tuple):
if data_tuple[0] in self.references_ids.keys():
self.references_ids[data_tuple[0]].update(data_tuple)
class custom_target_item(QtWidgets.QTreeWidgetItem):
def __init__(self, reference_row, parent=None):
super(custom_target_item, self).__init__(parent)
self.reference_row = reference_row
bold_font=QtGui.QFont()
bold_font.setBold(True)
self.setFont(0, bold_font)
def update(self, data_tuple):
self.setText(0, data_tuple[1])
self.setText(1, data_tuple[2])
if data_tuple[3]:
self.setForeground(1, QtGui.QBrush(QtGui.QColor('#9ce87b')))
else:
self.setForeground(1, QtGui.QBrush(QtGui.QColor('#f79360')))
class fill_thread(QtCore.QThread):
data_signal = pyqtSignal(tuple)
def __init__(self, parent = None):
super(fill_thread, self).__init__(parent)
self.export_id = None
self.references_rows = []
self.running = False
def update_reference_rows(self, export_id, reference_rows):
self.references_rows = reference_rows
self.export_id = export_id
self.running = True
self.start()
def run(self):
if self.running:
default_export_version_id = project.get_default_export_version(self.export_id, 'id')
for reference_row in self.references_rows:
work_env_string = assets.instance_to_string(('work_env', reference_row['work_env_id']))
export_version_row = project.get_export_version_data(reference_row['export_version_id'])
if default_export_version_id != export_version_row['id']:
up_to_date = 0
else:
up_to_date = 1
self.data_signal.emit((reference_row['id'], work_env_string, export_version_row['name'], up_to_date))
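
# A minimal launch sketch (assumes a configured Wizard project; export_id=1 is a hypothetical id):
if __name__ == '__main__':
    import sys
    app = QtWidgets.QApplication(sys.argv)
    widget = destination_manager(export_id=1)
    widget.show()
    sys.exit(app.exec_())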
# ===== algotrading/agents/six_month_cycle_agent.py | repo: vrishank97/AlgoTrading | license: MIT | 1,591 bytes | 92 stars =====
from .BaseAgent import BaseAgent
import pandas as pd
import numpy as np
from itertools import islice
class SixMonthCycle_Agent(BaseAgent):
def __init__(self, window_size, small, large, signal, up, down):
super().__init__(window_size)
self.up = up
self.down = down
self.large = large
self.small = small
self.signal = signal
self.window_size = window_size
def get_macd_signal(self):
memory_slice = list(islice(self.memory, self.window_size - self.large, self.window_size))
        df_memory = pd.DataFrame(memory_slice)
df_macd = df_memory.ewm(span=self.small, adjust=False).mean() - df_memory.ewm(span=self.large, adjust=False).mean()
signal = df_macd.ewm(span=self.signal, adjust=False).mean()[0][self.large - 1]
macd = df_macd[0][self.large - 1]
if macd >= (1 + self.up)*(signal):
return "buy"
elif macd <= (1 - self.down)*(signal):
return "sell"
else:
return "hold"
def step(self, price, date):
self.memory.append(price)
if len(self.memory)<self.window_size:
return 0
date = list(map(int, date.split("-")))
month = date[1]
macd_signal = self.get_macd_signal()
        # Buy during November-April; parentheses fix the original precedence
        # bug, where the 'and' bound tighter than the 'or'
        if (month > 10 or month < 5) and macd_signal == "buy":
            return 1
        # Sell during May-October
        if 4 < month < 11 and macd_signal == "sell":
            return -1
# Hold
return 0
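
# A hedged usage sketch: the MACD spans (12/26/9), the thresholds, and the
# synthetic price series below are illustrative values, not settings taken
# from this repository.
if __name__ == '__main__':
    agent = SixMonthCycle_Agent(window_size=30, small=12, large=26,
                                signal=9, up=0.05, down=0.05)
    prices = [100 + 0.5 * i for i in range(60)]
    for day, price in enumerate(prices):
        action = agent.step(price, "2020-%02d-01" % (day % 12 + 1))
        # action is 1 (buy), -1 (sell) or 0 (hold)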
# ===== modu/linear_regression/linear_regression_cost_plot.py | repo: godong9/ml | license: MIT | 485 bytes =====
import tensorflow as tf
import matplotlib.pyplot as plt
X = [1, 2, 3]
Y = [1, 2, 3]
W = tf.placeholder(tf.float32)
hypothesis = X * W
cost = tf.reduce_mean(tf.square(hypothesis - Y))
sess = tf.Session()
sess.run(tf.global_variables_initializer())
W_val = []
cost_val = []
for i in range(-30, 50):
feed_W = i * 0.1
curr_cost, curr_W = sess.run([cost, W], feed_dict={W: feed_W})
W_val.append(curr_W)
cost_val.append(curr_cost)
plt.plot(W_val, cost_val)
plt.show()
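
# The plotted curve is the mean-squared-error cost
#   J(W) = (1/3) * sum_i (W * x_i - y_i)^2   for the pairs (1,1), (2,2), (3,3),
# a parabola in W whose minimum J(1) = 0 sits at W = 1.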
# ===== dyno_pods/test/test_multisampling.py | repo: louisXW/PODS-DYNO | license: MIT | 2,078 bytes =====
"""
.. module:: test_multisampling
:synopsis: Test multisampling strategy
.. moduleauthor:: David Eriksson <dme65@cornell.edu>
"""
from pySOT import Ackley, CandidateDYCORS, GeneticAlgorithm, \
MultiStartGradient, SyncStrategyNoConstraints, \
RBFInterpolant, CubicKernel, LinearTail, \
SymmetricLatinHypercube, MultiSampling
from poap.controller import SerialController
import numpy as np
import os.path
import logging
def main():
if not os.path.exists("./logfiles"):
os.makedirs("logfiles")
if os.path.exists("./logfiles/test_multisampling.log"):
os.remove("./logfiles/test_multisampling.log")
logging.basicConfig(filename="./logfiles/test_multisampling.log",
level=logging.INFO)
print("\nNumber of threads: 1")
print("Maximum number of evaluations: 500")
print("Sampling method: CandidateDYCORS, Genetic Algorithm, Multi-Start Gradient")
print("Experimental design: Latin Hypercube")
print("Surrogate: Cubic RBF")
nthreads = 1
maxeval = 500
nsamples = nthreads
data = Ackley(dim=10)
print(data.info)
# Create a strategy and a controller
sampling_method = [CandidateDYCORS(data=data, numcand=100*data.dim),
GeneticAlgorithm(data=data), MultiStartGradient(data=data)]
controller = SerialController(data.objfunction)
controller.strategy = \
SyncStrategyNoConstraints(
worker_id=0, data=data,
maxeval=maxeval, nsamples=nsamples,
response_surface=RBFInterpolant(kernel=CubicKernel, tail=LinearTail, maxp=maxeval),
exp_design=SymmetricLatinHypercube(dim=data.dim, npts=2*(data.dim + 1)),
sampling_method=MultiSampling(sampling_method, [0, 1, 0, 2]))
result = controller.run()
best, xbest = result.value, result.params[0]
print('Best value: {0}'.format(best))
print('Best solution: {0}\n'.format(
np.array_str(xbest, max_line_width=np.inf,
precision=5, suppress_small=True)))
if __name__ == '__main__':
main()
# ===== play_game.py | repo: sanderland/SelfplayLab | license: MIT | 1,172 bytes | 2 stars =====
import torch
import argparse
from selfplaylab.game.go import CaptureGoState, PixelCaptureGoState, GoState
from selfplaylab.game.gomoku import GoMokuState, GoMokuStateAugmented, TicTacToe, TicTacToeAugmented
from selfplaylab.game.nim import NimState
from selfplaylab.game.othello import OthelloState
from selfplaylab.play import play_game
parser = argparse.ArgumentParser(description="Self-play visualization.")
parser.add_argument("--game", type=str, help="Game to play")
parser.add_argument("--tag", type=str, help="Tag for experiment", default="")
args = parser.parse_args()
game = args.game
if game == "cg":
game_class = CaptureGoState
elif game == "pxcg":
game_class = PixelCaptureGoState
elif game == "nim":
game_class = NimState
elif game == "oth":
game_class = OthelloState
else:
raise Exception("unknown game")
net = game_class.create_net(tag=args.tag)
options = {}
print(f"Loaded net {net.metadata['filename']} on cuda? {net.device}")
temp_fn = lambda mv: 1.0 if mv < 2 else 0.1
with torch.no_grad():
game_states = play_game(
net_evaluator=net.evaluate_sample, game_class=game_class, temperature=temp_fn, verbose=True,
)
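
# Example invocation (--tag is optional; "cg" selects CaptureGoState):
#   python play_game.py --game cg --tag baseline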
# ===== tests/test_version.py | repo: rndazurescript/Coco2CustomVision | license: MIT | 558 bytes =====
import re
def try_parse_int(s, base=10, val=None):
try:
return int(s, base)
except ValueError:
return val
def test_version():
"""Test version string"""
from coco2customvision import __version__
version_parts = re.split("[.-]", __version__)
if __version__ != "UNKNOWN":
assert 3 <= len(version_parts), "must have at least Major.minor.patch"
        assert all(
            try_parse_int(i) is not None for i in version_parts[:2]
        ), f"Version Major.minor must be 2 integers. Received {__version__}"
# ===== Python/Pages/ITProPage.py | repo: hirokundayon/koedo | license: Apache-2.0 | 1,266 bytes | 1 star =====
# -*- coding: utf-8 -*-
from Pages.PageObject import PageObject
import time
class ITProPage(PageObject):
firstHandle = ""
secondHandle = ""
def __init__(self, driver):
PageObject.__init__(self, driver)
def click_picture(self):
self.firstHandle = self.driver.window_handles[0]
picture =\
self.waiting_element_by_xpath("//img[@alt=\"小江戸らぐ\"]")
#self.driver.save_screenshot("C:\\home\\hirofumi\\koedo\\a.jpg")
self.click(picture)
for handle in self.driver.window_handles:
if handle != self.firstHandle:
self.secondHandle = handle
self.driver.switch_to_window(self.secondHandle)
picture =\
self.waiting_element_by_xpath("//img[@src=\"koedlug.jpg\"]")
time.sleep(5)
return self
def quit(self):
self.driver.switch_to_window(self.secondHandle)
self.driver.close()
self.driver.switch_to_window(self.firstHandle)
self.driver.quit()
def click_PC_button(self):
PC_button =\
self.waiting_element_by_xpath("//img[@src=\"/images/n/itpro/2010/leaf/btn_pc.gif\"]")
self.click(PC_button)
return self
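
# A minimal usage sketch (assumes a Selenium WebDriver is available):
#     from selenium import webdriver
#     page = ITProPage(webdriver.Chrome())
#     page.click_picture().click_PC_button()
#     page.quit()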
# ===== Chapter05/5B_MnA/5B_MnAPrediction.py | repo: uyenphuong18406/Hands-On-Artificial-Intelligence-for-Banking | license: MIT | 9,891 bytes | 115 stars =====
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
QUANDLKEY = '<Enter your Quandl APT key here>'
"""
Created on Fri Oct 5 23:24:35 2018
@author: jeff
"""
'''*************************************
#1. Import libraries and define key variables
'''
import pandas as pd
import numpy as np
import quandl
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report,roc_curve, auc,confusion_matrix,f1_score
from sklearn.model_selection import train_test_split
from sklearn import tree
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
import pickle
import graphviz
#KPI keys
quandl.ApiConfig.api_key = QUANDLKEY
'''*************************************
#2. Definition of functions
'''
#2a.Download tickers
def download_tkr(tkr):
record_db_events_gp = pd.DataFrame()
record_db_financials=quandl.get_table('SHARADAR/SF1', calendardate={'gte': '2008-12-31'}, ticker=tkr, dimension='MRY')
record_db_financials['year'] = record_db_financials['reportperiod'].dt.year
record_db_financials['year_1'] = record_db_financials['year']+1
record_db_events=quandl.get_table('SHARADAR/EVENTS', ticker=tkr)
tmp_series = record_db_events['eventcodes'].str.contains('21')
record_db_events= record_db_events[tmp_series]
record_db_events['year'] = record_db_events.date.dt.year
record_db_events= record_db_events.drop(['date'],axis=1)
record_db_events_gp = record_db_events.groupby(['ticker','year'],as_index=False).count()
combined_pd = pd.merge(record_db_financials,record_db_events_gp,how ='left',left_on='year_1',right_on='year')
#convert all events to 1 and NaN
combined_pd.loc[combined_pd['eventcodes']>1,'eventcodes'] = 1
X = record_db_financials.iloc[:,6:-5]
Y = combined_pd.iloc[:,-1]
return combined_pd, X, Y
#tkr = 'AMZN'
#df_tmp = download_tkr(tkr)
#2b.Train tree
def train_tree(X,Y,ind):
print('Decision Tree')
#split the dataset into training set and testing set
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.33, random_state=0)
    min_leaf_size = max(1, int(len(X_train) * 0.01))  # guard against a zero-sized leaf on small datasets
tree_clf = tree.DecisionTreeClassifier(min_samples_leaf=min_leaf_size)
#preprocessing the data
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
#fit the training data to the model
tree_clf.fit(X_train,Y_train)
##metric 1: roc
Y_score_tree = tree_clf.predict(X_test)
fpr, tpr, thresholds = roc_curve(Y_test,Y_score_tree, pos_label=1)
roc_auc = auc(fpr,tpr)
lw=2
plt.figure()
plt.plot(fpr,tpr,color='darkorange',lw=lw,label='ROC curve (area = %0.2f)' %roc_auc)
plt.plot([0,1],[0,1],color='navy',lw=lw,linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic - Decision Tree '+ind)
plt.legend(loc="lower right")
plt.savefig(ind+'_DT.png')
##metric 2: Confusion matrix
Y_pred_tree = tree_clf.predict(X_test)
confusion_matrix_tree = confusion_matrix(Y_test, Y_pred_tree)
print(confusion_matrix_tree)
print(classification_report(Y_test, Y_pred_tree))
#common standard to compare across models
f1_clf = f1_score(Y_test, Y_pred_tree, average='weighted')
##save model
f_tree = open(ind+'_tree_clf.pkl',"wb+")
pickle.dump(tree_clf, f_tree)
f_tree.close()
f_tree_sc = open(ind+'_tree_scaler.pkl',"wb+")
pickle.dump(scaler, f_tree_sc)
f_tree_sc.close()
return tree_clf,f1_clf
##2C Neural Network
#2Ci. Grid search that simulate the performance of different neural network design
def grid_search(X_train,X_test, Y_train,Y_test,num_training_sample):
best_f1 = 0
best_hidden_layers_list = []
best_hidden_layers_tuple = ()
#various depth
for depth in range(1,5):
print('Depth = '+str(depth))
for layer_size in range(1,8):
neuron_cnt = 0
hidden_layers_list = []
i = 0
while i<depth:
hidden_layers_list.append(layer_size)
neuron_cnt += layer_size
i+=1
#pruning - to avoid over-training
if num_training_sample<neuron_cnt:
break
hidden_layers_tuple = tuple(hidden_layers_list)
nn_clf = MLPClassifier(alpha=1e-5,
hidden_layer_sizes=hidden_layers_tuple, random_state=1)
nn_clf.fit(X_train,Y_train)
Y_pred = nn_clf.predict(X_test)
temp_f1 = f1_score(Y_test, Y_pred, average='weighted')
if temp_f1 > best_f1:
best_f1 = temp_f1
best_hidden_layers_list = hidden_layers_list
best_hidden_layers_tuple = hidden_layers_tuple
print(best_hidden_layers_list)
return best_hidden_layers_list,best_hidden_layers_tuple
#2Cii. Train Neural Network
def train_NN(X, Y, ind):
    print('Neural Network')
    #split the dataset into training set and testing set
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.33, random_state=0)
    #preprocessing the data
    scaler = StandardScaler()
    scaler.fit(X_train)
    X_train = scaler.transform(X_train)
    X_test = scaler.transform(X_test)
    num_training_sample = len(X_train)
    best_hidden_layers_list, best_hidden_layers_tuple = grid_search(X_train, X_test, Y_train, Y_test, num_training_sample)
    nn_clf = MLPClassifier(alpha=1e-5,
                           hidden_layer_sizes=best_hidden_layers_tuple, random_state=1)
    #fit the training data to the model
    nn_clf.fit(X_train, Y_train)
    ##metric 1: ROC curve
    Y_score_nn = nn_clf.predict(X_test)
    fpr, tpr, thresholds = roc_curve(Y_test, Y_score_nn, pos_label=1)
    roc_auc = auc(fpr, tpr)
    lw = 2
    plt.figure()
    plt.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
    plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic - Neural Network ' + ind)
    plt.legend(loc="lower right")
    #plt.show()
    plt.savefig(ind + '_NN.png')
    ##metric 2: confusion matrix
    Y_pred_nn = nn_clf.predict(X_test)
    confusion_matrix_nn = confusion_matrix(Y_test, Y_pred_nn)
    print(confusion_matrix_nn)
    print(classification_report(Y_test, Y_pred_nn))
    #weighted F1 as the common standard to compare across models
    #f1_clf = f1_score(Y_test, Y_score_nn, average='binary')
    f1_clf = f1_score(Y_test, Y_score_nn, average='weighted')
    ##save model and scaler
    with open(ind + '_nn_clf_.pkl', 'wb') as f_nn:
        pickle.dump(nn_clf, f_nn)
    with open(ind + '_nn_scaler.pkl', 'wb') as f_nn_sc:
        pickle.dump(scaler, f_nn_sc)
    return nn_clf, f1_clf
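#A minimal reload sketch (filenames follow the pattern used above; 'Energy'
#is a hypothetical industry label): the pickled scaler must be applied
#before the pickled classifier. Kept behind a flag so it never runs here.
RUN_RELOAD_DEMO = False
if RUN_RELOAD_DEMO:
    with open('Energy_nn_clf_.pkl', 'rb') as f_in:
        loaded_clf = pickle.load(f_in)
    with open('Energy_nn_scaler.pkl', 'rb') as f_sc:
        loaded_scaler = pickle.load(f_sc)
    #X_new is a hypothetical feature frame with the training columns
    #Y_hat = loaded_clf.predict(loaded_scaler.transform(X_new))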
'''*************************************
3. Execute the program
3a. Filter the industries in scope
'''
groupby_fld = 'sicsector'
min_size = 30
df_tkr = pd.read_csv('industry_tickers_list.csv')
dict_ind_tkr = {}
f1_list = []
#keep only industries with at least min_size tickers
df_tkr_ind = pd.DataFrame()
df_tkr_ind['cnt'] = df_tkr.groupby(groupby_fld)['ticker'].count()
df_tkr_ind_select = df_tkr_ind[df_tkr_ind['cnt'] >= min_size]
list_scope = list(df_tkr_ind_select.index)
#collect the tickers belonging to each in-scope industry
for index, row in df_tkr.iterrows():
    ind = row[groupby_fld]
    tkr = row['ticker']
    if ind in list_scope:
        if ind in dict_ind_tkr:
            dict_ind_tkr[ind].append(tkr)
        else:
            dict_ind_tkr[ind] = [tkr]
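#An equivalent, more idiomatic construction of the same mapping via pandas
#groupby (a sketch for comparison; dict_ind_tkr_alt is a throwaway name):
dict_ind_tkr_alt = (df_tkr[df_tkr[groupby_fld].isin(list_scope)]
                    .groupby(groupby_fld)['ticker'].apply(list).to_dict())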
#loop through the dictionary - one industry at a time
for ind, list_tkr in dict_ind_tkr.items():
    df_X = pd.DataFrame({})
    df_Y = pd.DataFrame({})
    print(ind)
    #go through the ticker list and download data from the source
    #loop through tickers from that industry
    for tkr in list_tkr:
        print(tkr)
        try:
            df_tmp, X_tmp, Y_tmp = download_tkr(tkr)
        except Exception:
            continue
        if len(df_X) == 0:
            #df_all = df_tmp
            df_X = X_tmp
            df_Y = Y_tmp
        else:
            #df_all = pd.concat([df_all,df_tmp])
            df_X = pd.concat([df_X, X_tmp])
            df_Y = pd.concat([df_Y, Y_tmp])
    '''
    *************************************
    3b. Prepare features for clustering for the industry
    '''
    #convert features and labels to float
    df_X = df_X.astype(float)
    df_Y = df_Y.astype(float)
    #cap infinities and fill missing values with zero
    df_X = df_X.replace([np.inf], 999999999)
    df_X = df_X.fillna(0)
    df_Y = df_Y.fillna(0)
    #neural network
    nn_clf, f1_score_temp = train_NN(df_X, df_Y, ind)
    f1_list.append(f1_score_temp)
    nn_clf.get_params()
    #decision tree
    try:
        tree_clf, f1_score_temp = train_tree(df_X, df_Y, ind)
    except Exception:
        continue
    f1_list.append(f1_score_temp)
    tree_clf.get_params()
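    #A hedged refinement (sketch only, not wired into the pipeline): f1_list
    #stores bare scores, so NN and tree results are distinguishable only by
    #position; a labelled record such as the line below would make the final
    #printout self-describing. f1_records is a hypothetical name.
    #f1_records.append({'industry': ind, 'model': 'tree', 'f1': f1_score_temp})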
    '''
    3c. Visualize the result
    '''
    fields_list = df_tmp.columns
    print('********************')
    print('f1 of the models')
    print(f1_list)
    print('********************')
    #for visualization of the decision tree
    x_feature_name = fields_list[6:-8]
    y_target_name = fields_list[-1]
    d_tree_out_file = 'decision_tree_' + ind
    dot_data = tree.export_graphviz(tree_clf, out_file=None,
                                    feature_names=x_feature_name,
                                    class_names=y_target_name,
                                    filled=True, rounded=True,
                                    special_characters=True)
    graph = graphviz.Source(dot_data)
    graph.render(d_tree_out_file)
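    #If graphviz is unavailable, sklearn's built-in plot_tree (available in
    #scikit-learn >= 0.21) renders the same tree with matplotlib alone; the
    #class labels below are assumptions for illustration. Sketch only:
    #fig = plt.figure(figsize=(20, 10))
    #tree.plot_tree(tree_clf, feature_names=list(x_feature_name),
    #               class_names=['no_event', 'event'], filled=True)
    #fig.savefig(d_tree_out_file + '.png')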