| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
hexsha: 95128ff73c5b19e12278311e5737397a3c5afe40
size: 6,943
ext: py
lang: Python
max_stars_repo_path: infrastructure/cdn-in-a-box/ort/traffic_ops_ort/utils.py
max_stars_repo_name: hbeatty/incubator-trafficcontrol
max_stars_repo_head_hexsha: 13ed991531778c60298eb8f532b2a4862f7cb67b
max_stars_repo_licenses: ["MIT", "Apache-2.0", "BSD-2-Clause", "BSD-3-Clause"]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2021-04-11T16:55:27.000Z
max_stars_repo_stars_event_max_datetime: 2021-04-11T16:55:27.000Z
max_issues_repo_path: infrastructure/cdn-in-a-box/ort/traffic_ops_ort/utils.py
max_issues_repo_name: hbeatty/incubator-trafficcontrol
max_issues_repo_head_hexsha: 13ed991531778c60298eb8f532b2a4862f7cb67b
max_issues_repo_licenses: ["MIT", "Apache-2.0", "BSD-2-Clause", "BSD-3-Clause"]
max_issues_count: 3
max_issues_repo_issues_event_min_datetime: 2021-03-12T22:35:02.000Z
max_issues_repo_issues_event_max_datetime: 2021-12-09T23:00:11.000Z
max_forks_repo_path: infrastructure/cdn-in-a-box/ort/traffic_ops_ort/utils.py
max_forks_repo_name: hbeatty/incubator-trafficcontrol
max_forks_repo_head_hexsha: 13ed991531778c60298eb8f532b2a4862f7cb67b
max_forks_repo_licenses: ["MIT", "Apache-2.0", "BSD-2-Clause", "BSD-3-Clause"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains miscellaneous utilities, typically dealing with string
manipulation or user input/output
"""
import logging
from sys import stderr
import requests
import typing
def getYesNoResponse(prmpt:str, default:str = None) -> bool:
"""
Utility function to get an interactive yes/no response to the prompt `prmpt`
:param prmpt: The prompt to display to users
:param default: The default response; should be one of ``'y'``, ``"yes"``, ``'n'`` or ``"no"``
(case insensitive)
:raises AttributeError: if 'prmpt' and/or 'default' is/are not strings
:returns: the parsed response as a boolean
"""
if default:
prmpt = prmpt.rstrip().rstrip(':') + '['+default+"]:"
while True:
choice = input(prmpt).lower()
if choice in {'y', 'yes'}:
return True
if choice in {'n', 'no'}:
return False
if not choice and default is not None:
return default.lower() in {'y', 'yes'}
print("Please enter a yes/no response.", file=stderr)
def getTextResponse(uri:str, cookies:dict = None, verify:bool = True) -> str:
"""
Gets the plaintext response body of an HTTP ``GET`` request
:param uri: The full path to a resource for the request
:param cookies: An optional dictionary of cookie names mapped to values
:param verify: If :const:`True`, the SSL keys used to communicate with the full URI will be
verified
:raises ConnectionError: when an error occurs trying to communicate with the server
:raises ValueError: if the server's response cannot be interpreted as a UTF-8 string - e.g.
when the response body is raw binary data but the response headers claim it's UTF-16
"""
logging.info("Getting plaintext response via 'HTTP GET %s'", uri)
response = requests.get(uri, cookies=cookies, verify=verify)
if response.status_code not in range(200, 300):
logging.warning("Status code (%d) seems to indicate failure!", response.status_code)
logging.debug("Response: %r\n%r", response.headers, response.content)
return response.text
def getJSONResponse(uri:str, cookies:dict = None, verify:bool = True) -> dict:
"""
Retrieves a JSON object from some HTTP API
:param uri: The URI to fetch
:param cookies: A dictionary of cookie names mapped to values
:param verify: If this is :const:`True`, the SSL keys will be verified during handshakes with
'https' URIs
:returns: The decoded JSON object
:raises ConnectionError: when an error occurs trying to communicate with the server
:raises ValueError: when the request completes successfully, but the response body
does not represent a JSON-encoded object.
"""
logging.info("Getting JSON response via 'HTTP GET %s", uri)
try:
response = requests.get(uri, cookies=cookies, verify=verify)
except (ValueError, ConnectionError, requests.exceptions.RequestException) as e:
raise ConnectionError from e
if response.status_code not in range(200, 300):
logging.warning("Status code (%d) seems to indicate failure!", response.status_code)
logging.debug("Response: %r\n%r", response.headers, response.content)
return response.json()
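# Hypothetical usage sketch (the URI and cookie name are illustrative only):
#   ping = getJSONResponse("https://trafficops.example/api/ping",
#                          cookies={"session": "..."}, verify=False)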
def parse_multipart(raw: str) -> typing.List[typing.Tuple[str, str]]:
"""
Parses a multipart/mixed-type payload and returns each contiguous chunk.
:param raw: The raw payload - without any HTTP status line.
:returns: A list where each element is a tuple where the first element is a chunk of the message. All headers are discarded except 'Path', which is the second element of each tuple if it was found in the chunk.
:raises: ValueError if the raw payload cannot be parsed as a multipart/mixed-type message.
>>> testdata = '''MIME-Version: 1.0\\r
... Content-Type: multipart/mixed; boundary="test"\\r
... \\r
... --test\\r
... Content-Type: text/plain; charset=us-ascii\\r
... Path: /path/to/ats/root/directory/etc/trafficserver/fname\\r
... \\r
... # A fake testing file that wasn't generated at all on some date
... CONFIG proxy.config.way.too.many.period.separated.words INT 1
...
... --test\\r
... Content-Type: text/plain; charset=utf8\\r
... Path: /path/to/ats/root/directory/etc/trafficserver/othername\\r
... \\r
... # The same header again
... CONFIG proxy.config.the.same.insane.chain.of.words.again.but.the.last.one.is.different INT 0
...
... --test--\\r
... '''
>>> output = parse_multipart(testdata)
>>> print(output[0][0])
# A fake testing file that wasn't generated at all on some date
CONFIG proxy.config.way.too.many.period.separated.words INT 1
>>> output[0][1]
'/path/to/ats/root/directory/etc/trafficserver/fname'
>>> print(output[1][0])
# The same header again
CONFIG proxy.config.the.same.insane.chain.of.words.again.but.the.last.one.is.different INT 0
>>> output[1][1]
'/path/to/ats/root/directory/etc/trafficserver/othername'
"""
try:
hdr_index = raw.index("\r\n\r\n")
headers = {line.split(':')[0].casefold(): line.split(':')[1] for line in raw[:hdr_index].splitlines()}
except (IndexError, ValueError) as e:
raise ValueError("Invalid or corrupt multipart header") from e
ctype = headers.get("content-type")
if not ctype:
raise ValueError("Message is missing 'Content-Type' header")
try:
param_index = ctype.index(";")
params = {param.split('=')[0].strip(): param.split('=')[1].strip() for param in ctype[param_index+1:].split(';')}
except (IndexError, ValueError) as e:
raise ValueError("Invalid or corrupt 'Content-Type' header") from e
boundary = params.get("boundary", "").strip('"\'')
if not boundary:
raise ValueError("'Content-Type' header missing 'boundary' parameter")
chunks = raw.split(f"--{boundary}")[1:] # ignore prologue
if chunks[-1].strip() != "--":
logging.warning("Final chunk appears invalid - possible bad message payload")
else:
chunks = chunks[:-1]
ret = []
for i, chunk in enumerate(chunks):
try:
hdr_index = chunk.index("\r\n\r\n")
headers = {line.split(':')[0].casefold(): line.split(':')[1] for line in chunk[:hdr_index].splitlines() if line}
except (IndexError, ValueError) as e:
logging.debug("chunk: %s", chunk)
raise ValueError(f"Chunk #{i} poorly formed") from e
ret.append((chunk[hdr_index+4:].replace("\r","").strip(), headers.get("path").strip()))
return ret
avg_line_length: 38.572222 | max_line_length: 211 | alphanum_fraction: 0.715109
quality signals: 1,039 | 6,943 | 4.766121 | 0.297401 | 0.015549 | 0.01454 | 0.010501 | 0.353998 | 0.340469 | 0.331583 | 0.318659 | 0.263732 | 0.246365 | 0 | 0.007644 | 0.152096 | 6,943 | 179 | 212 | 38.78771 | 0.833532 | 0.55826 | 0 | 0.230769 | 0 | 0 | 0.190796 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.061538 | false | 0 | 0.061538 | 0 | 0.215385 | 0.015385 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective: 1 | hits: 0
hexsha: 9514c9647a31509619c43b943b315ef73a1f481a
size: 1,192
ext: py
lang: Python
max_stars_repo_path: tests/test_hw02.py
max_stars_repo_name: timm/sinless-swe
max_stars_repo_head_hexsha: b331b9bf4d27fdf357ce8a5ce54f9858103fd64f
max_stars_repo_licenses: ["MIT"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: tests/test_hw02.py
max_issues_repo_name: timm/sinless-swe
max_issues_repo_head_hexsha: b331b9bf4d27fdf357ce8a5ce54f9858103fd64f
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tests/test_hw02.py
max_forks_repo_name: timm/sinless-swe
max_forks_repo_head_hexsha: b331b9bf4d27fdf357ce8a5ce54f9858103fd64f
max_forks_repo_licenses: ["MIT"]
max_forks_count: 2
max_forks_repo_forks_event_min_datetime: 2021-08-29T19:26:19.000Z
max_forks_repo_forks_event_max_datetime: 2021-09-20T17:44:27.000Z
content:
import os
import sys
sys.path.append(os.path.realpath(os.path.dirname(__file__)+"/.."))
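# The line above puts the repository root (the parent of tests/) on sys.path
# so the src.hw2 import below resolves regardless of the working directory.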
from src.hw2 import csv_reader
def testCsvReader():
expectedResult = [['outlook', 'Temp', '?Humidity', 'windy', 'Wins+', 'Play-'],
['sunny', 85, 85, 'FALSE', 10, 20],
['sunny', 80, 90, 'TRUE', 12, 40],
['overcast', 83, 86, 'FALSE', 40, 40],
['rainy', 70, 96, 'FALSE', 40, 50],
['rainy', 65, 70, 'TRUE', 4, 10],
['overcast', 64, 65, 'TRUE', 30, 60],
['sunny', 72, 95, 'FALSE', 7, 20],
['sunny', 69, 70, 'FALSE', 70, 70],
['rainy', 75, 80, 'FALSE', 80, 40],
['sunny', 75, 70, 'TRUE', 30, 50],
['overcast', 72, 90, 'TRUE', 60, 50],
['overcast', 81, 75, 'FALSE', 30, 60],
['rainy', 71, 91, 'TRUE', 50, 40]]
dataPath = os.path.dirname(os.path.abspath(__file__))
dataPath = dataPath[:dataPath.rindex("/")]
# Build the CSV path from the repository root so the test does not depend on
# the current working directory.
result = csv_reader(dataPath + "/data/windy.csv")
for i,row in enumerate(result):
assert row == expectedResult[i]
avg_line_length: 45.846154 | max_line_length: 82 | alphanum_fraction: 0.452181
quality signals: 135 | 1,192 | 3.918519 | 0.466667 | 0.045369 | 0.049149 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.132732 | 0.348993 | 1,192 | 25 | 83 | 47.68 | 0.548969 | 0 | 0 | 0 | 0 | 0 | 0.158557 | 0 | 0 | 0 | 0 | 0 | 0.041667 | 1 | 0.041667 | false | 0 | 0.125 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective: 1 | hits: 0
hexsha: 9514f668db331c946ecbf660cfa6375f54adec5b
size: 2,462
ext: py
lang: Python
max_stars_repo_path: hyperdeck.py
max_stars_repo_name: FlantasticDan/hyperdeck-replay
max_stars_repo_head_hexsha: 5d5a62c9342c4e552e6a2d44dbe85cb3dba49f28
max_stars_repo_licenses: ["MIT"]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2021-09-06T15:02:34.000Z
max_stars_repo_stars_event_max_datetime: 2021-09-06T15:02:34.000Z
max_issues_repo_path: hyperdeck.py
max_issues_repo_name: FlantasticDan/hyperdeck-replay
max_issues_repo_head_hexsha: 5d5a62c9342c4e552e6a2d44dbe85cb3dba49f28
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: hyperdeck.py
max_forks_repo_name: FlantasticDan/hyperdeck-replay
max_forks_repo_head_hexsha: 5d5a62c9342c4e552e6a2d44dbe85cb3dba49f28
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
from telnetlib import Telnet
from threading import Thread
class Hyperdeck:
def __init__(self, ip_address, id) -> None:
self.deck = Telnet(ip_address, 9993)
self.id = id
self.thread = Thread(target=self.listener)
self.thread.start()
def listener(self):
while True:
message = self.deck.read_some()
print(f'//{self.id}//')
print(message)
def identify_standard_command(self, command):
if command == 'live':
return 'preview: enable: true'
elif command == 'clip':
return 'preview: enable: false\r\nplayrange clear'
elif command == 'record':
return 'record'
elif command == 'play':
return 'play: single clip: true'
elif command == 'stop':
return 'stop'
elif command == 'previous':
return 'goto: clip id: -1'
elif command == 'next':
return 'goto: clip id: +1'
elif command == 'beginning':
return 'goto: clip: start'
elif command == 'end':
return 'goto: clip: end'
def identify_granular_command(self, command, direction):
if direction == 'forward':
sign = '+'
else:
sign = '-'
if command == '10%':
return f'play: single clip: true speed: {sign}10'
elif command == '25%':
return f'play: single clip: true speed: {sign}25'
elif command == '50%':
return f'play: single clip: true speed: {sign}50'
elif command == '75%':
return f'play: single clip: true speed: {sign}75'
elif command == '10s':
return f'jog: timecode: {sign}00:00:10:00'
elif command == '5s':
return f'jog: timecode: {sign}00:00:05:00'
elif command == '1s':
return f'jog: timecode: {sign}00:00:01:00'
elif command == '1f':
return f'jog: timecode: {sign}00:00:00:01'
def send_standard_command(self, command):
identified_command = self.identify_standard_command(command)
query = bytes(f'{identified_command}\r\n', 'ascii')
self.deck.write(query)
def send_granular_command(self, command, direction):
identified_command = self.identify_granular_command(command, direction)
query = bytes(f'{identified_command}\r\n', 'ascii')
self.deck.write(query)
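# Minimal usage sketch (hypothetical deck address; the constructor above
# dials TCP port 9993, the HyperDeck text-protocol port):
#   deck = Hyperdeck("192.168.0.50", id=1)
#   deck.send_standard_command("record")          # sends "record\r\n"
#   deck.send_granular_command("10s", "forward")  # sends "jog: timecode: +00:00:10:00\r\n"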
avg_line_length: 34.676056 | max_line_length: 79 | alphanum_fraction: 0.553209
quality signals: 283 | 2,462 | 4.731449 | 0.272085 | 0.123226 | 0.052278 | 0.067214 | 0.352502 | 0.300224 | 0.300224 | 0.180732 | 0.079164 | 0.079164 | 0 | 0.035308 | 0.321284 | 2,462 | 71 | 80 | 34.676056 | 0.766008 | 0 | 0 | 0.065574 | 0 | 0 | 0.240357 | 0.019488 | 0 | 0 | 0 | 0 | 0 | 1 | 0.098361 | false | 0 | 0.032787 | 0 | 0.42623 | 0.032787 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective: 1 | hits: 0
hexsha: 951662a92b08b48e3775881d06dfdde6053f3486
size: 453
ext: py
lang: Python
max_stars_repo_path: leetcode/weekly154/balloons.py
max_stars_repo_name: jan25/code_sorted
max_stars_repo_head_hexsha: f405fd0898f72eb3d5428f9e10aefb4a009d5089
max_stars_repo_licenses: ["Unlicense"]
max_stars_count: 2
max_stars_repo_stars_event_min_datetime: 2018-01-18T11:01:36.000Z
max_stars_repo_stars_event_max_datetime: 2021-12-20T18:14:48.000Z
max_issues_repo_path: leetcode/weekly154/balloons.py
max_issues_repo_name: jan25/code_sorted
max_issues_repo_head_hexsha: f405fd0898f72eb3d5428f9e10aefb4a009d5089
max_issues_repo_licenses: ["Unlicense"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: leetcode/weekly154/balloons.py
max_forks_repo_name: jan25/code_sorted
max_forks_repo_head_hexsha: f405fd0898f72eb3d5428f9e10aefb4a009d5089
max_forks_repo_licenses: ["Unlicense"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
'''
https://leetcode.com/contest/weekly-contest-154/problems/maximum-number-of-balloons/
'''
class Solution:
def maxNumberOfBalloons(self, text: str) -> int:
m = {}
for c in text:
if c not in m: m[c] = 0
m[c] += 1
ans = len(text)
for c in 'lo':
if c in m: m[c] //= 2
for c in 'balon':
if c in m: ans = min(ans, m[c])
else: ans = 0
return ans
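# Worked example: for text = "loonbalxballpoon" the counts are
# {b: 2, a: 2, l: 4, o: 4, n: 2}; halving 'l' and 'o' leaves 2 of each, so the
# minimum over "balon" is 2 - the text spells "balloon" twice.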
avg_line_length: 28.3125 | max_line_length: 84 | alphanum_fraction: 0.479029
quality signals: 68 | 453 | 3.191176 | 0.514706 | 0.069124 | 0.082949 | 0.046083 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024911 | 0.379691 | 453 | 16 | 85 | 28.3125 | 0.747331 | 0.18543 | 0 | 0 | 0 | 0 | 0.019337 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective: 1 | hits: 0
hexsha: 9518a93eb1a74edc2a091b88692ed0896329bfe9
size: 38,343
ext: py
lang: Python
max_stars_repo_path: fraudbot.py
max_stars_repo_name: DocGrishka/tetstsss
max_stars_repo_head_hexsha: 9e594333306e6ea8c13f0c81aa5ccb05bc7e9e5e
max_stars_repo_licenses: ["MIT"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: fraudbot.py
max_issues_repo_name: DocGrishka/tetstsss
max_issues_repo_head_hexsha: 9e594333306e6ea8c13f0c81aa5ccb05bc7e9e5e
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: fraudbot.py
max_forks_repo_name: DocGrishka/tetstsss
max_forks_repo_head_hexsha: 9e594333306e6ea8c13f0c81aa5ccb05bc7e9e5e
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
import discord
import sqlite3
import random
import requests
import pymorphy2
from itertools import product
# Database storing earned points and the bot's current relationship with each
# user - whether they are playing something (and what), or just chatting.
class Bnc:
def __init__(self):
random.seed(self.generate_answer())
self.attempt = self.k = 0
# Build the list of every possible number.
self.everything = ["".join(x) for x in product('0123456789', repeat=4)
if len(set(x)) == len(x)]
self.answer = self.generate_answer()
# This way we also shuffle all the numbers; besides, it is easier to remove them from this collection.
self.guess_space = set(self.everything)
# The bot's guess history is kept here.
self.historys = []
# And the player's guess history is kept here.
self.history = []
def is_compatible(self, guess):
# Check whether this number is consistent with all previous attempts.
return all(self.bulls_n_cows(guess, previous_guess) == (bulls, cows)
for previous_guess, bulls, cows in self.historys)
@staticmethod
# Returns bulls and cows for the two numbers being compared.
def bulls_n_cows(attempt, answer):
bulls = sum(1 for x, y in zip(attempt, answer) if x == y)
cows = len(set(attempt) & set(answer)) - int(bulls)
return bulls, cows
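# Worked example (matching the rules text sent to players further below):
# bulls_n_cows("8536", "6573") == (1, 2) - one bull (the 5 matches in place)
# and two cows (3 and 6 appear at other positions).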
@staticmethod
def bulls_n_cows_morph(bulls, cows):
# Returns bulls and cows in a form more convenient to show the player.
morph = pymorphy2.MorphAnalyzer()
cows = str(cows) + ' ' + morph.parse('корова')[0].make_agree_with_number(int(cows)).word
bulls = str(bulls) + ' ' + morph.parse('бык')[0].make_agree_with_number(int(bulls)).word
return bulls, cows
@staticmethod
# Generates a number.
def generate_answer():
n = [i for i in range(10)]
number = []
for _ in range(4):
a = n.pop(random.choice(range(len(n))))
number.append(str(a))
return ''.join(number)
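# The cheat() method below re-picks the bot's "answer" after a player guess:
# every candidate consistent with the recorded history starts from a base
# score, loses 3 points per bull and 1 per cow it would reveal for the current
# guess, and a further 5.1 for a complete miss; the best-scoring candidate
# becomes the new answer.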
def cheat(self, player_try):
max_score = 0
best_answer = self.answer
for new_answer in self.everything:
score = 12.0
error = True
while error:
if self.history:
for i in self.history:
if self.bulls_n_cows(i[0], new_answer) != (i[1], i[2]):  # bulls_n_cows returns a tuple
score = 0
error = False
break
error = False
else:
break
bulls, cows = self.bulls_n_cows(new_answer, player_try)
score -= bulls * 3 + cows
if bulls + cows == 0:
score -= 5.1
if max_score < score:
best_answer = new_answer
max_score = score
return best_answer
class Fraudbot(discord.Client):
def __init__(self, **options):
super().__init__(**options)
# The database stores user data: user_id, points and state. The first is the
# identifier, the second is points, the third is what the bot is currently doing with that player.
self.con = sqlite3.connect("users.db")
# All available games, formatted as: command that starts the game - description of the game
games = '/быки и коровы - математическая игра, в двух изданиях: в одиночку и против бота\n' \
'/крестики-нолики - классические крестики-нолики с 3 уровнями сложности\n' \
'/сапер - классический сапер, размер поля варьируется от 5 на 5, до 26 на 26 клеток\n' \
'/камень-ножницы-бумага - классические... камень-ножницы-бумага!\n' \
'/кости - вы делаете ставки на сумму выброшенных ботом костей\n\n' \
'Более подробные правила игр описаны внутри каждой из них. Пусть Фортуна будет благосклонна' \
' к вам!'
# The lookup table of canned replies to various phrases.
self.dialog_base = {'/игры': 'Вот список моих игр: \n' + games,
'/привет': 'Здравствуйте! Я Fraudbot. Я представляю математические игры, то есть игры,'
' где используется математическое мышление. Команда "/игры" -- '
'здесь описаны мои игры и команды для их вызова.\nКоманда "/помощь" -- '
'используйте ее, если возникнут вопросы или проблемы.',
'/помощь': 'Если у вас возник вопрос, проблема, или у вас есть какая-то идея'
' -- пишите на адрес fraudbot.help@mail.ru'}
self.commands = ['/помощь', '/игры', '/привет'] + [g.split(' - ')[0] for g in games.split('\n')]
# After a restart the bot has to warn users that all their dialogs were terminated.
self.reconnect = {}
async def on_message(self, message):
# Don't let the bot answer itself.
if message.author == self.user:
return
# user_gambler is a Member object, used for the test in the check(m) function.
user_gambler = message.author
# user_player is the user's identifier, used to address them and to store them in the database.
user_player = str(user_gambler).replace('#', '')
# user_channel is the channel where the conversation was started.
user_channel = message.channel
# The database stores the guild name and the channel. If the user talks to the
# bot in direct messages there is no guild, so only the channel is recorded.
try:
user_chan_guild = str(user_channel.guild.id) + str(user_channel.id)
except AttributeError:
user_chan_guild = str(user_channel.id)
# Stops all interaction with the bot on command.
if message.content == '/стоп':
await self.db_edit(user_player, 'empty')
await message.channel.send(user_player + ", вы прервали все взаимодействия с ботом.")
# If the bot was started for the first time, or restarted
if user_player in self.reconnect and self.reconnect[user_player]:
# The first condition checks that the user has talked to the bot before
await message.channel.send(f"Извините, {user_player}, произошел перезапуск бота. Приносим извинения"
f" за причиненные неудобства. Все диалоги были досрочно прекращены.")
# If the user is not in the database
if self.user_status(user_player) == 'None':
# Greet the new user and add them to the database. The insert happens automatically,
await message.channel.send(f'Приветствую, {user_player}! Я Fraudbot и у меня 3 основных команды:\n\t/'
f'привет\t|\t/игры\t|\t/помощь\nВы можете отправить любую из них. Более '
f'подробное приветствие уже отправлено вам в личные сообщения.')
# Also send a greeting to their direct-message channel.
await self.pm_greet(user_gambler)
# together with the status change at the end of this function. But the user's
# first message may itself already be a command, so the status is changed before the command check.
await self.db_edit(user_player, 'empty')
# If the user is "free" of our games and dialogs
if self.user_status(user_player) == 'empty':
for i in self.dialog_base:
if message.content == i:
await message.channel.send(self.dialog_base[i])
# If the player is not "free" and writes from a different channel, tell them
# so and do not let a second session start.
else:
# Also check that they did not simply write in another chat without addressing us (check for one of our commands).
if self.user_status(user_player, get_channel=True) != "None" and user_chan_guild != \
self.user_status(user_player, get_channel=True) and message.content in self.commands:
await message.channel.send(user_player + ', вы уже ведете диалог с ботом на другом канале.'
' Завершите его, или прервите командой "/стоп".')
# Do not let another loop start, even if the command came from the same server
# where the user is "busy".
return
def check(m):
# Verify the message really comes from our player and that they did not just
# press Enter; this also prevents confusion between multiple channels.
return len(m.content) != 0 and m.author == user_gambler and m.channel == user_channel
# Launching the "Bulls and Cows" game
if message.content == '/быки и коровы':
await self.db_edit(message.author.name + message.author.discriminator, 'bnc', user_chan_guild)
# This is needed to track messages from this particular user
await message.channel.send('Хорошо, ' + user_player + '!\nУгадывающий называет число, а '
'загадывающий специальным образом отвечает, '
'сколько цифр совпало с ответом.\nЕсли в назван'
'ном числе цифра какого-то раз'
'ряда совпала с цифрой в том же разряде правил'
'ьного ответа, '
'это называется "быком". Если указанная цифра '
'есть в ответе, но на неверной'
' позиции, это "корова". Загадывающий отвечает,'
' сколько "быков" и "коров" '
'в числе угадывающего.\nПример -- числа\n8536\n'
'6573\nУ них 1 "бык" (это цифра 5) и 2 "коровы"'
' (это цифры 3 и 6).\n\n'
'Вы собираетесь просто отгадывать; играть'
' против бота(одновременно загадывать свое число '
'и отгадывать его);'
' или вы не собираетесь играть?\nЧтобы ответи'
'ть, введите'
' один из следующих вариантов: '
' 1 | 2 | /стоп\n'
'\nЕсли вы '
'пожелаете прекратить игру, то в любой'
' момент введите команду "/стоп"')
async def bnc_user_input(history=None):
# Player input for the Bulls and Cows game
user_try = await self.wait_for('message', check=check)
user_try = user_try.content
# Digit combinations that start with 0 live here.
zero_digitalis = ['0' + str(digital) for digital in range(100, 1000)]
while user_try != '/стоп' and (len(set(list(user_try))) != 4 or user_try not in
(zero_digitalis + [str(d) for d in range(1000, 10000)])):
if history is not None and user_try == '/история':
history_read = ''
for p in history:
b, c = Bnc.bulls_n_cows_morph(p[1], p[2])
# A split('\n') cannot be written directly inside an f-string expression
# (backslashes are not allowed there), so a variable holding '\n' is used as the argument.
delimiter = '\n'
history_read += f'\nПопытка {str(len(history_read.split(delimiter)))}.' \
f' Ваше число {str(p[0])} -- {b} и {c}.'
await message.channel.send(user_player + ', это история ваших попыток.' + history_read)
await message.channel.send(user_player + ', введите четырехзначное число'
' с неповторяющимися цифрами или команду "/стоп",'
' чтобы прекратить игру.')
user_try = await self.wait_for('message', check=check)
user_try = user_try.content
return user_try
choice = await self.wait_for('message', check=check)
while choice.content not in ('1', '2', '/стоп'):
await message.channel.send(user_player + ', чтобы ответить,'
' введите один из следующих вариантов: \n1\n2\n/стоп')
choice = await self.wait_for('message', check=check)
if choice.content == '/стоп':
# The player declined to play. Their status changes automatically at the end of the game block.
pass
elif choice.content == '1':
# Generate a number and report bulls and cows until the player wins
answer = Bnc.generate_answer()
await message.channel.send('Вы в одиночной игре, ' + user_player + '! Бот уже загадал число,'
' попробуйте угадать его.'
' Введите четырехзначное число'
' с неповторяющимися цифрами.')
win = False
number = 1
user_input = await bnc_user_input()
# attempt counter
while not win:
if user_input == '/стоп':
break
bulls_count, cows_count = Bnc.bulls_n_cows(user_input, answer)
bulls, cows = Bnc.bulls_n_cows_morph(bulls_count, cows_count)
await message.channel.send(user_player + f"\n{number} попытка. Ваше число {user_input}."
f" У вас {bulls} и {cows}.")
if bulls_count == 4:
win = True
break
else:
await message.channel.send('Введите четырехзначное число с неповторяющимися цифрами.')
user_input = await bnc_user_input()
number += 1
if win:
morph = pymorphy2.MorphAnalyzer()
await message.channel.send('Невероятная победа, ' + user_player + '! Вы сделали это'
' всего за ' + str(number)
+ ' ' +
morph.parse('попытку')[0].make_agree_with_number(number).word + '.')
else:
await message.channel.send(user_player + ', вы играете против бота. Для того, чтобы решить,'
' кто будет ходить первым, бот использует бинарную'
' монетку. Выберите 0 или 1.\nВо время вашего хода также'
' будет доступна команда "/история", эта команда покажет'
' все ваши попытки и ответы соперника.')
# Decides who moves first.
bin_coin = str(random.choice((0, 1)))
choice = await self.wait_for('message', check=check)
while choice.content not in ('1', '0', '/стоп'):
await message.channel.send(user_player + ', выберите\n0\tили\t1\n Для прекращения игры '
'напишите команду "/стоп"')
choice = await self.wait_for('message', check=check)
# A Bulls and Cows object; the game against the bot uses all of its methods.
game = Bnc()
# 0 means the game is in progress, 1 that the player won, 2 that the bot won,
# -1 that the game was aborted because the player cheated or stopped it.
playing = 0
# True when it is the player's turn
player_turn = False
# keeps count of the player's attempts
if choice.content == '/стоп':
playing = -1
elif choice.content == bin_coin:
player_turn = True
await message.channel.send('Вы угадали, ' + user_player + '.')
else:
await message.channel.send('Вы не угадали, ' + user_player + '. ')
# The game lasts until it is stopped by command or one side wins
while playing == 0:
if player_turn:
await message.channel.send(user_player + ', введите четырехзначное число '
'с неповторяющимися цифрами. Также вы можете'
' ввести команду "/история".')
user_input = await bnc_user_input(history=game.history)
if user_input == '/стоп':
playing = -1
break
bulls_count, cows_count = game.bulls_n_cows(user_input, game.answer)
# Count the bulls and cows and, if they meet this condition, regenerate the
# answer so it stays consistent with the history of attempts.
if bulls_count >= 2 or cows_count >= 3 or bulls_count + cows_count in (4, 0):
# cheat() returns the re-picked answer; keep it.
game.answer = game.cheat(user_input)
bulls_count, cows_count = game.bulls_n_cows(user_input, game.answer)
# Record the attempt and its results in the history
game.history.append([user_input, bulls_count, cows_count])
bulls, cows = game.bulls_n_cows_morph(bulls_count, cows_count)
await message.channel.send(user_player + f"\nВаша {len(game.history)} попытка. Ваше число"
f" {user_input}. У вас {bulls} и {cows}.")
if bulls_count == 4:
# the player won
await message.channel.send('Вы победили, ' + user_player + '! Я загадал число '
+ str(game.answer))
playing = 1
player_turn = False
else:
guess = None
while True:
if len(game.guess_space) == 0:
await message.channel.send(user_player + ', вы попытались обмануть бота. '
'Вы проиграли.')
playing = -1
break
guess = random.choice(list(game.guess_space))
game.guess_space.remove(guess)
if game.is_compatible(guess):
break
# If the bot detected cheating, abort the game
if playing != 0:
break
await message.channel.send(user_player + ', я думаю, что вы загадали число '
+ str(guess) + '\nВведите через пробел количество быков и коров.'
' (например -- 0 2)')
bulls_n_cows = await self.wait_for('message', check=check)
bulls_n_cows = bulls_n_cows.content.split(' ')
while len(bulls_n_cows) != 2 or not all(j in [str(d) for d in range(0, 5)]
for j in bulls_n_cows) \
or sum([int(c) for c in bulls_n_cows]) > 4:
if bulls_n_cows == ['/стоп']:
playing = -1
break
await message.channel.send(user_player + ', введите через пробел количество'
' "быков" и "коров".\nЕсли в названном числе '
'цифра какого-то разряда совпала с цифрой'
' в том же разряде правильного ответа, эт'
'о называется "быком". Если указанная циф'
'ра есть в ответе, но на неверной позиции,'
' это "корова". Пример -- у чисел 1234 и 5631 '
' 1 "бык" (это цифра 3) и 1 "корова"'
' (это цифра 1). Сумма "быков" и "коров" не может'
' быть больше 4.')
bulls_n_cows = await self.wait_for('message', check=check)
bulls_n_cows = bulls_n_cows.content.split(' ')
# This check has to be duplicated because the bot's turn can be aborted in
# two ways: the player cheated, or the player stopped the game. In both cases
# the game must end immediately.
if playing != 0:
break
game.historys.append((guess, int(bulls_n_cows[0]), int(bulls_n_cows[1])))
bulls, cows = game.bulls_n_cows_morph(bulls_n_cows[0], bulls_n_cows[1])
await message.channel.send(user_player + f"\nМоя {len(game.history) + 1} попытка. Мое число"
f" {guess}. У меня {bulls} и {cows}.")
if int(bulls_n_cows[0]) == 4:  # the split() pieces are strings, hence the cast
# the bot won
await message.channel.send('Бот победил, ' + user_player + '! Вы загадали число '
+ str(guess))
playing = 2
player_turn = True
if playing != -1:
await message.channel.send('Спасибо за игру! Если вы желаете еще поиграть --'
' введите команду "/игры".')
await message.channel.send(f'Игра окончена, {user_player}. Если желаете еще раз сыграть в эту или'
f' иную игру -- введите команду "/игры".')
# Launching the "Dice" game
elif message.content == '/кости':
# Status change.
await self.db_edit(message.author.name + message.author.discriminator, 'dices', user_chan_guild)
# Explain the rules of the game
await message.channel.send('Хорошо, ' + user_player + '! Правила таковы -- у вас ровно 100 монет. Вам нужно'
' увеличить их количество. На каждый бросок можно с'
'делать ставку, от 5 до 20 монет. Ставка делается '
'на сумму цифр, которые будет на верхн(их/ей) гран(я'
'х/и) кост(ей/и) после броска. Также вы можете '
'выбрать какие кости будете бросать. Кости каждый р'
'аз выбираются случайно, из следующих вариантов:'
'\n\tодна шестигранная кость, коэффициент ставки - 3.'
'\n\tдве шестигранные кости коэффициент ставки - 6'
'\n\tодна восьмигранная кость, коэффициент ставки - '
'4\n\tдве восьмигранные кости, коэффициент ставки - '
'8\n\tодна двадцатигранная кость,'
' коэффициент ставки - 10\nТакже вам всегда будет д'
'оступна моентка со стабильным коэффициентом 2.\n'
'Коэффициент ставки - это то число, на которое '
'будет умножена ваша ставка. При проигрыше у вас '
'вычтут вашу ставку. Но есть одно условие - ,'
' все коэффициенты, кроме стабильного, варируются'
' от 2 до самих себя.\nЕсли вы будете'
' играть, то выберите число, которого хотите '
'достигнуть, из нижеперечисленных. В противном случ'
'ае, напишите команду "/стоп"\n'
'200 | 300 | 500 | 1000 | /стоп')
choice = await self.wait_for('message', check=check)
# Validate the input
while choice.content not in ('200', '300', '/стоп', '500', '1000'):
await message.channel.send(user_player + ', чтобы ответить,'
' введите один из следующих вариантов: \n200\n300\n500\n100'
'0\n/стоп')
choice = await self.wait_for('message', check=check)
if choice.content == '/стоп':
# The player declined to play. Their status changes automatically at the end of the game block.
pass
else:
start_cash = 100
end_cash = int(choice.content)
# Starting and target amounts; a dictionary of dice names and their multipliers.
dash_set = {'один шестигранник': 3,
'два шестигранника': 6,
'один восьмигранник': 4,
'два восьмигранника': 8,
'один двадцатигранник': 10}
# Every possible roll total for the different dice sets.
values = {'один шестигранник': range(1, 7),
'два шестигранника': range(2, 13),
'один восьмигранник': range(1, 9),
'два восьмигранника': range(2, 17),
'один двадцатигранник': range(1, 21),
'монета': range(1, 3)}
# Whether the coin was used last round.
d2_used = False
# Until the player loses or wins.
while start_cash > 0 and start_cash != end_cash:
# This particular way of reseeding was settled on experimentally.
random.seed(random.randint(10 ** 10, 10 ** 20))
# The dice sets offered to the player this round.
cur_set = [random.choice([d for d in dash_set.keys()]) for _ in range(2)]
for i in range(len(cur_set)):
# Remove and replace duplicates.
while cur_set.count(cur_set[i]) > 1:
del cur_set[i]
cur_set.append(random.choice([d for d in dash_set.keys()]))
cur_set[i] = f'{i + 1}){cur_set[i]} -- {str(random.randint(2, dash_set[cur_set[i]]))}'
if not d2_used:
cur_set.append('3)монета -- 2')
else:
d2_used = False
await message.channel.send(user_player + f'. Ваши монеты: {start_cash}, осталось набрать ещё '
f'{end_cash - start_cash} монет.\n Вы можете кинуть '
f'следующие кости:\n\t' + '\n\t'.join(cur_set)
+ '\nМожно ввести или наименование варианта, или его номер.')
user_move = await self.wait_for('message', check=check)
# Validate the input.
while all([user_move.content != c.split(' -- ')[0][2:] for
c in cur_set]) and user_move.content not in ['1', '2', '3'] + ['/стоп']:
await message.channel.send(user_player + ', чтобы ответить, введите наименование одного из'
' следующих вариантов:\n\t' + '\n\t'.join(cur_set) +
'\nили номер варианта, от 1 до 3.\nТакже вы можете прервать игру'
' командой "/стоп"')
user_move = await self.wait_for('message', check=check)
dice = user_move.content
if dice == '/стоп':
break
if dice not in ['1', '2', '3']:
# If a name was entered, look up its number.
dice = str([d.split(' -- ')[0][2:] == dice for d in cur_set].index(True) + 1)
if dice == '3':
d2_used = True
coefficient = int(cur_set[int(dice) - 1][-1])
await message.channel.send(user_player + ', теперь выберите число, на которое будете делать ставку.'
' Число не может превышать максимальную сумму цифр костей'
', или быть меньше 1 (или 2 если костей две).')
digit = await self.wait_for('message', check=check)
# Collect every number that can be bet on.
sums = [str(b) for b in values[cur_set[int(dice) - 1].split(' -- ')[0][2:]]]
# Validate the input
while digit.content not in sums and digit.content != '/стоп':
await message.channel.send(user_player + ', выберите число, на которое будете делать ставку.'
' Введите любое число из следуюших: ' +
', '.join(sums) + '\nТакже вы можете прервать игру командой '
'"/стоп"')
digit = await self.wait_for('message', check=check)
if digit.content == '/стоп':
break
await message.channel.send(f'Отлично, {user_player}, а теперь введите ставку. Ставкой может быть '
f'любое число от 5 до 20 включительно.')
bet = await self.wait_for('message', check=check)
# Validate the bet. It is possible to place a bet and go negative, in full
# accordance with the rules that were shown to the user.
while bet.content not in [str(b) for b in range(5, 21)] and bet.content != '/стоп':
await message.channel.send(user_player + ', введите ставку. Ставкой может быть любое число из'
' следующих: ' + ', '.join([str(g) for g in
range(5, 21)]))
bet = await self.wait_for('message', check=check)
if bet.content == '/стоп':
break
# Roll the dice.
cast = random.choice(sums)
await message.channel.send(f'{user_player}, вы сделали ставку {bet.content} монет на число '
f'{digit.content}. Бот бросает кости...\nИ выбрасывает число'
f' {cast}.')
if digit.content != cast:
await message.channel.send(f'Жаль, {user_player}, вы не угадали и лишились {bet.content} монет.')
start_cash -= int(bet.content)
else:
await message.channel.send(f'Вы угадали, {user_player}! Ваш выигрыш составляет '
f'{coefficient * int(bet.content)} монет(а).')
start_cash += coefficient * int(bet.content)
if start_cash <= 0:
await message.channel.send(f'Вы проиграли, {user_player}. Но это не повод для огорчения,'
f' ведь смысл этой игры не в победах или поражениях, а в самой игре.'
f' Каждый проигрыш или победа чему-то учат.')
if start_cash == end_cash:
await message.channel.send(f'Поздравляю, {user_player}, вы победили!')
await message.channel.send(f'Игра окончена, {user_player}. Если вы желаете сыграть еще '
f'-- введите команду "/игры".')
await self.db_edit(user_player, 'empty')
async def db_edit(self, user_id, status, channel='None'):
# Adds the player to the database, or updates their status if they are already there.
cur = self.con.cursor()
# On a server the identifier contains '#', in direct messages it does not. Avoid duplicate records.
user = cur.execute("Select * from users WHERE user_id=?", (user_id,)).fetchone()
if user is None:
cur.execute('INSERT INTO users(user_id, state, channel) VALUES(?, ?, ?)', (str(user_id), status, channel))
else:
cur.execute(f'UPDATE users SET state = "{status}", channel = "{channel}" WHERE user_id = "'
+ str(user_id) + '"')
self.con.commit()
def user_status(self, user_id, get_channel=False):
# Fetch the user's status.
cur = self.con.cursor()
user = cur.execute("Select * from users WHERE user_id=?", (user_id.replace('#', ''),)).fetchone()
if user is None:
return 'None'
if get_channel:
return user[2]
return user[1]
async def on_ready(self):
# On restart all statuses are reset; on the very first launch nothing happens
# because the database has no users yet.
cur = self.con.cursor()
users = cur.execute("Select * from users").fetchall()
for i in users:
cur.execute('UPDATE users SET state = "empty", channel = "None" WHERE user_id = "' + str(i[0]) + '"')
self.reconnect[i[0]] = True
self.con.commit()
async def on_member_join(self, member):
# Send a message to a user who just joined the server.
await self.pm_greet(member)
async def pm_greet(self, member):
# The greeting is only sent if the user is not in the database.
if self.user_status(str(member)) == 'None':
await member.create_dm()
await member.dm_channel.send(self.dialog_base['/привет'])
await member.dm_channel.send('Вы можете общаться со мной как на общем канале, так и здесь. Eще у меня'
' есть команда "/помощь". Отправьте ее мне, если понадобится помощь.')
client = Fraudbot()
client.run(open('token.txt', 'r').readline())
avg_line_length: 68.469643 | max_line_length: 122 | alphanum_fraction: 0.463683
quality signals: 3,732 | 38,343 | 4.673365 | 0.244373 | 0.028095 | 0.044665 | 0.054068 | 0.265237 | 0.216215 | 0.185712 | 0.143799 | 0.120005 | 0.098561 | 0 | 0.013575 | 0.458232 | 38,343 | 559 | 123 | 68.592129 | 0.826024 | 0.130976 | 0 | 0.206208 | 0 | 0.006652 | 0.235492 | 0.006978 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019956 | false | 0.004435 | 0.013304 | 0.004435 | 0.064302 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective: 1 | hits: 0
hexsha: 9518dbb4f02a3d9f4f06a63e879638510aa4fe07
size: 31,698
ext: py
lang: Python
max_stars_repo_path: iocage/lib/ioc_json.py
max_stars_repo_name: project-fifo/iocage
max_stars_repo_head_hexsha: 1b8669bc2119718dbea8f2707a4eb4c92197c0f0
max_stars_repo_licenses: ["BSD-2-Clause"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: iocage/lib/ioc_json.py
max_issues_repo_name: project-fifo/iocage
max_issues_repo_head_hexsha: 1b8669bc2119718dbea8f2707a4eb4c92197c0f0
max_issues_repo_licenses: ["BSD-2-Clause"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: iocage/lib/ioc_json.py
max_forks_repo_name: project-fifo/iocage
max_forks_repo_head_hexsha: 1b8669bc2119718dbea8f2707a4eb4c92197c0f0
max_forks_repo_licenses: ["BSD-2-Clause"]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2022-03-06T10:09:18.000Z
max_forks_repo_forks_event_max_datetime: 2022-03-06T10:09:18.000Z
content:
"""Convert, load or write JSON."""
import json
import logging
import os
import re
import sys
from os import geteuid, path
from subprocess import CalledProcessError, PIPE, Popen, STDOUT, check_call
from iocage.lib.ioc_common import checkoutput, get_nested_key, open_atomic
def _get_pool_and_iocroot():
"""For internal setting of pool and iocroot."""
pool = IOCJson().json_get_value("pool")
iocroot = IOCJson(pool).json_get_value("iocroot")
return (pool, iocroot)
class IOCJson(object):
"""
Migrates old iocage configurations(UCL and ZFS Props) to the new JSON
format, will set and get properties.
"""
def __init__(self, location="", silent=False, cli=False):
self.location = location
self.lgr = logging.getLogger('ioc_json')
self.cli = cli
if silent:
self.lgr.disabled = True
def json_convert_from_ucl(self):
"""Convert to JSON. Accepts a location to the ucl configuration."""
if geteuid() != 0:
raise RuntimeError("You need to be root to convert the"
" configurations to the new format!")
with open(self.location + "/config", "r") as conf:
lines = conf.readlines()
key_and_value = {}
for line in lines:
line = line.partition("=")
key = line[0].rstrip()
value = line[2].replace(";", "").replace('"', '').strip()
key_and_value[key] = value
self.json_write(key_and_value)
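# For illustration (hypothetical config contents): a UCL line such as
#   host_hostname="myjail";
# is partitioned on "=", stripped of ';' and '"', and lands in the JSON
# config as "host_hostname": "myjail".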
def json_convert_from_zfs(self, uuid, skip=False):
"""Convert to JSON. Accepts a jail UUID"""
pool, _ = _get_pool_and_iocroot()
dataset = "{}/iocage/jails/{}".format(pool, uuid)
jail_zfs_prop = "org.freebsd.iocage:jail_zfs_dataset"
if geteuid() != 0:
raise RuntimeError("You need to be root to convert the"
" configurations to the new format!")
cmd = ["zfs", "get", "-H", "-o", "property,value", "all", dataset]
regex = re.compile("org.freebsd.iocage")
zfs_get = Popen(cmd, stdout=PIPE).communicate()[0].decode(
"utf-8").split("\n")
# Find each of the props we want to convert.
props = [p for p in zfs_get if re.search(regex, p)]
key_and_value = {"host_domainname": "none"}
for prop in props:
prop = prop.partition(":")
key = prop[2].split("\t")[0]
value = prop[2].split("\t")[1].strip()
if key == "type":
if value == "basejail":
# These were just clones on master.
value = "jail"
key_and_value["basejail"] = "yes"
key_and_value[key] = value
if not skip:
# Set jailed=off and move the jailed dataset.
checkoutput(["zfs", "set", "jailed=off",
"{}/root/data".format(dataset)])
checkoutput(["zfs", "rename", "-f",
"{}/root/data".format(dataset),
"{}/data".format(dataset)])
checkoutput(["zfs", "set",
"{}=iocage/jails/{}/data".format(
jail_zfs_prop, uuid),
"{}/data".format(dataset)])
checkoutput(["zfs", "set", "jailed=on",
"{}/data".format(dataset)])
key_and_value["jail_zfs_dataset"] = "iocage/jails/{}/data".format(uuid)
self.json_write(key_and_value)
def json_load(self):
"""Load the JSON at the location given. Returns a JSON object."""
version = self.json_get_version()
skip = False
try:
with open(self.location + "/config.json", "r") as conf:
conf = json.load(conf)
except (IOError, OSError):
if path.isfile(self.location + "/config"):
self.json_convert_from_ucl()
with open(self.location + "/config.json", "r") as conf:
conf = json.load(conf)
else:
dataset = self.location.split("/")
for d in dataset:
if len(d) == 36:
uuid = d
elif len(d) == 8:
# Hack88 migration to a perm short UUID.
pool, iocroot = _get_pool_and_iocroot()
from iocage.lib.ioc_list import IOCList
full_uuid = checkoutput(
["zfs", "get", "-H", "-o",
"value",
"org.freebsd.iocage:host_hostuuid",
self.location]).rstrip()
jail_hostname = checkoutput(
["zfs", "get", "-H", "-o",
"value",
"org.freebsd.iocage:host_hostname",
self.location]).rstrip()
short_uuid = full_uuid[:8]
full_dataset = "{}/iocage/jails/{}".format(
pool, full_uuid)
short_dataset = "{}/iocage/jails/{}".format(
pool, short_uuid)
self.json_convert_from_zfs(full_uuid)
with open(self.location + "/config.json", "r") as conf:
conf = json.load(conf)
self.lgr.info("hack88 is no longer supported."
"\n{} is being converted to {} "
"permanently.".format(full_dataset,
short_dataset))
status, _ = IOCList().list_get_jid(full_uuid)
if status:
self.lgr.info("Stopping jail to migrate UUIDs.")
from iocage.lib.ioc_stop import IOCStop
IOCStop(full_uuid, conf["tag"], self.location,
conf, silent=True)
jail_zfs_prop = "org.freebsd.iocage:jail_zfs_dataset"
uuid_prop = "org.freebsd.iocage:host_hostuuid"
host_prop = "org.freebsd.iocage:host_hostname"
# Set jailed=off and move the jailed dataset.
checkoutput(["zfs", "set", "jailed=off",
"{}/data".format(full_dataset)])
# We don't want to change a real hostname.
if jail_hostname == full_uuid:
checkoutput(["zfs", "set", "{}={}".format(
host_prop, short_uuid), full_dataset])
checkoutput(["zfs", "set", "{}={}".format(
uuid_prop, short_uuid), full_dataset])
checkoutput(["zfs", "set",
"{}=iocage/jails/{}/data".format(
jail_zfs_prop, short_uuid),
"{}/data".format(full_dataset)])
checkoutput(["zfs", "rename", "-f", full_dataset,
short_dataset])
checkoutput(["zfs", "set", "jailed=on",
"{}/data".format(short_dataset)])
uuid = short_uuid
self.location = "{}/jails/{}".format(iocroot,
short_uuid)
skip = True
self.json_convert_from_zfs(uuid, skip=skip)
with open(self.location + "/config.json", "r") as conf:
conf = json.load(conf)
try:
conf_version = conf["CONFIG_VERSION"]
if version != conf_version:
conf = self.json_check_config(conf, version)
except KeyError:
conf = self.json_check_config(conf, version)
return conf
def json_write(self, data, _file="/config.json"):
"""Write a JSON file at the location given with supplied data."""
with open_atomic(self.location + _file, 'w') as out:
json.dump(data, out, sort_keys=True, indent=4,
ensure_ascii=False)
def json_get_value(self, prop):
"""Returns a string with the specified prop's value."""
old = False
if prop == "pool":
match = 0
zpools = Popen(["zpool", "list", "-H", "-o", "name"],
stdout=PIPE).communicate()[0].decode(
"utf-8").split()
for zfs in zpools:
dataset = Popen(["zfs", "get", "-H", "-o", "value",
"org.freebsd.ioc:active", zfs],
stdout=PIPE).communicate()[0].decode(
"utf-8").strip()
old_dataset = Popen(["zpool", "get", "-H", "-o", "value",
"comment", zfs],
stdout=PIPE).communicate()[0].decode(
"utf-8").strip()
if dataset == "yes":
_dataset = zfs
match += 1
elif old_dataset == "iocage":
_dataset = zfs
match += 1
old = True
if match == 1:
pool = _dataset
if old:
if os.geteuid() != 0:
raise RuntimeError("Run as root to migrate old pool"
" activation property!")
check_call(["zpool", "set", "comment=-", pool],
stderr=PIPE, stdout=PIPE)
check_call(["zfs", "set", "org.freebsd.ioc:active=yes",
pool], stderr=PIPE, stdout=PIPE)
return pool
elif match >= 2:
if "deactivate" not in sys.argv[1:]:
self.lgr.error("Pools:")
for zpool in zpools:
self.lgr.error(" {}".format(zpool))
raise RuntimeError("You have {} ".format(match) +
"pools marked active for iocage "
"usage.\n"
"Run \"iocage deactivate ZPOOL\" on"
" {} of the".format(match - 1) +
" pools.\n")
else:
if len(sys.argv) >= 2 and "activate" in sys.argv[1:]:
pass
else:
# We use the first zpool the user has, they are free to
# change it.
cmd = ["zpool", "list", "-H", "-o", "name"]
zpools = Popen(cmd, stdout=PIPE).communicate()[0].decode(
"utf-8").split()
if os.geteuid() != 0:
raise RuntimeError("Run as root to automatically "
"activate the first zpool!")
self.lgr.info("Setting up zpool [{}] for iocage usage\n"
"If you wish to change please use "
"\"iocage activate\"".format(zpools[0]))
Popen(["zfs", "set", "org.freebsd.ioc:active=yes",
zpools[0]]).communicate()
return zpools[0]
elif prop == "iocroot":
# Location in this case is actually the zpool.
try:
loc = "{}/iocage".format(self.location)
mount = checkoutput(["zfs", "get", "-H", "-o", "value",
"mountpoint", loc]).strip()
return mount
except CalledProcessError:
raise RuntimeError("{} not found!".format(self.location))
elif prop == "all":
conf = self.json_load()
return conf
else:
conf = self.json_load()
if prop == "last_started" and conf[prop] == "none":
return "never"
else:
return conf[prop]
def json_set_value(self, prop, create_func=False):
"""Set a property for the specified jail."""
# Circular dep! Meh.
from iocage.lib.ioc_list import IOCList
from iocage.lib.ioc_create import IOCCreate
key, _, value = prop.partition("=")
conf = self.json_load()
old_tag = conf["tag"]
uuid = conf["host_hostuuid"]
status, jid = IOCList.list_get_jid(uuid)
conf[key] = value
sysctls_cmd = ["sysctl", "-d", "security.jail.param"]
jail_param_regex = re.compile("security.jail.param.")
sysctls_list = Popen(sysctls_cmd, stdout=PIPE).communicate()[0].decode(
"utf-8").split()
jail_params = [p.replace("security.jail.param.", "").replace(":", "")
for p in sysctls_list if re.match(jail_param_regex, p)]
single_period = ["allow_raw_sockets", "allow_socket_af",
"allow_set_hostname"]
if not create_func:
if key == "tag":
conf["tag"] = IOCCreate("", prop, 0).create_link(
conf["host_hostuuid"], value, old_tag=old_tag)
tag = conf["tag"]
if key == "template":
pool, iocroot = _get_pool_and_iocroot()
old_location = "{}/iocage/jails/{}".format(pool, uuid)
new_location = "{}/iocage/templates/{}".format(pool, old_tag)
if status:
raise RuntimeError(f"{uuid} ({old_tag}) is running.\nPlease"
"stop it first!")
jails, paths = IOCList("uuid").list_datasets()
for j in jails:
_uuid = jails[j]
_path = f"{paths[j]}/root"
t_old_path = f"{old_location}/root@{_uuid}"
t_path = f"{new_location}/root@{_uuid}"
if _uuid == uuid:
continue
origin = checkoutput(["zfs", "get", "-H", "-o", "value",
"origin", _path]).rstrip()
if origin == t_old_path or origin == t_path:
_status, _ = IOCList.list_get_jid(_uuid)
if _status:
raise RuntimeError(f"CHILD: {_uuid} ({j}) is"
f" running.\nPlease stop it first!")
if value == "yes":
try:
checkoutput(["zfs", "rename", "-p", old_location,
new_location], stderr=STDOUT)
conf["type"] = "template"
self.location = new_location.lstrip(pool).replace(
"/iocage", iocroot)
except CalledProcessError as err:
raise RuntimeError("ERROR: {}".format(
err.output.decode("utf-8").rstrip()))
self.lgr.info("{} ({}) converted to a template.".format(uuid,
old_tag))
self.lgr.disabled = True
elif value == "no":
try:
checkoutput(["zfs", "rename", "-p", new_location,
old_location], stderr=STDOUT)
conf["type"] = "jail"
self.location = old_location.lstrip(pool).replace(
"/iocage", iocroot)
except CalledProcessError as err:
raise RuntimeError("ERROR: {}".format(
err.output.decode("utf-8").rstrip()))
self.lgr.info("{} ({}) converted to a jail.".format(uuid,
old_tag))
self.lgr.disabled = True
self.json_check_prop(key, value, conf)
self.json_write(conf)
self.lgr.info(
"Property: {} has been updated to {}".format(key, value))
# Used for import
if not create_func:
if key == "tag":
return tag
# We can attempt to set a property in realtime to jail.
if status:
if key in single_period:
key = key.replace("_", ".", 1)
else:
key = key.replace("_", ".")
if key in jail_params:
try:
checkoutput(["jail", "-m", "jid={}".format(jid),
"{}={}".format(key, value)], stderr=STDOUT)
except CalledProcessError as err:
raise RuntimeError("ERROR: {}".format(
err.output.decode("utf-8").rstrip()))
@staticmethod
def json_get_version():
"""Sets the iocage configuration version."""
version = "5"
return version
def json_check_config(self, conf, version):
"""
Takes JSON as input and checks to see what is missing and adds the
new keys with their default values if missing.
"""
if geteuid() != 0:
raise RuntimeError("You need to be root to convert the"
" configurations to the new format!")
_, iocroot = _get_pool_and_iocroot()
# Version 2 keys
try:
sysvmsg = conf["sysvmsg"]
sysvsem = conf["sysvsem"]
sysvshm = conf["sysvshm"]
except KeyError:
sysvmsg = "new"
sysvsem = "new"
sysvshm = "new"
# Set all keys, even if it's the same value.
conf["sysvmsg"] = sysvmsg
conf["sysvsem"] = sysvsem
conf["sysvshm"] = sysvshm
# Version 3 keys
try:
release = conf["release"]
cloned_release = conf["cloned_release"]
        except KeyError:
            # Building the path cannot raise IOError/OSError, so fall back
            # to the template's copy of the binary at open() time instead.
            freebsd_version = f"{iocroot}/releases/{conf['release']}" \
                "/root/bin/freebsd-version"
            if conf["release"][:4].endswith("-"):
                # 9.3-RELEASE and under don't actually have this binary.
                release = conf["release"]
            else:
                try:
                    r = open(freebsd_version, "r")
                except (IOError, OSError):
                    freebsd_version = f"{iocroot}/templates/{conf['tag']}" \
                        "/root/bin/freebsd-version"
                    r = open(freebsd_version, "r")
                with r:
                    for line in r:
                        if line.startswith("USERLAND_VERSION"):
                            release = line.rstrip().partition("=")[2].strip(
                                '"')
cloned_release = conf["release"]
# Set all Version 3 keys
conf["release"] = release
conf["cloned_release"] = cloned_release
# Version 4 keys
try:
basejail = conf["basejail"]
except KeyError:
basejail = "no"
# Set all keys, even if it's the same value.
conf["basejail"] = basejail
# Version 5 keys
try:
comment = conf["comment"]
except KeyError:
comment = "none"
# Set all keys, even if it's the same value.
conf["comment"] = comment
conf["CONFIG_VERSION"] = version
self.json_write(conf)
return conf
def json_check_prop(self, key, value, conf):
"""
        Checks if the property matches known good values; when invoked from
        the CLI, denies setting any property not in this list.
"""
props = {
# Network properties
"interfaces" : (":", ","),
"host_domainname" : ("string",),
"host_hostname" : ("string",),
"exec_fib" : ("string",),
"ip4_addr" : ("|",),
"ip4_saddrsel" : ("0", "1",),
"ip4" : ("new", "inherit", "none"),
"ip6_addr" : ("|",),
"ip6_saddrsel" : ("0", "1"),
"ip6" : ("new", "inherit", "none"),
"defaultrouter" : ("string",),
"defaultrouter6" : ("string",),
"resolver" : ("string",),
"mac_prefix" : ("string",),
"vnet0_mac" : ("string",),
"vnet1_mac" : ("string",),
"vnet2_mac" : ("string",),
"vnet3_mac" : ("string",),
# Jail Properties
"devfs_ruleset" : ("string",),
"exec_start" : ("string",),
"exec_stop" : ("string",),
"exec_prestart" : ("string",),
"exec_poststart" : ("string",),
"exec_prestop" : ("string",),
"exec_poststop" : ("string",),
"exec_clean" : ("0", "1"),
"exec_timeout" : ("string",),
"stop_timeout" : ("string",),
"exec_jail_user" : ("string",),
"exec_system_jail_user": ("string",),
"exec_system_user" : ("string",),
"mount_devfs" : ("0", "1"),
"mount_fdescfs" : ("0", "1"),
"enforce_statfs" : ("0", "1", "2"),
"children_max" : ("string",),
"login_flags" : ("string",),
"securelevel" : ("string",),
"sysvmsg" : ("new", "inherit", "disable"),
"sysvsem" : ("new", "inherit", "disable"),
"sysvshm" : ("new", "inherit", "disable"),
"allow_set_hostname" : ("0", "1"),
"allow_sysvipc" : ("0", "1"),
"allow_raw_sockets" : ("0", "1"),
"allow_chflags" : ("0", "1"),
"allow_mount" : ("0", "1"),
"allow_mount_devfs" : ("0", "1"),
"allow_mount_nullfs" : ("0", "1"),
"allow_mount_procfs" : ("0", "1"),
"allow_mount_tmpfs" : ("0", "1"),
"allow_mount_zfs" : ("0", "1"),
"allow_quotas" : ("0", "1"),
"allow_socket_af" : ("0", "1"),
# RCTL limits
"cpuset" : ("off", "on"),
"rlimits" : ("off", "on"),
"memoryuse" : ":",
"memorylocked" : ("off", "on"),
"vmemoryuse" : ("off", "on"),
"maxproc" : ("off", "on"),
"cputime" : ("off", "on"),
"pcpu" : ("off", "on"),
"datasize" : ("off", "on"),
"stacksize" : ("off", "on"),
"coredumpsize" : ("off", "on"),
"openfiles" : ("off", "on"),
"pseudoterminals" : ("off", "on"),
"swapuse" : ("off", "on"),
"nthr" : ("off", "on"),
"msgqqueued" : ("off", "on"),
"msgqsize" : ("off", "on"),
"nmsgq" : ("off", "on"),
"nsemop" : ("off", "on"),
"nshm" : ("off", "on"),
"shmsize" : ("off", "on"),
"wallclock" : ("off", "on"),
# Custom properties
"tag" : ("string",),
"bpf" : ("off", "on"),
"dhcp" : ("off", "on"),
"boot" : ("off", "on"),
"notes" : ("string",),
"owner" : ("string",),
"priority" : str(tuple(range(1, 100))),
"hostid" : ("string",),
"jail_zfs" : ("off", "on"),
"jail_zfs_dataset" : ("string",),
"jail_zfs_mountpoint" : ("string",),
"mount_procfs" : ("0", "1"),
"mount_linprocfs" : ("0", "1"),
"vnet" : ("off", "on"),
"template" : ("no", "yes"),
"comment" : ("string",)
}
zfs_props = {
# ZFS Props
"compression" : "lz4",
"origin" : "readonly",
"quota" : "none",
"mountpoint" : "readonly",
"compressratio": "readonly",
"available" : "readonly",
"used" : "readonly",
"dedup" : "off",
"reservation" : "none",
}
if key in zfs_props.keys():
pool, _ = _get_pool_and_iocroot()
if conf["template"] == "yes":
_type = "templates"
uuid = conf["tag"] # I know, but it's easier this way.
else:
_type = "jails"
uuid = conf["host_hostuuid"]
checkoutput(["zfs", "set", f"{key}={value}",
f"{pool}/iocage/{_type}/{uuid}"])
return
if key in props.keys():
# Either it contains what we expect, or it's a string.
for k in props[key]:
if k in value:
return
if props[key][0] == "string":
return
else:
err = f"{value} is not a valid value for {key}.\n"
if self.cli:
self.lgr.error(f"ERROR: {err}")
else:
err = f"ERROR: {err}"
if key not in ("interfaces", "ip4_addr", "ip6_addr",
"memoryuse"):
msg = f"Value must be {' or '.join(props[key])}"
if not self.cli:
msg = err + msg
raise RuntimeError(msg)
elif key == "ip4_addr":
msg = "IP address must contain both an interface and IP " \
"address.\nEXAMPLE: em0|192.168.1.10"
if not self.cli:
msg = err + msg
raise RuntimeError(msg)
elif key == "ip6_addr":
msg = "IP address must contain both an interface and IP " \
"address.\nEXAMPLE: em0|fe80::5400:ff:fe54:1"
if not self.cli:
msg = err + msg
raise RuntimeError(msg)
elif key == "interfaces":
msg = "Interfaces must be specified as a pair.\n" \
"EXAMPLE: vnet0:bridge0, vnet1:bridge1"
if not self.cli:
msg = err + msg
raise RuntimeError(msg)
elif key == "memoryuse":
msg = "memoryuse requires at minimum a pair.EXAMPLE: " \
"8g:log"
if not self.cli:
msg = err + msg
raise RuntimeError(msg)
else:
if self.cli:
exit(1)
else:
if self.cli:
raise RuntimeError(
f"ERROR: {key} cannot be changed by the user.")
else:
if key not in conf.keys():
raise RuntimeError(
f"WARNING: {key} is not a valid property!")
def json_plugin_load(self):
try:
with open("{}/plugin/settings.json".format(
self.location), "r") as settings:
settings = json.load(settings)
except (IOError, OSError):
raise RuntimeError(
"No settings.json exists in {}/plugin!".format(self.location))
return settings
def json_plugin_get_value(self, prop):
from iocage.lib.ioc_exec import IOCExec
pool, iocroot = _get_pool_and_iocroot()
conf = self.json_load()
uuid = conf["host_hostuuid"]
tag = conf["tag"]
_path = checkoutput(["zfs", "get", "-H", "-o", "value", "mountpoint",
"{}/iocage/jails/{}".format(pool,
uuid)]).rstrip()
# Plugin variables
settings = self.json_plugin_load()
serviceget = settings["serviceget"]
prop_error = ".".join(prop)
if "options" in prop:
_prop = prop[1:]
else:
_prop = prop
prop_cmd = "{},{}".format(serviceget, ",".join(_prop)).split(",")
try:
if prop[0] != "all":
if len(_prop) > 1:
return get_nested_key(settings, prop)
else:
return IOCExec(prop_cmd, uuid, tag, _path).exec_jail()
else:
return settings
except KeyError:
raise RuntimeError(
"Key: \"{}\" does not exist!".format(prop_error))
def json_plugin_set_value(self, prop):
from iocage.lib.ioc_exec import IOCExec
from iocage.lib.ioc_list import IOCList
pool, iocroot = _get_pool_and_iocroot()
conf = self.json_load()
uuid = conf["host_hostuuid"]
tag = conf["tag"]
_path = checkoutput(["zfs", "get", "-H", "-o", "value", "mountpoint",
"{}/iocage/jails/{}".format(pool,
uuid)]).rstrip()
status, _ = IOCList().list_get_jid(uuid)
# Plugin variables
settings = self.json_plugin_load()
serviceset = settings["serviceset"]
servicerestart = settings["servicerestart"].split()
keys, _, value = ".".join(prop).partition("=")
prop = keys.split(".")
restart = False
if "options" in prop:
prop = keys.split(".")[1:]
prop_cmd = "{},{},{}".format(serviceset, ",".join(prop), value).split(
",")
setting = settings["options"]
try:
while prop:
current = prop[0]
key = current
prop.remove(current)
if not prop:
if setting[current]:
try:
restart = setting[current]["requirerestart"]
except KeyError:
pass
else:
setting = setting[current]
if status:
# IOCExec will not show this if it doesn't start the jail.
self.lgr.info("Command output:")
IOCExec(prop_cmd, uuid, tag, _path).exec_jail()
if restart:
self.lgr.info("\n-- Restarting service --")
self.lgr.info("Command output:")
IOCExec(servicerestart, uuid, tag, _path).exec_jail()
self.lgr.info("\nKey: {} has been updated to {}".format(keys,
value))
except KeyError:
raise RuntimeError("Key: \"{}\" does not exist!".format(key))
# ==== FP/V46_faraday_effect/plot.py (nsalewski/laboratory, MIT) ====
#!/usr/bin/env python3
#coding:utf8
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
from astropy.io import ascii
from uncertainties import ufloat
import uncertainties.unumpy as unp
from modules.table import textable
import scipy.constants as const
import math as math
from modules.plot import axislabel as axis
#arr1=[0.4,0.75,1.4]
#arr2=[2,3,4]
#textable.latex_tab(data=[arr1,arr2],names=[r"title column 1",r"title column 2"], filename=r"example.tex",caption=r"Beautiful caption",label=r"important_label",dec_points=[2,0])
def manipulate(arr):
    # shift each angle into a common reference frame (wrap by 180 degrees)
    for i in range(len(arr)):
        if arr[i] < 180:
            arr[i] = arr[i] + 180
        else:
            arr[i] = arr[i] - 180
    return arr
def theorie(x,a,mu,b):
return ((a*np.exp(-((x-mu)**2)/(b))))
def winkel(grad,sec):
sec=sec*1/60
grad=grad+sec
return grad
def lin(x,a):
return a*x
def eff_mass(a,B,N):
return unp.sqrt(((e0)**3*N*B)/(8*np.pi**2*eps*c**3*n*a))
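# For reference, eff_mass restates the standard Faraday-rotation result
#   m* = sqrt(e0^3 * N * B / (8 * pi^2 * eps0 * c^3 * n * a)),
# with n the refractive index and a the fitted slope of delta-theta vs lambda^2.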
# import the data
b,z=np.genfromtxt("data/b_feld.txt",unpack=True)
f1,d1_hin,d1_hins,d1_rueck,d1_ruecks=np.genfromtxt("data/1_probe.txt",unpack=True)
f2,d2_hin,d2_hins,d2_rueck,d2_ruecks=np.genfromtxt("data/2_probe.txt",unpack=True)
f3,d3_hin,d3_hins,d3_rueck,d3_ruecks=np.genfromtxt("data/3_probe.txt",unpack=True)
f1=f1*10**(-6)
f2=f2*10**(-6)
f3=f3*10**(-6)
l1=1.296*10**(-3)
l2=1.36*10**(-3)
l3=5.11*10**(-3)
# add the arc-second readings to the degrees
grad1_hin=winkel(d1_hin,d1_hins)
grad1_rueck=winkel(d1_rueck,d1_ruecks)
grad2_hin=winkel(d2_hin,d2_hins)
grad2_rueck=winkel(d2_rueck,d2_ruecks)
grad3_hin=winkel(d3_hin,d3_hins)
grad3_rueck=winkel(d3_rueck,d3_ruecks)
# convert to the same reference point
grad1_hin=manipulate(grad1_hin)
grad1_rueck=manipulate(grad1_rueck)
grad2_hin=manipulate(grad2_hin)
grad2_rueck=manipulate(grad2_rueck)
grad3_hin=manipulate(grad3_hin)
grad3_rueck=manipulate(grad3_rueck)
grad1=(1/(2*l1)*(grad1_rueck-grad1_hin)*2*np.pi/360)
grad2=(1/(2*l2)*(grad2_rueck-grad2_hin)*2*np.pi/360)
grad3=(1/(2*l3)*(grad3_rueck-grad3_hin)*2*np.pi/360)
# compute delta theta
delta1=grad1-grad3
delta2=grad2-grad3
textable.latex_tab(data=[f1*10**6,grad3,grad1,grad2,delta1,delta2],names=[r"$\lambda$/$\si{\micro\meter}$",r"$\theta_{\mathrm{und}}$/$\si{\radian\per\meter}$",r"$\theta_{\mathrm{d1}}$/$\si{\radian\per\meter}$",r"$\theta_{\mathrm{d2}}$/$\si{\radian\per\meter}$",r"$\Delta \theta_{\mathrm{d1}}$/$\si{\radian\per\meter}$",r"$\Delta \theta_{\mathrm{d2}}$/$\si{\radian\per\meter}$"], filename=r"tables/eff_mass.tex",caption=r"Werte der $\Delta \theta$ zwischen undotiertem und dotiertem $\ce{GaAs}$ zur Bestimmung der effektiven Masse der Kristallelektronen",label=r"eff_mass",dec_points=[2,2,2,2,2,2],tableformat=4.2)
# theta tables
textable.latex_tab(data=[f1*10**6,grad1_hin,grad1_rueck,grad1],names=[r"$\lambda$/$\si{\micro\meter}$",r"$\theta_1$/$\si{\degree}$",r"$\theta_2$/$\si{\degree}$",r"$\theta$/$\si{\radian\per\meter}$"], filename=r"tables/probe1.tex",caption=r"Messwerte der Faraday-Rotation für die dotierte Probe $\ce{GaAs}_{d1}$",label=r"probe1",dec_points=[2,2,2,2],tableformat=4.2)
textable.latex_tab(data=[f2*10**6,grad2_hin,grad2_rueck,grad2],names=[r"$\lambda$/$\si{\micro\meter}$",r"$\theta_1$/$\si{\degree}$",r"$\theta_2$/$\si{\degree}$",r"$\theta$/$\si{\radian\per\meter}$"], filename=r"tables/probe2.tex",caption=r"Messwerte der Faraday-Rotation für die dotierte Probe $\ce{GaAs}_{d2}$",label=r"probe2",dec_points=[2,2,2,2],tableformat=4.2)
textable.latex_tab(data=[f3*10**6,grad3_hin,grad3_rueck,grad3],names=[r"$\lambda$/$\si{\micro\meter}$",r"$\theta_1$/$\si{\degree}$",r"$\theta_2$/$\si{\degree}$",r"$\theta$/$\si{\radian\per\meter}$"], filename=r"tables/probe3.tex",caption=r"Messwerte der Faraday-Rotation für die undotierte Probe $\ce{GaAs}_{und}$",label=r"probe3",dec_points=[2,2,2,2],tableformat=4.2)
# magnetic field table
textable.latex_tab(data=[z-3.1,b],names=[r"$z$/$\si{\centi\meter}$",r"$B$/$\si{\milli\tesla}$"], filename=r"tables/magnetfeld.tex",caption=r"Messung des Magnetfelds in Abhängigkeit zum Ort $z$ (Probe ist etwa bei $\SI{3.1}{\centi\meter}$ platziert)",label=r"magnetfeld",dec_points=[2,0],tableformat=3.2)
z_theo=np.linspace(0,6,50)
# fit to the magnetic field data
params, covariance = curve_fit(theorie,z-3.1,b)
errors = np.sqrt(np.diag(covariance))
print(params,errors)
print("Erwartungswert",params[1],errors[1])
delta1_calc=np.delete(delta1,[0,3,7])
f1_calc1=np.delete(f1,[0,3,7])
delta2_calc=np.delete(delta2,[6,7])
f1_calc2=np.delete(f1,[6,7])
#lin regress delta
paramsd1, covarianced1 = curve_fit(lin,(f1_calc1**2),delta1_calc*10**(-6))
errorsd1 = np.sqrt(np.diag(covarianced1))
paramsd2, covarianced2 = curve_fit(lin,(f1_calc2)**2,delta2_calc*10**(-6))
errorsd2 = np.sqrt(np.diag(covarianced2))
a1=ufloat(paramsd1[0],errorsd1[0])*10**(6)
a2=ufloat(paramsd2[0],errorsd2[0])*10**(6)
n=3.3
e0=const.e
eps=const.epsilon_0
c=const.c
B=377.5*10**(-3)
print("Delta_1 Steigung", a1)
print("Delta_2 Steigung", a2)
print("Effektive Masse 1",eff_mass(a1,B,2.8*10**18*10**6),eff_mass(a1,B,2.8*10**18*10**6)/const.m_e)
print("Effektive Masse 2",eff_mass(a2,B,1.2*10**18*10**6),eff_mass(a2,B,1.2*10**18*10**6)/const.m_e)
# plot the magnetic field
plt.plot((params[1],params[1]),(-20,400), 'r--', label="Erwartungswert \n der Normalverteilung")
plt.plot(z-3.1,b, 'rx', label="Messwerte $B$")
plt.ylabel(r"$B/\si{\milli\tesla}$")
plt.xlabel(r"z/\si{\centi\meter}")
plt.legend(loc='best')
plt.ylim(-20,400)
axis.labels()
plt.tight_layout()
plt.savefig('pictures/B_feld.pdf')
plt.clf()
#Plot theta
plt.plot(f1*10**6,grad1, 'rx', label=r"Messwerte $\theta_{\mathrm{d1}}$")
plt.plot(f2*10**6,grad2, 'gx', label=r"Messwerte $\theta_{\mathrm{d2}}$")
plt.plot(f3*10**6,grad3, 'bx', label=r"Messwerte $\theta_{\mathrm{und}}$")
plt.ylabel(r"$\theta$/$\si{\radian\per\meter}")
plt.xlabel(r"$\lambda$/$\si{\micro\meter}$")
plt.legend(loc='lower right')
plt.tight_layout()
axis.labels()
plt.xlim(1,3.5)
plt.savefig('pictures/winkel_gg_wellenlaenge.pdf')
plt.clf()
f_theo=np.linspace(0,np.max(f1)+0.1*np.max(f1))
#plot delta
plt.plot((f1)**2*10**11,delta1, 'rx', label=r"$\Delta \theta_{\mathrm{d1}}$")
plt.plot((f_theo)**2*10**11,lin((f_theo)**2,*paramsd1*10**6), 'b-', label="Ausgleichsgrade")
plt.ylabel(r"$\Delta \theta_{\mathrm{d1}}$/$\si{\radian\per\meter}$")
plt.xlabel(r"$\lambda^{2}$/$\si{\square\meter}\cdot \num{e-11}$")
plt.legend(loc='best')
axis.labels()
plt.xlim(0,1.1)
plt.tight_layout()
plt.savefig('pictures/delta1.pdf')
plt.clf()
plt.plot((f1)**2*10**11,delta2, 'rx', label=r"$\Delta \theta_{\mathrm{d2}}$")
plt.plot((f_theo)**2*10**11,lin(f_theo**2,*paramsd2*10**6), 'b-', label="Ausgleichsgrade")
plt.ylabel(r"$\Delta \theta_{\mathrm{d2}}$/$\si{\radian\per\meter}$")
plt.xlabel(r"$\lambda^{2}$/$\si{\square\meter}\cdot\num{e-11}$")
axis.labels()
plt.legend(loc='best')
plt.tight_layout()
plt.xlim(0,1.1)
plt.savefig('pictures/delta2.pdf')
plt.clf()
# ==== hackzurich_py/test_hist_threshold.py (ejoebstl/hackzurich16, MIT) ====
import os
import matplotlib.pyplot as plt
import numpy as np
import cv2
filedir = '/Users/gabrielfior/Dropbox/Hackzurich16/pupils_cutout/'
readbgr = filedir+'left_pupil232.bmp'
frame = plt.imread(readbgr)
white=plt.imread('/Users/gabrielfior/Dropbox/Hackzurich16/pupils_bw/right_pupil61.bmp')
black=plt.imread('/Users/gabrielfior/Dropbox/Hackzurich16/pupils_bw/right_pupil203.bmp')
#convert to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
plt.figure(1)
plt.clf()
img = cv2.imread(readbgr)
color = ('b','g','r')
b = img[:,:,0]
g = img[:,:,1]
r = img[:,:,2]
for i,col in enumerate(color):
histr = cv2.calcHist([img],[i],None,[256],[0,256])
plt.plot(histr,color = col)
plt.xlim([0,256])
plt.show()
plt.figure(2)
plt.clf()
plt.subplot(211)
ret,th1 = cv2.threshold(img[:,:,0],40,60,cv2.THRESH_BINARY)
plt.imshow(th1)
plt.subplot(212)
plt.imshow(hsv)
#Compare blue channel (when it is smaller than red channel)
#plt.figure(3)
new_mask = np.zeros_like(b)
for i in range(b.shape[0]):
for j in range(b.shape[1]):
#if b < r, put 1 else 0
if (img[:,:,0])[i][j] < (img[:,:,2])[i][j]:
new_mask[i][j]=1
plt.figure(3)
plt.clf()
plt.imshow(new_mask)
plt.figure(4)
plt.subplot(211)
plt.title('white')
for i,col in enumerate(color):
histr = cv2.calcHist([white],[i],None,[256],[0,256])
plt.plot(histr,color = col)
plt.xlim([0,256])
plt.subplot(212)
plt.title('black')
for i,col in enumerate(color):
histr = cv2.calcHist([black],[i],None,[256],[0,256])
plt.plot(histr,color = col)
plt.xlim([0,256])
plt.show()
#################
#Compute diff
mask_white = np.zeros_like(white[:,:,0])
for i in range(white.shape[0]):
for j in range(white.shape[1]):
#if b < r, put 1 else 0
if (white[:,:,0])[i][j] < (white[:,:,2])[i][j]:
mask_white[i][j]=1
mask_black = np.zeros_like(black[:,:,0])
for i in range(black.shape[0]):
for j in range(black.shape[1]):
#if b < r, put 1 else 0
if (black[:,:,0])[i][j] < (black[:,:,2])[i][j]:
mask_black[i][j]=1
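# Note: the per-pixel loops above can be vectorized with numpy; an
# equivalent formulation (illustrative) would be:
#   mask_white = (white[:, :, 0] < white[:, :, 2]).astype(np.uint8)
#   mask_black = (black[:, :, 0] < black[:, :, 2]).astype(np.uint8)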
#Plot masks
plt.figure(5)
plt.subplot(211)
plt.title('white')
plt.imshow(mask_white)
plt.subplot(212)
plt.title('black')
plt.imshow(mask_black)
plt.show()
#Flat fill
# ==== tensorflow_federated/python/research/baselines/emnist/models.py (khramtsova/federated, Apache-2.0) ====
# Lint as: python3
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build a model for EMNIST classification."""
import functools
import tensorflow as tf
def create_conv_dropout_model(only_digits=True):
"""Recommended model to use for EMNIST experiments.
When `only_digits=True`, the summary of returned model is
```
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
reshape (Reshape) (None, 28, 28, 1) 0
_________________________________________________________________
conv2d (Conv2D) (None, 26, 26, 32) 320
_________________________________________________________________
conv2d_1 (Conv2D) (None, 24, 24, 64) 18496
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 12, 12, 64) 0
_________________________________________________________________
dropout (Dropout) (None, 12, 12, 64) 0
_________________________________________________________________
flatten (Flatten) (None, 9216) 0
_________________________________________________________________
dense (Dense) (None, 128) 1179776
_________________________________________________________________
dropout_1 (Dropout) (None, 128) 0
_________________________________________________________________
dense_1 (Dense) (None, 10) 1290
=================================================================
Total params: 1,199,882
Trainable params: 1,199,882
Non-trainable params: 0
```
For `only_digits=False`, the last dense layer is slightly larger.
Args:
only_digits: If True, uses a final layer with 10 outputs, for use with the
digits only EMNIST dataset. If False, uses 62 outputs for the larger
dataset.
Returns:
A `tf.keras.Model`.
"""
data_format = 'channels_last'
input_shape = [28, 28, 1]
model = tf.keras.models.Sequential([
tf.keras.layers.Reshape(input_shape=(28 * 28,), target_shape=input_shape),
tf.keras.layers.Conv2D(
32,
kernel_size=(3, 3),
activation='relu',
input_shape=input_shape,
data_format=data_format),
tf.keras.layers.Conv2D(
64, kernel_size=(3, 3), activation='relu', data_format=data_format),
tf.keras.layers.MaxPool2D(pool_size=(2, 2), data_format=data_format),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(
10 if only_digits else 62, activation=tf.nn.softmax),
])
return model
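# A minimal usage sketch (the optimizer and loss below are illustrative
# choices, not taken from this module):
#
#   model = create_conv_dropout_model(only_digits=True)
#   model.compile(optimizer='sgd',
#                 loss='sparse_categorical_crossentropy',
#                 metrics=['accuracy'])
#   model.summary()  # ~1,199,882 parameters, matching the docstring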
def create_original_fedavg_cnn_model(only_digits=True):
"""The CNN model used in https://arxiv.org/abs/1602.05629.
The number of parameters when `only_digits=True` is (1,663,370), which matches
what is reported in the paper.
When `only_digits=True`, the summary of returned model is
```
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
reshape (Reshape) (None, 28, 28, 1) 0
_________________________________________________________________
conv2d (Conv2D) (None, 28, 28, 32) 832
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 14, 14, 32) 0
_________________________________________________________________
conv2d_1 (Conv2D) (None, 14, 14, 64) 51264
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 7, 7, 64) 0
_________________________________________________________________
flatten (Flatten) (None, 3136) 0
_________________________________________________________________
dense (Dense) (None, 512) 1606144
_________________________________________________________________
dense_1 (Dense) (None, 10) 5130
=================================================================
Total params: 1,663,370
Trainable params: 1,663,370
Non-trainable params: 0
```
For `only_digits=False`, the last dense layer is slightly larger.
Args:
only_digits: If True, uses a final layer with 10 outputs, for use with the
digits only EMNIST dataset. If False, uses 62 outputs for the larger
dataset.
Returns:
A `tf.keras.Model`.
"""
data_format = 'channels_last'
input_shape = [28, 28, 1]
max_pool = functools.partial(
tf.keras.layers.MaxPooling2D,
pool_size=(2, 2),
padding='same',
data_format=data_format)
conv2d = functools.partial(
tf.keras.layers.Conv2D,
kernel_size=5,
padding='same',
data_format=data_format,
activation=tf.nn.relu)
model = tf.keras.models.Sequential([
tf.keras.layers.Reshape(input_shape=(28 * 28,), target_shape=input_shape),
conv2d(filters=32, input_shape=input_shape),
max_pool(),
conv2d(filters=64),
max_pool(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation=tf.nn.relu),
tf.keras.layers.Dense(
10 if only_digits else 62, activation=tf.nn.softmax),
])
return model
def create_two_hidden_layer_model(only_digits=True, hidden_units=200):
"""Create a two hidden-layer fully connected neural network.
Args:
only_digits: A boolean that determines whether to only use the digits in
EMNIST, or the full EMNIST-62 dataset. If True, uses a final layer with 10
outputs, for use with the digit-only EMNIST dataset. If False, uses 62
outputs for the larger dataset.
hidden_units: An integer specifying the number of units in the hidden layer.
Returns:
A `tf.keras.Model`.
"""
model = tf.keras.models.Sequential([
tf.keras.layers.Dense(
hidden_units, activation=tf.nn.relu, input_shape=(28 * 28,)),
tf.keras.layers.Dense(hidden_units, activation=tf.nn.relu),
tf.keras.layers.Dense(
10 if only_digits else 62, activation=tf.nn.softmax),
])
return model
# Defining global constants for ResNet model
L2_WEIGHT_DECAY = 2e-4
def _residual_block(input_tensor, kernel_size, filters, base_name):
"""A block of two conv layers with an identity residual connection.
Args:
input_tensor: The input tensor for the residual block.
kernel_size: An integer specifying the kernel size of the convolutional
layers in the residual blocks.
filters: A list of two integers specifying the filters of the conv layers in
the residual blocks. The first integer specifies the number of filters on
the first conv layer within each residual block, the second applies to the
remaining conv layers within each block.
base_name: A string used to generate layer names.
Returns:
The output tensor of the residual block evaluated at the input tensor.
"""
filters1, filters2 = filters
x = tf.keras.layers.Conv2D(
filters1,
kernel_size,
padding='same',
use_bias=False,
name='{}_conv_1'.format(base_name))(
input_tensor)
x = tf.keras.layers.Activation('relu')(x)
x = tf.keras.layers.Conv2D(
filters2,
kernel_size,
padding='same',
use_bias=False,
name='{}_conv_2'.format(base_name))(
x)
x = tf.keras.layers.add([x, input_tensor])
x = tf.keras.layers.Activation('relu')(x)
return x
def _conv_residual_block(input_tensor,
kernel_size,
filters,
base_name,
strides=(2, 2)):
"""A block of two conv layers with a convolutional residual connection.
Args:
input_tensor: The input tensor for the residual block.
kernel_size: An integer specifying the kernel size of the convolutional
layers in the residual blocks.
filters: A list of two integers specifying the filters of the conv layers in
the residual blocks. The first integer specifies the number of filters on
the first conv layer within each residual block, the second applies to the
remaining conv layers within each block.
base_name: A string used to generate layer names.
strides: A tuple of integers specifying the strides lengths in the first
conv layer in the block.
Returns:
The output tensor of the residual block evaluated at the input tensor.
"""
filters1, filters2 = filters
x = tf.keras.layers.Conv2D(
filters1,
kernel_size,
strides=strides,
padding='same',
use_bias=False,
name='{}_conv_1'.format(base_name))(
input_tensor)
x = tf.keras.layers.Activation('relu')(x)
x = tf.keras.layers.Conv2D(
filters2,
kernel_size,
padding='same',
use_bias=False,
name='{}_conv_2'.format(base_name))(
x)
shortcut = tf.keras.layers.Conv2D(
filters2, (1, 1),
strides=strides,
use_bias=False,
name='{}_conv_shortcut'.format(base_name))(
input_tensor)
x = tf.keras.layers.add([x, shortcut])
x = tf.keras.layers.Activation('relu')(x)
return x
def _resnet_block(input_tensor,
size,
kernel_size,
filters,
stage,
conv_strides=(2, 2)):
"""A block which applies multiple residual blocks to a given input.
The resnet block applies a single conv residual block followed by multiple
identity residual blocks to a given input.
Args:
input_tensor: The input tensor for the resnet block.
size: An integer specifying the number of residual blocks. A conv residual
block is applied once, followed by (size - 1) identity residual blocks.
kernel_size: An integer specifying the kernel size of the convolutional
layers in the residual blocks.
filters: A list of two integers specifying the filters of the conv layers in
the residual blocks. The first integer specifies the number of filters on
the first conv layer within each residual block, the second applies to the
remaining conv layers within each block.
    stage: An integer representing the position of the resnet block within
the resnet. Used for generating layer names.
conv_strides: A tuple of integers specifying the strides in the first conv
layer within each conv residual block.
Returns:
The output tensor of the resnet block evaluated at the input tensor.
"""
x = _conv_residual_block(
input_tensor,
kernel_size,
filters,
base_name='res_{}_block_0'.format(stage),
strides=conv_strides)
for i in range(size - 1):
x = _residual_block(
x,
kernel_size,
filters,
base_name='res_{}_block_{}'.format(stage, i + 1))
return x
def create_resnet(num_blocks=5, only_digits=True):
"""Instantiates a ResNet model for EMNIST classification.
Instantiates the ResNet architecture from https://arxiv.org/abs/1512.03385.
The ResNet contains 3 stages of ResNet blocks with each block containing one
  conv residual block followed by (num_blocks - 1) identity residual blocks. Each
residual block has 2 convolutional layers. With the input convolutional
layer and the final dense layer, this brings the total number of trainable
layers in the network to (6*num_blocks + 2). This number is often used to
identify the ResNet, so for example ResNet56 has num_blocks = 9.
Args:
num_blocks: An integer representing the number of residual blocks within
each ResNet block.
only_digits: A boolean that determines whether to only use the digits in
EMNIST, or the full EMNIST-62 dataset. If True, uses a final layer with 10
outputs, for use with the digit-only EMNIST dataset. If False, uses 62
outputs for the larger dataset.
Returns:
A `tf.keras.Model`.
"""
num_classes = 10 if only_digits else 62
target_shape = (28, 28, 1)
img_input = tf.keras.layers.Input(shape=(28 * 28,))
x = img_input
x = tf.keras.layers.Reshape(
target_shape=target_shape, input_shape=(28 * 28,))(
x)
x = tf.keras.layers.ZeroPadding2D(padding=(1, 1), name='initial_pad')(x)
x = tf.keras.layers.Conv2D(
16, (3, 3),
strides=(1, 1),
padding='valid',
use_bias=False,
name='initial_conv')(
x)
x = tf.keras.layers.Activation('relu')(x)
x = _resnet_block(
x,
size=num_blocks,
kernel_size=3,
filters=[16, 16],
stage=2,
conv_strides=(1, 1))
x = _resnet_block(
x,
size=num_blocks,
kernel_size=3,
filters=[32, 32],
stage=3,
conv_strides=(2, 2))
x = _resnet_block(
x,
size=num_blocks,
kernel_size=3,
filters=[64, 64],
stage=4,
conv_strides=(2, 2))
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(
num_classes,
activation=tf.nn.softmax,
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
kernel_regularizer=tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
bias_regularizer=tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
name='fully_connected')(
x)
inputs = img_input
model = tf.keras.models.Model(
inputs, x, name='resnet{}'.format(6 * num_blocks + 2))
return model
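# A minimal usage sketch (num_blocks=9 gives ResNet56, per the docstring's
# 6*num_blocks + 2 layer count):
#
#   model = create_resnet(num_blocks=9, only_digits=True)
#   logits = model(tf.zeros([1, 28 * 28]))  # shape (1, 10)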
# ==== iterdeciser/loader.py (mpavlase/responses-form-evaluator, MIT) ====
import csv
from iterdeciser import models
def data_loader(filename):
with open(filename, newline='') as fd:
reader = csv.reader(fd, delimiter=',', quotechar='"')
# remove all previous entries
models.Answer.objects.all().delete()
models.Question.objects.all().delete()
models.Response.objects.all().delete()
header = next(reader)
questions = []
for question in header:
q = models.Question(title=question)
q.save()
questions.append(q)
for row in reader:
response = models.Response()
response.save()
for index, column in enumerate(row):
answer = models.Answer()
answer.title = column
answer.question = questions[index]
answer.response = response
answer.save()
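# A minimal usage sketch (requires a configured Django environment; the CSV
# path is illustrative):
#
#   data_loader('responses.csv')
#   print(models.Response.objects.count())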
# ==== adapter.py (jain-harshil/Adapter-BERT, Apache-2.0) ====
import torch
from torch import nn
from transformers.modeling_bert import BertIntermediate, BertOutput, BertLayer, BertEncoder, BertModel, BertForSequenceClassification
def get_nonlin_func(nonlin):
if nonlin == "tanh":
return torch.tanh
elif nonlin == "relu":
return torch.relu
elif nonlin == "gelu":
return nn.functional.gelu
elif nonlin == "sigmoid":
return torch.sigmoid
else:
raise ValueError("Unsupported nonlinearity!")
### Bottleneck Adapter
class BottleneckAdapterLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.adapter_input_size = config.hidden_size
self.adapter_latent_size = config.adapter_latent_size
self.non_linearity = get_nonlin_func(config.adapter_non_linearity)
self.residual = config.adapter_residual
# down projection
self.down_proj = nn.Linear(self.adapter_input_size, self.adapter_latent_size)
# up projection
self.up_proj = nn.Linear(self.adapter_latent_size, self.adapter_input_size)
self.init_weights()
def init_weights(self):
""" Initialize the weights -> so that initially we the whole Adapter layer is a near-identity function """
self.down_proj.weight.data.normal_(mean=0.0, std=0.02)
self.down_proj.bias.data.zero_()
self.up_proj.weight.data.normal_(mean=0.0, std=0.02)
self.up_proj.bias.data.zero_()
def forward(self, x):
output = self.up_proj(self.non_linearity(self.down_proj(x)))
if self.residual:
output = x + output
return output
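# A minimal usage sketch (the config object below is an illustrative
# stand-in, not a real transformers config):
#
#   from types import SimpleNamespace
#   cfg = SimpleNamespace(hidden_size=768, adapter_latent_size=64,
#                         adapter_non_linearity="relu", adapter_residual=True)
#   adapter = BottleneckAdapterLayer(cfg)
#   out = adapter(torch.randn(2, 16, 768))  # shape preserved: (2, 16, 768)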
### BERT
class AdapterBertIntermediate(BertIntermediate):
def __init__(self, config, layer_index):
super().__init__(config)
self.add_adapter = layer_index in config.layers_to_adapt and config.add_intermediate_adapter
if self.add_adapter:
self.intermediate_adapter = BottleneckAdapterLayer(config)
def forward(self, hidden_states):
# adapter extension
if self.add_adapter:
hidden_states = self.intermediate_adapter(hidden_states)
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class AdapterBertOutput(BertOutput):
def __init__(self, config, layer_index):
super().__init__(config)
self.add_adapter = layer_index in config.layers_to_adapt
if self.add_adapter:
self.output_adapter = BottleneckAdapterLayer(config)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
# adapter extension
if self.add_adapter:
hidden_states = self.output_adapter(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class AdapterBertLayer(BertLayer):
def __init__(self, config, layer_index):
super().__init__(config)
self.intermediate = AdapterBertIntermediate(config, layer_index)
self.output = AdapterBertOutput(config, layer_index)
class AdapterBertEncoder(BertEncoder):
def __init__(self, config):
super().__init__(config)
self.layer = nn.ModuleList([AdapterBertLayer(config, i) for i in range(config.num_hidden_layers)])
class AdapterBertModel(BertModel):
def __init__(self, config):
super().__init__(config)
self.encoder = AdapterBertEncoder(config)
self.freeze_original_params(config)
def freeze_original_params(self, config):
for param in self.parameters():
param.requires_grad = False
for i in range(config.num_hidden_layers):
if i in config.layers_to_adapt:
for param in self.encoder.layer[i].intermediate.intermediate_adapter.parameters():
param.requires_grad = True
for param in self.encoder.layer[i].output.output_adapter.parameters():
param.requires_grad = True
def unfreeze_original_params(self, config):
for param in self.parameters():
param.requires_grad = True
class AdapterBertForSequenceClassification(BertForSequenceClassification):
def __init__(self, config):
super().__init__(config)
self.bert = AdapterBertModel(config)
self.bert.unfreeze_original_params(config)
### Parallel Adapter
class ParallelAdapterBertModel(BertModel):
def __init__(self, config):
super().__init__(config)
# parallel, adapter-BERT
self.parabert = BertModel(config.parabert_config)
# freezing the pre-trained BERT
self.freeze_original_params()
def freeze_original_params(self):
for param in self.parameters():
param.requires_grad = False
for param in self.parabert.parameters():
param.requires_grad = True
def forward(self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
outputs_main = super().forward(input_ids, attention_mask, token_type_ids)
outputs_adapter = self.parabert(input_ids, attention_mask, token_type_ids)
outs_cls = []
outs_cls.append(outputs_main[1])
outs_cls.append(outputs_adapter[1])
concat_cls = torch.cat(outs_cls, dim = 1)
outs_tok = []
outs_tok.append(outputs_main[0])
outs_tok.append(outputs_adapter[0])
concat_tok = torch.cat(outs_tok, dim = 2)
outputs = (concat_tok, concat_cls)
return outputs
class ParallelAdapterBertForSequenceClassification(BertForSequenceClassification):
def __init__(self, config):
super().__init__(config)
self.bert = ParallelAdapterBertModel(config)
self.classifier = nn.Linear(config.hidden_size + config.parabert_config.hidden_size, self.config.num_labels)
### XLM-R
# ==== models/pixelpick/networks/deeplab.py (martafdezmAM/lessen_supervision, MIT) ====
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from .aspp import ASPP
from .decoders import SegmentHead
from .mobilenet_v2 import MobileNetV2
class DeepLab(nn.Module):
def __init__(self,
args,
backbone='mobilenet',
output_stride=16):
super(DeepLab, self).__init__()
self.backbone = MobileNetV2(output_stride, nn.BatchNorm2d, mc_dropout=args.use_mc_dropout)
self.aspp = ASPP(backbone, output_stride, nn.BatchNorm2d)
# low level features
low_level_inplanes = 24
self.low_level_conv = nn.Sequential(nn.Conv2d(low_level_inplanes, 48, 1, bias=False),
nn.BatchNorm2d(48),
nn.ReLU())
# segment
self.seg_head = SegmentHead(args)
self.return_features = False
self.return_attention = False
def turn_on_dropout(self):
for m in self.modules():
if isinstance(m, torch.nn.Dropout):
m.train()
def turn_off_dropout(self):
for m in self.modules():
if isinstance(m, torch.nn.Dropout):
m.eval()
def forward(self, inputs):
backbone_feat, low_level_feat = self.backbone(inputs) # 1/16, 1/4;
x = self.aspp(backbone_feat) # 1/16 -> aspp -> 1/16
# low + high features
low_level_feat_ = self.low_level_conv(low_level_feat) # 256->48
x = F.interpolate(x, size=low_level_feat_.size()[2:], mode='bilinear', align_corners=True) # 1/4
second_to_last_features = torch.cat((x, low_level_feat_), dim=1) # 304 = 256 + 48
# segment
dict_outputs = self.seg_head(second_to_last_features)
pred = F.interpolate(dict_outputs['pred'], size=inputs.size()[2:], mode='bilinear', align_corners=True)
dict_outputs['pred'] = pred
emb = F.interpolate(dict_outputs['emb'], size=inputs.size()[2:], mode='bilinear', align_corners=True)
dict_outputs['emb'] = emb
return dict_outputs
def set_return_features(self, return_features): # True or False
self.return_features = return_features
def set_return_attention(self, return_attention): # True or False
self.return_attention = return_attention
def get_1x_lr_params(self):
modules = [self.backbone]
for i in range(len(modules)):
for m in modules[i].named_modules():
if isinstance(m[1], (nn.Conv2d, nn.BatchNorm2d)):
for p in m[1].parameters():
if p.requires_grad:
yield p
def get_10x_lr_params(self):
modules = [self.aspp, self.low_level_conv, self.seg_head]
        # with_mask/mask_head are never set in __init__, so guard the
        # optional attribute to avoid an AttributeError here.
        if getattr(self, 'with_mask', False):
            modules.append(self.mask_head)
for i in range(len(modules)):
for m in modules[i].named_modules():
if isinstance(m[1], (nn.Conv2d, nn.BatchNorm2d)):
for p in m[1].parameters():
if p.requires_grad:
yield p
def load_pretrain(self, pretrained):
if os.path.isfile(pretrained):
pretrained_dict = torch.load(pretrained, map_location='cpu')['state_dict']
print('=> loading pretrained model {}'.format(pretrained))
model_dict = self.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items()
                               if k in model_dict.keys()}  # do not load the final head parameters
# for k, v in pretrained_dict.items():
# print('=> loading {} | {}'.format(k, v.size()))
model_dict.update(pretrained_dict)
self.load_state_dict(model_dict)
else:
print('No such file {}'.format(pretrained))
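# A minimal sketch of the split learning-rate setup these generators are
# meant for (the lr values are illustrative assumptions, not from this repo):
#
#   model = DeepLab(args)
#   optimizer = torch.optim.SGD(
#       [{'params': model.get_1x_lr_params(), 'lr': 0.007},
#        {'params': model.get_10x_lr_params(), 'lr': 0.07}],
#       momentum=0.9)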
# ==== readthedocs/worker.py (yarons/readthedocs.org, MIT) ====
"""Celery worker application instantiation."""
import os
from celery import Celery
from django.conf import settings
from django_structlog.celery.steps import DjangoStructLogInitStep
def create_application():
"""Create a Celery application using Django settings."""
os.environ.setdefault(
'DJANGO_SETTINGS_MODULE',
'readthedocs.settings.dev',
)
application = Celery(settings.CELERY_APP_NAME)
application.config_from_object('django.conf:settings')
application.autodiscover_tasks(None)
# A step to initialize django-structlog
application.steps['worker'].add(DjangoStructLogInitStep)
return application
def register_renamed_tasks(application, renamed_tasks):
"""
Register renamed tasks into Celery registry.
When a task is renamed (changing the function's name or moving it to a
different module) and there are old instances running in production, they
will trigger tasks using the old name. However, the new instances won't
have those tasks registered.
    This function re-registers the new tasks under the old names to work
    around this problem. New instances will then execute the code for the
    new task, even when it is called under the old name.
    This function *must be called after the renamed tasks were already
    registered/loaded by Celery under their new names*.
When using this function, think about the order the ASG will be deployed.
Deploying webs first will require some type of re-register and deploying
builds may require a different one.
A good way to test this locally is with a code similar to the following:
In [1]: # Register a task with the old name
In [2]: @app.task(name='readthedocs.projects.tasks.update_docs_task')
...: def mytask(*args, **kwargs):
...: return True
...:
In [3]: # Trigger the task
In [4]: mytask.apply_async([99], queue='build:default')
In [5]: # Check it's executed by the worker with the new code
:param application: Celery Application
    :param renamed_tasks: Mapping containing the old name of the task as its
        key and the new name as its value.
:type renamed_tasks: dict
:type application: celery.Celery
:returns: Celery Application
"""
for oldname, newname in renamed_tasks.items():
application.tasks[oldname] = application.tasks[newname]
return application
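# A minimal usage sketch (the task names are illustrative, not real
# readthedocs task paths):
#
#   application = create_application()
#   register_renamed_tasks(application, {
#       'old.module.task_name': 'new.module.task_name',
#   })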
app = create_application() # pylint: disable=invalid-name
# ==== src/lib/Server/Reports/settings.py (pcmxgti/bcfg2, mpich2) ====
import django
import sys
# Compatibility import
from Bcfg2.Bcfg2Py3k import ConfigParser
# Django settings for bcfg2 reports project.
c = ConfigParser.ConfigParser()
if len(c.read(['/etc/bcfg2.conf', '/etc/bcfg2-web.conf'])) == 0:
raise ImportError("Please check that bcfg2.conf or bcfg2-web.conf exists "
"and is readable by your web server.")
try:
DEBUG = c.getboolean('statistics', 'web_debug')
except (ConfigParser.Error, ValueError):
DEBUG = False
if DEBUG:
print("Warning: Setting web_debug to True causes extraordinary memory "
"leaks. Only use this setting if you know what you're doing.")
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Root', 'root'),
)
MANAGERS = ADMINS
try:
db_engine = c.get('statistics', 'database_engine')
except ConfigParser.NoSectionError:
e = sys.exc_info()[1]
raise ImportError("Failed to determine database engine: %s" % e)
db_name = ''
if c.has_option('statistics', 'database_name'):
db_name = c.get('statistics', 'database_name')
if db_engine == 'sqlite3' and db_name == '':
db_name = "%s/etc/brpt.sqlite" % c.get('server', 'repository')
DATABASES = {
'default': {
'ENGINE': "django.db.backends.%s" % db_engine,
'NAME': db_name
}
}
if db_engine != 'sqlite3':
DATABASES['default']['USER'] = c.get('statistics', 'database_user')
DATABASES['default']['PASSWORD'] = c.get('statistics', 'database_password')
DATABASES['default']['HOST'] = c.get('statistics', 'database_host')
try:
DATABASES['default']['PORT'] = c.get('statistics', 'database_port')
    except ConfigParser.NoOptionError:  # An empty string tells Django to use the default port.
DATABASES['default']['PORT'] = ''
if django.VERSION[0] == 1 and django.VERSION[1] < 2:
DATABASE_ENGINE = db_engine
DATABASE_NAME = DATABASES['default']['NAME']
if DATABASE_ENGINE != 'sqlite3':
DATABASE_USER = DATABASES['default']['USER']
DATABASE_PASSWORD = DATABASES['default']['PASSWORD']
DATABASE_HOST = DATABASES['default']['HOST']
DATABASE_PORT = DATABASES['default']['PORT']
# Local time zone for this installation. All choices can be found here:
# http://docs.djangoproject.com/en/dev/ref/settings/#time-zone
try:
TIME_ZONE = c.get('statistics', 'time_zone')
except ConfigParser.Error:
if django.VERSION[0] == 1 and django.VERSION[1] > 2:
TIME_ZONE = None
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = '/site_media'
if c.has_option('statistics', 'web_prefix'):
MEDIA_URL = c.get('statistics', 'web_prefix').rstrip('/') + MEDIA_URL
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'eb5+y%oy-qx*2+62vv=gtnnxg1yig_odu0se5$h0hh#pc*lmo7'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.doc.XViewMiddleware',
)
ROOT_URLCONF = 'Bcfg2.Server.Reports.urls'
# Authentication Settings
# Use NIS authentication backend defined in backends.py
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',
'Bcfg2.Server.Reports.backends.NISBackend')
# The NIS group authorized to login to BCFG2's reporting system
AUTHORIZED_GROUP = ''
#create login url area:
try:
import django.contrib.auth
except ImportError:
raise ImportError('Import of Django module failed. Is Django installed?')
django.contrib.auth.LOGIN_URL = '/login'
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates".
# Always use forward slashes, even on Windows.
'/usr/share/python-support/python-django/django/contrib/admin/templates/',
'Bcfg2.Server.Reports.reports'
)
if django.VERSION[0] == 1 and django.VERSION[1] < 2:
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.request'
)
else:
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.request'
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'Bcfg2.Server.Reports.reports'
)
# ==== legacy/lua_data/lua_data_converter.py (kshshkim/factorioCalcPy, MIT) ====
from slpp import slpp as lua
import json
class LuaConverter:
def parse(self, luafile):
with open(luafile, 'r') as to_convert:
to_convert = str(to_convert.read())
        to_convert = to_convert.replace('data:extend(\n{\n {', '').replace('})\n', '') # massage into a form that slpp can parse
        to_convert = to_convert.replace(' },\n\n', ' },\n') # some entries are irregularly double-spaced
item_info_list = to_convert.split('\n },\n {')
returndict = {}
        for each_item in item_info_list: # iterate over each item separately
each_item = ' {' + each_item + '\n },'
            each_item_dict = lua.decode(each_item) # use the slpp lua-data conversion library
            returndict[each_item_dict['name']] = each_item_dict # store the dict returned by slpp under the item's name
return returndict
def write(self, infile, outfile):
towrite = json.dumps(self.parse(infile), sort_keys=False, indent=4)
towrite = infile.replace('.lua', '') + '_info = ' + towrite + '\n'
towrite = towrite.replace('true', 'True').replace('false', 'False')
outfilefulld = '../data/' + outfile
with open(outfilefulld, 'w') as outf:
outf.write(towrite)
print(infile + ' converted to ' + outfilefulld)
'''
Usage:
lc = LuaConverter()
lc.write('fluid.lua', 'fluid_dict.py')
'''
# ==== odette/scripts/collect_iso_codes.py (mdelhoneux/oDETTE, Unlicense) ====
#!/usr/bin/env python
#==============================================================================
#author :Miryam de Lhoneux
#email :miryam.de_lhoneux@lingfil.uu.se
#date :2015/12/30
#version :1.0
#description :collect iso codes in UD directories
#usage :python scripts/collect_iso_codes.py
#Python version :2.7.6
#==============================================================================
import os
import sys
import pprint
#generate a dictionary of iso_codes from ud treebank directory
codes = {}
ud_dir = sys.argv[1]
for language in os.listdir(ud_dir):
ldir = ud_dir + "/" + language
for f in os.listdir(ldir):
        if len(f.split(".")) > 1 and f.split(".")[1] == "conllu":
iso_code = f.split("-")[0]
codes[language] = iso_code
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(codes)
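# Illustrative run (hypothetical directory layout): given a UD directory
# containing "UD_English/en-ud-train.conllu", the language key maps to the
# filename prefix before the first dash, so the script would print:
#     {'UD_English': 'en'}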
| 28.931034
| 79
| 0.54112
| 107
| 839
| 4.158879
| 0.560748
| 0.053933
| 0.067416
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025496
| 0.158522
| 839
| 28
| 80
| 29.964286
| 0.604816
| 0.533969
| 0
| 0
| 0
| 0
| 0.026316
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.230769
| 0
| 0.230769
| 0.230769
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
20f86d70eb09a90cb1a4b918de25a5f97e226d8c
| 5,696
|
py
|
Python
|
airtest/core/ios/mjpeg_cap.py
|
Cache-Cloud/Airtest
|
4f831977a32c2b120dee631631c1154407b34d32
|
[
"Apache-2.0"
] | null | null | null |
airtest/core/ios/mjpeg_cap.py
|
Cache-Cloud/Airtest
|
4f831977a32c2b120dee631631c1154407b34d32
|
[
"Apache-2.0"
] | null | null | null |
airtest/core/ios/mjpeg_cap.py
|
Cache-Cloud/Airtest
|
4f831977a32c2b120dee631631c1154407b34d32
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import numpy
import socket
import traceback
from airtest import aircv
from airtest.utils.snippet import reg_cleanup, on_method_ready, ready_method
from airtest.core.ios.constant import ROTATION_MODE, DEFAULT_MJPEG_PORT
from airtest.utils.logger import get_logger
from airtest.utils.safesocket import SafeSocket
LOGGING = get_logger(__name__)
class SocketBuffer(SafeSocket):
def __init__(self, sock: socket.socket):
super(SocketBuffer, self).__init__(sock)
def _drain(self):
_data = self.sock.recv(1024)
if _data is None or _data == b"":
raise IOError("socket closed")
self.buf += _data
return len(_data)
def read_until(self, delimeter: bytes) -> bytes:
""" return without delimeter """
while True:
index = self.buf.find(delimeter)
if index != -1:
_return = self.buf[:index]
self.buf = self.buf[index + len(delimeter):]
return _return
self._drain()
def read_bytes(self, length: int) -> bytes:
while length > len(self.buf):
self._drain()
_return, self.buf = self.buf[:length], self.buf[length:]
return _return
def write(self, data: bytes):
return self.sock.sendall(data)
class MJpegcap(object):
def __init__(self, instruct_helper=None, ip='localhost', port=None, ori_function=None):
self.instruct_helper = instruct_helper
self.port = int(port or DEFAULT_MJPEG_PORT)
self.ip = ip
        # If a port was specified, wda's port 9100 has already been mapped to that new port, so no local forwarding is needed
self.port_forwarding = True if self.port == DEFAULT_MJPEG_PORT and ip in ('localhost', '127.0.0.1') else False
self.ori_function = ori_function
self.sock = None
self.buf = None
self._is_running = False
@ready_method
def setup_stream_server(self):
if self.port_forwarding:
self.port, _ = self.instruct_helper.setup_proxy(9100)
self.init_sock()
reg_cleanup(self.teardown_stream)
def init_sock(self):
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((self.ip, self.port))
self.buf = SocketBuffer(self.sock)
self.buf.write(b"GET / HTTP/1.0\r\nHost: localhost\r\n\r\n")
self.buf.read_until(b'\r\n\r\n')
self._is_running = True
LOGGING.info("mjpegsock is ready")
except ConnectionResetError:
            # Disconnecting tidevice or unplugging the cable triggers this exception; just exit
LOGGING.error("mjpegsock connection error")
raise
@on_method_ready('setup_stream_server')
def get_frame_from_stream(self):
if self._is_running is False:
self.init_sock()
try:
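            # MJPEG-over-HTTP framing (descriptive note): each frame arrives as a
            # header block containing Content-Length, a blank line, and then exactly
            # that many JPEG bytes; the two loops below parse that structure.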
while True:
line = self.buf.read_until(b'\r\n')
if line.startswith(b"Content-Length"):
length = int(line.decode('utf-8').split(": ")[1])
break
while True:
if self.buf.read_until(b'\r\n') == b'':
break
imdata = self.buf.read_bytes(length)
return imdata
except IOError:
            # If frame fetching from mjpegsock is paused for a while it may disconnect;
            # close self.buf here and return a blank screen image for now, then
            # reconnect the next time a screen frame is requested.
LOGGING.debug("mjpegsock is closed")
self._is_running = False
self.buf.close()
return self.get_blank_screen()
def get_frame(self):
        # grab a single screenshot frame
return self.get_frame_from_stream()
def snapshot(self, ensure_orientation=True, *args, **kwargs):
"""
Take a screenshot and convert it into a cv2 image object
        Note: the frame returned by this method may not be the newest in the queue;
        unless frames are consumed continuously, it may show stale content. Use with care.
Args:
ensure_orientation: True or False whether to keep the orientation same as display
Returns: numpy.ndarray
"""
screen = self.get_frame_from_stream()
try:
screen = aircv.utils.string_2_img(screen)
except Exception:
# may be black/locked screen or other reason, print exc for debugging
traceback.print_exc()
return None
if ensure_orientation:
if self.ori_function:
display_info = self.ori_function()
orientation = next(key for key, value in ROTATION_MODE.items() if value == display_info["orientation"])
screen = aircv.rotate(screen, -orientation, clockwise=False)
return screen
def get_blank_screen(self):
"""
        Generate a blank (black) screen image, returned in place of the live screen when the connection is lost.
Returns:
"""
if self.ori_function:
display_info = self.ori_function()
width, height = display_info['width'], display_info['height']
if display_info["orientation"] in [90, 270]:
width, height = height, width
else:
width, height = 1080, 1920
img = numpy.zeros((width, height, 3)).astype('uint8')
img_string = aircv.utils.img_2_string(img)
return img_string
def teardown_stream(self):
if self.port_forwarding:
self.instruct_helper.remove_proxy(self.port)
if self.buf:
self.buf.close()
self.port = None
if __name__ == "__main__":
import wda
from airtest.core.ios.instruct_cmd import InstructHelper
addr = "http://localhost:8100"
driver = wda.Client(addr)
info = driver.info
instruct_helper = InstructHelper(info['uuid'])
mjpeg_server = MJpegcap(instruct_helper)
print(len(mjpeg_server.get_frame()))
| 33.309942
| 119
| 0.607619
| 673
| 5,696
| 4.945022
| 0.300149
| 0.039964
| 0.022536
| 0.01262
| 0.076022
| 0.059796
| 0.042969
| 0.025841
| 0.025841
| 0
| 0
| 0.011677
| 0.293364
| 5,696
| 171
| 120
| 33.309942
| 0.815155
| 0.102001
| 0
| 0.196721
| 0
| 0.008197
| 0.054135
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.106557
| false
| 0
| 0.081967
| 0.016393
| 0.286885
| 0.016393
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
20fa7eb3a7346661e1dcc5a7aa474c9102b7df4b
| 3,342
|
py
|
Python
|
happy.py
|
xiaoqcn/LearnLinuxViaPython
|
3c591471bbceefab44161aedb8ff67c2009b8ec0
|
[
"Apache-2.0"
] | null | null | null |
happy.py
|
xiaoqcn/LearnLinuxViaPython
|
3c591471bbceefab44161aedb8ff67c2009b8ec0
|
[
"Apache-2.0"
] | null | null | null |
happy.py
|
xiaoqcn/LearnLinuxViaPython
|
3c591471bbceefab44161aedb8ff67c2009b8ec0
|
[
"Apache-2.0"
] | null | null | null |
import time
import datetime
import os
import sys
import atexit
import signal
from multiprocessing import Pool
from threading import Thread
class HappyScrum:
def __init__(
self,
pid_path,
pool_size=4,
busy_wait=90,
idle_wait=300,
say_hi_wait=1800,
is_debug=False,
):
self.pid_path = pid_path
self.busy_wait = busy_wait
self.idle_wait = idle_wait
self.say_hi_wait = say_hi_wait
self.exception_wait = 300
self.pool_size = pool_size
self.is_debug = is_debug
if self.is_debug:
self.busy_wait = 5
self.idle_wait = 5
self.say_hi_wait = 8
self.round = 0
self.is_busy = True
self.born_utc = datetime.datetime.utcnow()
self.born = datetime.datetime.now()
self.daemon_t = Thread(target=self.sen, daemon=True)
self.dev = lambda x: x
self.po = lambda x: x
def sen(self):
while True:
time.sleep(self.say_hi_wait)
if self.round >= 10000:
print(
f"-DOG [{os.getpid()}]:", datetime.datetime.now(), file=sys.stderr
)
self.round = 0
def run_forever(self):
if os.path.exists(self.pid_path):
raise ValueError(f"pid_file已存在: {PID_FILE}")
with open(self.pid_path, mode="w", encoding="utf-8") as f:
f.write(str(os.getpid()))
print(
f"==================\nMAIN [{os.getpid()}]: 启动", file=sys.stderr, flush=True
)
self.daemon_t.start()
while True:
self.round += 1
try:
self.run_round()
except Exception as ex:
print(
f"MAIN [{os.getpid()}]: HS_ERR: {str(ex)}",
file=sys.stderr,
flush=True,
)
time.sleep(self.exception_wait)
def run_round(self):
if self.is_busy:
print(
f"MAIN [{os.getpid()}]: ROUND: {self.round} BUSY {datetime.datetime.now()}",
file=sys.stderr,
)
time.sleep(self.busy_wait)
else:
print(
f"MAIN [{os.getpid()}]: ROUND: {self.round} IDLE {datetime.datetime.now()}",
file=sys.stderr,
)
time.sleep(self.idle_wait)
_task_list = self.po()
if len(_task_list) == 0:
self.is_busy = False
return
self.do_work(_task_list)
def do_work(self, task_list):
_feature_list = []
_pool = Pool(self.pool_size)
for i in task_list:
_f = _pool.apply_async(self.dev, args=(i,))
_feature_list.append(_f)
_pool.close()
_pool.join()
for r in _feature_list:
print(f"MAIN[{os.getpid()}]: HS_DOD", r.get())
pass
def register_po(self, po_tpl):
self.po = po_tpl
def register_dev(self, dev_tpl):
self.dev = dev_tpl
@classmethod
def register_dispose(cls, func_dispose):
atexit.register(func_dispose)
signal.signal(signal.SIGTERM, func_dispose)
signal.signal(signal.SIGINT, func_dispose)
signal.signal(signal.SIGQUIT, func_dispose)
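# Hedged usage sketch (illustrative only; `po`/`dev` below are hypothetical):
#     def po():              # producer: returns the next batch of tasks
#         return [1, 2, 3]
#     def dev(task):         # worker: runs inside the Pool, so it must be picklable
#         return task * 2
#     hs = HappyScrum("/tmp/happy.pid", is_debug=True)
#     hs.register_po(po)
#     hs.register_dev(dev)
#     hs.run_forever()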
| 28.084034
| 92
| 0.529623
| 410
| 3,342
| 4.104878
| 0.282927
| 0.033274
| 0.026738
| 0.028521
| 0.212121
| 0.134284
| 0.091503
| 0.091503
| 0.053476
| 0
| 0
| 0.012099
| 0.356972
| 3,342
| 118
| 93
| 28.322034
| 0.771056
| 0
| 0
| 0.116505
| 0
| 0.019417
| 0.090963
| 0.022142
| 0
| 0
| 0
| 0
| 0
| 1
| 0.07767
| false
| 0.009709
| 0.07767
| 0
| 0.174757
| 0.058252
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
20fa9357a93d7d86c13beaf0a8a806393d553ed4
| 526
|
py
|
Python
|
functional_tests/test_gallery.py
|
atypicalrobot/igor_personal_site
|
8fd788bc43884792b786abeb34e9fec9e79492f1
|
[
"MIT"
] | null | null | null |
functional_tests/test_gallery.py
|
atypicalrobot/igor_personal_site
|
8fd788bc43884792b786abeb34e9fec9e79492f1
|
[
"MIT"
] | null | null | null |
functional_tests/test_gallery.py
|
atypicalrobot/igor_personal_site
|
8fd788bc43884792b786abeb34e9fec9e79492f1
|
[
"MIT"
] | null | null | null |
from .base import *
class GalleryPageTests(SeleniumTestCase):
def test_gallery_items(self):
browser = self.browser
browser.get('http://127.0.0.1:8000/gallery/')
assert "we don't have any Galleries" not in browser.page_source
def test_gallery_images(self):
browser = self.browser
browser.get('http://127.0.0.1:8000/gallery/')
link = browser.find_element_by_tag_name("center")
link.click()
assert "No images are tagged" not in browser.page_source
| 29.222222
| 71
| 0.659696
| 72
| 526
| 4.680556
| 0.569444
| 0.130564
| 0.083086
| 0.130564
| 0.445104
| 0.31454
| 0.31454
| 0.31454
| 0.31454
| 0.31454
| 0
| 0.049261
| 0.228137
| 526
| 18
| 72
| 29.222222
| 0.780788
| 0
| 0
| 0.333333
| 0
| 0
| 0.214421
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.166667
| false
| 0
| 0.083333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
20fb6d839493dfeb4698c4e202a1cd7ca0226dba
| 784
|
py
|
Python
|
plates.py
|
winksaville/cq-plates
|
fb175522fae991a8d88cdf26afad273a4b8b9098
|
[
"MIT"
] | null | null | null |
plates.py
|
winksaville/cq-plates
|
fb175522fae991a8d88cdf26afad273a4b8b9098
|
[
"MIT"
] | null | null | null |
plates.py
|
winksaville/cq-plates
|
fb175522fae991a8d88cdf26afad273a4b8b9098
|
[
"MIT"
] | null | null | null |
import cadquery as cq # type: ignore
nd = 0.4 # Nozzle Diameter
length = 50
width = 20
gap = 5
p1 = (
cq.Workplane("XY", origin=(-(width + gap), 0, 0))
.rect(width, length)
.extrude(nd/2)
)
#show_object(p1)
p2 = (
cq.Workplane("XY", origin=(0, 0, 0))
.rect(width, length)
.extrude(nd)
)
#show_object(p2)
p3 = (
cq.Workplane("XY", origin=(width + gap, 0, 0))
.rect(width, length)
.extrude(nd * 2)
)
#show_object(p3)
# Combine the objects so they all can be selected and exported to STL
#
# Note: you must use .val(), otherwise the following generates
# an "AttributeError: 'Workplane' object has no attribute 'wrapped'"
# all = cq.Compound.makeCompound([p1, p2, p3])
all = cq.Compound.makeCompound([p1.val(), p2.val(), p3.val()])
show_object(all)
| 21.189189
| 68
| 0.626276
| 118
| 784
| 4.127119
| 0.474576
| 0.016427
| 0.080082
| 0.117043
| 0.427105
| 0.316222
| 0.316222
| 0.262834
| 0.262834
| 0.262834
| 0
| 0.044944
| 0.205357
| 784
| 36
| 69
| 21.777778
| 0.736758
| 0.392857
| 0
| 0.136364
| 0
| 0
| 0.012903
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.045455
| 0
| 0.045455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
20fe1adaa92216baa26b834b33664cd9c78ae67b
| 2,430
|
py
|
Python
|
tests/tonalmodel_tests/test_chromatic_scale.py
|
dpazel/music_rep
|
2f9de9b98b13df98f1a0a2120b84714725ce527e
|
[
"MIT"
] | 1
|
2021-05-06T19:45:54.000Z
|
2021-05-06T19:45:54.000Z
|
tests/tonalmodel_tests/test_chromatic_scale.py
|
dpazel/music_rep
|
2f9de9b98b13df98f1a0a2120b84714725ce527e
|
[
"MIT"
] | null | null | null |
tests/tonalmodel_tests/test_chromatic_scale.py
|
dpazel/music_rep
|
2f9de9b98b13df98f1a0a2120b84714725ce527e
|
[
"MIT"
] | null | null | null |
import unittest
import logging
from tonalmodel.chromatic_scale import ChromaticScale
class TestChromaticScale(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_frequencies(self):
assert is_close(ChromaticScale.get_frequency((4, 9)), 440.0), \
"Error A:4 = {0} should be 440.0".format(ChromaticScale.get_frequency((4, 9)))
assert is_close(ChromaticScale.get_frequency((4, 0)), 261.625565301), \
"Error C:4 = {0} should be 261.625565301".format(ChromaticScale.get_frequency((4, 0)))
def test_parse_chromatic_location(self):
for i in range(0, 12):
s = str(4) + ':' + str(i)
location = ChromaticScale.parse_notation(s)
assert location[0] == 4 and location[1] == i
def test_location_to_index(self):
for i in range(1, 4):
for j in range(0, 12):
index = ChromaticScale.location_to_index((i, j))
assert index == 12 * i + j
def test_index_to_location(self):
for i in range(12, 47):
location = ChromaticScale.index_to_location(i)
logging.info(location)
assert location[0] == i // 12 and location[1] == i % 12
def test_scale(self):
scale = ChromaticScale.get_chromatic_scale(ChromaticScale.parse_notation("0:9"),
ChromaticScale.parse_notation("8:0"))
start = ChromaticScale.location_to_index((0, 9))
end = ChromaticScale.location_to_index((8, 0)) + 1
for i in range(start, end):
            logging.info('{0} {1} {2}'.format(i, ChromaticScale.index_to_location(i), scale[i - start]))
assert is_close(scale[ChromaticScale.location_to_index((4, 9)) - start], 440.0), \
"Error A:4 = {0} should be 440.0".format(scale[ChromaticScale.location_to_index((4, 9)) - start])
assert is_close(scale[ChromaticScale.location_to_index((4, 0)) - start], 261.625565301), \
"Error C:4 = {0} should be 261.625565301".format(scale[ChromaticScale.location_to_index((4, 0)) - start])
def is_close(value_a, value_b):
return abs(value_a - value_b) < 0.0001
def is_close_in_bounds(value_a, value_b, tolerance):
return abs(value_a - value_b) < tolerance
if __name__ == "__main__":
unittest.main()
| 38.571429
| 117
| 0.60535
| 317
| 2,430
| 4.44164
| 0.205047
| 0.011364
| 0.085227
| 0.144176
| 0.481534
| 0.365767
| 0.303267
| 0.246449
| 0.183239
| 0.183239
| 0
| 0.072799
| 0.270782
| 2,430
| 62
| 118
| 39.193548
| 0.721783
| 0
| 0
| 0.044444
| 0
| 0
| 0.068724
| 0
| 0
| 0
| 0
| 0
| 0.155556
| 1
| 0.2
| false
| 0.044444
| 0.066667
| 0.044444
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
20feae08b04eeba7945d6473eedc0730006c75f9
| 3,093
|
py
|
Python
|
beeseyes/pycode/sampling.py
|
sosi-org/scientific-code
|
395bae0f95fbccb936dc01145c797dc22a1c99a0
|
[
"Unlicense"
] | null | null | null |
beeseyes/pycode/sampling.py
|
sosi-org/scientific-code
|
395bae0f95fbccb936dc01145c797dc22a1c99a0
|
[
"Unlicense"
] | null | null | null |
beeseyes/pycode/sampling.py
|
sosi-org/scientific-code
|
395bae0f95fbccb936dc01145c797dc22a1c99a0
|
[
"Unlicense"
] | null | null | null |
import numpy as np
import math
import polygon_sampler
nan_rgb = np.zeros((3,)) + np.NaN
# sampler session: texture, W_,H_,W,H
'''
Used by `sample_colors_squarepixels()`
Samples a single point.
Using square pixels.
[0, ... ,W-1] (incl.)
By mapping [0,1) -> [0,W) (int)
(mapping u,v)
'''
def sample1(um,vm, texture, W_,H_,W,H):
if np.isnan(um) or np.isnan(vm):
rgb = nan_rgb
else:
# sample
py = math.floor(um * H_)
px = math.floor(vm * W_)
if px < 0 or py < 0 or px >= W or py >= H:
rgb = nan_rgb
else:
rgb = texture[py,px]
return rgb
'''
Simple sampler.
slow.
"Pixel at Centroid" sampler
One pixel is taken for each region
Uses `sample1`
if regions is None, a different order is used
'''
def sample_colors_squarepixels(uv, regions, texture):
# print('uv.shape', uv.shape)
if texture.shape[2] == 4:
texture = texture[:,:, 0:3]
#print('uv', uv)
#print('regions', regions)
#exit()
EPS = 0.00000001
    # (H,W): moved into the slow path.
(H,W) = texture.shape[0:2]
# print('W,H', W,H)
W_ = (W - EPS)
H_ = (H - EPS)
nf = len(regions)
uvm_for_debug = np.zeros((nf,2),dtype=float)
regions_rgb = np.zeros((nf,3),dtype=float)
for i in range(nf):
# temporary solution: sample at center only
#if np.isnan(uv[regions[i], 0]):
um = np.mean(uv[regions[i], 0])
vm = np.mean(uv[regions[i], 1])
uvm_for_debug[i, :] = [um, vm]
rgb = sample1(um,vm, texture, W_,H_,W,H)
regions_rgb[i] = rgb
return regions_rgb, uvm_for_debug
def sample_colors_squarepixels_pointwise(uv, texture):
'''
    Based on `sample_colors_squarepixels` but without regions.
A simple point-wise sampling.
uv:shape => (6496, 2)
'''
if texture.shape[2] == 4:
texture = texture[:,:, 0:3]
EPS = 0.00000001
(H,W) = texture.shape[0:2]
W_ = (W - EPS)
H_ = (H - EPS)
print('uv.shape', uv.shape)
nf = uv.shape[0]
uvm_for_debug = np.zeros((nf,2),dtype=float)
regions_rgb = np.zeros((nf,3),dtype=float)
for i in range(nf):
um = uv[i, 0]
vm = uv[i, 1]
uvm_for_debug[i, :] = [um, vm]
rgb = sample1(um,vm, texture, W_,H_,W,H)
regions_rgb[i] = rgb
assert np.allclose(uvm_for_debug, uv, equal_nan=True)
return regions_rgb, uvm_for_debug
'''
Choice of sampler method
Choose your hexagon sampler here
regions=None => pointwise, simply sample the uv points
regions=not None => forms regions from these points and samples those regions from the texture. (For now, it is the mean (centroid) point of each region/facet)
'''
def sample_colors(uv, regions, texture):
if regions is not None:
        # Acceptable speed. Samples a single point. Beware of aliasing. No Monte-Carlo, integration, or downsampling.
return sample_colors_squarepixels (uv, regions, texture)
else:
return sample_colors_squarepixels_pointwise(uv, texture)
# extremely slow. Unusable
#return polygon_sampler.sample_colors_polygons (uv, regions, texture)
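# Hedged usage sketch (illustrative; not part of the original module): exercises
# the point-wise path (regions=None) on a small synthetic texture. A NaN uv row
# maps to the `nan_rgb` placeholder colour.
if __name__ == '__main__':
    _tex = np.random.rand(4, 4, 3)                  # H=4, W=4 RGB texture
    _uv = np.array([[0.1, 0.2], [np.nan, 0.5]])     # second point is NaN
    _rgb, _uv_dbg = sample_colors(_uv, None, _tex)  # -> _rgb.shape == (2, 3)
    print(_rgb)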
| 25.991597
| 154
| 0.6172
| 476
| 3,093
| 3.890756
| 0.289916
| 0.010799
| 0.041577
| 0.010799
| 0.390929
| 0.333693
| 0.187905
| 0.187905
| 0.176026
| 0.141469
| 0
| 0.024464
| 0.246686
| 3,093
| 118
| 155
| 26.211864
| 0.770386
| 0.176528
| 0
| 0.553571
| 0
| 0
| 0.00418
| 0
| 0
| 0
| 0
| 0
| 0.017857
| 1
| 0.071429
| false
| 0
| 0.053571
| 0
| 0.214286
| 0.017857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f00bbb4cb26e6889fa5994c748463440e235c8e
| 654
|
py
|
Python
|
migrations/versions/d805931e1abd_add_topics.py
|
cyberinnovationhub/lunch-roulette
|
0b0b933188c095b6e3778ee7de9d4e21cd7caae5
|
[
"BSD-3-Clause"
] | 4
|
2020-12-03T19:24:20.000Z
|
2022-03-16T13:45:11.000Z
|
migrations/versions/d805931e1abd_add_topics.py
|
cyberinnovationhub/lunch-roulette
|
0b0b933188c095b6e3778ee7de9d4e21cd7caae5
|
[
"BSD-3-Clause"
] | 3
|
2020-08-24T08:05:11.000Z
|
2021-11-07T06:14:36.000Z
|
migrations/versions/d805931e1abd_add_topics.py
|
cyberinnovationhub/lunch-roulette
|
0b0b933188c095b6e3778ee7de9d4e21cd7caae5
|
[
"BSD-3-Clause"
] | 3
|
2020-08-27T13:58:53.000Z
|
2022-03-09T14:09:06.000Z
|
"""add topics
Revision ID: d805931e1abd
Revises: 9430b6bc8d1a
Create Date: 2018-09-18 15:11:45.922659
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd805931e1abd'
down_revision = '9430b6bc8d1a'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('topics', sa.String(length=140), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'topics')
# ### end Alembic commands ###
| 22.551724
| 84
| 0.689602
| 82
| 654
| 5.439024
| 0.597561
| 0.060538
| 0.09417
| 0.103139
| 0.197309
| 0.197309
| 0.197309
| 0.197309
| 0
| 0
| 0
| 0.094444
| 0.174312
| 654
| 28
| 85
| 23.357143
| 0.731481
| 0.446483
| 0
| 0
| 0
| 0
| 0.135385
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f0432871a66053bea5e2a19da56fe363bea9cb9
| 78,296
|
py
|
Python
|
allesfitter/basement.py
|
pierfra-ro/allesfitter
|
a6a885aaeb3253fec0d924ef3b45e8b7c473b181
|
[
"MIT"
] | null | null | null |
allesfitter/basement.py
|
pierfra-ro/allesfitter
|
a6a885aaeb3253fec0d924ef3b45e8b7c473b181
|
[
"MIT"
] | null | null | null |
allesfitter/basement.py
|
pierfra-ro/allesfitter
|
a6a885aaeb3253fec0d924ef3b45e8b7c473b181
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 5 00:17:06 2018
@author:
Dr. Maximilian N. Günther
European Space Agency (ESA)
European Space Research and Technology Centre (ESTEC)
Keplerlaan 1, 2201 AZ Noordwijk, The Netherlands
Email: maximilian.guenther@esa.int
GitHub: mnguenther
Twitter: m_n_guenther
Web: www.mnguenther.com
"""
from __future__ import print_function, division, absolute_import
#::: modules
import numpy as np
import os
import sys
import fnmatch
import collections
from datetime import datetime
from multiprocessing import cpu_count
import warnings
warnings.formatwarning = lambda msg, *args, **kwargs: f'\n! WARNING:\n {msg}\ntype: {args[0]}, file: {args[1]}, line: {args[2]}\n'
warnings.filterwarnings('ignore', category=np.VisibleDeprecationWarning)
warnings.filterwarnings('ignore', category=np.RankWarning)
from scipy.stats import truncnorm
#::: allesfitter modules
from .exoworlds_rdx.lightcurves.index_transits import index_transits, index_eclipses, get_first_epoch, get_tmid_observed_transits
from .priors.simulate_PDF import simulate_PDF
from .utils.mcmc_move_translator import translate_str_to_move
#::: plotting settings
import seaborn as sns
sns.set(context='paper', style='ticks', palette='deep', font='sans-serif', font_scale=1.5, color_codes=True)
sns.set_style({"xtick.direction": "in","ytick.direction": "in"})
sns.set_context(rc={'lines.markeredgewidth': 1})
###############################################################################
#::: 'Basement' class, which contains all the data, settings, etc.
###############################################################################
class Basement():
'''
The 'Basement' class contains all the data, settings, etc.
'''
###############################################################################
#::: init
###############################################################################
def __init__(self, datadir, quiet=False):
'''
Inputs:
-------
datadir : str
the working directory for allesfitter
must contain all the data files
output directories and files will also be created inside datadir
fast_fit : bool (optional; default is False)
if False:
use all photometric data for the plot
if True:
only use photometric data in an 8h window around the transit
requires a good initial guess of the epoch and period
Returns:
--------
All the variables needed for allesfitter
'''
print('Filling the Basement')
self.quiet = quiet
self.now = "{:%Y-%m-%d_%H-%M-%S}".format(datetime.now())
self.datadir = datadir
self.outdir = os.path.join(datadir,'results')
if not os.path.exists( self.outdir ): os.makedirs( self.outdir )
print('')
self.logprint('\nallesfitter version')
self.logprint('---------------------')
self.logprint('v1.2.8')
self.load_settings()
self.load_params()
self.load_data()
if self.settings['shift_epoch']:
try:
self.change_epoch()
except:
warnings.warn('\nCould not shift epoch (you can peacefully ignore this warning if no period was given)\n')
if self.settings['fit_ttvs']:
self.prepare_ttv_fit()
#::: external priors (e.g. stellar density)
self.external_priors = {}
self.load_stellar_priors()
#::: if baseline model == sample_GP, set up a GP object for photometric data
# self.setup_GPs()
#::: translate limb darkening codes from params.csv (int) into str for ellc
self.ldcode_to_ldstr = ["none",# : 0,
"lin",# : 1,
"quad",# : 2,
"sing",# : 3,
"claret",# : 4,
"log",# : 5,
"sqrt",# : 6,
"exp",# : 7,
"power-2",#: 8,
"mugrid"]# : -1
#::: check if the input is consistent
for inst in self.settings['inst_phot']:
key='flux'
if (self.settings['baseline_'+key+'_'+inst] in ['sample_GP_Matern32', 'sample_GP_SHO']) &\
(self.settings['error_'+key+'_'+inst] != 'sample'):
raise ValueError('If you want to use '+self.settings['baseline_'+key+'_'+inst]+', you will want to sample the jitters, too!')
###############################################################################
#::: print function that prints into console and logfile at the same time
###############################################################################
def logprint(self, *text):
if not self.quiet:
print(*text)
original = sys.stdout
with open( os.path.join(self.outdir,'logfile_'+self.now+'.log'), 'a' ) as f:
sys.stdout = f
print(*text)
sys.stdout = original
else:
pass
###############################################################################
#::: load settings
###############################################################################
def load_settings(self):
'''
For the full list of options see www.allesfitter.com
'''
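        # Illustrative settings.csv fragment (hypothetical values; only keys this
        # loader actually reads are shown, one "key,value" pair per row):
        #     companions_phot,b
        #     inst_phot,TESS
        #     multiprocess,True
        #     fast_fit,True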
def set_bool(text):
if text.lower() in ['true', '1']:
return True
else:
return False
def is_empty_or_none(key):
return (key not in self.settings) or (str(self.settings[key]).lower() == 'none') or (len(self.settings[key])==0)
def unique(array):
uniq, index = np.unique(array, return_index=True)
return uniq[index.argsort()]
rows = np.genfromtxt( os.path.join(self.datadir,'settings.csv'),dtype=None,encoding='utf-8',delimiter=',' )
#::: make backwards compatible
for i, row in enumerate(rows):
# print(row)
name = row[0]
if name[:7]=='planets':
rows[i][0] = 'companions'+name[7:]
warnings.warn('You are using outdated keywords. Automatically renaming '+name+' ---> '+rows[i][0]+'. Please fix this before the Duolingo owl comes to get you.') #, category=DeprecationWarning)
if name[:6]=='ld_law':
rows[i][0] = 'host_ld_law'+name[6:]
warnings.warn('You are using outdated keywords. Automatically renaming '+name+' ---> '+rows[i][0]+'. Please fix this before the Duolingo owl comes to get you.') #, category=DeprecationWarning)
# self.settings = {r[0]:r[1] for r in rows}
self.settings = collections.OrderedDict( [('user-given:','')]+[ (r[0],r[1] ) for r in rows ]+[('automatically set:','')] )
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Main settings
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
for key in ['companions_phot', 'companions_rv', 'inst_phot', 'inst_rv', 'inst_rv2']:
if key not in self.settings:
self.settings[key] = []
elif len(self.settings[key]):
self.settings[key] = str(self.settings[key]).split(' ')
else:
self.settings[key] = []
self.settings['companions_all'] = list(np.unique(self.settings['companions_phot']+self.settings['companions_rv'])) #sorted by b, c, d...
self.settings['inst_all'] = list(unique( self.settings['inst_phot']+self.settings['inst_rv']+self.settings['inst_rv2'] )) #sorted like user input
if len(self.settings['inst_phot'])==0 and len(self.settings['companions_phot'])>0:
raise ValueError('No photometric instrument is selected, but photometric companions are given.')
if len(self.settings['inst_rv'])==0 and len(self.settings['companions_rv'])>0:
raise ValueError('No RV instrument is selected, but RV companions are given.')
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: General settings
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if 'print_progress' in self.settings:
self.settings['print_progress'] = set_bool(self.settings['print_progress'] )
else:
self.settings['print_progress'] = True
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Epoch settings
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if 'shift_epoch' in self.settings:
self.settings['shift_epoch'] = set_bool(self.settings['shift_epoch'] )
else:
self.settings['shift_epoch'] = True
for companion in self.settings['companions_all']:
if 'inst_for_'+companion+'_epoch' not in self.settings:
self.settings['inst_for_'+companion+'_epoch'] = 'all'
if self.settings['inst_for_'+companion+'_epoch'] in ['all','none']:
self.settings['inst_for_'+companion+'_epoch'] = self.settings['inst_all']
else:
if len(self.settings['inst_for_'+companion+'_epoch']):
self.settings['inst_for_'+companion+'_epoch'] = str(self.settings['inst_for_'+companion+'_epoch']).split(' ')
else:
self.settings['inst_for_'+companion+'_epoch'] = []
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Multiprocess settings
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
self.settings['multiprocess'] = set_bool(self.settings['multiprocess'])
if 'multiprocess_cores' not in self.settings.keys():
self.settings['multiprocess_cores'] = cpu_count()-1
elif self.settings['multiprocess_cores'] == 'all':
self.settings['multiprocess_cores'] = cpu_count()-1
else:
self.settings['multiprocess_cores'] = int(self.settings['multiprocess_cores'])
if self.settings['multiprocess_cores'] == cpu_count():
string = 'You are pushing your luck: you want to run on '+str(self.settings['multiprocess_cores'])+' cores, but your computer has only '+str(cpu_count())+'. I will let you go through with it this time...'
warnings.warn(string)
if self.settings['multiprocess_cores'] > cpu_count():
string = 'Oops, you want to run on '+str(self.settings['multiprocess_cores'])+' cores, but your computer has only '+str(cpu_count())+'. Maybe try running on '+str(cpu_count()-1)+'?'
raise ValueError(string)
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Phase variations
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if ('phase_variations' in self.settings.keys()) and len(self.settings['phase_variations']):
warnings.warn('You are using outdated keywords. Automatically renaming "phase_variations" ---> "phase_curve".'+'. Please fix this before the Duolingo owl comes to get you.')
self.settings['phase_curve'] = self.settings['phase_variations']
if ('phase_curve' in self.settings.keys()) and len(self.settings['phase_curve']):
self.settings['phase_curve'] = set_bool(self.settings['phase_curve'])
if self.settings['phase_curve']==True:
# self.logprint('The user set phase_curve==True. Automatically set fast_fit=False and secondary_eclispe=True, and overwrite other settings.')
self.settings['fast_fit'] = 'False'
self.settings['secondary_eclipse'] = 'True'
else:
self.settings['phase_curve'] = False
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Fast fit
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if ('fast_fit' in self.settings.keys()) and len(self.settings['fast_fit']):
self.settings['fast_fit'] = set_bool(self.settings['fast_fit'])
else:
self.settings['fast_fit'] = False
if ('fast_fit_width' in self.settings.keys()) and len(self.settings['fast_fit_width']):
self.settings['fast_fit_width'] = np.float(self.settings['fast_fit_width'])
else:
self.settings['fast_fit_width'] = 8./24.
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Host stellar density prior
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if 'use_host_density_prior' in self.settings:
self.settings['use_host_density_prior'] = set_bool(self.settings['use_host_density_prior'] )
else:
self.settings['use_host_density_prior'] = True
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Host stellar density prior
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if 'use_tidal_eccentricity_prior' in self.settings:
self.settings['use_tidal_eccentricity_prior'] = set_bool(self.settings['use_tidal_eccentricity_prior'] )
else:
self.settings['use_tidal_eccentricity_prior'] = False
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: TTVs
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if ('fit_ttvs' in self.settings.keys()) and len(self.settings['fit_ttvs']):
self.settings['fit_ttvs'] = set_bool(self.settings['fit_ttvs'])
if (self.settings['fit_ttvs']==True) and (self.settings['fast_fit']==False):
raise ValueError('fit_ttvs==True, but fast_fit==False.'+\
'Currently, you can only fit for TTVs if fast_fit==True.'+\
'Please choose different settings.')
else:
self.settings['fit_ttvs'] = False
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Secondary eclipse
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if ('secondary_eclipse' in self.settings.keys()) and len(self.settings['secondary_eclipse']):
self.settings['secondary_eclipse'] = set_bool(self.settings['secondary_eclipse'])
else:
self.settings['secondary_eclipse'] = False
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: MCMC settings
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if 'mcmc_pre_run_loops' not in self.settings:
self.settings['mcmc_pre_run_loops'] = 0
if 'mcmc_pre_run_steps' not in self.settings:
self.settings['mcmc_pre_run_steps'] = 0
if 'mcmc_nwalkers' not in self.settings:
self.settings['mcmc_nwalkers'] = 100
if 'mcmc_total_steps' not in self.settings:
self.settings['mcmc_total_steps'] = 2000
if 'mcmc_burn_steps' not in self.settings:
self.settings['mcmc_burn_steps'] = 1000
if 'mcmc_thin_by' not in self.settings:
self.settings['mcmc_thin_by'] = 1
if 'mcmc_moves' not in self.settings:
self.settings['mcmc_moves'] = 'DEMove'
#::: make sure these are integers
for key in ['mcmc_nwalkers','mcmc_pre_run_loops','mcmc_pre_run_steps',
'mcmc_total_steps','mcmc_burn_steps','mcmc_thin_by']:
self.settings[key] = int(self.settings[key])
#::: luser proof
if self.settings['mcmc_total_steps'] <= self.settings['mcmc_burn_steps']:
raise ValueError('Your setting for mcmc_total_steps must be larger than mcmc_burn_steps (check your settings.csv).')
#::: translate the mcmc_move string into a list of emcee commands
self.settings['mcmc_moves'] = translate_str_to_move(self.settings['mcmc_moves'])
# N_evaluation_samples = int( 1. * self.settings['mcmc_nwalkers'] * (self.settings['mcmc_total_steps']-self.settings['mcmc_burn_steps']) / self.settings['mcmc_thin_by'] )
# self.logprint('\nAnticipating ' + str(N_evaluation_samples) + 'MCMC evaluation samples.\n')
# if N_evaluation_samples>200000:
# answer = input('It seems like you are asking for ' + str(N_evaluation_samples) + 'MCMC evaluation samples (calculated as mcmc_nwalkers * (mcmc_total_steps-mcmc_burn_steps) / mcmc_thin_by).'+\
# 'That is an aweful lot of samples.'+\
# 'What do you want to do?\n'+\
# '1 : continue at any sacrifice\n'+\
# '2 : abort and increase the mcmc_thin_by parameter in settings.csv (do not do this if you continued an old run!)\n')
# if answer==1:
# pass
# else:
# raise ValueError('User aborted the run.')
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Nested Sampling settings
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if 'ns_modus' not in self.settings:
self.settings['ns_modus'] = 'static'
if 'ns_nlive' not in self.settings:
self.settings['ns_nlive'] = 500
if 'ns_bound' not in self.settings:
self.settings['ns_bound'] = 'single'
if 'ns_sample' not in self.settings:
self.settings['ns_sample'] = 'rwalk'
if 'ns_tol' not in self.settings:
self.settings['ns_tol'] = 0.01
self.settings['ns_nlive'] = int(self.settings['ns_nlive'])
self.settings['ns_tol'] = float(self.settings['ns_tol'])
# if self.settings['ns_sample'] == 'auto':
# if self.ndim < 10:
# self.settings['ns_sample'] = 'unif'
# print('Using ns_sample=="unif".')
# elif 10 <= self.ndim <= 20:
# self.settings['ns_sample'] = 'rwalk'
# print('Using ns_sample=="rwalk".')
# else:
# self.settings['ns_sample'] = 'slice'
# print('Using ns_sample=="slice".')
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: host & companion grids, limb darkening laws, shapes, etc.
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
for companion in self.settings['companions_all']:
for inst in self.settings['inst_all']:
if 'host_grid_'+inst not in self.settings:
self.settings['host_grid_'+inst] = 'default'
if companion+'_grid_'+inst not in self.settings:
self.settings[companion+'_grid_'+inst] = 'default'
if is_empty_or_none('host_ld_law_'+inst):
self.settings['host_ld_law_'+inst] = None
if is_empty_or_none(companion+'_ld_law_'+inst):
self.settings[companion+'_ld_law_'+inst] = None
if is_empty_or_none('host_ld_space_'+inst):
self.settings['host_ld_space_'+inst] = 'q'
if is_empty_or_none(companion+'_ld_space_'+inst):
self.settings[companion+'_ld_space_'+inst] = 'q'
if 'host_shape_'+inst not in self.settings:
self.settings['host_shape_'+inst] = 'sphere'
if companion+'_shape_'+inst not in self.settings:
self.settings[companion+'_shape_'+inst] = 'sphere'
for companion in self.settings['companions_rv']:
for inst in list(self.settings['inst_rv']) + list(self.settings['inst_rv2']):
if companion+'_flux_weighted_'+inst in self.settings:
self.settings[companion+'_flux_weighted_'+inst] = set_bool(self.settings[companion+'_flux_weighted_'+inst])
else:
self.settings[companion+'_flux_weighted_'+inst] = False
if 'exact_grav' in self.settings:
self.settings['exact_grav'] = set_bool(self.settings['exact_grav'])
else:
self.settings['exact_grav'] = False
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Phase curve styles
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if is_empty_or_none('phase_curve_style'):
self.settings['phase_curve_style'] = None
if self.settings['phase_curve_style'] not in [None, 'sine_series', 'sine_physical', 'ellc_physical', 'GP']:
raise ValueError("The setting 'phase_curve_style' must be one of [None, 'sine_series', 'sine_physical', 'ellc_physical', 'GP'], but was '"+str(self.settings['phase_curve_style'])+"'.")
if (self.settings['phase_curve'] is True) and (self.settings['phase_curve_style'] is None):
raise ValueError("You chose 'phase_curve=True' but did not select a 'phase_curve_style'; please select one of ['sine_series', 'sine_physical', 'ellc_physical', 'GP'].")
if (self.settings['phase_curve'] is False) and (self.settings['phase_curve_style'] in ['sine_series', 'sine_physical', 'ellc_physical', 'GP']):
raise ValueError("You chose 'phase_curve=False' but also selected a 'phase_curve_style'; please double check and set 'phase_curve_style=None' (or remove it).")
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Stellar variability
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
for key in ['flux', 'rv', 'rv2']:
if ('stellar_var_'+key not in self.settings) or (self.settings['stellar_var_'+key] is None) or (self.settings['stellar_var_'+key].lower()=='none'):
self.settings['stellar_var_'+key] = 'none'
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Baselines
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
for inst in self.settings['inst_all']:
if inst in self.settings['inst_phot']: key='flux'
elif inst in self.settings['inst_rv']: key='rv'
elif inst in self.settings['inst_rv2']: key='rv2'
if 'baseline_'+key+'_'+inst not in self.settings:
self.settings['baseline_'+key+'_'+inst] = 'none'
elif self.settings['baseline_'+key+'_'+inst] == 'sample_GP':
warnings.warn('You are using outdated keywords. Automatically renaming sample_GP ---> sample_GP_Matern32.'+'. Please fix this before the Duolingo owl comes to get you.') #, category=DeprecationWarning)
self.settings['baseline_'+key+'_'+inst] = 'sample_GP_Matern32'
if 'baseline_'+key+'_'+inst+'_against' not in self.settings:
self.settings['baseline_'+key+'_'+inst+'_against'] = 'time'
if self.settings['baseline_'+key+'_'+inst+'_against'] not in ['time','custom_series']:
raise ValueError("The setting 'baseline_'+key+'_'+inst+'_against' must be one of ['time', custom_series'], but was '" + self.settings['baseline_'+key+'_'+inst+'_against'] + "'.")
# for inst in self.settings['inst_phot']:
# for key in ['flux']:
# if 'baseline_'+key+'_'+inst not in self.settings:
# self.settings['baseline_'+key+'_'+inst] = 'none'
# elif self.settings['baseline_'+key+'_'+inst] == 'sample_GP':
# warnings.warn('You are using outdated keywords. Automatically renaming sample_GP ---> sample_GP_Matern32.'+'. Please fix this before the Duolingo owl comes to get you.') #, category=DeprecationWarning)
# self.settings['baseline_'+key+'_'+inst] = 'sample_GP_Matern32'
# for inst in self.settings['inst_rv']:
# for key in ['rv']:
# if 'baseline_'+key+'_'+inst not in self.settings:
# self.settings['baseline_'+key+'_'+inst] = 'none'
# elif self.settings['baseline_'+key+'_'+inst] == 'sample_GP':
# warnings.warn('You are using outdated keywords. Automatically renaming sample_GP ---> sample_GP_Matern32.'+'. Please fix this before the Duolingo owl comes to get you.') #, category=DeprecationWarning)
# self.settings['baseline_'+key+'_'+inst] = 'sample_GP_Matern32'
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Errors
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
for inst in self.settings['inst_all']:
if inst in self.settings['inst_phot']: key='flux'
elif inst in self.settings['inst_rv']: key='rv'
elif inst in self.settings['inst_rv2']: key='rv2'
if 'error_'+key+'_'+inst not in self.settings:
self.settings['error_'+key+'_'+inst] = 'sample'
# for inst in self.settings['inst_phot']:
# for key in ['flux']:
# if 'error_'+key+'_'+inst not in self.settings:
# self.settings['error_'+key+'_'+inst] = 'sample'
# for inst in self.settings['inst_rv']:
# for key in ['rv']:
# if 'error_'+key+'_'+inst not in self.settings:
# self.settings['error_'+key+'_'+inst] = 'sample'
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Color plot
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if 'color_plot' not in self.settings.keys():
self.settings['color_plot'] = False
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Companion colors
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
for i, companion in enumerate( self.settings['companions_all'] ):
self.settings[companion+'_color'] = sns.color_palette()[i]
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Plot zoom window
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if 'zoom_window' not in self.settings:
            self.settings['zoom_window'] = 8./24.  # 8h window around the transit/eclipse midpoint by default
else:
self.settings['zoom_window'] = float(self.settings['zoom_window'])
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Exposure time interpolation
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
for inst in self.settings['inst_all']:
#::: if t_exp is given
if 't_exp_'+inst in self.settings.keys() and len(self.settings['t_exp_'+inst]):
t_exp = self.settings['t_exp_'+inst].split(' ')
#if float
if len(t_exp)==1:
self.settings['t_exp_'+inst] = np.float(t_exp[0])
#if array
else:
self.settings['t_exp_'+inst] = np.array([ np.float(t) for t in t_exp ])
#::: if not given / given as an empty field
else:
self.settings['t_exp_'+inst] = None
#::: if t_exp_n_int is given
if 't_exp_'+inst in self.settings \
and 't_exp_n_int_'+inst in self.settings \
and len(self.settings['t_exp_n_int_'+inst]):
self.settings['t_exp_n_int_'+inst] = int(self.settings['t_exp_n_int_'+inst])
if self.settings['t_exp_n_int_'+inst] < 1:
raise ValueError('"t_exp_n_int_'+inst+'" must be >= 1, but is given as '+str(self.settings['t_exp_n_int_'+inst])+' in params.csv')
else:
self.settings['t_exp_n_int_'+inst] = None
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Number of spots
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
for inst in self.settings['inst_all']:
if 'host_N_spots_'+inst in self.settings and len(self.settings['host_N_spots_'+inst]):
self.settings['host_N_spots_'+inst] = int(self.settings['host_N_spots_'+inst])
else:
self.settings['host_N_spots_'+inst] = 0
for companion in self.settings['companions_all']:
if companion+'_N_spots'+inst in self.settings:
self.settings[companion+'_N_spots_'+inst] = int(self.settings[companion+'_N_spots_'+inst])
else:
self.settings[companion+'_N_spots_'+inst] = 0
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Number of flares
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if 'N_flares' in self.settings and len(self.settings['N_flares'])>0:
self.settings['N_flares'] = int(self.settings['N_flares'])
else:
self.settings['N_flares'] = 0
###############################################################################
#::: load params
###############################################################################
def load_params(self):
'''
For the full list of options see www.allesfitter.com
'''
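        # Illustrative params.csv row (hypothetical): the columns read below are
        # name, value, label, unit, plus the optional truth and coupled_with
        # columns (the first row of params.csv is the header). For example, a
        # period row could look like:
        #     b_period,3.14,$P_b$,d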
#==========================================================================
#::: load params.csv
#==========================================================================
buf = np.genfromtxt(os.path.join(self.datadir,'params.csv'), delimiter=',',comments='#',dtype=None,encoding='utf-8',names=True)
#==========================================================================
        #::: function to assure backwards compatibility
#==========================================================================
def backwards_compability(key_new, key_deprecated):
if key_deprecated in np.atleast_1d(buf['name']):
warnings.warn('You are using outdated keywords. Automatically renaming '+key_deprecated+' ---> '+key_new+'. Please fix this before the Duolingo owl comes to get you.') #, category=DeprecationWarning)
ind = np.where(buf['name'] == key_deprecated)[0]
np.atleast_1d(buf['name'])[ind] = key_new
#==========================================================================
        #::: luser-proof: backwards compatibility
        # (has to happen first thing and right inside buf['name'])
#==========================================================================
for inst in self.settings['inst_all']:
backwards_compability(key_new='host_ldc_q1_'+inst, key_deprecated='ldc_q1_'+inst)
backwards_compability(key_new='host_ldc_q2_'+inst, key_deprecated='ldc_q2_'+inst)
backwards_compability(key_new='host_ldc_q3_'+inst, key_deprecated='ldc_q3_'+inst)
backwards_compability(key_new='host_ldc_q4_'+inst, key_deprecated='ldc_q4_'+inst)
backwards_compability(key_new='ln_err_flux_'+inst, key_deprecated='log_err_flux_'+inst)
backwards_compability(key_new='ln_jitter_rv_'+inst, key_deprecated='log_jitter_rv_'+inst)
backwards_compability(key_new='baseline_gp_matern32_lnsigma_flux_'+inst, key_deprecated='baseline_gp1_flux_'+inst)
backwards_compability(key_new='baseline_gp_matern32_lnrho_flux_'+inst, key_deprecated='baseline_gp2_flux_'+inst)
backwards_compability(key_new='baseline_gp_matern32_lnsigma_rv_'+inst, key_deprecated='baseline_gp1_rv_'+inst)
backwards_compability(key_new='baseline_gp_matern32_lnrho_rv_'+inst, key_deprecated='baseline_gp2_rv_'+inst)
#==========================================================================
#::: luser-proof: check for allowed keys to catch typos etc.
#==========================================================================
#TODO
#==========================================================================
#::: set up stuff
#==========================================================================
self.allkeys = np.atleast_1d(buf['name']) #len(all rows in params.csv)
self.labels = np.atleast_1d(buf['label']) #len(all rows in params.csv)
self.units = np.atleast_1d(buf['unit']) #len(all rows in params.csv)
if 'truth' in buf.dtype.names:
self.truths = np.atleast_1d(buf['truth']) #len(all rows in params.csv)
else:
self.truths = np.nan * np.ones(len(self.allkeys))
self.params = collections.OrderedDict() #len(all rows in params.csv)
self.params['user-given:'] = '' #just for pretty printing
for i,key in enumerate(self.allkeys):
#::: if it's not a "coupled parameter", then use the given value
if np.atleast_1d(buf['value'])[i] not in list(self.allkeys):
self.params[key] = np.float(np.atleast_1d(buf['value'])[i])
#::: if it's a "coupled parameter", then write the string of the key it is coupled to
else:
self.params[key] = np.atleast_1d(buf['value'])[i]
#==========================================================================
#::: function to automatically set default params if they were not given
#==========================================================================
def validate(key, default, default_min, default_max):
if (key in self.params) and (self.params[key] is not None):
if (self.params[key] < default_min) or (self.params[key] > default_max):
raise ValueError("User input for "+key+" is "+self.params+" but must lie within ["+str(default_min)+","+str(default_max)+"].")
if (key not in self.params):
self.params[key] = default
#==========================================================================
#::: luser-proof: make sure the limb darkening values are uniquely
#::: from either the u- or q-space
#==========================================================================
def check_ld(obj, inst):
if self.settings[obj+'_ld_space_'+inst] == 'q':
matches = fnmatch.filter(self.allkeys, obj+'_ldc_u*_'+inst)
if len(matches) > 0:
raise ValueError("The following user input is inconsistent:\n"+\
"Setting: '"+key+"' = 'q'\n"+\
"Parameters: {}".format(matches))
elif self.settings[obj+'_ld_space_'+inst] == 'u':
matches = fnmatch.filter(self.allkeys, obj+'_ldc_q*_'+inst)
if len(matches) > 0:
raise ValueError("The following user input is inconsistent:\n"+\
"Setting: '"+key+"' = 'u'\n"+\
"Parameters: {}".format(matches))
for inst in self.settings['inst_all']:
for obj in ['host'] + self.settings['companions_all']:
check_ld(obj, inst)
#==========================================================================
#::: validate that initial guess params have reasonable values
#==========================================================================
self.params['automatically set:'] = '' #just for pretty printing
for companion in self.settings['companions_all']:
for inst in self.settings['inst_all']:
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: ellc defaults
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: frequently used parameters
validate(companion+'_rr', None, 0., np.inf)
validate(companion+'_rsuma', None, 0., np.inf)
validate(companion+'_cosi', 0., 0., 1.)
validate(companion+'_epoch', 0., -np.inf, np.inf)
validate(companion+'_period', 0., 0., np.inf)
validate(companion+'_sbratio_'+inst, 0., 0., np.inf)
validate(companion+'_K', 0., 0., np.inf)
validate(companion+'_f_s', 0., -1, 1)
validate(companion+'_f_c', 0., -1, 1)
validate('dil_'+inst, 0., -np.inf, np.inf)
#::: limb darkenings, u-space
validate('host_ldc_u1_'+inst, None, 0, 1)
validate('host_ldc_u2_'+inst, None, 0, 1)
validate('host_ldc_u3_'+inst, None, 0, 1)
validate('host_ldc_u4_'+inst, None, 0, 1)
validate(companion+'_ldc_u1_'+inst, None, 0, 1)
validate(companion+'_ldc_u2_'+inst, None, 0, 1)
validate(companion+'_ldc_u3_'+inst, None, 0, 1)
validate(companion+'_ldc_u4_'+inst, None, 0, 1)
#::: limb darkenings, q-space
validate('host_ldc_q1_'+inst, None, 0, 1)
validate('host_ldc_q2_'+inst, None, 0, 1)
validate('host_ldc_q3_'+inst, None, 0, 1)
validate('host_ldc_q4_'+inst, None, 0, 1)
validate(companion+'_ldc_q1_'+inst, None, 0, 1)
validate(companion+'_ldc_q2_'+inst, None, 0, 1)
validate(companion+'_ldc_q3_'+inst, None, 0, 1)
validate(companion+'_ldc_q4_'+inst, None, 0, 1)
#::: catch exceptions
if self.params[companion+'_period'] is None:
self.settings['do_not_phase_fold'] = True
#::: advanced parameters
validate(companion+'_a', None, 0., np.inf)
validate(companion+'_q', 1., 0., np.inf)
validate('didt_'+inst, None, -np.inf, np.inf)
validate('domdt_'+inst, None, -np.inf, np.inf)
validate('host_gdc_'+inst, None, 0., 1.)
validate('host_rotfac_'+inst, 1., 0., np.inf)
validate('host_hf_'+inst, 1.5, -np.inf, np.inf)
validate('host_bfac_'+inst, None, -np.inf, np.inf)
validate('host_heat_'+inst, None, -np.inf, np.inf)
validate('host_lambda', None, -np.inf, np.inf)
validate('host_vsini', None, -np.inf, np.inf)
validate(companion+'_gdc_'+inst, None, 0., 1.)
validate(companion+'_rotfac_'+inst, 1., 0., np.inf)
validate(companion+'_hf_'+inst, 1.5, -np.inf, np.inf)
validate(companion+'_bfac_'+inst, None, -np.inf, np.inf)
validate(companion+'_heat_'+inst, None, -np.inf, np.inf)
validate(companion+'_lambda', None, -np.inf, np.inf)
validate(companion+'_vsini', None, -np.inf, np.inf)
#::: special parameters (list type)
if 'host_spots_'+inst not in self.params:
self.params['host_spots_'+inst] = None
if companion+'_spots_'+inst not in self.params:
self.params[companion+'_spots_'+inst] = None
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: errors and jitters
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#TODO: add validations for all errors / jitters
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
                #::: baselines (and backwards compatibility)
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#TODO: add validations for all baseline params
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: phase curve style: sine_series
# all in ppt
# A1 (beaming)
# B1 (atmospheric), can be split in thermal and reflected
# B2 (ellipsoidal)
# B3 (ellipsoidal 2nd order)
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# if (self.settings['phase_curve_style'] == 'sine_series') and (inst in self.settings['inst_phot']):
if (inst in self.settings['inst_phot']):
validate(companion+'_phase_curve_A1_'+inst, None, 0., np.inf)
validate(companion+'_phase_curve_B1_'+inst, None, -np.inf, 0.)
validate(companion+'_phase_curve_B1_shift_'+inst, 0., -np.inf, np.inf)
validate(companion+'_phase_curve_B1t_'+inst, None, -np.inf, 0.)
validate(companion+'_phase_curve_B1t_shift_'+inst, 0., -np.inf, np.inf)
validate(companion+'_phase_curve_B1r_'+inst, None, -np.inf, 0.)
validate(companion+'_phase_curve_B1r_shift_'+inst, 0., -np.inf, np.inf)
validate(companion+'_phase_curve_B2_'+inst, None, -np.inf, 0.)
validate(companion+'_phase_curve_B3_'+inst, None, -np.inf, 0.)
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: phase curve style: sine_physical
# A1 (beaming)
# B1 (atmospheric), can be split in thermal and reflected
# B2 (ellipsoidal)
# B3 (ellipsoidal 2nd order)
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# if (self.settings['phase_curve_style'] == 'sine_physical') and (inst in self.settings['inst_phot']):
if (inst in self.settings['inst_phot']):
validate(companion+'_phase_curve_beaming_'+inst, None, 0., np.inf)
validate(companion+'_phase_curve_atmospheric_'+inst, None, 0., np.inf)
validate(companion+'_phase_curve_atmospheric_shift_'+inst, 0., -np.inf, np.inf)
validate(companion+'_phase_curve_atmospheric_thermal_'+inst, None, 0., np.inf)
validate(companion+'_phase_curve_atmospheric_thermal_shift_'+inst, 0., -np.inf, np.inf)
validate(companion+'_phase_curve_atmospheric_reflected_'+inst, None, 0., np.inf)
validate(companion+'_phase_curve_atmospheric_reflected_shift_'+inst, 0., -np.inf, np.inf)
validate(companion+'_phase_curve_ellipsoidal_'+inst, None, 0., np.inf)
validate(companion+'_phase_curve_ellipsoidal_2nd_'+inst, None, 0., np.inf)
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: to avoid a bug/feature in ellc, if either property is >0, set the other to 1e-15 (not 0):
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if self.params[companion+'_heat_'+inst] is not None:
if (self.params[companion+'_sbratio_'+inst] == 0) and (self.params[companion+'_heat_'+inst] > 0):
self.params[companion+'_sbratio_'+inst] = 1e-15 #this is to avoid a bug/feature in ellc
if (self.params[companion+'_sbratio_'+inst] > 0) and (self.params[companion+'_heat_'+inst] == 0):
self.params[companion+'_heat_'+inst] = 1e-15 #this is to avoid a bug/feature in ellc
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: luser proof: avoid conflicting/degenerate phase curve commands
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if (inst in self.settings['inst_phot']) and (self.settings['phase_curve'] == True):
phase_curve_model_1 = (self.params[companion+'_phase_curve_B1_'+inst] is not None)
phase_curve_model_2 = ((self.params[companion+'_phase_curve_B1t_'+inst] is not None) or (self.params[companion+'_phase_curve_B1r_'+inst] is not None))
phase_curve_model_3 = (self.params[companion+'_phase_curve_atmospheric_'+inst] is not None)
phase_curve_model_4 = ((self.params[companion+'_phase_curve_atmospheric_thermal_'+inst] is not None) or (self.params[companion+'_phase_curve_atmospheric_reflected_'+inst] is not None))
phase_curve_model_5 = ((self.params['host_bfac_'+inst] is not None) or (self.params['host_heat_'+inst] is not None) or \
(self.params['host_gdc_'+inst] is not None) or (self.settings['host_shape_'+inst]!='sphere') or \
(self.params[companion+'_bfac_'+inst] is not None) or (self.params[companion+'_heat_'+inst] is not None) or \
(self.params[companion+'_gdc_'+inst] is not None) or (self.settings[companion+'_shape_'+inst]!='sphere'))
if (phase_curve_model_1 + phase_curve_model_2 + phase_curve_model_3 + phase_curve_model_4 + phase_curve_model_5) > 1:
raise ValueError('You can use either\n'\
+'1) the sine_series phase curve model with "*_phase_curve_B1_*",\n'\
+'2) the sine_series phase curve model with "*_phase_curve_B1t_*" and "*_phase_curve_B1r_*",\n'\
+'3) the sine_physical phase curve model with "*_phase_curve_atmospheric_*",\n'\
+'4) the sine_physical phase curve model with "*_phase_curve_atmospheric_thermal_*" and "*_phase_curve_atmospheric_reflected_*", or\n'\
+'5) the ellc_physical phase curve model with "*_bfac_*", "*_heat_*", "*_gdc_*" etc.,\n'\
+'but you shall not pass with a mix & match.')
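#::: illustrative note (added): the phase_curve_model_* flags are booleans, so
#::: their sum counts how many of the five mutually exclusive parameterizations
#::: were requested; e.g. sum([True, False, True, False, False]) == 2 raises above.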
#==========================================================================
#::: coupled params
#==========================================================================
if 'coupled_with' in buf.dtype.names:
self.coupled_with = buf['coupled_with']
else:
self.coupled_with = [None]*len(self.allkeys)
for i, key in enumerate(self.allkeys):
if isinstance(self.coupled_with[i], str) and (len(self.coupled_with[i])>0):
self.params[key] = self.params[self.coupled_with[i]] #luser proof: automatically set the values of the params coupled to another param
buf['fit'][i] = 0 #luser proof: automatically set fit=0 for the params coupled to another param
#==========================================================================
#::: mark to be fitted params
#==========================================================================
self.ind_fit = (buf['fit']==1) #len(all rows in params.csv)
self.fitkeys = buf['name'][ self.ind_fit ] #len(ndim)
self.fitlabels = self.labels[ self.ind_fit ] #len(ndim)
self.fitunits = self.units[ self.ind_fit ] #len(ndim)
self.fittruths = self.truths[ self.ind_fit ] #len(ndim)
self.theta_0 = buf['value'][ self.ind_fit ] #len(ndim)
if 'init_err' in buf.dtype.names:
self.init_err = buf['init_err'][ self.ind_fit ] #len(ndim)
else:
self.init_err = 1e-8
self.bounds = [ str(item).split(' ') for item in buf['bounds'][ self.ind_fit ] ] #len(ndim)
for i, item in enumerate(self.bounds):
if item[0] in ['uniform', 'normal']:
self.bounds[i] = [ item[0], float(item[1]), float(item[2]) ] #builtin float; np.float was removed in NumPy >= 1.24
elif item[0] in ['trunc_normal']:
self.bounds[i] = [ item[0], float(item[1]), float(item[2]), float(item[3]), float(item[4]) ]
else:
raise ValueError('Bounds have to be "uniform", "normal" or "trunc_normal". Input from "params.csv" was "'+self.bounds[i][0]+'".')
self.ndim = len(self.theta_0) #len(ndim)
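#::: illustrative example (added): a params.csv bounds cell like
#::: "trunc_normal 0 1 0.5 0.1" is parsed by the loop above into
#::: ['trunc_normal', 0.0, 1.0, 0.5, 0.1], i.e. [type, lower, upper, mean, std].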
#==========================================================================
#::: luser proof: check if all initial guesses lie within their bounds
#==========================================================================
#TODO: make this part of the validate() function
for th, b, key in zip(self.theta_0, self.bounds, self.fitkeys):
#:::: test bounds
if (b[0] == 'uniform') and not (b[1] <= th <= b[2]):
raise ValueError('The initial guess for '+key+' lies outside of its bounds.')
elif (b[0] == 'normal') and ( np.abs(th - b[1]) > 3*b[2] ):
answer = input('The initial guess for '+key+' lies more than 3 sigma from its prior\n'+\
'What do you want to do?\n'+\
'1 : continue at any sacrifice \n'+\
'2 : stop and let me fix the params.csv file \n')
if answer == '1': #input() returns a string; comparing against the int 1 was always False
pass
else:
raise ValueError('User aborted the run.')
elif (b[0] == 'trunc_normal') and not (b[1] <= th <= b[2]):
raise ValueError('The initial guess for '+key+' lies outside of its bounds.')
elif (b[0] == 'trunc_normal') and ( np.abs(th - b[3]) > 3*b[4] ):
answer = input('The initial guess for '+key+' lies more than 3 sigma from its prior\n'+\
'What do you want to do?\n'+\
'1 : continue at any sacrifice \n'+\
'2 : stop and let me fix the params.csv file \n')
if answer == '1': #input() returns a string; comparing against the int 1 was always False
pass
else:
raise ValueError('User aborted the run.')
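#::: illustrative example (added): for a 'normal' prior with mean b[1]=10. and
#::: std b[2]=0.5, any initial guess outside [8.5, 11.5] (3 sigma) triggers the
#::: interactive prompt above.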
###############################################################################
#::: load data
###############################################################################
def load_data(self):
'''
Example:
-------
A light curve is stored as
data['TESS']['time'], data['TESS']['flux']
An RV curve is stored as
data['HARPS']['time'], data['HARPS']['rv']
'''
self.fulldata = {}
self.data = {}
#======================================================================
#::: photometry
#======================================================================
for inst in self.settings['inst_phot']:
try:
time, flux, flux_err, custom_series = np.genfromtxt(os.path.join(self.datadir,inst+'.csv'), delimiter=',', dtype=float, unpack=True)[0:4]
except:
time, flux, flux_err = np.genfromtxt(os.path.join(self.datadir,inst+'.csv'), delimiter=',', dtype=float, unpack=True)[0:3]
custom_series = np.zeros_like(time)
if any(np.isnan(time*flux*flux_err*custom_series)):
raise ValueError('There are NaN values in "'+inst+'.csv". Please make sure everything is fine with your data, then exclude these rows from the file and restart.')
if any(flux_err==0):
raise ValueError('There are uncertainties with values of 0 in "'+inst+'.csv". Please make sure everything is fine with your data, then exclude these rows from the file and restart.')
if any(flux_err<0):
raise ValueError('There are uncertainties with negative values in "'+inst+'.csv". Please make sure everything is fine with your data, then exclude these rows from the file and restart.')
if not all(np.diff(time)>=0):
raise ValueError('The time array in "'+inst+'.csv" is not sorted. Please make sure the file is not corrupted, then sort it by time and restart.')
elif not all(np.diff(time)>0):
warnings.warn('There are repeated time stamps in the time array in "'+inst+'.csv". Please make sure the file is not corrupted (e.g., insufficient precision in your time stamps).')
# overwrite = str(input('There are repeated time stamps in the time array in "'+inst+'.csv". Please make sure the file is not corrupted (e.g., insufficient precision in your time stamps).'+\
# 'What do you want to do?\n'+\
# '1 : continue and hope for the best; no risk, no fun; #yolo\n'+\
# '2 : abort\n'))
# if (overwrite == '1'):
# pass
# else:
# raise ValueError('User aborted operation.')
self.fulldata[inst] = {
'time':time,
'flux':flux,
'err_scales_flux':flux_err/np.nanmean(flux_err),
'custom_series':custom_series
}
if (self.settings['fast_fit']) and (len(self.settings['inst_phot'])>0):
time, flux, flux_err, custom_series = self.reduce_phot_data(time, flux, flux_err, custom_series=custom_series, inst=inst)
self.data[inst] = {
'time':time,
'flux':flux,
'err_scales_flux':flux_err/np.nanmean(flux_err),
'custom_series':custom_series
}
#======================================================================
#::: RV
#======================================================================
for inst in self.settings['inst_rv']:
try:
time, rv, rv_err, custom_series = np.genfromtxt( os.path.join(self.datadir,inst+'.csv'), delimiter=',', dtype=float, unpack=True)[0:4]
except:
time, rv, rv_err = np.genfromtxt( os.path.join(self.datadir,inst+'.csv'), delimiter=',', dtype=float, unpack=True)[0:3]
custom_series = np.zeros_like(time)
if any(np.isnan(time*rv*rv_err*custom_series)):
raise ValueError('There are NaN values in "'+inst+'.csv". Please make sure everything is fine with your data, then exclude these rows from the file and restart.')
#::: note: rv_err=0 is OK here, since we add a jitter term anyway (instead of scaling the errors)
# if any(rv_err==0):
# raise ValueError('There are uncertainties with values of 0 in "'+inst+'.csv". Please make sure everything is fine with your data, then exclude these rows from the file and restart.')
if any(rv_err<0):
raise ValueError('There are uncertainties with negative values in "'+inst+'.csv". Please make sure everything is fine with your data, then exclude these rows from the file and restart.')
if not all(np.diff(time)>0):
raise ValueError('The time array in "'+inst+'.csv" is not sorted. Please sort it by time and restart.')
self.data[inst] = {
'time':time,
'rv':rv,
'white_noise_rv':rv_err,
'custom_series':custom_series
}
#======================================================================
#::: RV2 (for detached binaries)
#======================================================================
for inst in self.settings['inst_rv2']:
try:
time, rv, rv_err, custom_series = np.genfromtxt( os.path.join(self.datadir,inst+'.csv'), delimiter=',', dtype=float, unpack=True)[0:4]
except:
time, rv, rv_err = np.genfromtxt( os.path.join(self.datadir,inst+'.csv'), delimiter=',', dtype=float, unpack=True)[0:3]
custom_series = np.zeros_like(time)
if not all(np.diff(time)>0):
raise ValueError('The time array in "'+inst+'.csv" is not sorted. Please sort it by time and restart.')
self.data[inst] = {
'time':time,
'rv2':rv,
'white_noise_rv2':rv_err,
'custom_series':custom_series
}
#======================================================================
#::: also save the combined time series
#::: for cases where all instruments are treated together
#::: e.g. for stellar variability GPs
#======================================================================
self.data['inst_phot'] = {'time':[],'flux':[],'flux_err':[],'inst':[]}
for inst in self.settings['inst_phot']:
self.data['inst_phot']['time'] += list(self.data[inst]['time'])
self.data['inst_phot']['flux'] += list(self.data[inst]['flux'])
self.data['inst_phot']['flux_err'] += [inst]*len(self.data[inst]['time']) #placeholder (instrument name); the actual errors will be sampled/derived later
self.data['inst_phot']['inst'] += [inst]*len(self.data[inst]['time'])
ind_sort = np.argsort(self.data['inst_phot']['time'])
self.data['inst_phot']['ind_sort'] = ind_sort
self.data['inst_phot']['time'] = np.array(self.data['inst_phot']['time'])[ind_sort]
self.data['inst_phot']['flux'] = np.array(self.data['inst_phot']['flux'])[ind_sort]
self.data['inst_phot']['flux_err'] = np.array(self.data['inst_phot']['flux_err'])[ind_sort]
self.data['inst_phot']['inst'] = np.array(self.data['inst_phot']['inst'])[ind_sort]
self.data['inst_rv'] = {'time':[],'rv':[],'rv_err':[],'inst':[]}
for inst in self.settings['inst_rv']:
self.data['inst_rv']['time'] += list(self.data[inst]['time'])
self.data['inst_rv']['rv'] += list(self.data[inst]['rv'])
self.data['inst_rv']['rv_err'] += list(np.nan*self.data[inst]['rv']) #errors will be sampled/derived later
self.data['inst_rv']['inst'] += [inst]*len(self.data[inst]['time'])
ind_sort = np.argsort(self.data['inst_rv']['time'])
self.data['inst_rv']['ind_sort'] = ind_sort
self.data['inst_rv']['time'] = np.array(self.data['inst_rv']['time'])[ind_sort]
self.data['inst_rv']['rv'] = np.array(self.data['inst_rv']['rv'])[ind_sort]
self.data['inst_rv']['rv_err'] = np.array(self.data['inst_rv']['rv_err'])[ind_sort]
self.data['inst_rv']['inst'] = np.array(self.data['inst_rv']['inst'])[ind_sort]
self.data['inst_rv2'] = {'time':[],'rv2':[],'rv2_err':[],'inst':[]}
for inst in self.settings['inst_rv2']:
self.data['inst_rv2']['time'] += list(self.data[inst]['time'])
self.data['inst_rv2']['rv2'] += list(self.data[inst]['rv2'])
self.data['inst_rv2']['rv2_err'] += list(np.nan*self.data[inst]['rv2']) #errors will be sampled/derived later
self.data['inst_rv2']['inst'] += [inst]*len(self.data[inst]['time'])
ind_sort = np.argsort(self.data['inst_rv2']['time'])
self.data['inst_rv2']['ind_sort'] = ind_sort
self.data['inst_rv2']['time'] = np.array(self.data['inst_rv2']['time'])[ind_sort]
self.data['inst_rv2']['rv2'] = np.array(self.data['inst_rv2']['rv2'])[ind_sort]
self.data['inst_rv2']['rv2_err'] = np.array(self.data['inst_rv2']['rv2_err'])[ind_sort]
self.data['inst_rv2']['inst'] = np.array(self.data['inst_rv2']['inst'])[ind_sort]
###############################################################################
#::: change epoch
###############################################################################
def my_truncnorm_isf(q,a,b,mean,std):
a_scipy = 1.*(a - mean) / std
b_scipy = 1.*(b - mean) / std
return truncnorm.isf(q,a_scipy,b_scipy,loc=mean,scale=std)
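# Illustrative note (added): scipy.stats.truncnorm expects its clip points in
# standardized units (x - mean) / std, which is exactly the rescaling above.
# E.g. my_truncnorm_isf(0.5, 0., 1., 0.5, 0.2) returns 0.5, the median of a
# N(0.5, 0.2) normal truncated symmetrically to [0, 1].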
def change_epoch(self):
'''
Shift the epoch entry from params.csv so that it lies in the middle of the observed time range.
'''
self.logprint('\nShifting epochs into the data center')
self.logprint('------------------------------------')
#::: for all companions
for companion in self.settings['companions_all']:
self.logprint('Companion',companion)
self.logprint('\tinput epoch:',self.params[companion+'_epoch'])
#::: get data time range
alldata = []
for inst in self.settings['inst_for_'+companion+'_epoch']:
alldata += list(self.data[inst]['time'])
start = np.nanmin( alldata )
end = np.nanmax( alldata )
#::: get the given values
user_epoch = 1.*self.params[companion+'_epoch']
period = 1.*self.params[companion+'_period']
# buf = self.bounds[ind_e].copy()
#::: calculate the true first_epoch
if 'fast_fit_width' in self.settings and self.settings['fast_fit_width'] is not None:
width = self.settings['fast_fit_width']
else:
width = 0
first_epoch = get_first_epoch(alldata, self.params[companion+'_epoch'], self.params[companion+'_period'], width=width)
#::: calculate the mid_epoch (in the middle of the data set)
N = int(np.round((end-start)/2./period))
self.settings['mid_epoch'] = first_epoch + N * period
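#::: worked example (added, synthetic numbers): for start=0, end=100, period=10
#::: and first_epoch=2, N = round(50/10) = 5 and mid_epoch = 2 + 5*10 = 52,
#::: i.e. the transit epoch closest to the center of the data set.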
#::: calculate how much the user_epoch has to be shifted to get the mid_epoch
N_shift = int(np.round((self.settings['mid_epoch']-user_epoch)/period))
#::: set the new initial guess (and truth)
self.params[companion+'_epoch'] = 1.*self.settings['mid_epoch']
#::: also shift the truth (implies that the truth epoch is set where the initial guess is)
try:
ind_e = np.where(self.fitkeys==companion+'_epoch')[0][0]
ind_p = np.where(self.fitkeys==companion+'_period')[0][0]
N_truth_shift = int(np.round((self.settings['mid_epoch']-self.fittruths[ind_e])/self.fittruths[ind_p]))
self.fittruths[ind_e] += N_truth_shift * self.fittruths[ind_p]
except:
pass
#::: if a fit param, also update the bounds accordingly
if (N_shift != 0) and (companion+'_epoch' in self.fitkeys):
ind_e = np.where(self.fitkeys==companion+'_epoch')[0][0]
ind_p = np.where(self.fitkeys==companion+'_period')[0][0]
# print('\n')
# print('############################################################################')
# print('user_epoch', user_epoch, self.bounds[ind_e])
# print('user_period', period, self.bounds[ind_p])
# print('----------------------------------------------------------------------------')
#::: set the new initial guess
self.theta_0[ind_e] = 1.*self.settings['mid_epoch']
#::: get the bounds / errors
#::: if the epoch and period priors are both uniform
if (self.bounds[ind_e][0] == 'uniform') & (self.bounds[ind_p][0] == 'uniform'):
if N_shift > 0:
self.bounds[ind_e][1] = self.bounds[ind_e][1] + N_shift * self.bounds[ind_p][1] #lower bound
self.bounds[ind_e][2] = self.bounds[ind_e][2] + N_shift * self.bounds[ind_p][2] #upper bound
elif N_shift < 0:
self.bounds[ind_e][1] = self.bounds[ind_e][1] + N_shift * self.bounds[ind_p][2] #lower bound; period bounds switched if N_shift is negative
self.bounds[ind_e][2] = self.bounds[ind_e][2] + N_shift * self.bounds[ind_p][1] #upper bound; period bounds switched if N_shift is negative
#::: if the epoch and period priors are both normal
elif (self.bounds[ind_e][0] == 'normal') & (self.bounds[ind_p][0] == 'normal'):
self.bounds[ind_e][1] = self.bounds[ind_e][1] + N_shift * self.bounds[ind_p][1] #mean (in case the prior-mean is not the initial-guess-mean)
self.bounds[ind_e][2] = np.sqrt( self.bounds[ind_e][2]**2 + N_shift**2 * self.bounds[ind_p][2]**2 ) #std (in case the prior-mean is not the initial-guess-mean)
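#::: derivation note (added): the shifted epoch is epoch + N_shift * period, so
#::: for independent normal priors the variances add:
#::: Var(epoch') = Var(epoch) + N_shift**2 * Var(period), hence the sqrt above.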
#::: if the epoch and period priors are both trunc_normal
elif (self.bounds[ind_e][0] == 'trunc_normal') & (self.bounds[ind_p][0] == 'trunc_normal'):
if N_shift > 0:
self.bounds[ind_e][1] = self.bounds[ind_e][1] + N_shift * self.bounds[ind_p][1] #lower bound
self.bounds[ind_e][2] = self.bounds[ind_e][2] + N_shift * self.bounds[ind_p][2] #upper bound
elif N_shift < 0:
self.bounds[ind_e][1] = self.bounds[ind_e][1] + N_shift * self.bounds[ind_p][2] #lower bound; period bounds switched if N_shift is negative
self.bounds[ind_e][2] = self.bounds[ind_e][2] + N_shift * self.bounds[ind_p][1] #upper bound; period bounds switched if N_shift is negative
self.bounds[ind_e][3] = self.bounds[ind_e][3] + N_shift * self.bounds[ind_p][3] #mean (in case the prior-mean is not the initial-guess-mean)
self.bounds[ind_e][4] = np.sqrt( self.bounds[ind_e][4]**2 + N_shift**2 * self.bounds[ind_p][4]**2 ) #std (in case the prior-mean is not the initial-guess-mean)
#::: if the epoch prior is uniform and period prior is normal
elif (self.bounds[ind_e][0] == 'uniform') & (self.bounds[ind_p][0] == 'normal'):
self.bounds[ind_e][1] = self.bounds[ind_e][1] + N_shift * (period + self.bounds[ind_p][2]) #lower bound epoch + Nshift * period + Nshift * std_period
self.bounds[ind_e][2] = self.bounds[ind_e][2] + N_shift * (period + self.bounds[ind_p][2]) #upper bound epoch + Nshift * period + Nshift * std_period
#::: if the epoch prior is uniform and period prior is trunc_normal
elif (self.bounds[ind_e][0] == 'uniform') & (self.bounds[ind_p][0] == 'trunc_normal'):
self.bounds[ind_e][1] = self.bounds[ind_e][1] + N_shift * (period + self.bounds[ind_p][4]) #lower bound epoch + Nshift * period + Nshift * std_period
self.bounds[ind_e][2] = self.bounds[ind_e][2] + N_shift * (period + self.bounds[ind_p][4]) #upper bound epoch + Nshift * period + Nshift * std_period
elif (self.bounds[ind_e][0] == 'normal') & (self.bounds[ind_p][0] == 'uniform'):
raise ValueError('shift_epoch with different priors for epoch and period is not yet implemented.')
elif (self.bounds[ind_e][0] == 'normal') & (self.bounds[ind_p][0] == 'trunc_normal'):
raise ValueError('shift_epoch with different priors for epoch and period is not yet implemented.')
elif (self.bounds[ind_e][0] == 'trunc_normal') & (self.bounds[ind_p][0] == 'uniform'):
raise ValueError('shift_epoch with different priors for epoch and period is not yet implemented.')
elif (self.bounds[ind_e][0] == 'trunc_normal') & (self.bounds[ind_p][0] == 'normal'):
raise ValueError('shift_epoch with different priors for epoch and period is not yet implemented.')
else:
raise ValueError('Parameters "bounds" have to be "uniform", "normal" or "trunc_normal".')
self.logprint('\tshifted epoch:',self.params[companion+'_epoch'])
self.logprint('\tshifted by',N_shift,'periods')
###############################################################################
#::: reduce_phot_data
###############################################################################
def reduce_phot_data(self, time, flux, flux_err, custom_series=None, inst=None):
ind_in = []
for companion in self.settings['companions_phot']:
epoch = self.params[companion+'_epoch']
period = self.params[companion+'_period']
width = self.settings['fast_fit_width']
if self.settings['secondary_eclipse']:
ind_ecl1x, ind_ecl2x, ind_outx = index_eclipses(time,epoch,period,width,width) #TODO: currently this assumes width_occ == width_tra
ind_in += list(ind_ecl1x)
ind_in += list(ind_ecl2x)
self.fulldata[inst][companion+'_ind_ecl1'] = ind_ecl1x
self.fulldata[inst][companion+'_ind_ecl2'] = ind_ecl2x
self.fulldata[inst][companion+'_ind_out'] = ind_outx
else:
ind_inx, ind_outx = index_transits(time,epoch,period,width)
ind_in += list(ind_inx)
self.fulldata[inst][companion+'_ind_in'] = ind_inx
self.fulldata[inst][companion+'_ind_out'] = ind_outx
ind_in = np.sort(np.unique(ind_in))
self.fulldata[inst]['all_ind_in'] = ind_in
self.fulldata[inst]['all_ind_out'] = np.delete( np.arange(len(self.fulldata[inst]['time'])), ind_in )
if len(ind_in)==0:
raise ValueError(inst+'.csv does not contain any in-transit data. Check that your epoch and period guesses are correct.')
time = time[ind_in]
flux = flux[ind_in]
flux_err = flux_err[ind_in]
if custom_series is None:
return time, flux, flux_err
else:
custom_series = custom_series[ind_in]
return time, flux, flux_err, custom_series
###############################################################################
#::: prepare TTV fit (if chosen)
###############################################################################
def prepare_ttv_fit(self):
'''
this must be run *after* reduce_phot_data()
'''
for companion in self.settings['companions_phot']:
all_times = []
all_flux = []
for inst in self.settings['inst_phot']:
all_times += list(self.data[inst]['time'])
all_flux += list(self.data[inst]['flux'])
self.data[companion+'_tmid_observed_transits'] = get_tmid_observed_transits(all_times,self.params[companion+'_epoch'],self.params[companion+'_period'],self.settings['fast_fit_width'])
#::: plots
# if self.settings['fit_ttvs']:
# flux_min = np.nanmin(all_flux)
# flux_max = np.nanmax(all_flux)
# N_days = int( np.max(all_times) - np.min(all_times) )
# figsizex = np.min( [1, int(N_days/20.)] )*5
# fig, ax = plt.subplots(figsize=(figsizex, 4)) #figsize * 5 for every 20 days
# for inst in self.settings['inst_phot']:
# ax.plot(self.data[inst]['time'], self.data[inst]['flux'],ls='none',marker='.',label=inst)
# ax.plot( self.data[companion+'_tmid_observed_transits'], np.ones_like(self.data[companion+'_tmid_observed_transits'])*0.995*flux_min, 'k^' )
# for i, tmid in enumerate(self.data[companion+'_tmid_observed_transits']):
# ax.text( tmid, 0.9925*flux_min, str(i+1), ha='center' )
# ax.set(ylim=[0.99*flux_min, flux_max], xlabel='Time (BJD)', ylabel='Relative Flux')
# if not os.path.exists( os.path.join(self.datadir,'results') ):
# os.makedirs(os.path.join(self.datadir,'results'))
# ax.legend()
# fname = os.path.join(self.datadir,'results','preparation_for_TTV_fit_'+companion+'.pdf')
# if os.path.exists(fname):
# overwrite = str(input('Figure "preparation_for_TTV_fit_'+companion+'.pdf" already exists.\n'+\
# 'What do you want to do?\n'+\
# '1 : overwrite it\n'+\
# '2 : skip it and move on\n'))
# if (overwrite == '1'):
# fig.savefig(fname, bbox_inches='tight' )
# else:
# pass
# plt.close(fig)
width = self.settings['fast_fit_width']
for inst in self.settings['inst_phot']:
time = self.data[inst]['time']
for i, t in enumerate(self.data[companion+'_tmid_observed_transits']):
ind = np.where((time >= (t - width/2.)) & (time <= (t + width/2.)))[0]
self.data[inst][companion+'_ind_time_transit_'+str(i+1)] = ind
self.data[inst][companion+'_time_transit_'+str(i+1)] = time[ind]
###############################################################################
#::: stellar priors
###############################################################################
def load_stellar_priors(self, N_samples=10000):
if os.path.exists(os.path.join(self.datadir,'params_star.csv')) and (self.settings['use_host_density_prior'] is True):
buf = np.genfromtxt( os.path.join(self.datadir,'params_star.csv'), delimiter=',', names=True, dtype=None, encoding='utf-8', comments='#' )
radius = simulate_PDF(buf['R_star'], buf['R_star_lerr'], buf['R_star_uerr'], size=N_samples, plot=False) * 6.957e10 #in cgs
mass = simulate_PDF(buf['M_star'], buf['M_star_lerr'], buf['M_star_uerr'], size=N_samples, plot=False) * 1.9884754153381438e+33 #in cgs
volume = (4./3.)*np.pi*radius**3 #in cgs
density = mass / volume #in cgs
self.params_star = {'R_star_median':buf['R_star'],
'R_star_lerr':buf['R_star_lerr'],
'R_star_uerr':buf['R_star_uerr'],
'M_star_median':buf['M_star'],
'M_star_lerr':buf['M_star_lerr'],
'M_star_uerr':buf['M_star_uerr']
}
self.external_priors['host_density'] = ['normal', np.median(density), np.max( [np.median(density)-np.percentile(density,16), np.percentile(density,84)-np.median(density)] ) ] #in cgs
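#::: illustrative example (added, synthetic numbers): R_star = 1 Rsun and
#::: M_star = 1 Msun give density = 1.989e33 g / ((4./3.)*np.pi*(6.957e10)**3 cm^3)
#::: ~ 1.41 g/cm^3, which would become the mean of the 'host_density' normal prior.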
| 58.04003
| 224
| 0.481008
| 8,322
| 78,296
| 4.333694
| 0.094448
| 0.099154
| 0.035325
| 0.016692
| 0.626314
| 0.562041
| 0.460585
| 0.369721
| 0.310162
| 0.259864
| 0
| 0.010743
| 0.293821
| 78,296
| 1,349
| 225
| 58.04003
| 0.641533
| 0.247829
| 0
| 0.226902
| 0
| 0.019022
| 0.212424
| 0.0203
| 0
| 0
| 0
| 0.001483
| 0
| 1
| 0.021739
| false
| 0.006793
| 0.019022
| 0.001359
| 0.05163
| 0.024457
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f08e87bb685c5de27a28a6c0f75d6ba70a73d31
| 3,334
|
py
|
Python
|
schematron/ssk.py
|
SarahTV/SSK
|
ac7f5b7b1f1c02aefcb706abd80178f86c216cf7
|
[
"CC-BY-4.0"
] | null | null | null |
schematron/ssk.py
|
SarahTV/SSK
|
ac7f5b7b1f1c02aefcb706abd80178f86c216cf7
|
[
"CC-BY-4.0"
] | null | null | null |
schematron/ssk.py
|
SarahTV/SSK
|
ac7f5b7b1f1c02aefcb706abd80178f86c216cf7
|
[
"CC-BY-4.0"
] | null | null | null |
#coding: utf-8
import re
import os
from lxml import etree as ET
from bs4 import BeautifulSoup
import csv
class schSSK:
def create_directory(self, directory):
"""Create a new directory.
:param directory: path to new directory
:type directory: string
"""
if not os.path.exists(directory):
os.makedirs(directory)
# Manage input files to handle
def get_files(self, d):
filesList = [] # list of input files
for fileName in os.listdir(d):
if fileName.endswith(".xml"):
filesList.append(d + "/" + fileName)
return filesList
def loadBS(self, xmlfile):
with open(xmlfile) as file:
testedFile = BeautifulSoup(file, 'xml')
return testedFile
def loadTree(self, xmlfile):
parser = ET.XMLParser(ns_clean=True)
tree = ET.parse(xmlfile, parser)
return tree
def parseSVRL(self, svrl, tree):
diagnostic = []
fired = svrl.find_all('failed-assert')
successfulReports = svrl.find_all('successful-report')
fired.extend(successfulReports)
for fire in fired:
location = self.getLocations(fire.attrs['location'], tree)
if location[1] is not None:
lineNumber = location[1].sourceline
tagName = location[1].tag
tagText = location[1].text
else:
lineNumber = ""
tagName = ""
tagText = ""
role = fire.attrs['role']
message = " ".join(fire.text.split())
rule = {
# "context": fire.findPrevious('fired-rule')['context'],
#"test": fire['test'],
"location": location[0],
"line": lineNumber,
"role" : role,
#"tag" : tagName,
# "attributes" : location[1].attrib,
#"nodeText": tagText,
"message": message
}
diagnostic.append(rule)
return diagnostic
def getLocations(self, assertLocation, tree):
# patterns to normalize the XPath expressions
pattern1 = re.compile(r'/\*:')
pattern2 = re.compile(r'\[namespace-uri\(\)=\'http://www\.tei\-c\.org/ns/1\.0\'\]')
pattern3 = re.compile('/')
location1 = re.sub(pattern1, '/', assertLocation)
location2 = re.sub(pattern2, '', location1)
# Different processing depending on whether the context node is the root
if len(location2) > 7:
locationNorm = re.sub(pattern3, '/{http://www.tei-c.org/ns/1.0}', location2[7:])[1:]
else:
locationNorm = re.sub(pattern3, '/{http://www.tei-c.org/ns/1.0}', location2)[1:]
location = tree.getroot().find(locationNorm)
return location2, location
def writeCSV(self, diagnostic, report, reportFolder):
keys = diagnostic[0].keys()
reportFile = re.search(r'/(.+?)\.xml', report).group(1) + "_report.csv"
csvFile = reportFolder + "/" + os.path.basename(os.path.normpath(reportFile))
with open(csvFile, 'w') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(diagnostic)
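# Hedged usage sketch (added by the editor; the folder and file names below are
# assumptions, not part of the original module): run every XML file in a corpus
# against its pre-computed SVRL report and write one CSV diagnostic per file.
if __name__ == '__main__':
    ssk = schSSK()
    ssk.create_directory('reports')
    for xml_path in ssk.get_files('corpus'):
        tree = ssk.loadTree(xml_path)
        # assumes 'corpus/foo.xml' has a matching SVRL report 'corpus/foo.svrl.xml'
        svrl = ssk.loadBS(xml_path.replace('.xml', '.svrl.xml'))
        diagnostic = ssk.parseSVRL(svrl, tree)
        if diagnostic:
            ssk.writeCSV(diagnostic, xml_path, 'reports')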
| 35.468085
| 96
| 0.54889
| 344
| 3,334
| 5.287791
| 0.421512
| 0.024739
| 0.016493
| 0.018142
| 0.06707
| 0.06707
| 0.06707
| 0.06707
| 0.057174
| 0.057174
| 0
| 0.015071
| 0.323335
| 3,334
| 94
| 97
| 35.468085
| 0.791223
| 0.113977
| 0
| 0.029412
| 0
| 0
| 0.064494
| 0.007204
| 0
| 0
| 0
| 0
| 0.044118
| 1
| 0.102941
| false
| 0
| 0.073529
| 0
| 0.264706
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f098e212077f84f0f80919da194e6c3605bd4fb
| 14,798
|
py
|
Python
|
src/01_eigenprogression_transform.py
|
lostanlen/nemisig2018
|
2868da84c938ff6db98936d81a830b838eef1131
|
[
"MIT"
] | 1
|
2018-09-27T09:07:05.000Z
|
2018-09-27T09:07:05.000Z
|
src/01_eigenprogression_transform.py
|
lostanlen/nemisig2018
|
2868da84c938ff6db98936d81a830b838eef1131
|
[
"MIT"
] | null | null | null |
src/01_eigenprogression_transform.py
|
lostanlen/nemisig2018
|
2868da84c938ff6db98936d81a830b838eef1131
|
[
"MIT"
] | null | null | null |
import localmodule
import datetime
import h5py
import math
import music21 as m21
import numpy as np
import os
import scipy
import scipy.linalg
import sys
import time
# Parse arguments
args = sys.argv[1:]
composer_str = args[0]
track_str = args[1]
# Define constants.
J_tm = 8
N = 2**10
n_octaves = 8
midi_octave_offset = 2
quantization = 2.0
xi = 0.25
sigma = 0.1
# Print header.
start_time = int(time.time())
print(str(datetime.datetime.now()) + " Start.")
print("Eigenprogression transform.")
print("Composer: " + composer_str + ".")
print("Piece: " + track_str + ".")
print("")
print("h5py version: {:s}".format(h5py.__version__))
print("music21 version: {:s}".format(m21.__version__))
print("numpy version: {:s}".format(np.__version__))
print("scipy version: {:s}".format(scipy.__version__))
print("")
############################# (1) PARSING ##################################
# Start clock.
parsing_start_time = int(time.time())
# Parse Kern score with music21.
data_dir = localmodule.get_data_dir()
dataset_name = localmodule.get_dataset_name()
kern_name = "_".join([dataset_name, "kern"])
kern_dir = os.path.join(data_dir, kern_name)
composer_dir = os.path.join(kern_dir, composer_str)
track_name = track_str + ".krn"
track_path = os.path.join(composer_dir, track_name)
score = m21.converter.parse(track_path)
pianoroll_parts = []
n_parts = len(score.parts)
n_semitones = 12 * n_octaves
# Loop over parts to extract piano rolls.
for part_id in range(n_parts):
part = score.parts[part_id]
pianoroll_part = np.zeros((n_semitones, N), dtype=np.float32)
# Get the measure offsets
measure_offset = {}
for el in part.recurse(classFilter=('Measure')):
measure_offset[el.measureNumber] = el.offset
# Loop over notes
for note in part.recurse(classFilter=('Note')):
note_start = int(math.ceil(
(measure_offset[note.measureNumber] +\
note.offset) *\
quantization))
note_end = int(math.ceil((
measure_offset[note.measureNumber] +\
note.offset +\
note.duration.quarterLength) *\
quantization))
pianoroll_part[
note.midi - midi_octave_offset * 12,
note_start:note_end] = 1
pianoroll_parts.append(pianoroll_part)
# Stack parts into piano roll.
mtrack_pianoroll = np.stack(pianoroll_parts, 2)
pianoroll = mtrack_pianoroll.max(axis=2)
# Print elapsed time.
elapsed_time = time.time() - int(parsing_start_time)
elapsed_str = "{:>05.2f}".format(elapsed_time)
print("Parsing took " + elapsed_str + " seconds.")
####################### (2) WAVELET TRANSFORM ##############################
# Start clock.
wavelet_start_time = int(time.time())
# Setup wavelet filter bank over time.
wavelet_filterbank_ft = np.zeros((1, N, J_tm), dtype=np.float32)
for j in range(J_tm-1):
xi_j = xi * 2**(-j)
sigma_j = sigma * 2**(-j)
center = xi_j * N
den = 2 * sigma_j * sigma_j * N * N
psi_ft = localmodule.morlet(center, den, N, n_periods=4)
wavelet_filterbank_ft[0, :, -1 - j] = psi_ft
# Append scaling function phi (average).
wavelet_filterbank_ft[0, 0, 0] = 1
# Convolve pianoroll with filterbank.
pianoroll_ft = scipy.fftpack.fft(pianoroll, axis=1)
pianoroll_ft = np.expand_dims(pianoroll_ft, axis=2)
wavelet_transform_ft = pianoroll_ft * wavelet_filterbank_ft
wavelet_transform = scipy.fftpack.ifft(wavelet_transform_ft, axis=1)
# Print elapsed time.
elapsed_time = time.time() - int(parsing_start_time)
elapsed_str = "{:>05.2f}".format(elapsed_time)
print("Wavelet transform took " + elapsed_str + " seconds.")
####################### (3) EIGENTRIAD TRANSFORM ###########################
# Start clock.
eigentriad_start_time = int(time.time())
# Reshape MIDI axis to chromagram
chromagram = np.reshape(wavelet_transform,
(12, -1, wavelet_transform.shape[1], wavelet_transform.shape[2]), 'F')
# Construct eigentriads
cosine_basis = np.array([[np.cos(2*np.pi*omega*t/3)
for omega in range(3)] for t in range(3)]).T
sine_basis = np.array([[np.sin(2*np.pi*omega*t/3)
for omega in range(3)] for t in range(3)]).T
fourier_basis = cosine_basis + 1.0j * sine_basis
major_template = [0, 4, 7]
minor_template = [0, 3, 7]
major_eigentriads = np.zeros((12, 3), dtype=np.complex64)
minor_eigentriads = np.zeros((12, 3), dtype=np.complex64)
for omega in range(3):
for t, p in enumerate(major_template):
major_eigentriads[p, omega] = fourier_basis[t, omega]
for t, p in enumerate(minor_template):
minor_eigentriads[p, omega] = fourier_basis[t, omega]
eigentriads = np.stack(
(major_eigentriads, minor_eigentriads), axis=1)
eigentriads = eigentriads.astype(np.complex64)
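# Illustrative sanity check (added): each eigentriad is supported only on its
# three template pitch classes, so exactly 3 of the 12 rows are nonzero.
assert np.count_nonzero(np.abs(major_eigentriads).sum(axis=1)) == 3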
# Convolve chromagram with eigentriads
chromagram_ft = scipy.fftpack.fft(chromagram, axis=0)
chromagram_ft = chromagram_ft[:, np.newaxis, :, :, :, np.newaxis]
eigentriads_ft = scipy.fftpack.fft(eigentriads, axis=0)
eigentriads_ft = eigentriads_ft[:, :, np.newaxis,
np.newaxis, np.newaxis, :]
eigentriad_transform_ft = chromagram_ft * eigentriads_ft
eigentriad_transform = scipy.fftpack.fft(
eigentriad_transform_ft, axis=0)
# Apply modulus nonlinearity
eigentriad_transform_modulus = np.abs(eigentriad_transform)
# Print elapsed time.
elapsed_time = time.time() - int(eigentriad_start_time)
elapsed_str = "{:>05.2f}".format(elapsed_time)
print("Eigentriad transform took " + elapsed_str + " seconds.")
####################### (4) SCATTERING TRANSFORM ###########################
# Start clock.
scattering_start_time = int(time.time())
# Setup scattering filter bank over time.
scattering_filterbank_ft = np.zeros((1, N, 2*J_tm-1), dtype=np.float32)
for j in range(J_tm-1):
xi_j = xi * 2**(-j)
sigma_j = sigma * 2**(-j)
center = xi_j * N
den = 2 * sigma_j * sigma_j * N * N
psi_ft = localmodule.morlet(center, den, N, n_periods=4)
conj_psi_ft = np.roll(psi_ft, -1)[::-1]
scattering_filterbank_ft[0, :, -1 - 2*j] = psi_ft
scattering_filterbank_ft[0, :, -1 - (2*j+1)] = conj_psi_ft
scattering_filterbank_ft[0, 0, 0] = 1
# Convolve eigentriad transform with filterbank again.
# This is akin to a scattering transform.
# We remove the finest scale (last two coefficients).
eigentriad_transform_modulus_ft =\
scipy.fftpack.fft(eigentriad_transform_modulus, axis=3)
eigentriad_transform_modulus_ft =\
eigentriad_transform_modulus_ft[:, :, :, :, :, :, np.newaxis]
# NOTE: this reassignment shadows the analytic scattering filter bank built in
# the loop above; the first-order wavelet bank is reused instead.
scattering_filterbank_ft =\
wavelet_filterbank_ft[:, np.newaxis, np.newaxis, :,
np.newaxis, np.newaxis, :-2]
scattering_transform_ft =\
eigentriad_transform_modulus_ft * scattering_filterbank_ft
scattering_transform = scipy.fftpack.ifft(scattering_transform_ft, axis=3)
# Print elapsed time.
elapsed_time = time.time() - int(scattering_start_time)
elapsed_str = "{:>05.2f}".format(elapsed_time)
print("Scattering transform took " + elapsed_str + " seconds.")
###################### (5) EIGENPROGRESSION TRANSFORM ######################
# Start clock.
eigenprogression_start_time = int(time.time())
# Reshape chroma and quality into a chord axis
sc_shape = scattering_transform.shape
tonnetz_shape = (
sc_shape[0]*sc_shape[1], sc_shape[2],
sc_shape[3], sc_shape[4], sc_shape[5],
sc_shape[6])
tonnetz = np.reshape(scattering_transform,
tonnetz_shape, 'F')
# Build adjacency matrix for Tonnetz graph
# (1/3) Major to minor transitions.
major_edges = np.zeros((12,), dtype=np.float32)
# Parallel minor (C major to C minor)
major_edges[0] = 1
# Relative minor (C major to A minor)
major_edges[9] = 1
# Leading tone minor (C major to E minor)
major_edges[4] = 1
# (2/3) Minor to major transitions
minor_edges = np.zeros((12,))
# Parallel major (C minor to C major)
minor_edges[0] = 1
# Relative major (C minor to Eb major)
minor_edges[3] = 1
# Leading tone major (C minor to Ab major)
minor_edges[8] = 1
# (3/3) Build full adjacency matrix from 4 blocks.
major_adjacency = scipy.linalg.toeplitz(major_edges, minor_edges)
minor_adjacency = scipy.linalg.toeplitz(minor_edges, major_edges)
tonnetz_adjacency = np.zeros((24, 24), dtype=np.float32)
tonnetz_adjacency[:12, 12:] = minor_adjacency
tonnetz_adjacency[12:, :12] = major_adjacency
# Define Laplacian on the Tonnetz graph.
tonnetz_laplacian = 3 * np.eye(24, dtype=np.float32) - tonnetz_adjacency
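# Illustrative sanity check (added): every chord has degree 3 in the Tonnetz
# graph built above, so each row of the graph Laplacian sums to zero.
assert np.allclose(tonnetz_laplacian.sum(axis=1), 0.0)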
# Diagonalize the Laplacian; its eigenvectors are the eigenprogressions.
# np.linalg.eig returns (eigenvalues, eigenvectors), in that order.
eigvals, eigvecs = np.linalg.eig(tonnetz_laplacian)
sorting_indices = np.argsort(eigvals)
eigvals = eigvals[sorting_indices]
eigvecs = eigvecs[:, sorting_indices]
# Key invariance
phi = eigvecs[:, 0]
# Tonic invariance with quality covariance
psi_quality = eigvecs[:, 23]
# C -> C# -> D ... simultaneously with Cm -> C#m -> ...
# Major third periodicity.
psi_chromatic = eigvecs[:, 1] + 1j * eigvecs[:, 2]
# Major keys: pentatonic pattern (C D F G A) moving up a minor third.
# Major keys: minor seventh pattern (B D E A) moving down a minor third.
psi_pentatonic_up = eigvecs[:, 3] + 1j * eigvecs[:, 4]
# Cm -> B -> Bm -> Bb -> Am -> ...
# Minor third periodicity
psi_Cm_B_Bm_Bb = eigvecs[:, 5] + 1j * eigvecs[:, 6]
# C -> Am -> A -> Cm -> C ...
# Relative (R) followed by parallel (P).
# Major third periodicity
j = complex(np.cos(2*np.pi/3), np.sin(2*np.pi/3)) #builtin complex; np.complex was removed in NumPy >= 1.24
jbar = complex(np.cos(-2*np.pi/3), np.sin(-2*np.pi/3))
psi_RP = eigvecs[:, 7] + j * eigvecs[:, 8] + jbar * eigvecs[:, 9]
# C -> Bm -> Bb -> Am -> Ab -> ...
psi_C_Bm_Bb_Am = eigvecs[:, 10] + 1j * eigvecs[:, 11]
# Upwards minor third. Qualities in phase opposition.
psi_minorthird_quality = eigvecs[:, 12] + 1j * eigvecs[:, 13]
# Ab is simultaneous with Am.
# Abstract notion of "third" degree with quality invariance?
# Tritone periodicity
j = complex(np.cos(2*np.pi/3), np.sin(2*np.pi/3))
jbar = complex(np.cos(-2*np.pi/3), np.sin(-2*np.pi/3))
psi_third_tritone = eigvecs[:, 14] + j * eigvecs[:, 15] + jbar * eigvecs[:, 16]
# C -> C#m -> D -> D#m -> ...
# Minor third periodicity.
psi_C_Dbm_D_Ebm = eigvecs[:, 17] + 1j * eigvecs[:, 18]
# Major keys: pentatonic pattern (C D F G A) moving down a minor third.
# Major keys: minor seventh pattern (B D E A) moving up a minor third.
psi_pentatonic_down = eigvecs[:, 19] + 1j * eigvecs[:, 20]
# C is simultaneous with Dm.
# Abstract notion of minor key?
# Major third periodicity.
psi_minorkey = eigvecs[:, 21] + 1j * eigvecs[:, 22]
# Concatenate eigenprogressions.
eigenprogressions = np.stack((
phi,
psi_quality,
psi_chromatic,
psi_pentatonic_up,
psi_Cm_B_Bm_Bb,
psi_RP,
psi_C_Bm_Bb_Am,
psi_C_Bm_Bb_Am, # NOTE: listed twice; possibly intentional, kept to preserve the 13-component shape
psi_minorthird_quality,
psi_third_tritone,
psi_C_Dbm_D_Ebm,
psi_pentatonic_down,
psi_minorkey), axis=-1)
eigenprogressions = np.reshape(eigenprogressions, (12, 2, -1), 'F')
eigenprogressions = eigenprogressions.astype(np.complex64)
# Apply eigenprogression transform.
scattering_transform_ft = scipy.fftpack.fft(scattering_transform, axis=0)
scattering_transform_ft = scattering_transform_ft[:, :, :, :, :, :, :, np.newaxis]
eigenprogressions_ft = scipy.fftpack.fft(eigenprogressions, axis=0)
eigenprogressions_ft = eigenprogressions_ft[
:, :, np.newaxis, np.newaxis, np.newaxis, np.newaxis, np.newaxis]
eigenprogression_transform_ft = scattering_transform_ft * eigenprogressions_ft
eigenprogression_transform = scipy.fftpack.ifft(eigenprogression_transform_ft, axis=0)
# Print elapsed time.
elapsed_time = time.time() - int(eigenprogression_start_time)
elapsed_str = "{:>05.2f}".format(elapsed_time)
print("Eigenprogression transform took " + elapsed_str + " seconds.")
###################### (5) SPIRAL TRANSFORM ######################
# Start clock.
spiral_start_time = int(time.time())
# Setup wavelet filter bank across octaves.
# This is comparable to a spiral scattering transform.
J_oct = 3
octave_filterbank_ft = np.zeros((n_octaves, 2*J_oct-1), dtype=np.float32)
for j in range(J_oct-1):
xi_j = xi * 2**(-j)
sigma_j = sigma * 2**(-j)
center = xi_j * n_octaves
den = 2 * sigma_j * sigma_j * n_octaves * n_octaves
psi_ft = localmodule.morlet(center, den, n_octaves, n_periods=4)
conj_psi_ft = np.roll(psi_ft, -1)[::-1]
octave_filterbank_ft[:, -1 - 2*j] = psi_ft
octave_filterbank_ft[:, -1 - (2*j+1)] = conj_psi_ft
octave_filterbank_ft[0, 0] = 1
octave_filterbank_ft = octave_filterbank_ft[
np.newaxis, np.newaxis, :,
np.newaxis, np.newaxis,
np.newaxis, np.newaxis, np.newaxis]
# Apply octave transform.
eigenprogression_transform_ft = scipy.fftpack.fft(
eigenprogression_transform, axis=2)
eigenprogression_transform_ft = eigenprogression_transform_ft[
:, :, :, :, :, :, :, :, np.newaxis]
spiral_transform_ft =\
eigenprogression_transform_ft * octave_filterbank_ft
spiral_transform = scipy.fftpack.fft(
spiral_transform_ft, axis=2)
# Print elapsed time.
elapsed_time = time.time() - int(spiral_start_time)
elapsed_str = "{:>05.2f}".format(elapsed_time)
print("Spiral transform took " + elapsed_str + " seconds.")
######################## (6) MODULUS AND AVERAGING #########################
modulus_start_time = time.time()
# Apply second-order modulus nonlinearity.
U2 = np.abs(spiral_transform)
# Average over chroma, quality, octave, and time.
S2 = np.sum(U2, axis=(0, 1, 2, 3))
# Print elapsed time.
elapsed_time = time.time() - int(modulus_start_time)
elapsed_str = "{:>05.2f}".format(elapsed_time)
print("Averaging took " + elapsed_str + " seconds.")
############################### (7) STORAGE #################################
# Store to HDF5 container
hdf5_name = "_".join([dataset_name, "eigenprogression-transforms"])
hdf5_dir = os.path.join(data_dir, hdf5_name)
os.makedirs(hdf5_dir, exist_ok=True)
composer_dir = os.path.join(hdf5_dir, composer_str)
os.makedirs(composer_dir, exist_ok=True)
out_path = os.path.join(composer_dir,
"_".join([
dataset_name,
"eigenprogression-transform",
composer_str,
track_str + ".hdf5"]))
out_file = h5py.File(out_path, "w") # an explicit mode is required by h5py >= 3.0
hdf5_dataset_size = S2.shape
hdf5_dataset_key = "_".join([
"eigenprogression-transform",
composer_str,
track_str])
hdf5_dataset = out_file.create_dataset(hdf5_dataset_key, hdf5_dataset_size)
hdf5_dataset[:] = S2
out_file.close()
# Print elapsed time.
print(str(datetime.datetime.now()) + " Finish.")
elapsed_time = time.time() - int(start_time)
elapsed_hours = int(elapsed_time / (60 * 60))
elapsed_minutes = int((elapsed_time % (60 * 60)) / 60)
elapsed_seconds = elapsed_time % 60.
elapsed_str = "{:>02}:{:>02}:{:>05.2f}".format(elapsed_hours,
elapsed_minutes,
elapsed_seconds)
print("Total elapsed time: " + elapsed_str + ".")
| 34.334107
| 86
| 0.68462
| 2,086
| 14,798
| 4.642378
| 0.146692
| 0.030669
| 0.018174
| 0.02974
| 0.356981
| 0.278604
| 0.22594
| 0.185977
| 0.170487
| 0.140335
| 0
| 0.027031
| 0.157521
| 14,798
| 430
| 87
| 34.413953
| 0.749739
| 0.188607
| 0
| 0.156364
| 0
| 0
| 0.04997
| 0.008864
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.04
| 0
| 0.04
| 0.069091
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f09cb31eceadc76ff93699e82ee70df317cae82
| 983
|
py
|
Python
|
src/spaceone/monitoring/manager/plugin_manager.py
|
jean1042/monitoring
|
0585a1ea52ec13285eaca81cc5b19fa3f7a1fba4
|
[
"Apache-2.0"
] | 5
|
2020-06-04T23:01:30.000Z
|
2020-09-09T08:58:51.000Z
|
src/spaceone/monitoring/manager/plugin_manager.py
|
jean1042/monitoring
|
0585a1ea52ec13285eaca81cc5b19fa3f7a1fba4
|
[
"Apache-2.0"
] | 9
|
2022-02-10T00:58:28.000Z
|
2022-03-23T11:12:47.000Z
|
src/spaceone/monitoring/manager/plugin_manager.py
|
jean1042/monitoring
|
0585a1ea52ec13285eaca81cc5b19fa3f7a1fba4
|
[
"Apache-2.0"
] | 7
|
2020-06-10T01:56:35.000Z
|
2021-12-02T05:36:21.000Z
|
import logging
from spaceone.core.manager import BaseManager
from spaceone.core.connector.space_connector import SpaceConnector
_LOGGER = logging.getLogger(__name__)
class PluginManager(BaseManager):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.plugin_connector: SpaceConnector = self.locator.get_connector('SpaceConnector', service='plugin')
def get_plugin_endpoint(self, plugin_info, domain_id):
plugin_connector: SpaceConnector = self.locator.get_connector('SpaceConnector', service='plugin') # note: re-fetches the connector instead of reusing self.plugin_connector
response = plugin_connector.dispatch(
'Plugin.get_plugin_endpoint',
{
'plugin_id': plugin_info['plugin_id'],
'version': plugin_info.get('version'),
'upgrade_mode': plugin_info.get('upgrade_mode', 'AUTO'),
'domain_id': domain_id
}
)
return response['endpoint'], response.get('updated_version')
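# Hedged usage sketch (added; the identifiers below are illustrative, not part
# of the original module). Inside a service that has access to the locator:
#
# plugin_mgr: PluginManager = self.locator.get_manager('PluginManager')
# endpoint, updated_version = plugin_mgr.get_plugin_endpoint(
#     {'plugin_id': 'plugin-example-monitoring', 'upgrade_mode': 'AUTO'},
#     domain_id='domain-12345')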
| 35.107143
| 110
| 0.666328
| 100
| 983
| 6.2
| 0.37
| 0.148387
| 0.051613
| 0.106452
| 0.254839
| 0.254839
| 0.254839
| 0.254839
| 0.254839
| 0.254839
| 0
| 0
| 0.220753
| 983
| 27
| 111
| 36.407407
| 0.809399
| 0
| 0
| 0
| 0
| 0
| 0.160732
| 0.02645
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.15
| 0
| 0.35
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f0a14df894f78200ec160dd56d1194d86c6d8d9
| 1,107
|
py
|
Python
|
Segmentation/bins/hist_label_portarit.py
|
ttthomaschan/DeepcvLib
|
18f7728559136a3c5c8ad54666788ea771e95b16
|
[
"MIT"
] | null | null | null |
Segmentation/bins/hist_label_portarit.py
|
ttthomaschan/DeepcvLib
|
18f7728559136a3c5c8ad54666788ea771e95b16
|
[
"MIT"
] | null | null | null |
Segmentation/bins/hist_label_portarit.py
|
ttthomaschan/DeepcvLib
|
18f7728559136a3c5c8ad54666788ea771e95b16
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
# @file name : hist_label_portrait.py
# @author : JLChen
# @date : 2020-03-11
# @brief      : count the number of pixels in each label class
"""
import numpy as np
import os
import matplotlib.pyplot as plt
import pylab as pl
import cv2
def cal_cls_nums(path, t=0.78):
label_img = cv2.imread(path)
label_img = cv2.cvtColor(label_img, cv2.COLOR_BGR2GRAY)
label_img[label_img > t] = 1
label_img[label_img <= t] = 0
label_img = label_img.flatten()
count = np.bincount(label_img, minlength=2) # occurrences of class 0 and class 1
return count
if __name__ == '__main__':
data_dir = r"G:\deep_learning_data\EG_dataset\dataset\training"
counter = np.zeros((2,))
# iterate over every label image and accumulate the per-class counts
file_names = [n for n in os.listdir(data_dir) if n.endswith('_matte.png')]
for i, name in enumerate(file_names):
path_img = os.path.join(data_dir, name)
counter += cal_cls_nums(path_img) # accumulate the per-class pixel counts in counter
# https://pytorch.org/docs/stable/generated/torch.nn.BCEWithLogitsLoss.html?highlight=pos_weight
# set pos_weight to (number of negative samples / number of positive samples)
print(counter, counter[0] / counter[1])
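    # Hedged worked example (added; synthetic numbers, not real data): with
    # 9e6 background pixels and 1e6 portrait pixels, counter = [9e6, 1e6] and
    # pos_weight = counter[0] / counter[1] = 9.0, i.e. each positive pixel is
    # weighted 9x in BCEWithLogitsLoss to balance the classes.
    _example_counter = np.array([9e6, 1e6])
    assert _example_counter[0] / _example_counter[1] == 9.0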
| 26.357143
| 100
| 0.67299
| 165
| 1,107
| 4.284848
| 0.563636
| 0.113154
| 0.046676
| 0.067893
| 0.048091
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025959
| 0.199639
| 1,107
| 41
| 101
| 27
| 0.772009
| 0.270099
| 0
| 0
| 0
| 0
| 0.084489
| 0.061791
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.238095
| 0
| 0.333333
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f0a726404191dd0a8ef9e2cd1c7c33d9b482f77
| 7,924
|
py
|
Python
|
yoapi/contexts.py
|
YoApp/yo-api
|
a162e51804ab91724cc7ad3e7608410329da6789
|
[
"MIT"
] | 1
|
2021-12-17T03:25:34.000Z
|
2021-12-17T03:25:34.000Z
|
yoapi/contexts.py
|
YoApp/yo-api
|
a162e51804ab91724cc7ad3e7608410329da6789
|
[
"MIT"
] | null | null | null |
yoapi/contexts.py
|
YoApp/yo-api
|
a162e51804ab91724cc7ad3e7608410329da6789
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Client context module."""
import pytz
import time
from flask import current_app
from datetime import datetime, timedelta
from mongoengine import DoesNotExist, ValidationError # ValidationError is caught in update_gif_phrases below
from .ab_test import get_enrolled_experiments
from .core import cache
from .errors import APIError
from .helpers import assert_valid_time
from .models import GifPhrase
from .constants.context import DEFAULT_CONTEXTS, ALL_CONTEXT_IDS, LOCATION_CTX, DEFAULT_CTX, AUDIO_CTX, CAMERA_CTX
import semver
from yoapi.models import Yo
from yoapi.notification_endpoints import get_useragent_profile
def get_contexts(user, request=None):
"""Gets the contexts associated with the supplied user"""
default_context = current_app.config.get('DEFAULT_CONTEXT')
if user is None:
return [LOCATION_CTX, DEFAULT_CTX, CAMERA_CTX, AUDIO_CTX], default_context
week_ago = datetime.now() - timedelta(days=27)
week_ago_unix = int(time.mktime(week_ago.timetuple()) * 1e6)
if Yo.objects.filter(sender=user, created__gt=week_ago_unix, context_id='gif').count() > 0:
return ALL_CONTEXT_IDS, default_context
if Yo.objects.filter(sender=user, created__gt=week_ago_unix, context_id='emoji').count() > 0:
return ALL_CONTEXT_IDS, default_context
try:
if request and semver.match(get_useragent_profile(request).get('app_version'), '>=2.5.0'):
return [LOCATION_CTX, DEFAULT_CTX, CAMERA_CTX, AUDIO_CTX], default_context
except:
pass
experiments = get_enrolled_experiments(user, dimension='context')
if experiments:
experiment = experiments[0]
contexts = DEFAULT_CONTEXTS[:]
assignments = experiment.get_params()
exp_context = assignments.get('context')
exp_context_position = assignments.get('context_position')
exp_default_context = assignments.get('default_context')
if exp_context:
if (exp_context_position is not None and
exp_context_position < len(DEFAULT_CONTEXTS) and
exp_context_position >= 0):
contexts.insert(exp_context_position, exp_context)
else:
contexts.append(exp_context)
if exp_default_context:
default_context = exp_default_context
if not experiment.ab_test.debug:
experiment.log_event('context_ab_test_enrolled',
extras={'dimension': 'context'})
return contexts, default_context
if current_app.config.get('ENABLE_ALL_CONTEXTS'):
return ALL_CONTEXT_IDS, default_context
return DEFAULT_CONTEXTS, default_context
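# Hedged usage sketch (added; the user and request objects are assumptions):
#
# contexts, default_context = get_contexts(current_user, request=flask_request)
#
# The returned context ids drive which context buttons the client renders,
# with default_context preselected.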
def update_gif_phrases(payload):
items = []
for item in payload:
item = item.copy()
items.append(item)
phrase_id = item.get('id')
is_new = False
if phrase_id:
try:
phrase = get_gif_phrase_by_id(phrase_id)
except DoesNotExist:
item.update({'update_result': 'discarded'})
continue
if item.get('delete'):
phrase.delete()
item.update({'update_result': 'deleted'})
continue
else:
phrase = GifPhrase()
is_new = True
if item.get('delete'):
item.update({'update_result': 'skipped'})
continue
end_time = item.get('end_time')
start_time = item.get('start_time')
keyword = item.get('keyword')
header = item.get('header')
day = item.get('day')
date = item.get('date')
default = item.get('is_default')
default = bool(default)
# Parse the iso8601 dates that google spreadsheets provide.
if date:
try:
date = datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.%fZ')
date = date.strftime('%-m/%-d/%y')
except:
raise APIError('Invalid date format')
else:
date = None
try:
start_time = datetime.strptime(start_time, '%H:%M')
start_time = start_time.strftime('%H:%M')
except:
raise APIError('Invalid start_time format')
try:
end_time = datetime.strptime(end_time, '%H:%M')
end_time = end_time.strftime('%H:%M')
except:
raise APIError('Invalid end_time format')
if default and date:
raise APIError('defaults cannot have a date')
if default and not day:
raise APIError('defaults must have a day')
if default != phrase.is_default:
phrase.is_default = default
if start_time != phrase.start_time:
phrase.start_time = start_time
if end_time != phrase.end_time:
phrase.end_time = end_time
if keyword != phrase.keyword:
phrase.keyword = keyword
if header != phrase.header:
phrase.header = header
if day != phrase.day:
if day:
day = day.lower()
try:
assert_valid_time(day, time_format='%A')
except ValueError:
raise APIError('invalid day of the week')
else:
day = None
phrase.day = day
if date != phrase.date:
phrase.date = date
if is_new:
item.update({'update_result': 'created'})
elif phrase._changed_fields:
item.update({'update_result': 'updated'})
else:
item.update({'update_result': 'nochange'})
continue
try:
phrase.save()
except ValidationError:
item.update({'update_result': 'discarded'})
message = 'Tried to update gif phrase with invalid information.'
current_app.log_error({'message': message, 'item': item})
continue
item.update({'id': phrase.phrase_id})
if phrase.is_default:
clear_get_default_phrase_cache(phrase.day)
clear_get_phrases_cache()
return {'items': items}
def clear_get_phrases_cache(date=None):
if date:
# This is a hack to make sure dates are NEVER 0 padded
# when dealing with them in cache.
ts = time.strptime(date, '%m/%d/%y')
date = datetime(*ts[:6]).strftime('%-m/%-d/%y')
cache.delete_memoized(_get_all_phrases, date)
else:
cache.delete_memoized(_get_all_phrases)
def clear_get_default_phrase_cache(day):
day = str(day).lower()
cache.delete_memoized(_get_default_phrases, day)
def get_gif_phrase_by_id(phrase_id):
return GifPhrase.objects(id=phrase_id).get()
@cache.memoize()
def _get_default_phrases(day):
phrases = GifPhrase.objects(day=day, is_default=True).all()
return list(phrases)
# Timeout after 2 days.
@cache.memoize(timeout=60*60*24*2)
def _get_all_phrases(date):
phrases = GifPhrase.objects(date=date).all()
return list(phrases)
def get_gif_phrase(user):
if user.timezone:
zone = pytz.timezone(user.timezone)
current_datetime = datetime.now(zone)
else:
zone = pytz.utc
current_datetime = datetime.now(zone)
current_time = current_datetime.strftime('%H:%M')
current_date = current_datetime.strftime('%-m/%-d/%y')
current_day = current_datetime.strftime('%A').lower()
phrases = _get_all_phrases(current_date)
for phrase in phrases:
if (current_time >= phrase.start_time and
current_time <= phrase.end_time):
return phrase
phrases = _get_default_phrases(current_day)
for phrase in phrases:
if (current_time >= phrase.start_time and
current_time <= phrase.end_time):
return phrase
return GifPhrase(keyword=current_app.config.get('GIPHY_PHRASE'),
header=current_app.config.get('GIPHY_TEXT'))
| 32.342857
| 114
| 0.617365
| 948
| 7,924
| 4.931435
| 0.205696
| 0.041925
| 0.023957
| 0.032941
| 0.224813
| 0.149947
| 0.129198
| 0.11893
| 0.085134
| 0.085134
| 0
| 0.004569
| 0.281928
| 7,924
| 244
| 115
| 32.47541
| 0.817047
| 0.03319
| 0
| 0.243386
| 0
| 0
| 0.08802
| 0.005885
| 0
| 0
| 0
| 0
| 0.010582
| 1
| 0.042328
| false
| 0.005291
| 0.074074
| 0.005291
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f0acc1fb7d824f01253e231a80bcc928842ee31
| 4,180
|
py
|
Python
|
coyote_framework/config/abstract_config.py
|
vaibhavrastogi1988/python_testing_framework
|
583a2286479ed0ccda309c866a403dc92fa1bb3b
|
[
"MIT"
] | null | null | null |
coyote_framework/config/abstract_config.py
|
vaibhavrastogi1988/python_testing_framework
|
583a2286479ed0ccda309c866a403dc92fa1bb3b
|
[
"MIT"
] | null | null | null |
coyote_framework/config/abstract_config.py
|
vaibhavrastogi1988/python_testing_framework
|
583a2286479ed0ccda309c866a403dc92fa1bb3b
|
[
"MIT"
] | null | null | null |
from configparser import ConfigParser
import json
import fnmatch
import os
__author__ = 'justin@shapeways.com'
TEST_RUN_SETTING_CONFIG = 'TEST_RUN_SETTING_CONFIG'
confg_dict = {}
class NullConfigAttribute(object):
def __init__(self, description, default_value=None):
self.description = description
self.default_value = default_value
class ConfigBase(object):
"""The config base; do not inherit from ConfigParser because it is an old-style class"""
def __init__(self, section):
        if section not in confg_dict:
self.section = section
self.parser = ConfigParser()
self._readall()
confg_dict[section] = self
else:
this_config = confg_dict[section]
self.section = section
self.parser = this_config.parser
def get(self, key):
return self.parser.get(self.section, key)
def getbool(self, key):
return bool(self.parser.getboolean(self.section, key))
def getint(self, key):
return int(self.get(key))
def getfloat(self, key):
return float(self.get(key))
def getjson(self, key):
raw = self.get(key)
if not raw:
raw = '{}'
return json.loads(raw)
def _readall(self):
"""Read configs from all available configs. It will read files in the following order:
1.) Read all default settings:
These are located under: `<project_root>/config/*/default.cfg`
2.) Read the user's config settings:
This is located on the path: `~/.aftrc`
3.) Read all config files specified by the config string in the environment variable TEST_RUN_SETTING_CONFIG
A config string such as "browser.headless,scripts.no_ssh" will read paths:
`<project_root>/config/browser/headless.cfg`
`<project_root>/config/scripts/no_ssh.cfg`
OR a config string such as "<project_root>/config/browser/headless.cfg" will load that path directly
"""
# First priority -- read all default configs
config_path = os.path.dirname(__file__)
config_defaults = [os.path.join(dirpath, f)
for dirpath, dirnames, files in os.walk(config_path)
for f in fnmatch.filter(files, 'default.cfg')]
# Second priority -- read the user overrides
user_config = os.path.expanduser('~/.aftrc')
# Third priority -- read the environment variable overrides
override_filenames = []
if TEST_RUN_SETTING_CONFIG in os.environ:
for test_config in os.environ[TEST_RUN_SETTING_CONFIG].split(','):
                if os.path.exists(test_config):  # is this a file path?
                    override_filenames.append(test_config)
                elif "." in test_config and not test_config.endswith('.cfg'):  # it might be in xxxx.yyyy format
                    config_parts = test_config.split('.')
                    config_parts[-1] += '.cfg'  # add the file extension to the last part, which should be the file
                    filename = os.path.join(config_path, *config_parts)
                    override_filenames.append(filename)
                else:  # unknown format; might be worth raising an exception here
                    pass
all_configs = config_defaults + [user_config] + override_filenames
return self.parser.read(all_configs)
def load_config_vars(target_config, source_config):
"""Loads all attributes from source config into target config
@type target_config: TestRunConfigManager
@param target_config: Config to dump variables into
@type source_config: TestRunConfigManager
@param source_config: The other config
@return: True
"""
# Overwrite all attributes in config with new config
for attr in dir(source_config):
# skip all private class attrs
if attr.startswith('_'):
continue
val = getattr(source_config, attr)
if val is not None:
setattr(target_config, attr, val)
1f0d5b67c4f91743453ccb056ca36b102ec5a878 | 6,485 | py | Python | src/main.py | matthewb96/NetSpeedGraphs | 51e6f6d4f24845e50f34ed56452a4fa454db189b | ["MIT"] | issues: 4 (2021-06-08T21:23:15.000Z to 2022-03-12T00:29:23.000Z)
"""
Main module for running NetSpeedGraphs.
"""
##### IMPORTS #####
# Standard imports
from pathlib import Path
from datetime import datetime, timedelta
from argparse import ArgumentParser
# Third party imports
import speedtest
import numpy as np
import pandas as pd
from bokeh.plotting import figure, output_file, save
from bokeh.models.formatters import DatetimeTickFormatter
from bokeh.models import (ColumnDataSource, DataTable, TableColumn,
NumberFormatter, DateFormatter)
from bokeh.layouts import grid
##### CONSTANTS #####
DATA_HEADER = ['Time', 'Ping (ms)', 'Download Speed (Mbs)',
'Upload Speed (Mbs)']
##### FUNCTIONS #####
def allTests():
""" Runs ping, download and upload speed tests.
Returns
-------
ping: float
        Ping value in milliseconds.
download: float
Download speed in Mbs.
upload: float
Upload speed in Mbs.
"""
st = speedtest.Speedtest()
server = st.get_best_server()
down = st.download()
up = st.upload()
return server['latency'], down / 1e6, up / 1e6
def plotGraph(data, path):
""" Plots a graph with the download and upload speeds and pings.
Parameters
----------
data: pandas.DataFrame
DataFrame containing 4 columns:
        - Time: datetime objects for the time the test was run.
- Ping: ping in milliseconds.
- Download: download speed in megabits per second.
- Upload: upload speed in megabits per second.
path: pathlib Path
Path to html file for outputting plots to.
See Also
--------
readResults
"""
# output to static HTML file
output_file(path)
# Use the pandas dataframe as the source
source = ColumnDataSource(data)
# Create a new plot and set x-axis type as datetime
netPlot = figure(title="Network Speeds", x_axis_type='datetime',
x_axis_label='Time of Test',
y_axis_label='Speed (Mbs) / Ping (ms)',
tools=['xpan', 'xwheel_zoom', 'box_select', 'reset'],
active_drag='xpan', active_scroll='xwheel_zoom',
sizing_mode='stretch_both')
# Change x axis tick format depending on zoom level
netPlot.xaxis.formatter = DatetimeTickFormatter(hours = ['%H:%M'],
days = ['%d/%m/%Y'],
months = ['%m/%Y'])
# Add the lines to the plot in different colours
WIDTH = 2
netPlot.line(x='Time', y='Ping', source=source, legend_label='Ping',
line_color='orange', line_width=WIDTH)
netPlot.line(x='Time', y='Download', source=source, legend_label='Download',
line_color='blue', line_width=WIDTH)
netPlot.line(x='Time', y='Upload', source=source, legend_label='Upload',
line_color='green', line_width=WIDTH)
# Create table
numFormatter = NumberFormatter(format='0.00')
columns = [
TableColumn(field="Time", title='Time',
formatter=DateFormatter(format="%Y-%m-%d %H:%M")),
TableColumn(field='Ping', title='Ping (ms)', formatter=numFormatter),
TableColumn(field="Download", title='Download Speed (Mbs)',
formatter=numFormatter),
TableColumn(field='Upload', title='Upload Speed (Mbs)',
formatter=numFormatter)
]
table = DataTable(source=source, columns=columns, width=400,
sizing_mode='stretch_height')
# Add plot to grid layout
layout = grid([netPlot, table], ncols=2)
# show the results
save(layout)
return
def storeResults(results, path):
""" Save the network speed results to CSV containing all results.
Will create a CSV if it doesn't exist, or append results to it if it does.
Parameters
----------
results: list-like of floats
The results from a single run of the network test in the following
        order: ping (milliseconds), download speed (Mbs) and upload speed (Mbs).
path: pathlib Path
Path to csv file for saving results to.
See Also
--------
allTests
"""
# Get current time of results
now = datetime.now()
# Create row of results
row = [now.isoformat(), *[str(i) for i in results]]
# Check if file exists and create it with header if not
# then append current results to it
header = not path.exists()
with open(path, 'at') as out:
if header:
out.writelines(','.join(DATA_HEADER) + '\n')
out.write(','.join(row) + '\n')
return
def readResults(path):
""" Read the csv containing all the results into a DataFrame.
The `DATA_PATH` and `DATA_HEADER` constants are used when reading the csv.
Parameters
----------
path: pathlib Path
Path to csv file for reading from.
Returns
-------
data: pandas.DataFrame
DataFrame containing 4 columns:
        - Time: datetime objects for the time the test was run.
- Ping: ping in milliseconds.
- Download: download speed in megabits per second.
- Upload: upload speed in megabits per second.
"""
data = pd.read_csv(path, usecols=DATA_HEADER, parse_dates=[0])
rename = {i: i.split()[0].strip().capitalize() for i in DATA_HEADER}
data = data.rename(columns=rename)
return data
def argParser():
""" Creates an ArgumentParser to get output locations.
Returns
-------
parser: argparse ArgumentParser
Parser to get the output file locations from the arguments.
"""
parser = ArgumentParser(description='Run a network test and update html plots.')
parser.add_argument('data_file', type=Path,
help='csv file for storing all network test results.')
parser.add_argument('html_file', type=Path,
help='html file for saving the output plots to.')
return parser
def main():
""" Runs the network test to get results then updates the csv and graphs. """
# Get file locations from arguments
parser = argParser()
args = parser.parse_args()
# Run a test and update the graphs
netRes = allTests()
storeResults(netRes, args.data_file)
results = readResults(args.data_file)
plotGraph(results, args.html_file)
return
##### MAIN #####
if __name__ == '__main__':
main()
1f0da5b719cc8ed4639299b06648e3a470d196da | 7,478 | py | Python | tests/planar_tests/test_region_in_multiregion.py | lycantropos/orient | 01f4f67a717c5ee911d83756d455cc35e85ce817 | ["MIT"] | stars: 2 (2020-11-01T00:25:09.000Z to 2021-04-07T10:13:59.000Z)
from typing import Tuple
from ground.base import Relation
from hypothesis import given
from orient.hints import (Multiregion,
Region)
from orient.planar import (contour_in_multiregion,
region_in_multiregion,
region_in_region)
from tests.utils import (MULTIPART_COMPOUND_RELATIONS,
equivalence,
implication,
reverse_contour,
reverse_contour_coordinates,
reverse_multiregion,
reverse_multiregion_coordinates,
reverse_multiregion_regions,
sequence_rotations)
from . import strategies
@given(strategies.multiregions_with_contours)
def test_basic(multiregion_with_region: Tuple[Multiregion, Region]) -> None:
multiregion, region = multiregion_with_region
result = region_in_multiregion(region, multiregion)
assert isinstance(result, Relation)
assert result in MULTIPART_COMPOUND_RELATIONS
@given(strategies.multiregions)
def test_self(multiregion: Multiregion) -> None:
assert all(region_in_multiregion(region, multiregion) is Relation.COMPONENT
for region in multiregion)
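# The step test below peels the first region off the multiregion and checks
# that the relation for the whole is determined by the relation for the
# remaining regions combined with region_in_region against the removed one.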
@given(strategies.size_three_or_more_multiregions_with_contours)
def test_step(multiregion_with_region: Tuple[Multiregion, Region]) -> None:
multiregion, region = multiregion_with_region
first_region, *rest_multiregion = multiregion
result = region_in_multiregion(region, rest_multiregion)
next_result = region_in_multiregion(region, multiregion)
relation_with_first_region = region_in_region(region, first_region)
assert equivalence(next_result is Relation.DISJOINT,
result is relation_with_first_region
is Relation.DISJOINT)
assert equivalence(next_result is Relation.TOUCH,
result is Relation.DISJOINT
and relation_with_first_region is Relation.TOUCH
or result is Relation.TOUCH
and relation_with_first_region in (Relation.DISJOINT,
Relation.TOUCH))
assert equivalence(next_result is Relation.COMPONENT,
result is Relation.COMPONENT
or bool(rest_multiregion)
and relation_with_first_region is Relation.EQUAL)
assert equivalence(next_result is Relation.OVERLAP,
result is Relation.OVERLAP
or relation_with_first_region is Relation.OVERLAP
or (bool(rest_multiregion)
and result is Relation.DISJOINT
or result is Relation.TOUCH)
and relation_with_first_region in (Relation.COVER,
Relation.ENCLOSES)
or result in (Relation.COVER, Relation.ENCLOSES)
and relation_with_first_region is Relation.DISJOINT)
assert equivalence(next_result is Relation.COVER,
(not rest_multiregion or result is Relation.COVER)
and relation_with_first_region is Relation.COVER)
assert equivalence(next_result is Relation.ENCLOSES,
result is Relation.ENCLOSES
and relation_with_first_region in (Relation.ENCLOSES,
Relation.COVER)
or (not rest_multiregion or result is Relation.COVER)
and relation_with_first_region is Relation.ENCLOSES)
assert equivalence(next_result is Relation.EQUAL,
not rest_multiregion
and relation_with_first_region is Relation.EQUAL)
assert equivalence(next_result is Relation.ENCLOSED,
result is Relation.ENCLOSED
or relation_with_first_region is Relation.ENCLOSED)
assert equivalence(next_result is Relation.WITHIN,
result is Relation.WITHIN
or relation_with_first_region is Relation.WITHIN)
@given(strategies.multiregions_with_contours)
def test_reversals(multiregion_with_region: Tuple[Multiregion, Region]
) -> None:
multiregion, region = multiregion_with_region
result = region_in_multiregion(region, multiregion)
assert result is region_in_multiregion(reverse_contour(region),
multiregion)
assert result is region_in_multiregion(region,
reverse_multiregion(multiregion))
assert result is region_in_multiregion(
region, reverse_multiregion_regions(multiregion))
assert result is region_in_multiregion(
reverse_contour_coordinates(region),
reverse_multiregion_coordinates(multiregion))
@given(strategies.multiregions_with_contours)
def test_rotations(multiregion_with_region: Tuple[Multiregion, Region]
) -> None:
multiregion, region = multiregion_with_region
result = region_in_multiregion(region, multiregion)
assert all(result is region_in_multiregion(region, rotated)
for rotated in sequence_rotations(multiregion))
@given(strategies.multiregions_with_contours)
def test_connection_with_contour_in_multiregion(multiregion_with_region
: Tuple[Multiregion, Region]
) -> None:
multiregion, region = multiregion_with_region
result = region_in_multiregion(region, multiregion)
contour_relation = contour_in_multiregion(region, multiregion)
assert implication(result is Relation.DISJOINT
or result is Relation.COVER,
contour_relation is Relation.DISJOINT)
assert implication(contour_relation is Relation.DISJOINT,
result is Relation.DISJOINT
or result is Relation.OVERLAP
or result is Relation.COVER)
assert implication(result is Relation.TOUCH
or result is Relation.ENCLOSES
or result is Relation.COMPOSITE,
contour_relation is Relation.TOUCH)
assert implication(contour_relation is Relation.TOUCH,
result is Relation.TOUCH
or result is Relation.ENCLOSES
or result is Relation.OVERLAP
or result is Relation.COMPOSITE)
assert implication(result is Relation.OVERLAP,
contour_relation is Relation.DISJOINT
or contour_relation is Relation.CROSS
or contour_relation is Relation.TOUCH)
assert implication(contour_relation is Relation.CROSS,
result is Relation.OVERLAP)
assert equivalence(result is Relation.COMPONENT
or result is Relation.EQUAL,
contour_relation is Relation.COMPONENT)
assert equivalence(result is Relation.ENCLOSED,
contour_relation is Relation.ENCLOSED)
assert equivalence(result is Relation.WITHIN,
contour_relation is Relation.WITHIN)
1f0ed2213b59cdb0f244b760bfd1759ed4538c6a | 11,676 | py | Python | gui/src/core/parse_qca.py | retallickj/qca-embedding | 96fd37a3ecd4beacb04ad1cb193d65d0b48ceab2 | ["MIT"] | stars: 1 (2017-11-02T20:38:20.000Z)
#!/usr/bin/env python
#---------------------------------------------------------
# Name: parse_qca.py
# Purpose: Parsing functions for QCADesigner files
# Author: Jacob Retallick
# Created: 2015.10.22
# Last Modified: 2015.10.22
#---------------------------------------------------------
# NOTE
# the original parse script no longer seems to work (change in networkx?)
# for the purposes of the embedder, we don't need to consider clock zones so
# I have simplified the parsing script to remove that functionality.
import re
import networkx as nx
import numpy as np
from auxil import getEk, CELL_FUNCTIONS, CELL_MODES
from itertools import combinations
from functools import reduce  # reduce is no longer a builtin on Python 3
## general global parameters
R_MAX = 2.1 # max cell-cell interaction range (rel to grid spacing)
EK_THRESH = 1e-3 # threshold for included Ek, relative to max(abs(Ek))
X_ROUND = 4 # places to round to when deciding if cell is rotated
### FILE PROCESSING
def build_hierarchy(fn):
    '''Build a dict hierarchy containing all objects, their parameters, and
    children.'''
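    # QCADesigner files nest tagged blocks: a '[KEY]' line opens an object,
    # '[#KEY]' closes it, and 'var=value' lines fill its parameters, e.g.:
    #   [TYPE:DESIGN]
    #   [TYPE:QCADLayer]
    #   type=1
    #   [#TYPE:QCADLayer]
    #   [#TYPE:DESIGN]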
fp = open(fn, 'r')
linemap = lambda s: s.replace(',', '.')
# general re expression. may need to change if future format changes
    re_start = re.compile(r'^\[.+\]$')
    re_term = re.compile(r'^\[#.+\]$')
hier = {'label': 'Hierarchy', 'children': [], 'vars': {}}
key_stack = ['Hierarchy'] # stack of active keys, pop of top of stack
dict_stack = [hier] # stack of corresponding dict objects.
line_cnt = 0
for line in fp:
line = linemap(line)
line_cnt += 1
line = line.strip() # remove endline and possible whitespace
# must check object termination first
if re_term.match(line):
key = line[2:-1]
if key_stack[-1] == key:
d = dict_stack.pop()
key_stack.pop()
try:
dict_stack[-1]['children'].append(d)
                except IndexError:
                    print('Somehow over-popped dict_stack...')
return None
else:
print('Start-end mismatch in line {0}'.format(line_cnt))
return None
# for a new object, create a new dict template
elif re_start.match(line):
key = line[1:-1]
key_stack.append(key)
d = {'label': key, 'children': [], 'vars': {}}
dict_stack.append(d)
# otherwise check for new variable to add to most recent dict
else:
if '=' in line:
var, val = line.split('=')
dict_stack[-1]['vars'][var] = val
fp.close()
return hier
def proc_hierarchy(hier):
'''Process the extracted data hierarchy to extract useful information. In
the current information, we are interested in the overall cell grid spacing
(for deciding on the range of included cell) and the properties of each
cell in the circuit'''
cells = []
spacing = None
# hierarchy should only have two children: VERSION and TYPE:DESIGN. The
# former might be useful in later implentations for selecting formatting
# options but for now all we care about are the DESIGN objects
hier = [child for child in hier['children']
if child['label'] == 'TYPE:DESIGN'][0]
# for now assert that there can be only one cell layer, no vertical x-over
layers = [child for child in hier['children']
if child['label'] == 'TYPE:QCADLayer']
# isolate cell layers
cell_layers = [layer for layer in layers if layer['vars']['type'] == '1']
# merge cell layers, will lead to qdot conflict if vertical x-over
cell_dicts = [layer['children'] for layer in cell_layers]
cell_dicts = reduce(lambda x, y: x+y, cell_dicts)
# get grid-spacing (average cell bounding box)
cx = float(cell_dicts[0]['vars']['cell_options.cxCell'])
cy = float(cell_dicts[0]['vars']['cell_options.cyCell'])
spacing = np.sqrt(cx*cy)
# create cell objects
cells = []
for cd in cell_dicts:
cell = {}
# cell type
cell['cf'] = CELL_FUNCTIONS[cd['vars']['cell_function']]
cell['cm'] = CELL_MODES[cd['vars']['cell_options.mode']]
cell['clk'] = int(cd['vars']['cell_options.clock'])
# just for show sol
cell['cx'] = float(cd['vars']['cell_options.cxCell'])
cell['cy'] = float(cd['vars']['cell_options.cyCell'])
# position, first child will be the QCADesignObject
design_object = cd['children'][0]
cell['x'] = float(design_object['vars']['x'])
cell['y'] = float(design_object['vars']['y'])
# quantum dots
qdot_dicts = [child for child in cd['children']
if child['label'] == 'TYPE:CELL_DOT']
qdots = []
for d in qdot_dicts:
dot = {}
dot['x'] = float(d['vars']['x'])
dot['y'] = float(d['vars']['y'])
dot['c'] = float(d['vars']['charge'])
qdots.append(dot)
cell['qdots'] = qdots
# determine if cell is rotated, will have three x values
x = set([round(dt['x'], X_ROUND) for dt in qdots])
if len(x) == 3:
cell['rot'] = True
elif len(x) == 2:
cell['rot'] = False
else:
print('Could not decide cell rotation')
cell['rot'] = False
# keep track of polarization if cell is fixed: don't rely on labels
if cell['cf'] == CELL_FUNCTIONS['QCAD_CELL_FIXED']:
pol = qdots[0]['c']+qdots[2]['c']-qdots[1]['c']-qdots[3]['c']
pol /= qdots[0]['c']+qdots[2]['c']+qdots[1]['c']+qdots[3]['c']
cell['pol'] = pol
cells.append(cell)
return cells, spacing
## CIRCUIT PROCESSING
def build_J(cells, spacing, r_max=R_MAX):
    '''Build the J matrix for the given circuit. Restricts the interaction
    distance to r_max but does not apply any adjacency constraints'''
N = len(cells)
    # construct connectivity matrix
J = np.zeros([N, N], dtype=float)
DR = r_max*spacing
for i,j in combinations(range(N), 2):
Ek = getEk(cells[i], cells[j], DR=DR)
if Ek:
J[i,j] = J[j,i] = Ek
# remove very weak interactions
J = J*(np.abs(J) >= np.max(np.abs(J)*EK_THRESH))
return J
def zone_cells(cells, spacing, show=False):
'''Split cells into clock zones. Distinguishes disjoint zones with the
same zone index'''
N = len(cells) # number of cells
# construct connectivity matrix
J = np.zeros([N, N], dtype=float)
DR = R_MAX*spacing
    for i in range(N-1):
        for j in range(i+1, N):
Ek = getEk(cells[i], cells[j], DR=DR)
if Ek:
J[i, j] = Ek
J[j, i] = Ek
# remove very weak interactions
J = J * (np.abs(J) >= np.max(np.abs(J)*EK_THRESH))
# make full cell connectivity Graph
G = nx.Graph(J)
# if show:
# plt.figure(0)
# plt.clf()
# nx.draw_graphviz(G)
# plt.show()
# get indices for each clock index
clk = [cell['clk'] for cell in cells]
clk_ind = list(set(clk)) # will sort by default
inds = [[i for i, x in enumerate(clk) if x == ind] for ind in clk_ind]
# split graph into sub-graphs with the same clock indices
sub_G = {ind: G.subgraph(inds[ind]) for ind in clk_ind}
# split disconnected components for each label graph
sub_ind = {ind: list(nx.connected_components(sub_G[ind]))
for ind in clk_ind}
## find zone order
# create abstract zone connectivity graph
G = nx.DiGraph()
# nodes
for clk in clk_ind:
        for i in range(len(sub_ind[clk])):
key = (clk, i)
G.add_node(key, inds=sub_ind[clk][i])
# edges
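    # a zone with clock index clk is driven by zones with the previous clock
    # index (clk - 1, wrapping 0 back to 3), so edges point from the driving
    # zone to the driven one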
for clk in clk_ind:
adj_clk = 3 if clk == 0 else clk-1
        if adj_clk not in sub_ind:
continue
        for i in range(len(sub_ind[clk])):
k1 = (clk, i)
            for j in range(len(sub_ind[adj_clk])):
k2 = (adj_clk, j)
                if np.any(J[G.nodes[k1]['inds'], :][:, G.nodes[k2]['inds']]):
G.add_edge(k2, k1)
# if show:
# plt.figure(1)
# plt.clf()
# nx.draw_graphviz(G)
# plt.show()
# find input nodes, have no predecessors
    predecs = {n: len(list(G.predecessors(n))) for n in G.nodes()}
    inputs = [ky for ky, val in predecs.items() if val == 0]
# expand from inputs
visited = {key: False for key in G.nodes()}
nodes = inputs
order = [nodes]
while nodes:
new_nodes = set()
for node in nodes:
new_nodes.update(G.successors(node))
visited[node] = True
# remove already visited nodes from new nodes
new_nodes = [node for node in new_nodes if not visited[node]]
nodes = new_nodes
if nodes:
order.append(nodes)
# find feedback interactions
feedback = {}
    for n in G.nodes():
for p in G.predecessors(n):
pshell = 0
nshell = 0
pzone = 0
nzone = 0
for shell in order:
if p in shell:
pshell = order.index(shell)
pzone = shell.index(p)
if n in shell:
nshell = order.index(shell)
nzone = shell.index(n)
if pshell > nshell:
if (pshell,pzone) in feedback:
feedback[(pshell,pzone)].append((nshell,nzone))
else:
feedback[(pshell,pzone)] = [(nshell,nzone)]
# reformat order list to contain zone indices
form_func = lambda n: sub_ind[n[0]][n[1]]
order = [[form_func(zone) for zone in shell] for shell in order]
return order, J, feedback
def reorder_cells(cells, J, flipy=False):
'''Renumber cells by position rather than the default QCADesigner placement
order. Cells ordered by the tuple (zone, y, x)'''
keys = {}
ysgn = -1 if flipy else 1
# assign sortable tuples for each cell
for ind, cell in enumerate(cells):
keys[ind] = (ysgn*cell['y'], cell['x'])
    order = list(zip(*sorted([(keys[i], i) for i in keys])))[1]
# relabel cells and reorder the J matrix
cells = [cells[i] for i in order]
J = J[order, :][:, order]
for i in range(len(cells)):
cells[i]['num'] = i
cells[i]['number'] = i
return cells, J
## MAIN FUNCTION
def parse_qca_file(fn, verbose=False):
'''Parse a QCADesigner file to extract cell properties. Returns an ordered
list of cells, the QCADesigner grid spacing in nm, a list structure of the
    indices of each clock zone (propagating from inputs), and a coupling matrix
J which contains the Ek values for cells within a radius of R_MAX times the
grid spacing'''
# build data hierarchy
hier = build_hierarchy(fn)
# extract useful information from data hierarchy
cells, spacing = proc_hierarchy(hier)
if verbose:
print('Parsed QCA file...')
for cell in cells:
cell['clk'] = 0
# construct J matrix
J = build_J(cells, spacing)
# reorder cells by zone and position
cells, J = reorder_cells(cells, J)
return cells, spacing, J
1f0fed3d680bffcc5eeafee6ce65b7395cfecca1 | 7,391 | py | Python | docs/Tutorial/1-glm/plot_1_LinearRegression.py | bbayukari/abess | 3b21b0a58cac6c1464ec9403ffbe4902fee7b890 | ["Intel"]
"""
=================
Linear Regression
=================
In this tutorial, we are going to demonstrate how to use the ``abess`` package to carry out best subset selection
in linear regression with both simulated data and real data.
"""
###############################################################################
#
# Our package ``abess`` implements a polynomial algorithm in the following best-subset selection problem:
#
# .. math::
# \min_{\beta\in \mathbb{R}^p} \frac{1}{2n} ||y-X\beta||^2_2,\quad \text{s.t.}\ ||\beta||_0\leq s,
#
#
# where :math:`\| \cdot \|_2` is the :math:`\ell_2` norm, :math:`\|\beta\|_0=\sum_{i=1}^pI( \beta_i\neq 0)`
# is the :math:`\ell_0` norm of :math:`\beta`, and the sparsity level :math:`s`
# is an unknown non-negative integer to be determined.
# Next, we present an example to show the ``abess`` package can get an optimal estimation.
#
# Toward optimality: adaptive best-subset selection
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Synthetic dataset
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# We generate a design matrix :math:`X` containing :math:`n = 300` observations and each observation has :math:`p = 1000` predictors.
# The response variable :math:`y` is linearly related to the first, second, and fifth predictors in :math:`X`:
#
# .. math::
# y = 3X_1 + 1.5X_2 + 2X_5 + \epsilon,
#
# where :math:`\epsilon` is a standard normal random variable.
import numpy as np
from abess.datasets import make_glm_data
np.random.seed(0)
n = 300
p = 1000
true_support_set = [0, 1, 4]
true_coef = np.array([3, 1.5, 2])
real_coef = np.zeros(p)
real_coef[true_support_set] = true_coef
data1 = make_glm_data(n=n, p=p, k=len(true_coef), family="gaussian", coef_=real_coef)
print(data1.x.shape)
print(data1.y.shape)
# %%
# This dataset is high-dimensional and poses a serious challenge for subset selection.
# As a typical example, it mimics the kind of data that appears in real-world modern scientific research and data mining,
# and serves as a good quick example for demonstrating the power of the ``abess`` library.
#
# Optimality
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The optimality of subset selection means:
#
# - ``true_support_set`` (i.e. ``[0, 1, 4]``) can be exactly identified;
# - the estimated coefficients are the `ordinary least squares (OLS) estimator <https://en.wikipedia.org/wiki/Ordinary_least_squares>`__ under the true subset, and are therefore very close to ``true_coef = np.array([3, 1.5, 2])``.
#
# To understand the second criterion, we take a look at the estimate given by the ``scikit-learn`` library:
from sklearn.linear_model import LinearRegression as SKLLinearRegression
sklearn_lr = SKLLinearRegression()
sklearn_lr.fit(data1.x[:, [0, 1, 4]], data1.y)
print("OLS estimator: ", sklearn_lr.coef_)
# %%
# The fitted coefficients ``sklearn_lr.coef_`` are the OLS estimator
# when the true support set is known.
# They are very close to ``true_coef``, and are hard to improve upon at a finite sample size.
# %%
# Adaptive Best Subset Selection
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# The adaptive best subset selection (ABESS) algorithm is a very powerful tool for selecting the best subset.
# We will illustrate its power by showing that it can reach this optimality.
#
# The following code shows the simple syntax for using ABESS algorithm via ``abess`` library.
from abess import LinearRegression
model = LinearRegression()
model.fit(data1.x, data1.y)
# %%
# The ``LinearRegression`` class in ``abess`` is designed for selecting the best subset under the linear model,
# and can be imported by: ``from abess import LinearRegression``.
# Following a similar syntax to ``scikit-learn``, we can fit the data via the ABESS algorithm.
#
# Next, we are going to see that the above approach can successfully recover the true set ``np.array([0, 1, 4])``.
# The fitted coefficients are stored in ``model.coef_``.
# We use the ``np.nonzero`` function to find the subset selected by ``abess``,
# and we can extract the non-zero entries of ``model.coef_``, which are the coefficient estimates for the selected predictors.
#
ind = np.nonzero(model.coef_)
print("estimated non-zero: ", ind)
print("estimated coef: ", model.coef_[ind])
# %%
# From the result, we know that ``abess`` exactly found the true set ``np.array([0, 1, 4])`` among all 1000 predictors.
# Besides, the estimated coefficients are quite close to the real ones,
# and exactly match the estimate ``sklearn_lr.coef_`` given by ``scikit-learn``.
###############################################################################
# Real data example
# ^^^^^^^^^^^^^^^^^
#
# Hitters Dataset
# ~~~~~~~~~~~~~~~
# Now we focus on real data on the `Hitters dataset <https://www.kaggle.com/floser/hitters>`__.
# We hope to use several predictors related to the performance of
# the baseball athletes last year to predict their salary.
#
# First, let's have a look at this dataset. There are 19 variables except
# `Salary` and 322 observations.
import os
import pandas as pd
data2 = pd.read_csv(os.path.join(os.getcwd(), 'Hitters.csv'))
print(data2.shape)
print(data2.head(5))
# %%
# Since the dataset contains some missing values, we simply drop those rows with missing values.
# Then we have 263 observations remain:
data2 = data2.dropna()
print(data2.shape)
# %%
# What is more, before fitting, we need to convert the categorical
# variables into dummy variables:
data2 = pd.get_dummies(data2)
data2 = data2.drop(['League_A', 'Division_E', 'NewLeague_A'], axis=1)
print(data2.shape)
print(data2.head(5))
###############################################################################
# Model Fitting
# ~~~~~~~~~~~~~
# As with the simulated data, an adaptive best subset model can be formed
# easily:
x = np.array(data2.drop('Salary', axis=1))
y = np.array(data2['Salary'])
model = LinearRegression(support_size=range(20))
model.fit(x, y)
# %%
# The result can be shown as follows:
ind = np.nonzero(model.coef_)
print("non-zero:\n", data2.columns[ind])
print("coef:\n", model.coef_)
# %%
# Automatically, variables `Hits`, `CRBI`, `PutOuts`, `League\_N` are
# chosen in the model (the chosen sparsity level is 4).
###############################################################################
# More on the results
# ~~~~~~~~~~~~~~~~~~~
# We can also plot the solution path of the abess process:
import matplotlib.pyplot as plt
coef = np.zeros((20, 19))
ic = np.zeros(20)
for s in range(20):
model = LinearRegression(support_size=s)
model.fit(x, y)
coef[s, :] = model.coef_
ic[s] = model.ic_
for i in range(19):
plt.plot(coef[:, i], label=i)
plt.xlabel('support_size')
plt.ylabel('coefficients')
plt.title('ABESS Path')
plt.show()
# %%
# Besides, we can also generate a graph about the tuning parameter.
# Remember that we used the default EBIC to tune the support size.
plt.plot(ic, 'o-')
plt.xlabel('support_size')
plt.ylabel('EBIC')
plt.title('Model selection via EBIC')
plt.show()
# %%
# In EBIC criterion, a subset with the support size 3 has the lowest value,
# so the process adaptively chooses 3 variables.
# Note that under other information criteria, the result may be different.
###############################################################################
# R tutorial
# ^^^^^^^^^^
# For R tutorial, please view
# https://abess-team.github.io/abess/articles/v01-abess-guide.html.
1f11e9df2b051fcb60ef9a9128d6a058c4f210e2 | 2,386 | py | Python | pyppeteer/tracing.py | cr1pt/pypyteer | b3aade3741b385f2e1dde600b501776f1f5e8479 | ["MIT"]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tracing module."""
import asyncio
from pathlib import Path
from typing import Any, Awaitable
from pyppeteer.connection import Session
class Tracing(object):
"""Tracing class."""
def __init__(self, client: Session) -> None:
"""Make new tracing object."""
self._client = client
self._recording = False
self._path = ''
async def start(self, options: dict = None, **kwargs: Any) -> None:
"""Start."""
options = options or dict()
options.update(kwargs)
categoriesArray = [
'-*', 'devtools.timeline', 'v8.execute',
'disabled-by-default-devtools.timeline',
'disabled-by-default-devtools.timeline.frame', 'toplevel',
'blink.console', 'blink.user_timing', 'latencyInfo',
'disabled-by-default-devtools.timeline.stack',
'disabled-by-default-v8.cpu_profiler',
]
if 'screenshots' in options:
categoriesArray.append('disabled-by-default-devtools.screenshot')
self._path = options.get('path', '')
self._recording = True
await self._client.send('Tracing.start', {
'transferMode': 'ReturnAsStream',
'categories': ','.join(categoriesArray),
})
async def stop(self) -> Awaitable:
"""Stop."""
contentPromise = asyncio.get_event_loop().create_future()
self._client.once(
'Tracing.tracingComplete',
lambda event: asyncio.ensure_future(
self._readStream(event.get('stream'), self._path)
).add_done_callback(
lambda fut: contentPromise.set_result(
fut.result()) # type: ignore
)
)
await self._client.send('Tracing.end')
self._recording = False
return await contentPromise
    async def _readStream(self, handle: str, path: str) -> None:
        # Read the trace stream from the browser in chunks; only open and
        # write the output file when a destination path was actually given
        # (Path('').open() would fail otherwise).
        eof = False
        f = Path(path).open('w') if path else None
        try:
            while not eof:
                response = await self._client.send('IO.read', {
                    'handle': handle
                })
                eof = response.get('eof', False)
                if f:
                    f.write(response.get('data', ''))
        finally:
            if f:
                f.close()
        await self._client.send('IO.close', {'handle': handle})
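# A minimal usage sketch (hypothetical wiring; in practice the Session comes
# from an attached pyppeteer page):
#
#   tracing = Tracing(session)
#   await tracing.start({'path': 'trace.json', 'screenshots': True})
#   ...  # interact with the page
#   await tracing.stop()  # resolves once the trace has been written out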
1f126ef87161ba2d8fbb4e598c5bbb09c32019bd | 2,627 | py | Python | src/wai/annotations/isp/map_labels/component/_MapLabels.py | waikato-ufdl/wai-annotations-core | bac3429e9488efb456972c74f9d462f951c4af3d | ["Apache-2.0"] | issues: 3 (2021-06-30T23:42:47.000Z to 2022-03-01T03:45:07.000Z)
from typing import Optional, Dict
from wai.common.adams.imaging.locateobjects import LocatedObjects
from wai.common.cli.options import TypedOption
from ....core.component import ProcessorComponent
from ....core.stream import ThenFunction, DoneFunction
from ....core.stream.util import RequiresNoFinalisation
from ....core.util import InstanceState
from ....domain.image.object_detection import ImageObjectDetectionInstance
from ....domain.image.object_detection.util import get_object_label, set_object_label
class MapLabels(
RequiresNoFinalisation,
ProcessorComponent[ImageObjectDetectionInstance, ImageObjectDetectionInstance]
):
"""
Processes a stream of object-detection instances, mapping labels
from one set to another.
"""
label_mapping = TypedOption(
"-m", "--mapping",
type=str,
metavar="old=new", action='concat',
help="mapping for labels, for replacing one label string with another (eg when fixing/collapsing labels)"
)
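    # For example, passing "-m cat=animal -m dog=animal" would collapse the
    # 'cat' and 'dog' labels into a single 'animal' label (label names here
    # are purely illustrative).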
@InstanceState
def label_table(self) -> Dict[str, str]:
label_table = {}
for map_string in self.label_mapping:
old, new = map_string.split("=")
# Make sure we don't double-map a label
if old in label_table:
raise ValueError(f"Multiple mappings specified for label '{old}': "
f"{label_table[old]}, {new}")
label_table[old] = new
return label_table
def process_element(
self,
element: ImageObjectDetectionInstance,
then: ThenFunction[ImageObjectDetectionInstance],
done: DoneFunction
):
# Apply the label mapping
self.apply_label_mapping(element.annotations)
then(element)
def apply_label_mapping(self, located_objects: LocatedObjects):
"""
Maps the labels in the located objects from their current value to
their new value.
:param located_objects: The parsed objects
"""
# Do nothing if no mapping provided
if len(self.label_table) == 0:
return
# Process each object
for located_object in located_objects:
# Get the object's current label
label: Optional[str] = get_object_label(located_object, None)
# If the object doesn't have a label, skip it
if label is None:
continue
# If there is a mapping for this label, change it
if label in self.label_table:
set_object_label(located_object, self.label_table[label])
1f14096bca569e364e31b3699b308c6507e8fe1b | 8,221 | py | Python | nlg/app.py | samrudh/gramex-nlg | fb1b1ce14347947c8644adda7bd63856dcb2ce3d | ["MIT"]
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
"""
Module exposed to gramex. This shouldn't be imported anywhere directly; it is
only for use with gramex.
"""
import glob
import json
import os
import os.path as op
import pandas as pd
from six.moves.urllib import parse
from tornado.template import Template
from gramex.apps.nlg import grammar
from gramex.apps.nlg import nlgutils as utils
from gramex.apps.nlg import templatize
from gramex.config import variables
DATAFILE_EXTS = {'.csv', '.xls', '.xlsx', '.tsv'}
nlg_path = op.join(variables['GRAMEXDATA'], 'nlg')
if not op.isdir(nlg_path):
os.mkdir(nlg_path)
def clean_anonymous_files():
"""Remove all files uploaded by anonymous users.
This may be used at startup when deploying the app."""
import shutil
anon_dir = op.join(nlg_path, 'anonymous')
if op.isdir(anon_dir):
shutil.rmtree(anon_dir)
def is_user_authenticated(handler):
"""Check if the current user is authenticated."""
current_user = getattr(handler, 'current_user', False)
return bool(current_user)
def get_user_dir(handler):
if is_user_authenticated(handler):
dirpath = op.join(nlg_path, handler.current_user.id)
else:
dirpath = op.join(nlg_path, 'anonymous')
return dirpath
def render_live_template(handler):
"""Given a narrative ID and df records, render the template."""
payload = json.loads(handler.request.body)
orgdf = get_original_df(handler)
nrid = payload['nrid']
if not nrid.endswith('.json'):
nrid += '.json'
df = pd.DataFrame.from_records(payload['data'])
nrpath = op.join(nlg_path, handler.current_user.id, nrid)
with open(nrpath, 'r') as fout: # noqa: No encoding for json
templates = json.load(fout)
narratives = []
for t in templates['config']:
tmpl = utils.add_html_styling(t['template'], payload['style'])
s = Template(tmpl).generate(df=df, fh_args=t.get('fh_args', {}),
G=grammar, U=utils, orgdf=orgdf)
rendered = s.decode('utf8')
narratives.append(rendered)
return '\n'.join(narratives)
def get_original_df(handler):
"""Get the original dataframe which was uploaded to the webapp."""
data_dir = get_user_dir(handler)
with open(op.join(data_dir, 'meta.cfg'), 'r') as fout: # noqa: No encoding for json
meta = json.load(fout)
dataset_path = op.join(data_dir, meta['dsid'])
return pd.read_csv(dataset_path, encoding='utf-8')
def render_template(handler):
"""Render a set of templates against a dataframe and formhandler actions on it."""
orgdf = get_original_df(handler)
payload = json.loads(handler.request.body.decode('utf8'))
fh_args = payload['args']
templates = payload['template']
df = pd.DataFrame.from_records(payload['data'])
# fh_args = {k: [x.lstrip('-') for x in v] for k, v in fh_args.items()}
resp = []
for t in templates:
rendered = Template(t).generate(
orgdf=orgdf, df=df, fh_args=fh_args, G=grammar, U=utils).decode('utf8')
rendered = rendered.replace('-', '')
# grmerr = utils.check_grammar(rendered)
resp.append({'text': rendered}) # , 'grmerr': grmerr})
return json.dumps(resp)
def process_text(handler):
"""Process English text in the context of a df and formhandler arguments
to templatize it."""
payload = json.loads(handler.request.body.decode('utf8'))
df = pd.DataFrame.from_records(payload['data'])
args = payload.get('args', {}) or {}
resp = []
for t in payload['text']:
# grammar_errors = yield utils.check_grammar(t)
replacements, t, infl = templatize(t, args.copy(), df)
resp.append({
'text': t, 'tokenmap': replacements, 'inflections': infl,
'fh_args': args
# 'grmerr': json.loads(grammar_errors.decode('utf8'))['matches']
})
return json.dumps(resp)
def read_current_config(handler):
"""Read the current data and narrative IDs written to the session file."""
user_dir = get_user_dir(handler)
meta_path = op.join(user_dir, 'meta.cfg')
if not op.isdir(user_dir):
os.mkdir(user_dir)
if not op.isfile(meta_path):
return {}
with open(meta_path, 'r') as fout: # noqa: No encoding for json
meta = json.load(fout)
return meta
def get_dataset_files(handler):
"""Get all filenames uploaded by the user.
Parameters
----------
handler : tornado.RequestHandler
Returns
-------
list
List of filenames.
"""
files = glob.glob('{}/*'.format(get_user_dir(handler)))
return [f for f in files if op.splitext(f)[-1].lower() in DATAFILE_EXTS]
def get_narrative_config_files(handler):
"""Get list of narrative config files generated by the user.
Parameters
----------
handler : tornado.RequestHandler
Returns
-------
list
List of narrative configurations.
"""
return glob.glob('{}/*.json'.format(get_user_dir(handler)))
def save_config(handler):
"""Save the current narrative config.
(to $GRAMEXDATA/{{ handler.current_user.id }})"""
payload = {}
for k in ['config', 'name', 'dataset']:
payload[k] = parse.unquote(handler.args[k][0])
payload['config'] = json.loads(payload['config'])
nname = payload['name']
if not nname.endswith('.json'):
nname += '.json'
payload['dataset'] = parse.unquote(handler.args['dataset'][0])
fpath = op.join(nlg_path, handler.current_user.id, nname)
with open(fpath, 'w') as fout: # noqa: No encoding for json
json.dump(payload, fout, indent=4)
def get_gramopts(handler):
"""Find all Grammar and token inflection options from the NLG library.
Primarily used for creating the select box in the template settings dialog."""
funcs = {}
for attrname in dir(grammar):
obj = getattr(grammar, attrname)
if getattr(obj, 'gramopt', False):
funcs[obj.fe_name] = {'source': obj.source, 'func_name': attrname}
return funcs
def init_form(handler):
"""Process input from the landing page and write the current session config."""
meta = {}
data_dir = get_user_dir(handler)
if not op.isdir(data_dir):
os.makedirs(data_dir)
# handle dataset
data_file = handler.request.files.get('data-file', [{}])[0]
if data_file:
# TODO: Unix filenames may not be valid Windows filenames.
outpath = op.join(data_dir, data_file['filename'])
with open(outpath, 'wb') as fout:
fout.write(data_file['body'])
else:
dataset = handler.args['dataset'][0]
outpath = op.join(data_dir, dataset)
# shutil.copy(outpath, fh_fpath)
meta['dsid'] = op.basename(outpath)
# handle config
config_name = handler.get_argument('narrative', '')
if config_name:
config_path = op.join(data_dir, config_name)
# shutil.copy(config_path, op.join(local_data_dir, 'config.json'))
meta['nrid'] = op.basename(config_path)
# write meta config
with open(op.join(data_dir, 'meta.cfg'), 'w') as fout: # NOQA
json.dump(meta, fout, indent=4)
def edit_narrative(handler):
"""Set the handler's narrative and dataset ID to the current session."""
user_dir = op.join(nlg_path, handler.current_user.id)
dataset_name = handler.args.get('dsid', [''])[0]
narrative_name = handler.args.get('nrid', [''])[0] + '.json'
with open(op.join(user_dir, 'meta.cfg'), 'w') as fout: # NOQA: no encoding for JSON
json.dump({'dsid': dataset_name, 'nrid': narrative_name}, fout, indent=4)
def get_init_config(handler):
"""Get the initial default configuration for the current user."""
user_dir = get_user_dir(handler)
metapath = op.join(user_dir, 'meta.cfg')
if op.isfile(metapath):
with open(metapath, 'r') as fout: # NOQA: no encoding for JSON
meta = json.load(fout)
config_file = op.join(user_dir, meta.get('nrid', ''))
if op.isfile(config_file):
with open(config_file, 'r') as fout: # NOQA: no encoding for JSON
meta['config'] = json.load(fout)
return meta
return {}
1f14219d12c0adf9ade099f871dd4550e114601e | 3,682 | py | Python | data/main_data_flow.py | SterArcher/OHCA-registry-Slovenia | ad8278a28039503ab6a75d48ffea314de9a759ba | ["MIT"] | stars: 1 (2022-02-28T13:02:14.000Z) | issues: 1 (2022-03-20T10:51:17.000Z to 2022-03-21T07:52:57.000Z, repo SterArcher/dispatch)
import plotly.graph_objects as go
import plotly as plt
import random
# Uncomment the names you want the diagram to show
# Names in english
# sta = "Statistical Office"
# si = "Emergency call admission" #"sprejem intervencij"
# pni = "Emergency intervention report" #"poročilo/protokol nujne intervencije"
# pnrv = "Emergency protocol of the out-of-hospital EMS" # "protokol nujnega reševalnega vozila"
# ppo = "Out-of-hospital CPR" #"predbolnišnično oživljanje"
# utst = "Supplementary Utstein protocol"
# nijz = "National Institute of Public Health" #"NIJZ (v primeru smrti)"
# hosp = "Hospitals" # Mostly as triage protocols, status/history/diagnostic results and discharge letters
# disp = "Dispatch service"
# ppp = "First responders"
# comp = "IT system provider" #"Computel"
# api = "API"
# api_csv = "API/CSV"
# db = "Utstein database"
# title_text = "Representation of data flow for the Slovenian OHCA registry based on the Utstein protocol."
# Names in Slovene
si = "Sprejem intervencij" #"sprejem intervencij"
pni = "Protokol nujne intervencije" #"poročilo/protokol nujne intervencije"
pnrv = "Protokol nujnega reševalnega vozila" # "protokol nujnega reševalnega vozila"
ppo = "Protokol predbolnišničnega oživljanja" #"predbolnišnično oživljanje"
utst = "Dodatni protokol Utstein"
nijz = "NIJZ" #"NIJZ (v primeru smrti)"
hosp = "Bolnišnice" # Mostly as triage protocols, status/history/diagnostic results and discharge letters
disp = "Dispečerska služba zdravstva"
ppp = "Protokol prvih posredovalcev"
comp = "Ponudnik informacijske tehnologije" #"Computel"
sta = "Statistični urad"
api = "API"
api_csv = "API/CSV"
db = "Baza podatkov Utstein"
title_text = "Prikaz pretoka podatkov za Register slovenskih predbolnišničnih srčnih dogodkov v skladu s protokolom Utstein."
def random_color_generator():
r = random.randint(0, 255)
g = random.randint(0, 255)
b = random.randint(0, 255)
return [r, g, b]
colors, colors_conn = [], []
for i in range(25):
[r, g, b] = random_color_generator()
colors.append("rgba(" + str(r) + "," + str(g) + "," + str(b) + "," + str(0.9) + ")")
colors_conn.append("rgba(" + str(r) + "," + str(g) + "," + str(b) + "," + str(0.5) + ")")
elements = [si, pni, pnrv, ppo, utst, nijz, hosp, disp, ppp, comp, api, api_csv, db]
labels, counter = dict(), 0
for elt in elements:
labels[elt] = counter
counter += 1
labels[sta] = counter
protocols, rest = [si, pni, pnrv, ppo, utst], [nijz, hosp, disp, ppp]
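# The five protocols flow through the IT provider and its API into the Utstein
# database, while the remaining sources (NIJZ, hospitals, dispatch, first
# responders) feed it directly via API/CSV exports.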
connections = dict()
for protocol in protocols:
connections[(labels[protocol], labels[comp])] = 1
for elt in rest:
connections[(labels[elt], labels[api_csv])] = 1
connections[(labels[comp], labels[api])] = len(protocols)
connections[(labels[api_csv], labels[db])] = len(rest)
connections[(labels[api], labels[db])] = len(protocols)
connections[(labels[sta], labels[db])] = 1
label = list(labels.keys())
sources, targets, values = [], [], []
for key in connections:
sources.append(key[0])
targets.append(key[1])
values.append(connections[key])
fig = go.Figure(data = [go.Sankey(
    valueformat = ".0f",
    valuesuffix = "",  # link values are plain protocol counts, so no unit suffix
node = dict(pad = 15,
thickness = 20,
line = dict(color="black", width = 0.5),
label = label,
color = colors),
link = dict(source = sources,
target = targets,
value = values,
#label = 'label',
color = colors_conn))]) # 'rgb(220,220,220)'
fig.update_layout(title=dict(text=title_text, font=dict(size = 20, color = 'gray')),
font=dict(size = 12, color = 'black'),
paper_bgcolor="rgba(0,0,0,0)",
plot_bgcolor="rgba(0,0,0,0)")
fig.show()
1f1586c55bd8026b70c428056979527a8012b8fd | 8,468 | py | Python | apcadastros.py | Alexsussa/ap-cadastros | 9b5e9b57970a6a044ebde071a68403e0d513e89b | ["MIT"]
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
__developer__ = 'Alex Pinheiro'
__version__ = 1.4
__build__ = 6
import sqlite3
from tkinter.ttk import *
from tkinter.filedialog import *
from threading import Thread
from utils import Utils
from login import Login
u = Utils
# Lists
estados = ['AC', 'AL', 'AP', 'AM', 'BA', 'CE', 'DF', 'ES', 'GO', 'MA', 'MT', 'MS', 'MG', 'PA',
           'PB', 'PR', 'PE', 'PI', 'RJ', 'RN', 'RS', 'RO', 'RR', 'SC', 'SP', 'SE', 'TO']
cidades = []
cpfcnpjs = ['CPF', 'CNPJ']
# Main window
class Clientes(Thread, Tk):
def __init__(self, master=None):
Thread.__init__(self)
banco = 'banco/dados.db'
conexao = sqlite3.connect(banco)
c = conexao.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS clientes (id INTEGER PRIMARY KEY AUTOINCREMENT,
cliente TEXT VARCHAR(30) UNIQUE NOT NULL, cpf_cnpj TINYINT(18) UNIQUE NOT NULL, telefone TINYINT(15) NOT NULL,
        cep TINYINT(10) NOT NULL, endereco TEXT VARCHAR(30) NOT NULL, numero TINYINT(5) NOT NULL,
bairro TEXT VARCHAR(20) NOT NULL, cidade TEXT VARCHAR(15) NOT NULL, estado TEXT VARCHAR(2) NOT NULL)''')
conexao.commit()
self.c0 = Frame(master)
self.c0.pack(pady=20)
self.c1 = Frame(master)
self.c1.pack(pady=10)
self.c2 = Frame(master)
self.c2.pack(pady=10)
self.c3 = Frame(master)
self.c3.pack(pady=10)
self.c4 = Frame(master)
self.c4.pack()
        # Top menu bar (not fully implemented yet)
self.menuBar = Menu(janela, bd=0, bg='#d9d9d9')
self.menuArquivo = Menu(self.menuBar, tearoff=0)
self.menuArquivo.add_command(label='Produtos', command=self.produtos, accelerator='Ctrl+P')
self.menuArquivo.add_command(label='Salvar', command=lambda: u.cadastrarClientes(self), accelerator='Ctrl+S')
self.menuArquivo.add_command(label='Atualizar', command=lambda: u.atualizar(self), accelerator='Ctrl+U')
self.menuArquivo.add_command(label='Deletar', command=lambda: u.deletar(self), accelerator='Ctrl+D')
self.menuArquivo.add_separator()
self.menuArquivo.add_command(label='Sair', command=janela.destroy, accelerator='Ctrl+Q')
self.menuBar.add_cascade(label='Arquivo', menu=self.menuArquivo)
self.menuAjuda = Menu(self.menuBar, tearoff=0)
self.menuAjuda.add_command(label='Sobre', command=lambda: u.sobre(self, window=janela), accelerator='Ctrl+H')
self.menuBar.add_cascade(label='Ajuda', menu=self.menuAjuda)
janela.config(menu=self.menuBar)
self.lbid = Label(self.c1, text='ID:', width=3)
self.lbid.pack(side=LEFT)
self.txtid = Combobox(self.c1, width=8, background='white', foreground='black',
values=u.listaID(self))
self.txtid.pack(side=LEFT)
        self.btnlupa = Button(self.c1, width=20, height=20, bg='white', command=lambda: u.lupaID(self))
self.lupa = PhotoImage(file='imagens/lupa.png')
self.btnlupa.config(image=self.lupa)
self.btnlupa.image = self.lupa
self.lbcliente = Label(self.c1, text='CLIENTE:', width=8)
self.lbcliente.pack(side=LEFT)
self.txtcliente = Entry(self.c1, width=30, background='white', foreground='black')
self.txtcliente.pack(side=LEFT)
self.lbcpfcnpj = Combobox(self.c1, text='CPF/CNPJ:', width=5, values=cpfcnpjs)
self.lbcpfcnpj.pack(side=LEFT, padx=3)
self.lbcpfcnpj.set(cpfcnpjs[0])
self.lbcpfcnpj.bind('<<ComboboxSelected>>', lambda e: u.maskCampos(self))
self.txtcpfcnpj = Entry(self.c1, width=18, background='white', foreground='black')
self.txtcpfcnpj.pack(side=LEFT)
self.btnlupa = Button(self.c1, width=20, height=20, bg='white', command=lambda: u.lupaCPF(self))
self.lupa = PhotoImage(file='imagens/lupa.png')
self.btnlupa.config(image=self.lupa)
self.btnlupa.image = self.lupa
self.btnlupa.pack(side=LEFT, padx=2)
self.lbtelcel = Label(self.c2, text='TEL/CEL:', width=8)
self.lbtelcel.pack(side=LEFT)
self.txttelcel = Entry(self.c2, text='Telefone ou Celular...', width=15, bg='white', fg='black')
self.txttelcel.pack(side=LEFT)
self.lbcep = Label(self.c2, text='CEP:', width=4)
self.lbcep.pack(side=LEFT)
self.txtcep = Entry(self.c2, width=10, bg='white', fg='black')
self.txtcep.pack(side=LEFT)
self.btnlupa = Button(self.c2, width=20, height=20, bg='white', command=lambda: u.buscaCep(self))
self.lupa = PhotoImage(file='imagens/lupa.png')
self.btnlupa.config(image=self.lupa)
self.btnlupa.image = self.lupa
self.btnlupa.pack(side=LEFT, padx=2)
self.lbendereco = Label(self.c2, text='ENDEREÇO:', width=10)
self.lbendereco.pack(side=LEFT)
self.txtendereco = Entry(self.c2, width=30, bg='white', fg='black')
self.txtendereco.pack(side=LEFT)
self.lbnumero = Label(self.c2, text='Nº:', width=3)
self.lbnumero.pack(side=LEFT)
self.txtnumero = Entry(self.c2, width=5, bg='white', fg='black')
self.txtnumero.pack(side=LEFT)
self.lbbairro = Label(self.c3, text='BAIRRO:', width=7)
self.lbbairro.pack(side=LEFT)
self.txtbairro = Entry(self.c3, width=30, bg='white', fg='black')
self.txtbairro.pack(side=LEFT)
self.lbcidade = Label(self.c3, text='CIDADE:', width=7)
self.lbcidade.pack(side=LEFT)
self.txtcidade = Entry(self.c3, width=20, background='white', foreground='black')
self.txtcidade.pack(side=LEFT)
self.lbestado = Label(self.c3, text='ESTADO:', width=7)
self.lbestado.pack(side=LEFT)
self.txtestado = Combobox(self.c3, width=3, background='white', foreground='black',
values=sorted(estados))
self.txtestado.pack(side=LEFT)
self.logo = Label(self.c4, image=imglogo)
self.logo.pack()
###############################################################################
        # Mouse (right-click) menu
self.MenuMouse = Menu(tearoff=0)
self.MenuMouse.add_command(label='Cortar')
self.MenuMouse.add_command(label='Copiar')
self.MenuMouse.add_command(label='Colar')
janela.bind('<Button-3><ButtonRelease-3>', self.MostrarMenuMouse)
# Binds
self.txtid.bind('<<ComboboxSelected>>', lambda e: u.lupaID(self))
janela.bind('<Button-1>', lambda e: u.maskCampos(self))
janela.bind('<Control-S>', lambda e: u.cadastrarClientes(self))
janela.bind('<Control-s>', lambda e: u.cadastrarClientes(self))
janela.bind('<Control-U>', lambda e: u.atualizar(self))
janela.bind('<Control-u>', lambda e: u.atualizar(self))
janela.bind('<Control-D>', lambda e: u.deletar(self))
janela.bind('<Control-d>', lambda e: u.deletar(self))
janela.bind('<Control-L>', lambda e: u.limpar(self))
janela.bind('<Control-l>', lambda e: u.limpar(self))
janela.bind('<Control-Q>', lambda e: janela.destroy())
janela.bind('<Control-q>', lambda e: janela.destroy())
janela.bind('<Control-P>', lambda e: self.produtos())
janela.bind('<Control-p>', lambda e: self.produtos())
janela.bind('<Control-H>', lambda e: u.sobre(self, window=janela))
janela.bind('<Control-h>', lambda e: u.sobre(self, window=janela))
def MostrarMenuMouse(self, event):
w = event.widget
self.MenuMouse.entryconfigure('Cortar', command=lambda: w.event_generate('<<Cut>>'))
self.MenuMouse.entryconfigure('Copiar', command=lambda: w.event_generate('<<Copy>>'))
self.MenuMouse.entryconfigure('Colar', command=lambda: w.event_generate('<<Paste>>'))
self.MenuMouse.tk.call('tk_popup', self.MenuMouse, event.x_root, event.y_root)
    def produtos(self):
        from produtos import jan
        janela.iconify()
        # Toggle the products window based on its current state
        if jan.state() == 'withdrawn':
            jan.deiconify()
            jan.focus_force()
        else:
            jan.withdraw()
            janela.deiconify()
# End of the clients window
janela = Tk()
imglogo = PhotoImage(file='imagens/logo.png')
iconejanela = PhotoImage(file='imagens/iconejanela.png')
Clientes(janela)
janela.tk.call('wm', 'iconphoto', janela._w, iconejanela)
janela.title('AP CADASTROS - CLIENTES')
janela.geometry('800x450')
janela.resizable(False, False)
janela.mainloop()
| 41.920792
| 118
| 0.62931
| 1,095
| 8,468
| 4.829224
| 0.238356
| 0.033283
| 0.049924
| 0.057489
| 0.364032
| 0.231467
| 0.221256
| 0.20556
| 0.20556
| 0.197428
| 0
| 0.018508
| 0.202409
| 8,468
| 201
| 119
| 42.129353
| 0.764436
| 0.018777
| 0
| 0.071895
| 0
| 0.019608
| 0.148869
| 0.006081
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019608
| false
| 0
| 0.045752
| 0
| 0.071895
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f18f11b5d9f381e25d945aa36634594b061dc4c
| 3,749
|
py
|
Python
|
exps/supp-synthetic/notebooks/hp_analysis.py
|
Viktour19/overlap-code
|
f5c6e63146a00f65710c38b9181bb9d12de6454f
|
[
"MIT"
] | 2
|
2020-07-09T03:15:58.000Z
|
2022-03-09T11:57:17.000Z
|
exps/supp-synthetic/notebooks/hp_analysis.py
|
Viktour19/overlap-code
|
f5c6e63146a00f65710c38b9181bb9d12de6454f
|
[
"MIT"
] | null | null | null |
exps/supp-synthetic/notebooks/hp_analysis.py
|
Viktour19/overlap-code
|
f5c6e63146a00f65710c38b9181bb9d12de6454f
|
[
"MIT"
] | 1
|
2021-05-18T11:55:04.000Z
|
2021-05-18T11:55:04.000Z
|
#!/usr/bin/env python
# coding: utf-8
from sacred.observers import TinyDbReader
import pdb
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def get_exclusion_metadata(
d,
ideal_rule = [['i0', 'not', ''], ['i1', 'not', '']],
w_lb=1e-8):
r = dict(d)
r['rule_avg_coverage'] = np.mean([rule['p_covered'] for rule in r['rule_stats']])
r['rule_n_perfect'] = np.sum([rule['n_covered'] == 0 for rule in r['rule_stats']])
r['rule_n_total'] = len(r['rule_stats'])
r['rule_avg_length'] = np.mean([len(rule) for rule in r['rules']])
ideal_literal_idx = np.array([i in ideal_rule for i in r['z_index']])
dirty_rules_idx = r['z_values'][ideal_literal_idx].sum(axis=0) == len(ideal_rule)
clean_rules_idx = np.logical_and(
dirty_rules_idx,
r['z_values'][~ideal_literal_idx].sum(axis=0) == 0
)
# Make sure dirty rules exclude the clean rule
dirty_rules_idx = np.logical_xor(dirty_rules_idx, clean_rules_idx)
other_rules_idx = np.logical_not(np.logical_or(dirty_rules_idx, clean_rules_idx))
assert sum(clean_rules_idx) <= 1
# Rules considered (i.e., they show up in W at all)
r['n_lp_rules_considered_dirty'] = dirty_rules_idx.sum()
r['n_lp_rules_considered_clean'] = clean_rules_idx.sum()
r['n_lp_rules_considered_other'] = other_rules_idx.sum()
# Rules used (i.e., non-zero values in W)
r['n_lp_coeff_above_lb_dirty'] = np.logical_and(
dirty_rules_idx, r['w'] > w_lb).sum()
r['n_lp_coeff_above_lb_clean'] = np.logical_and(
clean_rules_idx, r['w'] > w_lb).sum()
r['n_lp_coeff_above_lb_other'] = np.logical_and(
other_rules_idx, r['w'] > w_lb).sum()
# Average value of coefficients
# r['lp_coeff_avg_value_dirty'] = np.nan if dirty_rules_idx.sum() == 0 else np.mean(r['w'][dirty_rules_idx])
# r['lp_coeff_avg_value_clean'] = np.nan if clean_rules_idx.sum() == 0 else np.mean(r['w'][clean_rules_idx])
# r['lp_coeff_avg_value_other'] = np.nan if other_rules_idx.sum() == 0 else np.mean(r['w'][other_rules_idx])
r['n_rounded_rules_considered_clean'] = sum(this_r == ideal_rule for this_r in r['rules'])
r['n_rounded_rules_considered_dirty'] = \
sum([np.all(np.array([i in ideal_rule for i in this_r])) for this_r in r['rules']]) - \
sum(this_r == ideal_rule for this_r in r['rules'])
r['n_lp_rules_viewed'] = r['z_values'].shape[1]
del r['rules']
del r['w']
del r['z_index']
del r['z_values']
del r['rule_stats']
return r
def rename_filter_df(df):
return df.rename(columns={'n_rounded_rules_considered_clean': 'id_exclusion_rr',
'n_lp_rules_considered_clean' : 'id_exclusion_lp',
'reference_coverage': 'ref_coverage',
'literals': 'n_rules_literals'})[['B', 'K', 'alpha', 'lambda0', 'lambda1', 'n_ref_mult',
'lp_obj', 'rounded_obj', 'ref_coverage',
'n_lp_rules_viewed', 'id_exclusion_lp', 'id_exclusion_rr',
'n_rules', 'rule_n_perfect', 'rule_avg_coverage', 'rule_avg_length']]
def get_data(data_path, verbose=False):
reader = TinyDbReader(data_path)
meta = reader.fetch_metadata(exp_name='synthetic_removal')
if verbose:
print("{} / {} experiments completed".format(
len([d['status'] for d in meta if d['status'] == 'COMPLETED']),
len([d['status'] for d in meta])))
info = [d['info'] for d in meta if d['status'] == 'COMPLETED']
data = [get_exclusion_metadata(d) for d in info]
df = rename_filter_df(pd.DataFrame(data))
return df, info
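# Usage sketch (the data path is hypothetical; it should point at the TinyDB
# produced by the sacred observer for the 'synthetic_removal' experiment):
#   df, info = get_data('runs/', verbose=True)
#   print(df.groupby(['alpha', 'lambda0'])['id_exclusion_rr'].mean())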
| 41.197802
| 112
| 0.628434
| 586
| 3,749
| 3.692833
| 0.226962
| 0.081331
| 0.054067
| 0.025878
| 0.414048
| 0.331331
| 0.292514
| 0.235675
| 0.18207
| 0.102588
| 0
| 0.005483
| 0.221659
| 3,749
| 90
| 113
| 41.655556
| 0.736121
| 0.141104
| 0
| 0
| 0
| 0
| 0.259421
| 0.086889
| 0
| 0
| 0
| 0
| 0.016129
| 1
| 0.048387
| false
| 0
| 0.080645
| 0.016129
| 0.177419
| 0.016129
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f1e0b869b9f01994358b74334809a1ece521ead
| 774
|
py
|
Python
|
345_ReverseVowelsOfAString.py
|
satwiksabharwal01/LeetcodeProblemSolutions
|
c08fb77b76519f9c543d74f84cb2c0477aeddcd9
|
[
"MIT"
] | 1
|
2020-06-03T22:00:54.000Z
|
2020-06-03T22:00:54.000Z
|
345_ReverseVowelsOfAString.py
|
AmiGandhi/leetcode
|
238186f1e4dd7f243caab47173ebc2511ae5902e
|
[
"MIT"
] | null | null | null |
345_ReverseVowelsOfAString.py
|
AmiGandhi/leetcode
|
238186f1e4dd7f243caab47173ebc2511ae5902e
|
[
"MIT"
] | null | null | null |
# Write a function that takes a string as input and reverse only the vowels of a string.
# Example 1:
# Input: "hello"
# Output: "holle"
# Example 2:
# Input: "leetcode"
# Output: "leotcede"
class Solution:
def reverseVowels(self, s: str) -> str:
        vowels = set("aeiouAEIOU")
        s = list(s)
        left, right = 0, len(s) - 1
        while left < right:
            if s[left] in vowels and s[right] in vowels:
                s[left], s[right] = s[right], s[left]
                left, right = left + 1, right - 1
            if s[left] not in vowels:
                left += 1
            if s[right] not in vowels:
                right -= 1
return ''.join(s)
if __name__ == "__main__":
s = "hello"
print(Solution().reverseVowels(s))
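    # The two documented examples above can be checked directly:
    assert Solution().reverseVowels("hello") == "holle"
    assert Solution().reverseVowels("leetcode") == "leotcede"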
| 26.689655
| 88
| 0.536176
| 105
| 774
| 3.87619
| 0.428571
| 0.061425
| 0.034398
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015564
| 0.335917
| 774
| 29
| 89
| 26.689655
| 0.776265
| 0.22739
| 0
| 0
| 0
| 0
| 0.038917
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0
| 0
| 0.176471
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f1f88fe67e806539b890092e9e0d182702100b7
| 574
|
py
|
Python
|
script/run_basic_slackbot.py
|
imperial-genomics-facility/IGFSlackBot
|
2692460e907381cea067b674a560cacef6fff981
|
[
"Apache-2.0"
] | null | null | null |
script/run_basic_slackbot.py
|
imperial-genomics-facility/IGFSlackBot
|
2692460e907381cea067b674a560cacef6fff981
|
[
"Apache-2.0"
] | null | null | null |
script/run_basic_slackbot.py
|
imperial-genomics-facility/IGFSlackBot
|
2692460e907381cea067b674a560cacef6fff981
|
[
"Apache-2.0"
] | null | null | null |
import argparse
from slackbot.basic.igfbasicslackbot import IgfBasicSlackBot
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--slack_config', required=True, help='Slack configuration json file')
parser.add_argument('-p', '--project_data', required=True, help='Project data CSV file')
args = parser.parse_args()
slack_config = args.slack_config
project_data = args.project_data
try:
    igf_bot = IgfBasicSlackBot(slack_config_json=slack_config,
                               project_data_file=project_data)
    igf_bot.start_igfslackbot()
except Exception as e:
    print(e)
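# Usage sketch (the two file names are hypothetical):
#   python run_basic_slackbot.py -s slack_config.json -p project_data.csv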
| 31.888889
| 95
| 0.771777
| 75
| 574
| 5.666667
| 0.466667
| 0.155294
| 0.08
| 0.103529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120209
| 574
| 17
| 96
| 33.764706
| 0.841584
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f2103ff16477b77dbb801e6f1f09baa26d1ea3b
| 1,170
|
py
|
Python
|
bgbl/management/commands/fix_glyphs.py
|
okfde/api.offenegesetze.de
|
85bc0a1a65dfa77651b7319eb0fccde1a27ba193
|
[
"MIT"
] | 16
|
2018-12-10T11:59:44.000Z
|
2020-06-28T21:37:15.000Z
|
bgbl/management/commands/fix_glyphs.py
|
bundestag/api.offenegesetze.de
|
280673b9995a8a5c1fd01b1cb14dc0046599530f
|
[
"MIT"
] | 21
|
2020-02-11T23:17:52.000Z
|
2022-01-05T13:58:20.000Z
|
bgbl/management/commands/fix_glyphs.py
|
bundestag/api.offenegesetze.de
|
280673b9995a8a5c1fd01b1cb14dc0046599530f
|
[
"MIT"
] | 1
|
2018-12-11T20:17:09.000Z
|
2018-12-11T20:17:09.000Z
|
from glob import glob
import os
import shutil
from django.core.management.base import BaseCommand
from bgbl.pdf_utils import fix_glyphs, remove_watermark
class Command(BaseCommand):
help = 'Fix glyphs pdfs'
def add_arguments(self, parser):
parser.add_argument('doc_path', type=str)
def handle(self, *args, **options):
doc_path = options['doc_path']
if doc_path.endswith('.pdf'):
filenames = [doc_path]
else:
pattern = os.path.join(doc_path, '**/*.pdf')
filenames = glob(pattern, recursive=True)
for original_filename in filenames:
if original_filename.endswith(('_original.pdf', '_watermarked.pdf')):
continue
print('Fix glyphs', original_filename)
fixed_filename = fix_glyphs(original_filename)
real_filename = fixed_filename.replace('_fixed.pdf', '.pdf')
if os.path.exists(real_filename):
os.remove(real_filename)
shutil.move(fixed_filename, real_filename)
print('Adding meta data', real_filename)
remove_watermark(real_filename, force=True)
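# Usage sketch (the path is hypothetical; the command accepts either a single
# PDF or a directory that is scanned recursively for *.pdf files):
#   python manage.py fix_glyphs /data/bgbl/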
| 30.789474
| 81
| 0.638462
| 135
| 1,170
| 5.318519
| 0.42963
| 0.058496
| 0.038997
| 0.069638
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.260684
| 1,170
| 37
| 82
| 31.621622
| 0.830058
| 0
| 0
| 0
| 0
| 0
| 0.095727
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.185185
| 0
| 0.333333
| 0.074074
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f228e4d5652a96220edc4fa67e8ff6e9ecc91ac
| 657
|
py
|
Python
|
catalog/bindings/csw/time_topology_complex_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
catalog/bindings/csw/time_topology_complex_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
catalog/bindings/csw/time_topology_complex_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
from dataclasses import dataclass, field
from typing import List
from bindings.csw.abstract_time_complex_type import AbstractTimeComplexType
from bindings.csw.time_topology_primitive_property_type import (
TimeTopologyPrimitivePropertyType,
)
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class TimeTopologyComplexType(AbstractTimeComplexType):
"""
A temporal topology complex.
"""
primitive: List[TimeTopologyPrimitivePropertyType] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "http://www.opengis.net/gml",
"min_occurs": 1,
},
)
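# Instantiation sketch (assumes TimeTopologyPrimitivePropertyType can be built
# with defaults; per the metadata above, min_occurs=1 means a valid document
# carries at least one primitive):
#   t = TimeTopologyComplexType(primitive=[TimeTopologyPrimitivePropertyType()])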
| 26.28
| 75
| 0.703196
| 61
| 657
| 7.360656
| 0.57377
| 0.053452
| 0.066815
| 0.10245
| 0.129176
| 0.129176
| 0
| 0
| 0
| 0
| 0
| 0.001908
| 0.202435
| 657
| 24
| 76
| 27.375
| 0.854962
| 0.042618
| 0
| 0
| 0
| 0
| 0.133768
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.235294
| 0
| 0.352941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f251d95dc8853c21e444177f77e27a265f912f3
| 1,534
|
py
|
Python
|
maro/cli/grass/lib/services/node_api_server/blueprints/containers.py
|
yangboz/maro
|
0973783e55ca07bf8e177910c9d47854117a4ea8
|
[
"MIT"
] | 598
|
2020-09-23T00:50:22.000Z
|
2022-03-31T08:12:54.000Z
|
maro/cli/grass/lib/services/node_api_server/blueprints/containers.py
|
gx9702/maro
|
38c796f0a7ed1e0f64c299d96c6e0df032401fa9
|
[
"MIT"
] | 235
|
2020-09-22T10:20:48.000Z
|
2022-03-31T02:10:03.000Z
|
maro/cli/grass/lib/services/node_api_server/blueprints/containers.py
|
gx9702/maro
|
38c796f0a7ed1e0f64c299d96c6e0df032401fa9
|
[
"MIT"
] | 116
|
2020-09-22T09:19:04.000Z
|
2022-02-12T05:04:07.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from flask import Blueprint, abort, request
from ...utils.docker_controller import DockerController
from ...utils.exception import CommandExecutionError
# Flask related.
blueprint = Blueprint(name="container", import_name=__name__)
URL_PREFIX = "/v1/containers"
# Api functions.
@blueprint.route(f"{URL_PREFIX}", methods=["POST"])
def create_container():
"""Create a container, aka 'docker run'.
Returns:
None.
"""
try:
create_config = request.json
return DockerController.create_container_with_config(create_config=create_config)
except CommandExecutionError:
abort(400)
@blueprint.route(f"{URL_PREFIX}/<container_name>", methods=["DELETE"])
def delete_container(container_name: str):
"""Delete a container, aka 'docker rm'.
Args:
container_name (str): Name of the container.
Returns:
None.
"""
try:
DockerController.remove_container(container_name=container_name)
return {}
except CommandExecutionError:
abort(400)
@blueprint.route(f"{URL_PREFIX}/<container_name>:stop", methods=["POST"])
def stop_container(container_name: str):
"""Stop a container, aka 'docker stop'.
Args:
container_name (str): Name of the container.
Returns:
None.
"""
try:
DockerController.stop_container(container_name=container_name)
return {}
except CommandExecutionError:
abort(400)
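# Usage sketch against a running node API server (host and port are placeholders):
#   curl -X POST   http://node:port/v1/containers -H 'Content-Type: application/json' -d @create_config.json
#   curl -X POST   http://node:port/v1/containers/my_container:stop
#   curl -X DELETE http://node:port/v1/containers/my_container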
| 22.895522
| 89
| 0.683833
| 167
| 1,534
| 6.095808
| 0.329341
| 0.127701
| 0.086444
| 0.053045
| 0.413556
| 0.38998
| 0.38998
| 0.38998
| 0.38998
| 0.38998
| 0
| 0.00821
| 0.205997
| 1,534
| 66
| 90
| 23.242424
| 0.827586
| 0.249674
| 0
| 0.423077
| 0
| 0
| 0.103993
| 0.058496
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115385
| false
| 0
| 0.153846
| 0
| 0.384615
| 0.192308
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f26c8fd4ac1dfad9af1cf8e92f70fe641af8f00
| 6,521
|
py
|
Python
|
src/licensedcode/saneyaml.py
|
chetanya-shrimali/scancode-toolkit
|
a1a22fb225cbeb211bd6f92272a46f1351f57d6b
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
src/licensedcode/saneyaml.py
|
chetanya-shrimali/scancode-toolkit
|
a1a22fb225cbeb211bd6f92272a46f1351f57d6b
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
src/licensedcode/saneyaml.py
|
chetanya-shrimali/scancode-toolkit
|
a1a22fb225cbeb211bd6f92272a46f1351f57d6b
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
#
# Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import print_function
from collections import OrderedDict
from functools import partial
import yaml
try:
from yaml import CSafeLoader as SafeLoader
from yaml import CSafeDumper as SafeDumper
except ImportError:
from yaml import SafeLoader
from yaml import SafeDumper
"""
Wrapper around PyYAML to provide sane defaults ensuring that dump/load does not
damage content, keeps ordering, use always block-style and use four spaces
indents to get readable YAML and quotes and folds texts in a sane way.
Use the `load` function to get a primitive type from a YAML string and the
`dump` function to get a YAML string from a primitive type.
Load and dump rely on subclasses of SafeLoader and SafeDumper respectively doing
all the dirty bidding to get PyYAML straight.
"""
# Check:
# https://github.com/ralienpp/reyaml/blob/master/reyaml/__init__.py
# https://pypi.python.org/pypi/PyYAML.Yandex/3.11.1
# https://pypi.python.org/pypi/ruamel.yaml/0.9.1
# https://pypi.python.org/pypi/yaml2rst/0.2
def load(s):
"""
Return an object safely loaded from YAML string `s`. `s` must be unicode
or be a string that converts to unicode without errors.
"""
return yaml.load(s, Loader=SaneLoader)
def dump(obj):
"""
Return a safe YAML unicode string representation from `obj`.
"""
return yaml.dump(
obj,
Dumper=SaneDumper,
default_flow_style=False,
default_style=None,
canonical=False,
allow_unicode=True,
# do not encode as Unicode
encoding=None,
indent=4,
width=90,
line_break='\n',
explicit_start=False,
explicit_end=False,
)
class SaneLoader(SafeLoader):
pass
def string_loader(loader, node):
"""
Ensure that a scalar type (a value) is returned as a plain unicode string.
"""
return loader.construct_scalar(node)
SaneLoader.add_constructor(u'tag:yaml.org,2002:str', string_loader)
# Load most scalar types as strings: nulls, ints (such as a version "01"),
# floats (such as a version "2.20") and timestamps are all loaded as unicode
# strings. This avoids unwanted type conversions for unquoted strings and the
# resulting content damage. This overrides the implicit resolvers. Callers must
# handle type conversion explicitly from unicode to other types in the loaded
# objects.
SaneLoader.add_constructor(u'tag:yaml.org,2002:null', string_loader)
SaneLoader.add_constructor(u'tag:yaml.org,2002:timestamp', string_loader)
SaneLoader.add_constructor(u'tag:yaml.org,2002:float', string_loader)
SaneLoader.add_constructor(u'tag:yaml.org,2002:int', string_loader)
# keep boolean conversion
# SaneLoader.add_constructor(u'tag:yaml.org,2002:boolean', string_loader)
def ordered_loader(loader, node):
"""
Ensure that YAML maps ordered is preserved and loaded in an OrderedDict.
"""
assert isinstance(node, yaml.MappingNode)
omap = OrderedDict()
yield omap
for key, value in node.value:
key = loader.construct_object(key)
value = loader.construct_object(value)
omap[key] = value
SaneLoader.add_constructor(u'tag:yaml.org,2002:map', ordered_loader)
SaneLoader.add_constructor(u'tag:yaml.org,2002:omap', ordered_loader)
class SaneDumper(SafeDumper):
"""
Ensure that lists items are always indented.
"""
def increase_indent(self, flow=False, indentless=False):
return super(SaneDumper, self).increase_indent(flow, indentless=False)
def ordered_dumper(dumper, data):
"""
Ensure that maps are always dumped in the items order.
"""
return dumper.represent_mapping(u'tag:yaml.org,2002:map', data.items())
SaneDumper.add_representer(OrderedDict, ordered_dumper)
def null_dumper(dumper, value):
"""
Always dump nulls as empty string.
"""
return dumper.represent_scalar(u'tag:yaml.org,2002:null', u'')
SaneDumper.add_representer(type(None), null_dumper)  # register on SaneDumper so None values use the null representer
def string_dumper(dumper, value, _tag=u'tag:yaml.org,2002:str'):
"""
Ensure that all scalars are dumped as UTF-8 unicode, folded and quoted in
the sanest and most readable way.
"""
if not isinstance(value, basestring):
value = repr(value)
if isinstance(value, str):
value = value.decode('utf-8')
style = None
multilines = '\n' in value
if multilines:
literal_style = '|'
style = literal_style
return dumper.represent_scalar(_tag, value, style=style)
SaneDumper.add_representer(str, string_dumper)
SaneDumper.add_representer(unicode, string_dumper)
SaneDumper.add_representer(int, partial(string_dumper, _tag=u'tag:yaml.org,2002:int'))
SaneDumper.add_representer(float, partial(string_dumper, _tag=u'tag:yaml.org,2002:float'))
def boolean_dumper(dumper, value):
"""
Dump booleans as yes or no.
"""
value = u'yes' if value else u'no'
style = None
return dumper.represent_scalar(u'tag:yaml.org,2002:bool', value, style=style)
SaneDumper.add_representer(bool, boolean_dumper)
| 33.441026
| 90
| 0.73455
| 936
| 6,521
| 5.038462
| 0.317308
| 0.012723
| 0.025445
| 0.034987
| 0.23961
| 0.208651
| 0.15458
| 0.15458
| 0.129771
| 0.086726
| 0
| 0.016387
| 0.176507
| 6,521
| 194
| 91
| 33.613402
| 0.861825
| 0.408526
| 0
| 0.051948
| 0
| 0
| 0.101887
| 0.09717
| 0
| 0
| 0
| 0
| 0.012987
| 1
| 0.116883
| false
| 0.012987
| 0.12987
| 0.012987
| 0.376623
| 0.012987
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f272919a0358c21a01d9a8008881e0d63626d7a
| 14,383
|
py
|
Python
|
tsutsuji/gui_tsutsuji.py
|
konawasabi/tsutsuji-trackcomputer
|
04469a8a9872e8bad3d661c5911b9c881fab8ca9
|
[
"Apache-2.0"
] | 1
|
2022-03-14T00:35:05.000Z
|
2022-03-14T00:35:05.000Z
|
tsutsuji/gui_tsutsuji.py
|
konawasabi/tsutsuji-trackcomputer
|
04469a8a9872e8bad3d661c5911b9c881fab8ca9
|
[
"Apache-2.0"
] | null | null | null |
tsutsuji/gui_tsutsuji.py
|
konawasabi/tsutsuji-trackcomputer
|
04469a8a9872e8bad3d661c5911b9c881fab8ca9
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2021-2022 konawasabi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
'''
import sys
import pathlib
import os
import webbrowser
import tkinter as tk
from tkinter import ttk
import tkinter.filedialog as filedialog
import tkinter.simpledialog as simpledialog
import tkinter.font as font
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib import rcParams
import matplotlib.gridspec
from PIL import Image
import numpy as np
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Hiragino Sans', 'Yu Gothic', 'Meiryo', 'Takao', 'IPAexGothic', 'IPAPGothic', 'VL PGothic', 'Noto Sans CJK JP']
from . import track_control
from . import drawcursor
from . import backimg
from . import measure
from ._version import __version__
class Catcher:  # catches exceptions raised inside tkinter callbacks
def __init__(self, func, subst, widget):
self.func = func
self.subst = subst
self.widget = widget
def __call__(self, *args):
try:
if self.subst:
args = self.subst(*args)
return self.func(*args)
except Exception as e:
            if not __debug__:  # in debug mode (-O), pass the exception through so pdb starts
                raise e
            else:
                print(e)  # in normal mode, show it in a dialog
tk.messagebox.showinfo(message=e)
class mainwindow(ttk.Frame):
def __init__(self, master):
super().__init__(master, padding='3 3 3 3')
self.master.title('Tsutsuji')
self.grid(column=0, row=0, sticky=(tk.N, tk.W, tk.E, tk.S))
self.master.columnconfigure(0, weight=1)
self.master.rowconfigure(0, weight=1)
master.protocol('WM_DELETE_WINDOW', self.ask_quit)
self.backimgctrl = backimg.BackImgControl(self)
self.cursor = drawcursor.cursor(self)
self.measurewindow = measure.interface(self)
self.trackcontrol = track_control.TrackControl()
self.create_widgets()
self.create_menubar()
self.bind_keyevent()
def create_widgets(self):
font_title = font.Font(weight='bold',size=10)
        # Plot frame
self.canvas_frame = ttk.Frame(self, padding='3 3 3 3')
self.canvas_frame.grid(column=0, row=0, sticky=(tk.N, tk.W, tk.E, tk.S))
self.fig_plane = plt.figure(figsize=(9,7),tight_layout=True)
gs1 = self.fig_plane.add_gridspec(nrows=1,ncols=1)
self.ax_plane = self.fig_plane.add_subplot(gs1[0])
self.plt_canvas_base = tk.Canvas(self.canvas_frame, bg="white", width=900, height=700)
self.plt_canvas_base.grid(row = 0, column = 0)
def on_canvas_resize(event):
self.plt_canvas_base.itemconfigure(self.fig_frame_id, width=event.width, height=event.height)
#print(event)
self.fig_frame = tk.Frame(self.plt_canvas_base)
self.fig_frame_id = self.plt_canvas_base.create_window((0, 0), window=self.fig_frame, anchor="nw")
self.fig_frame.columnconfigure(0, weight=1)
self.fig_frame.rowconfigure(0, weight=1)
self.plt_canvas_base.bind("<Configure>", on_canvas_resize)
self.fig_canvas = FigureCanvasTkAgg(self.fig_plane, master=self.fig_frame)
self.fig_canvas.draw()
self.fig_canvas.get_tk_widget().grid(row=0, column=0, sticky='news')
self.canvas_frame.columnconfigure(0, weight=1)
#self.canvas_frame.columnconfigure(1, weight=1)
self.canvas_frame.rowconfigure(0, weight=1)
#self.canvas_frame.rowconfigure(1, weight=1)
        # Button frame
self.button_frame = ttk.Frame(self, padding='3 3 3 3')
self.button_frame.grid(column=1, row=0, sticky=(tk.N, tk.W, tk.E, tk.S))
# ---
self.replot_btn = ttk.Button(self.button_frame, text="Replot", command = self.drawall)
self.replot_btn.grid(column=0, row=0, sticky=(tk.N, tk.W, tk.E))
self.plotarea_frame = ttk.Frame(self.button_frame, padding='3 3 3 3')
self.plotarea_frame.grid(column=0, row=1, sticky=(tk.N, tk.W, tk.E, tk.S))
self.plotarea_val_frame = ttk.Frame(self.plotarea_frame, padding='3 3 3 3')
self.plotarea_val_frame.grid(column=0, row=0, sticky=(tk.N, tk.W, tk.E, tk.S))
self.viewpos_v = [tk.DoubleVar(value=0),tk.DoubleVar(value=0)]
self.viewp_scale_v = tk.DoubleVar(value=1000)
self.view_whole_v = tk.StringVar()
self.view_whole_v.set('False')
self.aspectratio_v = tk.DoubleVar(value=1)
self.viewp_x_l = ttk.Label(self.plotarea_val_frame, text='x')
self.viewp_y_l = ttk.Label(self.plotarea_val_frame, text='y')
self.viewp_sc_l = ttk.Label(self.plotarea_val_frame, text='scale')
self.viewp_asr_l = ttk.Label(self.plotarea_val_frame, text='Y mag.')
self.viewp_x_l.grid(column=0, row=0, sticky=(tk.E,tk.W))
self.viewp_y_l.grid(column=2, row=0, sticky=(tk.E,tk.W))
self.viewp_sc_l.grid(column=0, row=1, sticky=(tk.E,tk.W))
self.viewp_asr_l.grid(column=2, row=1, sticky=(tk.E,tk.W))
self.viewp_x_e = ttk.Entry(self.plotarea_val_frame, textvariable=self.viewpos_v[0],width=5)
self.viewp_y_e = ttk.Entry(self.plotarea_val_frame, textvariable=self.viewpos_v[1],width=5)
self.viewp_sc_e = ttk.Entry(self.plotarea_val_frame, textvariable=self.viewp_scale_v,width=5)
self.view_whole_e = ttk.Checkbutton(self.plotarea_val_frame, text='Whole', variable=self.view_whole_v, onvalue='True', offvalue='False')
self.viewp_asr_e = ttk.Entry(self.plotarea_val_frame, textvariable=self.aspectratio_v,width=5)
self.viewp_x_e.grid(column=1, row=0, sticky=(tk.E,tk.W))
self.viewp_y_e.grid(column=3, row=0, sticky=(tk.E,tk.W))
self.viewp_sc_e.grid(column=1, row=1, sticky=(tk.E,tk.W))
self.viewp_asr_e.grid(column=3, row=1, sticky=(tk.E,tk.W))
self.view_whole_e.grid(column=0, row=3, sticky=(tk.E,tk.W))
# ---
self.plotmove_frame = ttk.Frame(self.plotarea_frame, padding='3 3 3 3')
self.plotmove_frame.grid(column=0, row=1, sticky=(tk.N, tk.W, tk.E, tk.S))
self.plotmove_btn_up = ttk.Button(self.plotmove_frame, text="↑", command = lambda: self.move_xy(0,-1))
self.plotmove_btn_down = ttk.Button(self.plotmove_frame, text="↓", command = lambda: self.move_xy(0,1))
self.plotmove_btn_left = ttk.Button(self.plotmove_frame, text="←", command = lambda: self.move_xy(-1,0))
self.plotmove_btn_right = ttk.Button(self.plotmove_frame, text="→", command = lambda: self.move_xy(1,0))
self.plotmove_btn_up.grid(column=1, row=0, sticky=(tk.E,tk.W))
self.plotmove_btn_down.grid(column=1, row=2, sticky=(tk.E,tk.W))
self.plotmove_btn_left.grid(column=0, row=1, sticky=(tk.E,tk.W))
self.plotmove_btn_right.grid(column=2, row=1, sticky=(tk.E,tk.W))
# ---
self.measure_btn = ttk.Button(self.button_frame, text="Measure", command = self.measure)
self.measure_btn.grid(column=0, row=2, sticky=(tk.N, tk.W, tk.E))
self.getrelrad_btn = ttk.Button(self.button_frame, text="Generate", command = self.get_relativepos_rad)
self.getrelrad_btn.grid(column=0, row=3, sticky=(tk.N, tk.W, tk.E))
if not __debug__:
self.printtracks_btn = ttk.Button(self.button_frame, text="P. Tracks", command = self.trackcontrol.dump_trackdata)
self.printtracks_btn.grid(column=0, row=4, sticky=(tk.N, tk.W, tk.E))
self.printpos_btn = ttk.Button(self.button_frame, text="P. Pos", command = self.draw_tracks_cp)
self.printpos_btn.grid(column=0, row=5, sticky=(tk.N, tk.W, tk.E))
        # Settings for window resizing
self.columnconfigure(0, weight=1)
#self.columnconfigure(1, weight=1)
self.rowconfigure(0, weight=1)
def create_menubar(self):
self.master.option_add('*tearOff', False)
self.menubar = tk.Menu(self.master)
self.menu_file = tk.Menu(self.menubar)
self.menu_backimg = tk.Menu(self.menubar)
self.menu_help = tk.Menu(self.menubar)
self.menubar.add_cascade(menu=self.menu_file, label='ファイル')
self.menubar.add_cascade(menu=self.menu_backimg, label='背景画像')
self.menubar.add_cascade(menu=self.menu_help, label='ヘルプ')
self.menu_file.add_command(label='開く...', command=self.opencfg, accelerator='Control+O')
self.menu_file.add_command(label='リロード', command=self.reloadcfg, accelerator='F5')
self.menu_file.add_separator()
self.menu_file.add_command(label='終了', command=self.ask_quit, accelerator='Alt+F4')
self.menu_backimg.add_command(label='Window...', command=self.backimgctrl.create_window)
self.menu_backimg.add_separator()
self.menu_backimg.add_command(label='Load...', command=self.backimgctrl.load_setting)
self.menu_backimg.add_command(label='Save...', command=self.backimgctrl.save_setting)
self.menu_help.add_command(label='ヘルプ...', command=self.open_webdocument)
self.menu_help.add_command(label='Tsutsujiについて...', command=self.aboutwindow)
self.master['menu'] = self.menubar
def bind_keyevent(self):
self.bind_all("<Control-o>", self.opencfg)
self.bind_all("<F5>", self.reloadcfg)
self.bind_all("<Alt-F4>", self.ask_quit)
def ask_quit(self, event=None, ask=True):
if ask:
if tk.messagebox.askyesno(message='Tsutsuji を終了しますか?'):
self.quit()
else:
self.quit()
def opencfg(self, event=None, in_dir=None):
inputdir = filedialog.askopenfilename() if in_dir == None else in_dir
print('loading',inputdir)
self.trackcontrol.loadcfg(inputdir)
self.trackcontrol.loadmap()
if self.trackcontrol.conf.general['backimg'] is not None:
self.backimgctrl.load_setting(path = self.trackcontrol.conf.general['backimg'])
elif self.backimgctrl.conf_path is not None:
self.backimgctrl.load_setting(path = self.backimgctrl.conf_path)
self.measurewindow.reload_trackkeys()
self.drawall()
def reloadcfg(self, event=None):
if self.trackcontrol.path is not None:
self.opencfg(event=event,in_dir=self.trackcontrol.path)
def draw2dplot(self):
self.ax_plane.cla()
self.trackcontrol.plot2d(self.ax_plane)
self.fig_canvas.draw()
def drawall(self):
self.ax_plane.cla()
self.trackcontrol.plot2d(self.ax_plane)
self.measurewindow.drawall()
if self.view_whole_v.get() == 'True':
imgarea = self.backimgctrl.imgsarea()
imgarea = self.trackcontrol.drawarea(imgarea)
self.ax_plane.set_xlim(imgarea[0],imgarea[1])
self.ax_plane.set_ylim(imgarea[2],imgarea[3])
else:
center = [self.viewpos_v[0].get(),self.viewpos_v[1].get()]
            #windowratio = self.ax_plane.bbox.height/self.ax_plane.bbox.width  # get the aspect ratio of the plan view
windowratio = 1/self.aspectratio_v.get()*7/9
scalex = self.viewp_scale_v.get()
scaley = windowratio * scalex
self.ax_plane.set_xlim(center[0]-scalex/2, center[0]+scalex/2)
self.ax_plane.set_ylim(center[1]-scaley/2, center[1]+scaley/2)
for i in self.backimgctrl.imgs.keys():
self.backimgctrl.imgs[i].show(self.ax_plane,as_ratio=7/9,ymag=self.aspectratio_v.get())
self.ax_plane.invert_yaxis()
self.fig_canvas.draw()
def move_xy(self,x,y):
nowpos = [self.viewpos_v[0].get(),self.viewpos_v[1].get()]
windowratio = 1/self.aspectratio_v.get()*7/9
scalex = self.viewp_scale_v.get()
scaley = windowratio * scalex
self.viewpos_v[0].set(nowpos[0] + x*scalex/5)
self.viewpos_v[1].set(nowpos[1] + y*scaley/5)
self.drawall()
def measure(self):
self.measurewindow.create_widgets()
def draw_tracks_cp(self):
self.trackcontrol.plot_controlpoints(self.ax_plane)
self.fig_canvas.draw()
def get_relativepos_rad(self):
self.trackcontrol.generate_mapdata()
def aboutwindow(self, event=None):
msg = 'Tsutsuji trackcomputer\n'
msg += 'Version '+__version__+'\n\n'
msg += 'Copyright © 2022 konawasabi\n'
msg += 'Released under the Apache License, Version 2.0 .\n'
msg += 'https://www.apache.org/licenses/LICENSE-2.0'
tk.messagebox.showinfo(message=msg)
def open_webdocument(self, event=None):
webbrowser.open('https://konawasabi.github.io/tsutsuji-trackcomputer/')
def sendtopmost(self,event=None):
self.master.lift()
self.master.focus_force()
def main():
if not __debug__:
        # If an error occurs, start the debugger: https://gist.github.com/podhmo/5964702e7471ccaba969105468291efa
def info(type, value, tb):
if hasattr(sys, "ps1") or not sys.stderr.isatty():
# You are in interactive mode or don't have a tty-like
# device, so call the default hook
sys.__excepthook__(type, value, tb)
else:
import traceback, pdb
# You are NOT in interactive mode; print the exception...
traceback.print_exception(type, value, tb)
# ...then start the debugger in post-mortem mode
pdb.pm()
sys.excepthook = info
print('Debug mode')
tk.CallWrapper = Catcher
root = tk.Tk()
app = mainwindow(master=root)
if len(sys.argv)>1:
app.opencfg(in_dir=sys.argv[1])
app.mainloop()
| 43.453172
| 144
| 0.643398
| 2,019
| 14,383
| 4.425458
| 0.193165
| 0.026861
| 0.010632
| 0.021936
| 0.378511
| 0.326133
| 0.260884
| 0.214326
| 0.178064
| 0.150196
| 0
| 0.019666
| 0.225753
| 14,383
| 330
| 145
| 43.584848
| 0.782238
| 0.081485
| 0
| 0.105042
| 0
| 0
| 0.050361
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088235
| false
| 0
| 0.088235
| 0
| 0.189076
| 0.033613
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f2ccc8e4139330b0b1a1e4de76035b03e5fa0d0
| 1,011
|
py
|
Python
|
extra/uniq.py
|
JarryShaw/darc
|
0fc8782bb2f641ca3734c94666cbc36e3d9cb09f
|
[
"BSD-3-Clause"
] | 24
|
2020-07-08T06:16:52.000Z
|
2022-02-19T00:33:34.000Z
|
extra/uniq.py
|
JarryShaw/darc
|
0fc8782bb2f641ca3734c94666cbc36e3d9cb09f
|
[
"BSD-3-Clause"
] | 42
|
2020-05-29T12:56:10.000Z
|
2022-03-07T17:12:08.000Z
|
extra/uniq.py
|
JarryShaw/darc
|
0fc8782bb2f641ca3734c94666cbc36e3d9cb09f
|
[
"BSD-3-Clause"
] | 7
|
2020-07-11T18:57:24.000Z
|
2022-02-01T21:46:30.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import tempfile
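# Usage sketch (file names are hypothetical): deduplicates the lines of each
# listed file in place, also dropping blank lines and '#' comment lines:
#   python3 uniq.py hosts_a.txt hosts_b.txt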
def is_in(line: str, dest: str) -> bool:
if os.path.isfile(dest):
with open(dest) as file:
for content in filter(None, map(lambda s: s.strip(), file)):
if line == content:
return True
return False
def uniq(path: str, tempdir: str) -> None:
name = os.path.split(path)[1]
dest = os.path.join(tempdir, '%s.tmp' % name)
with open(path) as file:
for line in filter(None, map(lambda s: s.strip(), file)):
if line.startswith('#'):
continue
if is_in(line, dest):
continue
with open(dest, 'at') as out_file:
print(line, file=out_file)
os.rename(dest, path)
def main() -> int:
with tempfile.TemporaryDirectory() as tempdir:
for path in sys.argv[1:]:
uniq(path, tempdir)
return 0
if __name__ == "__main__":
sys.exit(main())
| 24.071429
| 72
| 0.547972
| 137
| 1,011
| 3.956204
| 0.40146
| 0.03321
| 0.02952
| 0.055351
| 0.140221
| 0.140221
| 0.140221
| 0.140221
| 0.140221
| 0.140221
| 0
| 0.007246
| 0.317507
| 1,011
| 41
| 73
| 24.658537
| 0.778261
| 0.042532
| 0
| 0.068966
| 0
| 0
| 0.017598
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0
| 0.103448
| 0
| 0.310345
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f2d5d68906150aa022de6e4c0b468cf3688673c
| 353
|
py
|
Python
|
tests.py
|
B1Z0N/turingmachine
|
4c6761ee52fd05071d675a8cab8558025a5c26d9
|
[
"MIT"
] | null | null | null |
tests.py
|
B1Z0N/turingmachine
|
4c6761ee52fd05071d675a8cab8558025a5c26d9
|
[
"MIT"
] | 3
|
2020-03-24T16:53:31.000Z
|
2021-02-02T21:58:25.000Z
|
tests.py
|
B1Z0N/turingmachine
|
4c6761ee52fd05071d675a8cab8558025a5c26d9
|
[
"MIT"
] | null | null | null |
"""
Script that runs all tests written
"""
import os
import pathlib
import pytest
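# Run from the repository root: python tests.py
# (the script chdirs into ./tests, then invokes pytest in every test subfolder)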
cwd = pathlib.Path.cwd
os.chdir(cwd() / "tests")
def subfolders(path):
    return [x[0] for x in os.walk(path)][1:]  # without the current directory
for subf in subfolders(cwd()):
if not subf.endswith("__pycache__"):
os.chdir(subf)
pytest.main()
| 13.576923
| 72
| 0.645892
| 51
| 353
| 4.392157
| 0.627451
| 0.0625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007246
| 0.21813
| 353
| 25
| 73
| 14.12
| 0.804348
| 0.172805
| 0
| 0
| 0
| 0
| 0.057143
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.272727
| 0.090909
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f2da5398cfdb995da864f3b7f84a89bc1c2fda5
| 7,933
|
py
|
Python
|
sandbox/straws/loadstraws.py
|
mustaric/lambda-tess-search
|
1d48133f32c8a073cba5d221f30c2d44e8d06e4b
|
[
"BSD-3-Clause"
] | 2
|
2019-06-26T14:35:22.000Z
|
2020-05-12T15:26:27.000Z
|
sandbox/straws/loadstraws.py
|
mustaric/lambda-tess-search
|
1d48133f32c8a073cba5d221f30c2d44e8d06e4b
|
[
"BSD-3-Clause"
] | 7
|
2019-06-26T20:52:14.000Z
|
2020-12-16T21:08:20.000Z
|
sandbox/straws/loadstraws.py
|
mustaric/lambda-tess-search
|
1d48133f32c8a073cba5d221f30c2d44e8d06e4b
|
[
"BSD-3-Clause"
] | 2
|
2019-06-26T20:24:11.000Z
|
2020-05-12T19:36:04.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2017-2018 Orbital Insight Inc., all rights reserved.
# Contains confidential and trade secret information.
# Government Users: Commercial Computer Software - Use governed by
# terms of Orbital Insight commercial license agreement.
"""
Created on Tue Oct 22 21:22:36 2019
@author: fergal
"""
from __future__ import print_function
from __future__ import division
import boto3
import numpy as np
import json
import os
import io
import common
class LoadTessCube(object):
"""
Load a datacube of TESS imagery from straws stored on disk.
"""
def __init__(self, path, sector):
#Set path to None for some testing
if path is not None:
self.path = path
self.sector = sector
self.loadMetadata()
def __repr__(self):
return "<TessCube object for sector %s. Data at %s>" %(self.sector, self.path)
def __call__(self, camera, ccd, col, row):
return self.get(camera, ccd, col, row, 20)
def loadMetadata(self):
"""Load metadata on the straws stored in `path`
Metadata is stored in a json file and contains details like ccd sizes,
number of cadences, strawsize, etc.
"""
sectorStr = "sector%02i" %(self.sector)
fn = os.path.join(self.path, sectorStr, common.METADATA_FILE)
with open(fn) as fp:
props = json.load(fp)
assert self.sector == props['sector']
self.setMetadataFromDict(props)
def setMetadataFromDict(self, props):
self.__dict__.update(props)
self.nCols, self.nRows = self.nColsRows
self.nCadences = len(self.datestampList)
def getMidTimestamps(self):
"""Return the cadence mid times as stored in the metadata
See make straws for the details of how this value is calculated
"""
try:
timestamps = self.midtimes_tbjd
except AttributeError:
raise AttributeError("metadata doesn't contain timestamps")
return np.array(timestamps)
def getRelativeCadenceNumbers(self):
"""Return a integers from zero to length of datacube"""
return np.arange(self.nCadences, dtype=int)
def get(self, camera, ccd, col, row, min_size_pix=None):
"""Get a data cube
        The data cube is guaranteed to be square and at least `min_size_pix`
        on a side. However, because it constructs a datacube whose bounding
        box aligns with the straws it is reading data from, the actual size
        may be larger than `min_size_pix`, and the requested (`col`, `row`)
        may not be at the centre of the image.
Inputs
-------------
camera, ccd, col, row
(int) Properties of the straw. col and row refer to coordinates of
the bottom-left corner of the straw.
Optional Inputs
-----------------
min_size_pix
(int) Minimum width and height of the returned datacube
Returns
-----------
cube
(np 3d array) of shape (nCadence, nRows, nCols)
target_col, target_row
(float) The index in `image` corresponding to (`col`, `row`).
For example, if the request is for a 30x30 pixel stamp around
            the position (col, row) = (301, 602), the resulting target_col,
            target_row might be (1, 2)
"""
if min_size_pix is None:
min_size_pix = self.strawSize
c0, c1, r0, r1 = self.pickBbox(col, row, min_size_pix)
colSize = c1 - c0
rowSize = r1 - r0
image = np.empty( (self.nCadences, rowSize, colSize) )
ds = self.strawSize
for i in range(c0, c1, ds):
for j in range(r0, r1, ds):
straw = self.getStraw(camera, ccd, i, j)
assert straw.shape == (self.nCadences, ds, ds)
dCol = i - c0
dRow = j - r0
sc = slice(dCol, dCol + ds)
sr = slice(dRow, dRow + ds)
image[:, sr, sc] = straw
target_col = col - c0
target_row = row - r0
return image, target_col, target_row
def pickBbox(self, col, row, size_pix):
"""Pick the bounding box around (col, row) for the returned data cube
The bounding box will be
* square
* The width will be > `size_pix`
* The width will be an integer times the `strawSize`
Inputs
-------
col, row
(float) Location of centre of region of interest
size_pix
(int) Minimum size of returned bounding box. The bounding box
will probably be bigger than this request.
Returns
----------
4-tuple of col and row values defining the bounding box.
"""
if not self.isInBounds(col, row):
raise ValueError("Requested col,row (%g, %g) is out of bounds" %(col, row))
assert(size_pix > 0)
ds = .5 * size_pix
c0 = common.roundToNearestBelow(max(col-ds, 0), self.strawSize)
c1 = common.roundToNearestAbove(min(col+ds, self.nCols), self.strawSize)
r0 = common.roundToNearestBelow(max(row-ds, 0), self.strawSize)
r1 = common.roundToNearestAbove(min(row+ds, self.nRows), self.strawSize)
return c0, c1, r0, r1
def isInBounds(self, col, row):
"""Test if the requested col,row actually fall on disk
Inputs
-------------
col, row
(int)
Returns
----------
boolean
"""
if col < 0 or col >= self.nCols:
return False
if row < 0 or row >= self.nRows:
return False
return True
def getStraw(self, camera, ccd, col, row):
""" Load a straw from disk
Inputs
-------------
camera, ccd, col, row
(int) Properties of the straw. col and row refer to coordinates of
the bottom-left corner of the straw.
"""
longPath, fn = common.makeStrawName(self.path,
self.sector,
camera,
ccd,
col,
row)
straw = self.loadStrawFromUri(longPath, fn)
return straw
def loadStrawFromUri(self, strawPath, fn):
if not os.path.exists(strawPath):
raise IOError("Path %s not found" %(strawPath))
fn = os.path.join(strawPath, fn)
if not os.path.exists(fn):
raise IOError("File %s not found" %(fn))
return np.load(fn)
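# Usage sketch (path and sector values are hypothetical):
#   cube = LoadTessCube('/data/straws', sector=14)
#   image, tcol, trow = cube.get(camera=1, ccd=2, col=301, row=602, min_size_pix=30)
#   flux = image[:, int(trow), int(tcol)]  # flux time series at the requested pixel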
class LoadTessCubeS3(LoadTessCube):
"""Load straws from S3 instead of a local disk"""
def __init__(self, bucket, path, sector, region='us-east-1'):
#bucket is a string. self.bucket is an object
self.bucketName = bucket
self.s3 = boto3.resource('s3', region_name=region)
self.path = path
self.sector = sector
self.loadMetadata()
def loadStrawFromUri(self, strawPath, fn):
#boto stuff goes here
uri = os.path.join(strawPath, fn)
obj = self.s3.Object(self.bucketName, uri)
thebytes = obj.get()['Body'].read()
return np.load(io.BytesIO(thebytes))
def loadMetadata(self):
"""Load metadata on the straws stored in `path`
Metadata is stored in a json file and contains details like ccd sizes,
number of cadences, strawsize, etc.
"""
uri = os.path.join(self.path, "sector%02i" % self.sector, common.METADATA_FILE)
print(uri)
obj = self.s3.Object(self.bucketName, uri)
print(obj)
thebytes = obj.get()['Body'].read()
props = json.loads(thebytes)
assert self.sector == props['sector']
self.setMetadataFromDict(props)
| 30.511538
| 87
| 0.573427
| 981
| 7,933
| 4.575943
| 0.294597
| 0.026732
| 0.018712
| 0.023391
| 0.234796
| 0.17955
| 0.17955
| 0.152818
| 0.128314
| 0.107374
| 0
| 0.015023
| 0.328753
| 7,933
| 259
| 88
| 30.629344
| 0.827981
| 0.33556
| 0
| 0.181818
| 0
| 0
| 0.043914
| 0
| 0
| 0
| 0
| 0
| 0.036364
| 1
| 0.136364
| false
| 0
| 0.072727
| 0.018182
| 0.336364
| 0.027273
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f30e7564e6c5decd42ff9ef937b6271af7e25ce
| 8,797
|
py
|
Python
|
MISC/opt_omega_ip.py
|
PHOTOX/photoxrepo
|
83ad3813e9c52926e6387afc76813e99d430a5f3
|
[
"MIT"
] | 4
|
2015-03-27T09:12:44.000Z
|
2022-01-18T08:45:29.000Z
|
MISC/opt_omega_ip.py
|
PHOTOX/photoxrepo
|
83ad3813e9c52926e6387afc76813e99d430a5f3
|
[
"MIT"
] | 5
|
2015-01-06T22:08:58.000Z
|
2021-04-12T07:56:34.000Z
|
MISC/opt_omega_ip.py
|
PHOTOX/photoxrepo
|
83ad3813e9c52926e6387afc76813e99d430a5f3
|
[
"MIT"
] | 2
|
2019-09-02T11:43:32.000Z
|
2022-01-18T08:45:30.000Z
|
#!/usr/bin/env python
import os
import sys
sys.path.append(os.getcwd())
import abinitio_driver as driver
from abinitio_driver import AUtoEV
import scipy.optimize as opt
from scipy.interpolate import interp1d
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
except:
pass
# This is the driver script for omega tuning of long-range functionals such as BNL or wPBE
# The interface to ab initio programs is in separate file abinitio_driver.py
# and currently supports QCHEM and TeraChem
# Initial input files for ground and ionized state should be in files:
# optomega_gs.inp and optomega_is.inp
# OR
# optomega_scf.inp and optomega_na.inp in case you choose the "QCHEM_IEDC" PROGRAM option
# This file can be directly submitted to the queue: qsub -V -cwd opt_omega_ip.py aq/nq
# For further details see our wiki pages...
####### USER INPUT PARAMETERS ############################
#PROGRAM = "QCHEM"
PROGRAM = "QCHEM_PCM"
#PROGRAM = "QCHEM_IEDC"
#PROGRAM = "QCHEM_IEDC_PCM"
#PROGRAM = "TERACHEM"
METHOD = 1
# 0 - minimization
# 1 - interpolation
# 2 - read omega-deltaIP function from file omegas.dat and interpolate
# Options for interpolation
MIN_OMEGA = 200
BEST_GUESS = 300
MAX_OMEGA = 400
STEP = 20
# for interpolation, one needs at least 2 starting points
# i.e. (MAX_OMEGA-MIN_OMEGA)/STEP >=2
# of course, this inequality should hold as well: MIN_OMEGA < BEST_GUESS < MAX_OMEGA
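# Example: with MIN_OMEGA=200, MAX_OMEGA=400 and STEP=20 the interpolation
# starts from 11 points at omega/1000 = 0.200, 0.220, ..., 0.400, which
# satisfies both conditions above.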
# OPTIONS for minimizer
# accuracy and maximum iterations for the minimizer
THR_OMEGA = 10.000 # absolute accuracy, omega*1000
MAXITER = 20
# These are bounds for the minimizer, can be tighter if you know where to look
MIN_OMEGA_DEF = 10
MAX_OMEGA_DEF = 250
####### END OF USER INPUT #########################################
# Whether to check SCF convergence (implemented only for TC at the moment)
driver.CHECK_SCF = True
if BEST_GUESS <= MIN_OMEGA or BEST_GUESS >= MAX_OMEGA:
print("ERROR:Incorrect input value for BEST_GUESS")
sys.exit(1)
if METHOD == 1 and (MAX_OMEGA-MIN_OMEGA)/STEP < 1:
print("ERROR: Wrong initial interpolation interval. I need at least 2 initial points")
print("Adjust MIN_OMEGA or MAX_OMEGA or STEP")
sys.exit(1)
def minimize(min_omega, max_omega, thr_omega):
"""Minimization of a general univariate function"""
# http://docs.scipy.org/doc/scipy/reference/optimize.html
try:
res = opt.minimize_scalar(f_optomega_ip,method="bounded",bounds=(MIN_OMEGA_DEF, MAX_OMEGA_DEF), \
options={"xatol":thr_omega,"maxiter": MAXITER,"disp": True})
except NameError:
print("Whoops, you probably have old version of SciPy that does not have minimize_scalar!")
print("Use interpolation instead and comment out this code!")
raise
print(res)
if "success" in res:
suc = res.success # older scipy versions do not have this attribute
else:
suc = True
if suc == True:
return res.x
else:
print("Minimization probably did not converge! Check results carefully.")
sys.exit(2)
def f_optomega_ip(omega):
if PROGRAM == "TERACHEM":
dr = driver.Abinitio_driver_terachem()
elif PROGRAM == "QCHEM":
dr = driver.Abinitio_driver_qchem()
elif PROGRAM == "QCHEM_PCM":
dr = driver.Abinitio_driver_qchem_pcm()
elif PROGRAM == "QCHEM_IEDC":
dr = driver.Abinitio_driver_qchem_IEDC_gas()
elif PROGRAM == "QCHEM_IEDC_PCM":
dr = driver.Abinitio_driver_qchem_IEDC_pcm()
IP_dscf, IP_koop = dr.compute_ip(omega/1000.)
f = (IP_dscf - IP_koop)**2
return f
def interpolate(min_omega, max_omega, step, best_guess):
"""Interpolate for fixed omega range using cubic spline
Then find the root."""
omega = min_omega
if PROGRAM == "TERACHEM":
dr = driver.Abinitio_driver_terachem()
elif PROGRAM == "QCHEM":
dr = driver.Abinitio_driver_qchem()
elif PROGRAM == "QCHEM_PCM":
dr = driver.Abinitio_driver_qchem_pcm()
elif PROGRAM == "QCHEM_IEDC":
dr = driver.Abinitio_driver_qchem_IEDC_gas()
elif PROGRAM == "QCHEM_IEDC_PCM":
dr = driver.Abinitio_driver_qchem_IEDC_pcm()
deltaIP = []
omegas = []
# Initial points for interpolation, determined by the user via MAX_OMEGA, MIN_OMEGA and STEP
while omega <= max_omega:
IP_dscf, IP_koop = dr.compute_ip(omega/1000.)
deltaIP.append(IP_dscf-IP_koop)
omegas.append(omega)
omega += step
# Check whether deltaIP crosses zero
# If not, extend the interpolation interval
# This assumes a monotonic dependence of deltaIP on omega
while deltaIP[0] * deltaIP[-1] > 0:
if (deltaIP[-1] < deltaIP[-2] and deltaIP[-1] > 0) \
or (deltaIP[-1] > deltaIP[-2] and deltaIP[-1] < 0):
best_guess = omegas[-1] + step / 2.0
omega = omegas[-1] + step
omegas.append(omega)
IP_dscf, IP_koop = dr.compute_ip(omega/1000.)
deltaIP.append(IP_dscf-IP_koop)
else:
best_guess = omegas[0] - step / 2.0
omega = omegas[0] - step
omegas.insert(0,omega)
IP_dscf, IP_koop = dr.compute_ip(omega/1000.)
deltaIP.insert(0,IP_dscf-IP_koop)
# Interpolate the computed points
if len(omegas) >=4:
f_omega = interp1d(omegas, deltaIP, kind='cubic')
elif len(omegas) == 3:
f_omega = interp1d(omegas, deltaIP, kind='quadratic')
elif len(omegas) == 2:
f_omega = interp1d(omegas, deltaIP, kind='linear')
else:
print("ERROR: I need at least 2 points for interpolation, and I only got "+str(len(omegas)))
sys.exit(1)
# Plot the interpolated function for later inspection
try:
x = [ x + omegas[0] for x in range((omegas[-1]-omegas[0]))]
plt.plot(omegas, deltaIP, 'o', x, f_omega(x), "-")
plt.savefig("omega-deltaIP.png")
except:
pass
# Find the root of interpolated function deltaIP(omega)
# Brent method should be superior to newton
# It is also guaranteed not to step out of a given interval,
# which is crucial here, since f_omega function throws an exception in that case
res = opt.brentq(f_omega, omegas[0], omegas[-1])
return res
def interpolate_read(min_omega, max_omega, step, best_guess):
"""Interpolate for fixed omega range using cubic spline
Then find the root. Read omegas from s file"""
deltaIP = []
omegas = []
with open("omegas.dat","r") as f:
comm_first = True
for line in f:
l = line.split()
if not len(l):
continue
if l[0][0] == '#':
if comm_first:
comm_first = False
continue
else:
break
else:
omegas.append(float(l[0]))
deltaIP.append(float(l[1]))
# Check whether deltaIP crosses zero. If not, exit
# This assumes a monotonic dependence of deltaIP on omega
if deltaIP[0] * deltaIP[-1] > 0:
print("ERROR:could not find optimal omega for a computed range.")
sys.exit(1)
# Interpolate the computed points
if len(omegas) >=4:
f_omega = interp1d(omegas, deltaIP, kind='cubic')
elif len(omegas) == 3:
f_omega = interp1d(omegas, deltaIP, kind='quadratic')
elif len(omegas) == 2:
f_omega = interp1d(omegas, deltaIP, kind='linear')
else:
print("ERROR: I need at least 2 points for interpolation, and I only got "+str(len(omegas)))
sys.exit(1)
# Plot the interpolated function for later inspection
try:
    x = [xi + omegas[0] for xi in range(int(omegas[-1] - omegas[0]))]
    plt.plot(omegas, deltaIP, 'o', x, f_omega(x), "-")
    plt.savefig("omega-deltaIP.png")
except Exception:
    # Plotting is optional; never let a plotting failure abort the run.
    pass
# Find the root of interpolated function deltaIP(omega)
res = opt.brentq(f_omega, omegas[0], omegas[-1])
return res
#### Actual calculation starts here!
if METHOD == 0:
omega = minimize(MIN_OMEGA, MAX_OMEGA, THR_OMEGA)
elif METHOD == 1:
omega = interpolate(MIN_OMEGA, MAX_OMEGA, STEP, BEST_GUESS)
elif METHOD == 2:
omega = interpolate_read(MIN_OMEGA, MAX_OMEGA, STEP, BEST_GUESS)
print("Final tuned omega = ",omega)
if METHOD == 2:
sys.exit(0)
# This can be skipped if you want to save time
print("Recomputing with final omega...")
if PROGRAM == "TERACHEM":
dr = driver.Abinitio_driver_terachem()
if PROGRAM == "QCHEM":
dr = driver.Abinitio_driver_qchem()
if PROGRAM == "QCHEM_PCM":
dr = driver.Abinitio_driver_qchem_pcm()
if PROGRAM == "QCHEM_IEDC":
dr = driver.Abinitio_driver_qchem_IEDC_gas()
if PROGRAM == "QCHEM_IEDC_PCM":
dr = driver.Abinitio_driver_qchem_IEDC_pcm()
IP_dscf, IP_koop = dr.compute_ip(omega/1000.)
err = IP_dscf - IP_koop
print("Final IP_dscf:",IP_dscf*AUtoEV)
print("Final IP_exc_na:",IP_koop*AUtoEV)
print("Final deltaIP:",err*AUtoEV)
| 32.581481
| 103
| 0.665909
| 1,265
| 8,797
| 4.49249
| 0.23083
| 0.044343
| 0.042231
| 0.058068
| 0.468063
| 0.446771
| 0.446771
| 0.416329
| 0.397501
| 0.35351
| 0
| 0.017649
| 0.220643
| 8,797
| 269
| 104
| 32.702602
| 0.81126
| 0.276685
| 0
| 0.494318
| 0
| 0
| 0.146267
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022727
| false
| 0.017045
| 0.045455
| 0
| 0.090909
| 0.085227
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f36e08746ee116943eb44bc9ccc08813b7b6dbe
| 415
|
py
|
Python
|
test/test_pbp.py
|
Galtozzy/basketball_reference_scraper
|
fb0081f2ae146f3a7da3a17d4e30af0c0dc1124a
|
[
"MIT"
] | 191
|
2020-01-14T19:32:54.000Z
|
2022-03-29T17:57:19.000Z
|
test/test_pbp.py
|
Galtozzy/basketball_reference_scraper
|
fb0081f2ae146f3a7da3a17d4e30af0c0dc1124a
|
[
"MIT"
] | 59
|
2020-01-14T18:55:09.000Z
|
2022-03-03T21:10:03.000Z
|
test/test_pbp.py
|
Galtozzy/basketball_reference_scraper
|
fb0081f2ae146f3a7da3a17d4e30af0c0dc1124a
|
[
"MIT"
] | 76
|
2020-01-08T19:50:31.000Z
|
2022-03-31T18:52:06.000Z
|
import unittest
from basketball_reference_scraper.pbp import get_pbp
class TestPbp(unittest.TestCase):
def test_pbp(self):
df = get_pbp('2020-01-06', 'DEN', 'ATL')
expected_columns = ['QUARTER', 'TIME_REMAINING', 'DENVER_ACTION', 'ATLANTA_ACTION', 'DENVER_SCORE', 'ATLANTA_SCORE']
self.assertListEqual(list(df.columns), expected_columns)
if __name__ == '__main__':
unittest.main()
| 34.583333
| 124
| 0.710843
| 51
| 415
| 5.392157
| 0.666667
| 0.043636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022792
| 0.154217
| 415
| 11
| 125
| 37.727273
| 0.760684
| 0
| 0
| 0
| 0
| 0
| 0.233735
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 1
| 0.111111
| false
| 0
| 0.222222
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f376809bd6d755cb0caead50017abc148fc244a
| 978
|
py
|
Python
|
bin/grep.py
|
Blindfold/pk-mod
|
24f958b0d501a3b5d9393dcad1e69987c2448968
|
[
"Apache-2.0"
] | 1
|
2019-04-03T20:02:40.000Z
|
2019-04-03T20:02:40.000Z
|
bin/grep.py
|
Blindfold-Games/pk-mod
|
24f958b0d501a3b5d9393dcad1e69987c2448968
|
[
"Apache-2.0"
] | 3
|
2015-01-03T23:56:51.000Z
|
2015-01-15T09:16:46.000Z
|
bin/grep.py
|
Blindfold-Games/pk-mod
|
24f958b0d501a3b5d9393dcad1e69987c2448968
|
[
"Apache-2.0"
] | null | null | null |
import re
import os
from sys import argv
def grep(match):
def _do_grep_wrapper(match):
def _do_grep(lines):
if match(lines):
yield lines
return _do_grep
return _do_grep_wrapper(match)
def find(what, where, depth=True):
"""
:param what: str String to search for
:param where: str directory to start search in
:param regexp: bool If true then 'what' is a regexp, otherwise - use simple substring search
:return:
"""
r = re.compile(what, re.M)
res = []
for root, sub_dirs, files in os.walk(where, True):
if (not depth) and (root != where):
continue
for file_name in files:
    with open(os.path.join(root, file_name), 'r') as f:
        data = f.read()
    if r.search(data):
        res.append(os.path.join(root, file_name))
return res
if __name__ == '__main__':
if len(argv) > 2:
print(list(find(argv[1], argv[2], True)))
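# Example usage (hypothetical paths): from the shell,
#   python grep.py 'TODO' ./src
# prints the files under ./src whose contents match the regexp 'TODO'.
# find() can also be called directly; note that grep() above is a filter
# factory: grep(pred) returns a generator function that yields its argument
# only when pred(argument) is true.
#   matches = find(r'def \w+\(', '/some/project', depth=True)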
| 27.166667
| 96
| 0.5818
| 140
| 978
| 3.907143
| 0.464286
| 0.043876
| 0.036563
| 0.051188
| 0.157221
| 0.080439
| 0
| 0
| 0
| 0
| 0
| 0.004418
| 0.305726
| 978
| 36
| 97
| 27.166667
| 0.801178
| 0.190184
| 0
| 0
| 0
| 0
| 0.011765
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16
| false
| 0
| 0.12
| 0
| 0.4
| 0.04
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f39f0a7bb12ceef46b29fb32101f2f558a75220
| 2,023
|
py
|
Python
|
solution.py
|
nandita16gupta/CSV-Reading-using-Dynamic-Programming
|
793f9a9b23c2b1ea45d9ec71ea7070690932f9aa
|
[
"Apache-2.0"
] | null | null | null |
solution.py
|
nandita16gupta/CSV-Reading-using-Dynamic-Programming
|
793f9a9b23c2b1ea45d9ec71ea7070690932f9aa
|
[
"Apache-2.0"
] | null | null | null |
solution.py
|
nandita16gupta/CSV-Reading-using-Dynamic-Programming
|
793f9a9b23c2b1ea45d9ec71ea7070690932f9aa
|
[
"Apache-2.0"
] | null | null | null |
import csv
def inner(cell, spreadsheet):
try:
parts = cell.split()
if len(parts) == 0:
return 0.0
stack = []
for part in parts:
if part[0].isalpha():
col = ord(part[0]) - ord('a')
row = int(part[1:]) - 1
cell = spreadsheet[row][col]
value = solve(cell, spreadsheet)
if value == "#ERR":
return "#ERR"
stack.append(value)
elif part[0].isdigit() or part[0] == '.':
value = float(part)
stack.append(value)
elif part in ('+', '-', '*', '/'):
a = stack.pop()
b = stack.pop()
if part == '+':
stack.append(a + b)
elif part == '-':
stack.append(b - a)
elif part == '*':
stack.append(a * b)
elif part == '/':
stack.append(b / a)
else:
return "#ERR"
if len(stack) != 1:
return "#ERR"
return stack.pop()
except Exception:
    # Any malformed reference, bad token, or arithmetic error maps to "#ERR".
    return "#ERR"
visited = {}
def solve(cell, spreadsheet):
if cell in visited:
computed = visited[cell]
if computed is None:
# cycle detected
return "#ERR"
return computed
visited[cell] = None
value = inner(cell, spreadsheet)
visited[cell] = value
return value
if __name__ == "__main__":
rows = []
with open('input.csv') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
rows.append(row)
output_rows = []
for row in rows:
output_row = []
for cell in row:
output_row.append(solve(cell, rows))
output_rows.append(output_row)
with open('solution_csv_write.csv', 'w') as f:
writer = csv.writer(f)
for row in output_rows:
writer.writerow(row)
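# Worked example (hypothetical 2x2 sheet). Cells hold postfix (RPN) expressions,
# so "a1 b1 +" means spreadsheet[0][0] + spreadsheet[0][1]. Note that `visited`
# memoizes by cell *text*, so identical formulas share one cached result.
#   sheet = [["1", "2"], ["a1 b1 +", "b2 a2 *"]]
#   solve("a1 b1 +", sheet)  -> 3.0
#   solve("b2 a2 *", sheet)  -> "#ERR"  (the cell references itself: a cycle)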
| 24.373494
| 53
| 0.44439
| 213
| 2,023
| 4.14554
| 0.2723
| 0.074745
| 0.084938
| 0.064553
| 0.14043
| 0.08607
| 0.08607
| 0.08607
| 0.08607
| 0.08607
| 0
| 0.008711
| 0.432526
| 2,023
| 82
| 54
| 24.670732
| 0.760453
| 0.00692
| 0
| 0.109375
| 0
| 0
| 0.036889
| 0.010967
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.015625
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f3ca2663f904f54aa3ffae1453e96545934c8ab
| 959
|
py
|
Python
|
tests/test_auth.py
|
ChukwuEmekaAjah/buycoins_python
|
86547aa742364a0e308b1dfb5f7c73b4467b1e06
|
[
"MIT"
] | 1
|
2021-03-25T19:28:48.000Z
|
2021-03-25T19:28:48.000Z
|
tests/test_auth.py
|
ChukwuEmekaAjah/buycoins_python
|
86547aa742364a0e308b1dfb5f7c73b4467b1e06
|
[
"MIT"
] | null | null | null |
tests/test_auth.py
|
ChukwuEmekaAjah/buycoins_python
|
86547aa742364a0e308b1dfb5f7c73b4467b1e06
|
[
"MIT"
] | null | null | null |
from buycoins_client import Auth
import unittest
class TestAuthMethods(unittest.TestCase):
def test_invalid_secret_key_setup(self):
"""
Should throw an exception for invalid secret key
"""
with self.assertRaises(Exception) as ctx:
    Auth.setup("name", 3)
self.assertEqual(str(ctx.exception), "Invalid secret key. Secret key should be a string")
def test_invalid_public_key_setup(self):
"""
Should throw an exception for invalid secret key
"""
try:
Auth.setup(1,3)
except Exception as e:
self.assertEqual(str(e), "Invalid public key. Public key should be a string")
def test_valid_auth_setup(self):
"""
Should return public and secret key as username and password auth
"""
auth = Auth.setup("buycoins", "africa")
self.assertEqual(auth, True)
if __name__ == '__main__':
unittest.main()
| 26.638889
| 89
| 0.605839
| 116
| 959
| 4.836207
| 0.37069
| 0.096257
| 0.114082
| 0.064171
| 0.481283
| 0.481283
| 0.481283
| 0.392157
| 0.392157
| 0.392157
| 0
| 0.004511
| 0.306569
| 959
| 35
| 90
| 27.4
| 0.839098
| 0.169969
| 0
| 0.222222
| 0
| 0
| 0.173669
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.166667
| false
| 0
| 0.111111
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f3d20a100b0201057cb5b8f77818cba1ad9e63b
| 6,328
|
py
|
Python
|
icn/plc/main.py
|
PMaynard/ndn-water-treatment-testbed
|
926db68237b06f43f6e736f035201ed71fc153bc
|
[
"MIT"
] | 3
|
2021-01-20T00:54:09.000Z
|
2021-06-02T01:54:02.000Z
|
icn/plc/main.py
|
PMaynard/ndn-water-treatment-testbed
|
926db68237b06f43f6e736f035201ed71fc153bc
|
[
"MIT"
] | null | null | null |
icn/plc/main.py
|
PMaynard/ndn-water-treatment-testbed
|
926db68237b06f43f6e736f035201ed71fc153bc
|
[
"MIT"
] | null | null | null |
# from ui import UI
# from ui import UI_Element
import sys
import time
import threading
import socket
from plcrpcservice import PLCRPCClient
import pyndn
from pyndn import Name
from pyndn import Face
from pyndn import Interest
from pyndn.security import KeyChain
from pyndn.security.identity import IdentityManager
from pyndn.security import AesKeyParams
from pyndn import Data
from pyndn import MetaInfo
from pyndn.util.common import Common
import logging
logging.basicConfig()
log = logging.getLogger('PLC')
log.setLevel(logging.DEBUG)
class store(object):
def __init__(self, slaveid, register, address, value):
self.slaveid = slaveid
self.register = register
self.address = address
self.value = value
def __str__(self):
return "{} {} {} {} {}".format(self.name, self.slaveid, self.register, self.address, self.value)
class PLC(object):
def __init__(self, name=None):
# PLC Simulation
self.slaveid = 0x00
self.name = name
if not name:
self.name = socket.gethostname()
self.plcrpcclient = PLCRPCClient(rpc_server="0.0.0.0", rpc_port=8000, plc=self.name)
self.registered = False
self.speed = 0.2
self.db = {}
# NDN
self._callbackCount = 0
self.primary_prefix = "/example"
self.names = []
self.freshnessPeriod = 2000 # in milliseconds (2000 = 2s).
self.identify_manager = IdentityManager()
self.keyChain = KeyChain(self.identify_manager)
def _get_sensor_data(self):
sensor_data = self.plcrpcclient.readSensors()
for sensor in sensor_data:
register = sensor_data[sensor]['register_type']
address = int(sensor_data[sensor]['data_address'])
if register in ['c', 'd']:
value = bool(sensor_data[sensor]['value'])
elif register in ['h', 'i']:
value = int(sensor_data[sensor]['value'])
address = address + 1 # section 4.4 of specification
self.db[sensor] = store(self.slaveid, register, address, value)
def _registerPLC(self):
self.slaveid = self.plcrpcclient.registerPLC()
self.registered = True
log.debug("[PLC][%s] Registered on scadasim rpc" % self.name)
return True
def update(self):
# log.debug("[PLC][%s] Updating PLC values with sensor values" % self)
# while not self.queue.empty():
# # Update scadasim with any new values from Master
# fx, address, values = self.queue.get()
# log.debug("[PLC][%s] setting fx: %s register:%s to value:%s" %
# (self.name, fx, address, values))
# self.plcrpcclient.setValues(fx=fx, address=address, values=values)
self._get_sensor_data()
delay = (-time.time() % self.speed)
t = threading.Timer(delay, self.update, ())
t.daemon = True
t.start()
def set_speed(self, speed):
self.speed = speed
def __repr__(self):
return "%s" % self.name
def main(self):
log.debug("[PLC][%s] Initialized" % self.name)
while not self.registered:
log.debug(
"[PLC][%s] Trying to register with scadasim rpc" % self.name)
try:
self._registerPLC()
except KeyError:
    log.warning(
        "[PLC][%s] PLC not found within scadasim. Verify Docker "
        "Compose container names match the list of plcs in the "
        "scadasim config", self.name)
    time.sleep(1)
log.debug("[PLC][%s] Starting update service" % self.name)
self.update()
log.debug("[PLC][%s] Starting NDN Producer" % self.name)
# TODO: Move this setup stuff into a function and make dynamic.
log.info("Listening on: ")
for n in self.db:
# /ndn/plc2-site/plc2
name = Name("{0}/{1}-site/{1}/{2}".format(self.primary_prefix, self.name, n))
log.info("\t{}".format(name))
name_identiy = self.keyChain.createIdentityAndCertificate(name, self.keyChain.getDefaultKeyParams())
log.debug("Name Identify: {}".format(name_identiy))
self.face.setCommandSigningInfo(self.keyChain, name_identiy)
self.face.registerPrefix(name, self.onInterest, self.onRegisterFailed)
# log.debug("Registered Prefix: {} {}", str(self.primary_prefix), str(n))
# END LOOP
# Keep Running unless error.
while self._callbackCount < 1:
self.face.processEvents()
time.sleep(0.01)
self.face.shutdown()
# NDN Stuff
def ndnInit(self):
Interest.setDefaultCanBePrefix(True)
# TODO: Bug? Does not auto retry TCP if unix socket fails as says in docs.
# self.face = Face("localhost", 6363)
self.face = Face()
self.primary_prefix = "/ndn"
def onInterest(self, prefix, interest, face, interestFilterId, filter):
self._callbackCount = 0
# log.debug("prefix: '{}'".format(prefix))
# log.debug("interest: '{}'".format(interest))
# log.debug("face: '{}'".format(face))
# log.debug("interestFilterId: '{}'".format(interestFilterId))
# log.debug("filter: '{}'".format(filter))
data = Data()
#
# log.debug("----")
# for n in self.db:
# log.debug(n)
# log.debug(self.db[n].value)
# log.debug("----")
#
n = str(prefix).split("/")[-1]
log.debug("{} value '{}' ({})".format(prefix, self.db[n].value, self.freshnessPeriod))
data.setContent(str(self.db[n].value)) # TODO: Why does this need to be converted to string?
data.setName(prefix)
meta = MetaInfo()
meta.setFreshnessPeriod(self.freshnessPeriod)
data.setMetaInfo(meta)
self.keyChain.sign(data)
face.putData(data)
def onRegisterFailed(self, prefix):
self._callbackCount += 1
dump("Unable to register", prefix)
#
try:
plc = PLC(sys.argv[1])
except IndexError:  # no PLC name given on the command line
plc = PLC()
# Keep trying until we get a connection.
while True:
plc.ndnInit()
plc.main()
time.sleep(5)
| 31.326733
| 112
| 0.588021
| 731
| 6,328
| 5.023256
| 0.27907
| 0.041394
| 0.02097
| 0.022876
| 0.026144
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010191
| 0.286662
| 6,328
| 201
| 113
| 31.482587
| 0.803279
| 0.195006
| 0
| 0.032787
| 0
| 0
| 0.068684
| 0
| 0
| 0
| 0.000818
| 0.004975
| 0
| 1
| 0.098361
| false
| 0
| 0.131148
| 0.016393
| 0.270492
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f3eb22adbac011762c8a0158ac669343f090557
| 2,876
|
py
|
Python
|
cogs/administration.py
|
tigersharkpr13/AnsuraBot
|
035797121d8e7952bc38e32834cdb655c15cb703
|
[
"Unlicense"
] | null | null | null |
cogs/administration.py
|
tigersharkpr13/AnsuraBot
|
035797121d8e7952bc38e32834cdb655c15cb703
|
[
"Unlicense"
] | null | null | null |
cogs/administration.py
|
tigersharkpr13/AnsuraBot
|
035797121d8e7952bc38e32834cdb655c15cb703
|
[
"Unlicense"
] | null | null | null |
from typing import Union
import discord
from discord.ext import commands
import cogs.gamertags
from ansura.ansurabot import AnsuraBot
from ansura.ansuracontext import AnsuraContext
class Administration(commands.Cog):
def error(self, title, message=None, color=0xff0000):
    e = discord.Embed()
    e.colour = color
    e.title = title
    for k, v in (message or {}).items():
        e.add_field(name=k, value=v)
    return e
def __init__(self, bot: AnsuraBot):
self.bot = bot
@commands.is_owner()
@commands.command(aliases=["sgv"])
async def setgtval(self, ctx: AnsuraContext,
typ: str, user: Union[discord.Member, discord.User],
val: str):
ch: discord.TextChannel = ctx.channel
if not ch.permissions_for(ctx.author).administrator:
await ctx.send(embed=self.error("Permission error",
message={
"Message":
"You must have administrator permission"
})
)
return
if typ not in "xbox,mojang,youtube,twitch,mixer".split(","):
await ctx.send(embed=self.error("Invalid gametag type"))
await self.bot.get_cog("Help").help_(ctx, "setgtval")
return
util: cogs.gamertags.Util = self.bot.get_cog("Util")
db = util.db
if typ == "xbox":
typ = "xboxlive"
rec = db.lookup_gaming_record(user.id)
e = discord.Embed()
e.colour = user.color
e.title = user.display_name + " before"
e.add_field(name="XBox", value=rec[2] if rec[2] is not None else "N/A")
e.add_field(name="Mojang", value=rec[1] if rec[1] is not None else "N/A")
e.add_field(name="Youtube", value=rec[3] if rec[3] is not None else "N/A")
e.add_field(name="Twitch", value=rec[4] if rec[4] is not None else "N/A")
e.add_field(name="Mixer", value=rec[5] if rec[5] is not None else "N/A")
await ctx.send(embed=e)
db.set_gaming_record(user.id, typ, val)
rec = db.lookup_gaming_record(user.id)
e = discord.Embed()
e.colour = user.color
e.title = user.display_name + " after"
e.add_field(name="XBox", value=rec[2] if rec[2] is not None else "N/A")
e.add_field(name="Mojang", value=rec[1] if rec[1] is not None else "N/A")
e.add_field(name="Youtube", value=rec[3] if rec[3] is not None else "N/A")
e.add_field(name="Twitch", value=rec[4] if rec[4] is not None else "N/A")
e.add_field(name="Mixer", value=rec[5] if rec[5] is not None else "N/A")
await ctx.send(embed=e)
def setup(bot):
bot.add_cog(Administration(bot))
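# A hedged sketch (not part of the original cog) of how the duplicated
# before/after embed construction in setgtval could be factored into one
# helper; the rec[] indices are taken from the lookups above.
def _gamertag_embed(user, rec, suffix):
    e = discord.Embed()
    e.colour = user.color
    e.title = f"{user.display_name} {suffix}"
    for name, idx in (("XBox", 2), ("Mojang", 1), ("Youtube", 3),
                      ("Twitch", 4), ("Mixer", 5)):
        e.add_field(name=name, value=rec[idx] if rec[idx] is not None else "N/A")
    return e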
| 40.507042
| 92
| 0.561544
| 401
| 2,876
| 3.955112
| 0.239402
| 0.027743
| 0.062421
| 0.090164
| 0.474149
| 0.461538
| 0.428752
| 0.428752
| 0.428752
| 0.428752
| 0
| 0.012626
| 0.311544
| 2,876
| 70
| 93
| 41.085714
| 0.788384
| 0
| 0
| 0.33871
| 0
| 0
| 0.08484
| 0.011127
| 0
| 0
| 0.002782
| 0
| 0
| 1
| 0.048387
| false
| 0
| 0.096774
| 0
| 0.209677
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f3f0f56c3a1c070b48e8fbce26fe6e40715c8ef
| 357
|
py
|
Python
|
project/celery.py
|
kunugoda/jobbrd
|
19debcac7673a85eda4a8d1eb00e5537268bd601
|
[
"MIT"
] | 1
|
2020-06-17T05:25:42.000Z
|
2020-06-17T05:25:42.000Z
|
project/celery.py
|
kunugoda/jobbrd
|
19debcac7673a85eda4a8d1eb00e5537268bd601
|
[
"MIT"
] | null | null | null |
project/celery.py
|
kunugoda/jobbrd
|
19debcac7673a85eda4a8d1eb00e5537268bd601
|
[
"MIT"
] | null | null | null |
import os
from celery import Celery
from django.conf import settings
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project.settings')
app = Celery('jobboard')
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
print('Request: {0}'.format(self.request))
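# Typical invocation, assuming this module lives at project/celery.py and the
# app is exposed on the project package as in the standard Django setup:
#   celery -A project worker --loglevel=info
# and from a Django shell the task can be queued with:
#   from project.celery import debug_task
#   debug_task.delay()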
| 23.8
| 67
| 0.787115
| 50
| 357
| 5.48
| 0.6
| 0.072993
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003049
| 0.081232
| 357
| 14
| 68
| 25.5
| 0.832317
| 0
| 0
| 0
| 0
| 0
| 0.218487
| 0.061625
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.3
| 0
| 0.4
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f407417b73374a6afc645fcceeb6ced94f54f5e
| 2,388
|
py
|
Python
|
EGGS_labrad/clients/cryovac_clients/fma1700a_client.py
|
EGGS-Experiment/EGGS_Control
|
c29b3ab0e30dcb6e01d1ca3212ac64ad1506143b
|
[
"MIT"
] | 2
|
2021-12-26T05:00:54.000Z
|
2021-12-30T17:15:49.000Z
|
EGGS_labrad/clients/cryovac_clients/fma1700a_client.py
|
EGGS-Experiment/EGGS_Control
|
c29b3ab0e30dcb6e01d1ca3212ac64ad1506143b
|
[
"MIT"
] | null | null | null |
EGGS_labrad/clients/cryovac_clients/fma1700a_client.py
|
EGGS-Experiment/EGGS_Control
|
c29b3ab0e30dcb6e01d1ca3212ac64ad1506143b
|
[
"MIT"
] | null | null | null |
from time import time
from datetime import datetime
from twisted.internet.defer import inlineCallbacks
from EGGS_labrad.clients import GUIClient
from EGGS_labrad.clients.cryovac_clients.fma1700a_gui import fma1700a_gui
class fma1700a_client(GUIClient):
name = 'FMA1700A Client'
FLOWID = 877920
servers = {'fma': 'FMA1700A Server'}
def getgui(self):
if self.gui is None:
self.gui = fma1700a_gui()
return self.gui
@inlineCallbacks
def initClient(self):
# set recording stuff
self.c_record = self.cxn.context()
self.recording = False
# connect to signals
yield self.fma.signal__flow_update(self.FLOWID)
yield self.fma.addListener(listener=self.updateFlow, source=None, ID=self.FLOWID)
# start device polling if not already started
poll_params = yield self.fma.polling()
if not poll_params[0]:
yield self.fma.polling(True, 5.0)
def initGUI(self):
self.gui.record_button.toggled.connect(lambda status: self.record_flow(status))
# SLOTS
@inlineCallbacks
def record_flow(self, status):
"""
Creates a new dataset to record flow and
tells polling loop to add data to data vault.
"""
# set up datavault
self.recording = status
if self.recording:
self.starttime = time()
date = datetime.now()
year = str(date.year)
month = '{:02d}'.format(date.month)
trunk1 = '{0:s}_{1:s}_{2:02d}'.format(year, month, date.day)
trunk2 = '{0:s}_{1:02d}:{2:02d}'.format(self.name, date.hour, date.minute)
yield self.dv.cd(['', year, month, trunk1, trunk2], True, context=self.c_record)
yield self.dv.new('FMA1700A Flowmeter', [('Elapsed time', 't')],
[('Flowmeter', 'Flow rate', 'L/min')], context=self.c_record)
@inlineCallbacks
def updateFlow(self, c, flow):
"""
Updates GUI when values are received from server.
"""
self.gui.flow_display.setText(str(flow))
if self.recording:
elapsedtime = time() - self.starttime
yield self.dv.add(elapsedtime, flow, context=self.c_record)
if __name__ == "__main__":
from EGGS_labrad.clients import runClient
runClient(fma1700a_client)
| 33.166667
| 100
| 0.620603
| 293
| 2,388
| 4.945392
| 0.389079
| 0.043478
| 0.030366
| 0.043478
| 0.037267
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033869
| 0.270519
| 2,388
| 71
| 101
| 33.633803
| 0.797933
| 0.101759
| 0
| 0.108696
| 0
| 0
| 0.067593
| 0.010067
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108696
| false
| 0
| 0.130435
| 0
| 0.347826
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f43bc58bc8f57d5639beefb900d57b125412748
| 1,440
|
py
|
Python
|
cd4ml/app.py
|
joragupra/CD4ML-Scenarios
|
8c8886388260147cd5248dfa1945f60ebabfaacc
|
[
"MIT"
] | 1
|
2020-12-24T19:52:58.000Z
|
2020-12-24T19:52:58.000Z
|
cd4ml/app.py
|
joragupra/CD4ML-Scenarios
|
8c8886388260147cd5248dfa1945f60ebabfaacc
|
[
"MIT"
] | null | null | null |
cd4ml/app.py
|
joragupra/CD4ML-Scenarios
|
8c8886388260147cd5248dfa1945f60ebabfaacc
|
[
"MIT"
] | 1
|
2020-05-04T18:21:41.000Z
|
2020-05-04T18:21:41.000Z
|
from flask import Flask, render_template, request
import cd4ml.app_utils as utils
from cd4ml.fluentd_logging import FluentdLogger
app = Flask(__name__, template_folder='webapp/templates',
static_folder='webapp/static')
fluentd_logger = FluentdLogger()
@app.route('/')
def index():
return render_template('index.html')
@app.route('/replace_model', methods=["POST"])
def replace_model():
content = request.get_data(as_text=False)
utils.replace_model_file(content)
return "OK", 200
@app.route('/replace_encoder', methods=["POST"])
def replace_encoder():
content = request.get_data(as_text=False)
utils.replace_encoder_file(content)
return "OK", 200
@app.route('/prediction')
def get_prediction():
date_string = request.args.get('date')
item_nbr = request.args.get("item_nbr")
prediction_tuple = utils.get_prediction(item_nbr, date_string)
status = prediction_tuple[0]
prediction = prediction_tuple[1]
log_payload = {
'prediction': prediction,
'itemid': item_nbr,
'item_name': utils.get_product_name_from_id(item_nbr),
'date_string': date_string
}
log_prediction_console(log_payload)
fluentd_logger.log('prediction', log_payload)
if status == "ERROR":
return prediction, 503
else:
return "%d" % prediction, 200
def log_prediction_console(log_payload):
print('logging {}'.format(log_payload))
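# Example of exercising the service once it is running (hypothetical host,
# port, and query values):
#   curl 'http://localhost:5000/prediction?date=2017-08-15&item_nbr=99197'
# The model and encoder can be hot-swapped by POSTing the raw file bytes to
# /replace_model and /replace_encoder respectively.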
| 25.263158
| 66
| 0.702083
| 180
| 1,440
| 5.327778
| 0.333333
| 0.036496
| 0.031283
| 0.043796
| 0.216893
| 0.154327
| 0.154327
| 0.091762
| 0.091762
| 0
| 0
| 0.013468
| 0.175
| 1,440
| 56
| 67
| 25.714286
| 0.793771
| 0
| 0
| 0.1
| 0
| 0
| 0.116667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.075
| 0.025
| 0.325
| 0.025
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f44b877c61b52c5169fcc3dc901630593e11752
| 1,456
|
py
|
Python
|
api/crud/events.py
|
cnuland/tbml
|
8dca907011971a8ad21dfc5b5d6bec1ddbff0818
|
[
"MIT"
] | null | null | null |
api/crud/events.py
|
cnuland/tbml
|
8dca907011971a8ad21dfc5b5d6bec1ddbff0818
|
[
"MIT"
] | null | null | null |
api/crud/events.py
|
cnuland/tbml
|
8dca907011971a8ad21dfc5b5d6bec1ddbff0818
|
[
"MIT"
] | null | null | null |
from fastapi import HTTPException
from tortoise.exceptions import DoesNotExist
from db.models import Events
from schemas.events import EventsOutSchema
async def get_events():
return await EventsOutSchema.from_queryset(Events.all())
async def get_event(event_id) -> EventsOutSchema:
return await EventsOutSchema.from_queryset_single(Events.get(id=event_id))
async def create_event(event) -> EventsOutSchema:
event_dict = event.dict(exclude_unset=True)
event_obj = await Events.create(**event_dict)
return await EventsOutSchema.from_tortoise_orm(event_obj)
async def update_event(event_id, event) -> EventsOutSchema:
try:
db_event = await EventsOutSchema.from_queryset_single(Events.get(id=event_id))
except DoesNotExist:
raise HTTPException(status_code=404, detail=f"Event {event_id} not found")
await Events.filter(id=event_id).update(**event.dict(exclude_unset=True))
return await EventsOutSchema.from_queryset_single(Events.get(id=event_id))
async def delete_event(event_id):
try:
db_event = await EventsOutSchema.from_queryset_single(Events.get(id=event_id))
except DoesNotExist:
raise HTTPException(status_code=404, detail=f"Event {event_id} not found")
deleted_count = await Events.filter(id=event_id).delete()
if not deleted_count:
raise HTTPException(status_code=404, detail=f"Event {event_id} not found")
return f"Event {event_id} deleted"
| 35.512195
| 86
| 0.76511
| 198
| 1,456
| 5.414141
| 0.227273
| 0.084888
| 0.078358
| 0.149254
| 0.580224
| 0.498134
| 0.449627
| 0.449627
| 0.449627
| 0.449627
| 0
| 0.007223
| 0.144231
| 1,456
| 41
| 87
| 35.512195
| 0.85313
| 0
| 0
| 0.392857
| 0
| 0
| 0.070007
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.321429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f46b6c5cc84e0b05c2f63e339fe44af56c4515e
| 29,428
|
py
|
Python
|
sfx/sfx.py
|
Terry14/sfx
|
16bcf401ba3251b0de211276d97153469499515d
|
[
"MIT"
] | null | null | null |
sfx/sfx.py
|
Terry14/sfx
|
16bcf401ba3251b0de211276d97153469499515d
|
[
"MIT"
] | null | null | null |
sfx/sfx.py
|
Terry14/sfx
|
16bcf401ba3251b0de211276d97153469499515d
|
[
"MIT"
] | null | null | null |
import asyncio
import os
import unicodedata
import aiohttp
import discord
import lavalink
import unidecode
from redbot.core import Config, checks, commands
from redbot.core.utils.chat_formatting import pagify
from redbot.core.utils.predicates import MessagePredicate
from .api import generate_urls
try:
from redbot.core.utils._dpy_menus_utils import dpymenu
DPY_MENUS = True
except ImportError:
from redbot.core.utils.menus import DEFAULT_CONTROLS, menu
DPY_MENUS = False
from .voices import voices
class SFX(commands.Cog):
"""Plays uploaded sounds or text-to-speech."""
__version__ = "2.0.0"
def __init__(self, bot):
self.bot = bot
self.config = Config.get_conf(self, identifier=134621854878007296)
self.session = aiohttp.ClientSession()
user_config = {"voice": "Clara", "speed": 5}
guild_config = {"sounds": {}, "channels": []}
global_config = {"sounds": {}, "schema_version": 0}
self.config.register_user(**user_config)
self.config.register_guild(**guild_config)
self.config.register_global(**global_config)
lavalink.register_event_listener(self.ll_check)
self.bot.loop.create_task(self.check_config_version())
self.bot.loop.create_task(self.fill_channel_cache())
self.last_track_info = {}
self.current_sfx = {}
self.channel_cache = {}
# lag_time to compensate for skipping lavalink lag
self.lag_time = 1000
self.repeat_state = {}
def cog_unload(self):
self.bot.loop.create_task(self.session.close())
lavalink.unregister_event_listener(self.ll_check)
def format_help_for_context(self, ctx):
"""Thanks Sinbad"""
pre_processed = super().format_help_for_context(ctx)
return f"{pre_processed}\nCog Version: {self.__version__}"
async def check_config_version(self):
schema_version = await self.config.schema_version()
if schema_version == 0:
await self.config.clear_all_users()
await self.config.sounds.clear()
all_guilds = await self.config.all_guilds()
for guild in all_guilds:
await self.config.guild_from_id(guild).sounds.clear()
await self.config.schema_version.set(1)
async def fill_channel_cache(self):
all_guilds = await self.config.all_guilds()
for guild in all_guilds:
try:
self.channel_cache[guild] = all_guilds[guild]["channels"]
except KeyError:
pass # no channels set
# full credits to kable
# https://github.com/kablekompany/Kable-Kogs/blob/master/decancer/decancer.py#L67
@staticmethod
def decancer_text(text):
text = unicodedata.normalize("NFKC", text)
text = unicodedata.normalize("NFD", text)
text = unidecode.unidecode(text)
text = text.encode("ascii", "ignore")
text = text.decode("utf-8")
if text == "":
return
return text
@commands.command()
@commands.cooldown(
rate=1, per=1, type=discord.ext.commands.cooldowns.BucketType.guild
)
@commands.guild_only()
async def tts(self, ctx, *, text):
"""
Plays the given text as TTS in your current voice channel.
"""
if not ctx.author.voice or not ctx.author.voice.channel:
await ctx.send("You are not connected to a voice channel.")
return
author_data = await self.config.user(ctx.author).all()
author_voice = author_data["voice"]
author_speed = author_data["speed"]
text = self.decancer_text(text)
if text is None:
await ctx.send("That's not a valid message, sorry.")
return
char_number = len(text)
if char_number > 1000:
await ctx.send(
f"Sorry, I limit TTS to 1000 characters to avoid abuse. ({char_number}/1000)"
)
return
urls = generate_urls(author_voice, text, author_speed)
try:
    await self.play_sfx(ctx.author.voice.channel, ctx.channel, urls)
except Exception:
    await ctx.send(
        "Oops, an error occurred. If this continues please use the contact command to inform the bot owner."
    )
@commands.command()
@commands.cooldown(
rate=1, per=1, type=discord.ext.commands.cooldowns.BucketType.guild
)
@commands.guild_only()
async def sfx(self, ctx, sound: str):
"""
Plays an existing sound in your current voice channel.
If a guild SFX exists with the same name as a global one, the guild SFX will be played.
"""
if not ctx.author.voice or not ctx.author.voice.channel:
await ctx.send("You are not connected to a voice channel.")
return
guild_sounds = await self.config.guild(ctx.guild).sounds()
global_sounds = await self.config.sounds()
if sound not in guild_sounds.keys() and sound not in global_sounds.keys():
await ctx.send(
f"Sound **{sound}** does not exist. Try `{ctx.clean_prefix}listsfx` for a list."
)
return
if sound in guild_sounds.keys():
link = guild_sounds[sound]
else:
link = global_sounds[sound]
try:
await self.play_sfx(ctx.author.voice.channel, ctx.channel, [link])
except Exception:
await ctx.send(
"Oops, an error occured. If this continues please use the contact command to inform the bot owner."
)
@commands.command()
@commands.cooldown(
rate=1, per=1, type=discord.ext.commands.cooldowns.BucketType.guild
)
@commands.guild_only()
async def qsfx(self, ctx, sound: str):
"""
Queues an existing sound in your current voice channel.
If a guild SFX exists with the same name as a global one, the guild SFX will be played.
"""
if not ctx.author.voice or not ctx.author.voice.channel:
await ctx.send("You are not connected to a voice channel.")
return
guild_sounds = await self.config.guild(ctx.guild).sounds()
global_sounds = await self.config.sounds()
if sound not in guild_sounds.keys() and sound not in global_sounds.keys():
await ctx.send(
f"Sound **{sound}** does not exist. Try `{ctx.clean_prefix}listsfx` for a list."
)
return
if sound in guild_sounds.keys():
link = guild_sounds[sound]
else:
link = global_sounds[sound]
try:
await self.queue_sfx(ctx.author.voice.channel, ctx.channel, [link])
except Exception:
await ctx.send(
"Oops, an error occured. If this continues please use the contact command to inform the bot owner."
)
@commands.command()
@commands.admin_or_permissions(manage_guild=True)
@commands.guild_only()
async def addsfx(self, ctx, name: str, link: str = None):
"""
Adds a new SFX to this guild.
Either upload the file as a Discord attachment or use a link.
Syntax:`[p]addsfx <name>` or `[p]addsfx <name> <link>`.
"""
guild_sounds = await self.config.guild(ctx.guild).sounds()
attachments = ctx.message.attachments
if len(attachments) > 1 or (attachments and link):
await ctx.send("Please only try to add one SFX at a time.")
return
url = ""
filename = ""
if attachments:
attachment = attachments[0]
url = attachment.url
elif link:
url = "".join(link)
else:
await ctx.send(
"You must provide either a Discord attachment or a direct link to a sound."
)
return
filename = "".join(url.split("/")[-1:]).replace("%20", "_")
file_name, file_extension = os.path.splitext(filename)
if file_extension not in [".wav", ".mp3"]:
await ctx.send(
"Sorry, only SFX in .mp3 and .wav format are supported at this time."
)
return
if name in guild_sounds.keys():
await ctx.send(
f"A sound with that filename already exists. Either choose a new name or use {ctx.clean_prefix}delsfx to remove it."
)
return
guild_sounds[name] = url
await self.config.guild(ctx.guild).sounds.set(guild_sounds)
await ctx.send(f"Sound **{name}** has been added.")
@commands.command()
@commands.admin_or_permissions(manage_guild=True)
@commands.guild_only()
async def delsfx(self, ctx, soundname: str):
"""
Deletes an existing sound.
"""
guild_sounds = await self.config.guild(ctx.guild).sounds()
if soundname not in guild_sounds.keys():
await ctx.send(
f"Sound **{soundname}** does not exist. Try `{ctx.prefix}listsfx` for a list."
)
return
del guild_sounds[soundname]
await self.config.guild(ctx.guild).sounds.set(guild_sounds)
await ctx.send(f"Sound **{soundname}** deleted.")
@commands.command()
@commands.guild_only()
async def addglobalsfx(self, ctx, name: str, link: str = None):
"""
Adds a new SFX to the bot globally.
Either upload the file as a Discord attachment or use a link.
Syntax:`[p]addsfx <name>` or `[p]addsfx <name> <link>`.
"""
global_sounds = await self.config.sounds()
attachments = ctx.message.attachments
if len(attachments) > 1 or (attachments and link):
await ctx.send("Please only try to add one SFX at a time.")
return
url = ""
if attachments:
attachment = attachments[0]
url = attachment.url
elif link:
url = "".join(link)
else:
await ctx.send(
"You must provide either a Discord attachment or a direct link to a sound."
)
return
filename = "".join(url.split("/")[-1:]).replace("%20", "_")
file_name, file_extension = os.path.splitext(filename)
if file_extension not in [".wav", ".mp3"]:
await ctx.send(
"Sorry, only SFX in .mp3 and .wav format are supported at this time."
)
return
if name in global_sounds.keys():
await ctx.send(
f"A sound with that filename already exists. Either choose a new name or use {ctx.clean_prefix}delglobalsfx to remove it."
)
return
global_sounds[name] = url
await self.config.sounds.set(global_sounds)
await ctx.send(f"Sound **{name}** has been added.")
@commands.command()
@checks.is_owner()
async def delglobalsfx(self, ctx, name: str):
"""
Deletes an existing global sound.
"""
global_sounds = await self.config.sounds()
if name not in global_sounds.keys():
await ctx.send(
f"Sound **{name}** does not exist. Try `{ctx.prefix}listsfx` for a list."
)
return
del global_sounds[name]
await self.config.sounds.set(global_sounds)
await ctx.send(f"Sound **{name}** deleted.")
@commands.command()
@commands.guild_only()
async def listsfx(self, ctx):
"""
Lists all available sounds for this server.
"""
guild_sounds = await self.config.guild(ctx.guild).sounds()
global_sounds = await self.config.sounds()
if (len(guild_sounds.items()) + len(global_sounds.items())) == 0:
await ctx.send(f"No sounds found. Use `{ctx.prefix}addsfx` to add one.")
return
txt = ""
if guild_sounds:
txt += "**Guild Sounds**:\n"
for sound in guild_sounds:
txt += sound + "\n"
if global_sounds:
txt += "\n**Global Sounds**:\n"
for sound in global_sounds:
    if guild_sounds and sound in guild_sounds:
        txt += sound + " (disabled)\n"
    else:
        txt += sound + "\n"
pages = [p for p in pagify(text=txt, delims="\n")]
for page in pages:
await ctx.send(page)
@commands.command()
@commands.guild_only()
async def fplay(self, ctx, link: str = None):
"""
Adds a file to the music queue.
Either upload the file as a Discord attachment or use a link.
Syntax:`[p]fplay` or `[p]fplay <link>`.
"""
attachments = ctx.message.attachments
if len(attachments) > 1 or (attachments and link):
await ctx.send("Please only try to add one file at a time.")
return
url = ""
filename = ""
if attachments:
attachment = attachments[0]
url = attachment.url
elif link:
url = "".join(link)
else:
await ctx.send(
"You must provide either a Discord attachment or a direct link to a sound."
)
return
filename = "".join(url.split("/")[-1:]).replace("%20", "_")
file_name, file_extension = os.path.splitext(filename)
if file_extension not in [".wav", ".mp3"]:
await ctx.send(
"Sorry, only files in .mp3 and .wav format are supported at this time."
)
return
if not ctx.author.voice or not ctx.author.voice.channel:
await ctx.send("You are not connected to a voice channel.")
return
guild_sounds = await self.config.guild(ctx.guild).sounds()
global_sounds = await self.config.sounds()
try:
await self.queue_sfx(ctx.author.voice.channel, ctx.channel, [url])
except Exception:
await ctx.send(
"Oops, an error occured. If this continues please use the contact command to inform the bot owner."
)
@commands.command(aliases=["setvoice"])
async def myvoice(self, ctx, voice: str = None):
"""
Changes your TTS voice.
Type `[p]listvoices` to view all possible voices.
If no voice is provided, it will show your current voice.
"""
current_voice = await self.config.user(ctx.author).voice()
if voice is None:
await ctx.send(f"Your current voice is **{current_voice}**")
return
voice = voice.title()
if voice in voices.keys():
await self.config.user(ctx.author).voice.set(voice)
await ctx.send(f"Your new TTS voice is: **{voice}**")
else:
await ctx.send(
f"Sorry, that's not a valid voice. You can view voices with the `{ctx.clean_prefix}listvoices` command."
)
@commands.command(aliases=["setspeed"])
async def myspeed(self, ctx, speed: int = None):
"""
Changes your TTS speed.
If no speed is provided, it will show your current speed.
The speed range is 0-10 (higher is faster, 5 is normal.)
"""
author_data = await self.config.user(ctx.author).all()
current_speed = author_data["speed"]
current_voice = author_data["voice"]
support_speed = voices[current_voice]["speed"]
if speed is None:
await ctx.send(f"Your current speed is **{current_speed}**")
return
if speed < 0:
await ctx.send("Your speed must be greater than or equal to 0.")
return
if speed > 10:
await ctx.send("Your speed must be less than or equal to 10.")
return
await self.config.user(ctx.author).speed.set(speed)
if support_speed:
await ctx.send(f"Your new speed is **{speed}**.")
else:
await ctx.send(
f"Your new speed is **{speed}**. "
"Keep in mind your current voice doesn't support speed changes, "
"so you won't see a difference until you change your voice to one that supports speed."
)
@commands.command()
async def listlangs(self, ctx):
"""
List all the valid language codes for TTS voices.
"""
langs = sorted(
set([voices[voice]["languageCode"] for voice in voices.keys()] + ["all"])
)
embed = discord.Embed(
title="Valid Language Codes",
color=await ctx.embed_color(),
description=", ".join(langs),
)
await ctx.send(embed=embed)
@commands.command()
async def listvoices(self, ctx, lang="en"):
"""
Lists all the TTS voices in the selected language.
If no language is provided, it will list the voices in English.
Use 'all' as the language code to view all voices.
"""
langs = set([voices[voice]["languageCode"] for voice in voices.keys()])
ALL_VOICES = False
if lang not in langs:
    if lang == "all":
        ALL_VOICES = True
    else:
        await ctx.send(
            f"Sorry, that's not a valid language code. You can view all valid language codes with the `{ctx.clean_prefix}listlangs` command."
        )
        return
if ALL_VOICES:
voice_data = voices
else:
voice_data = {
voice: voices[voice]
for voice in voices.keys()
if voices[voice]["languageCode"] == lang
}
qs = {"low": [], "medium": [], "high": []}
for voice in voice_data:
embed = discord.Embed(color=await ctx.embed_color(), title=voice)
embed.description = (
"```yaml\n"
f"Gender: {voice_data[voice]['gender']}\n"
f"Language: {voice_data[voice]['languageName']}\n"
f"Quality: {voice_data[voice]['quality']}\n"
f"Supports Speed: {voice_data[voice]['speed']}\n"
f"Translates: {voice_data[voice]['translates']}\n"
f"Provider: {voice_data[voice]['provider']}"
"```"
)
q = voice_data[voice]["quality"].lower()
qs[q].append(embed)
pages = qs["high"] + qs["medium"] + qs["low"]
for index, embed in enumerate(pages):
if len(pages) > 1:
embed.set_footer(text=f"Voice {index + 1}/{len(pages)} | {lang} voices")
if DPY_MENUS:
await dpymenu(ctx, pages, timeout=60)
else:
if len(pages) == 1:
await ctx.send(embed=pages[0])
else:
await menu(ctx, pages, DEFAULT_CONTROLS, timeout=60)
@commands.group()
@commands.guild_only()
@commands.admin_or_permissions(manage_guild=True)
async def ttschannel(self, ctx):
"""
Configures automatic TTS channels.
"""
pass
@ttschannel.command()
async def add(self, ctx, channel: discord.TextChannel):
"""
Adds a channel for automatic TTS.
"""
channel_list = await self.config.guild(ctx.guild).channels()
if channel.id not in channel_list:
channel_list.append(channel.id)
await self.config.guild(ctx.guild).channels.set(channel_list)
self.channel_cache[ctx.guild.id] = channel_list
await ctx.send(
f"Okay, {channel.mention} will now be used as a TTS channel."
)
else:
await ctx.send(
f"{channel.mention} is already a TTS channel, did you mean use the `{ctx.clean_prefix}ttschannel remove` command?"
)
@ttschannel.command(aliases=["delete", "del"])
async def remove(self, ctx, channel: discord.TextChannel):
"""
Removes a channel for automatic TTS.
"""
channel_list = await self.config.guild(ctx.guild).channels()
if channel.id in channel_list:
channel_list.remove(channel.id)
await self.config.guild(ctx.guild).channels.set(channel_list)
self.channel_cache[ctx.guild.id] = channel_list
await ctx.send(f"Okay, {channel.mention} is no longer a TTS channel.")
else:
await ctx.send(
f"{channel.mention} isn't a TTS channel, did you mean use the `{ctx.clean_prefix}ttschannel add` command?"
)
@ttschannel.command()
async def clear(self, ctx):
"""
Removes all the channels for automatic TTS.
"""
channel_list = await self.config.guild(ctx.guild).channels()
if not channel_list:
await ctx.send("There's no channels in the config.")
else:
try:
await ctx.send(
"Are you sure you want to clear all this server's TTS channels? Respond with yes or no."
)
predicate = MessagePredicate.yes_or_no(ctx, user=ctx.author)
await ctx.bot.wait_for("message", check=predicate, timeout=30)
except asyncio.TimeoutError:
    await ctx.send(
        "You never responded, please use the command again to clear all of this server's TTS channels."
    )
    return
if predicate.result:
await self.config.guild(ctx.guild).channels.clear()
del self.channel_cache[ctx.guild.id]
await ctx.send("Okay, I've cleared all TTS channels for this server.")
else:
await ctx.send("Okay, I won't clear any TTS channels.")
@ttschannel.command()
async def list(self, ctx):
"""
Shows all the channels for automatic TTS.
"""
try:
channel_list = self.channel_cache[ctx.guild.id]
except KeyError:
channel_list = None
if not channel_list:
await ctx.send("This server doesn't have any TTS channels set up.")
else:
text = "".join(
"<#" + str(channel) + "> - " + str(channel) + "\n"
for channel in channel_list
)
pages = [p for p in pagify(text=text, delims="\n")]
embeds = []
for index, page in enumerate(pages):
embed = discord.Embed(
title="Automatic TTS Channels",
color=await ctx.embed_colour(),
description=page,
)
if len(pages) > 1:
embed.set_footer(text=f"Page {index+1}/{len(pages)}")
embeds.append(embed)
if DPY_MENUS:
await dpymenu(ctx, embeds, timeout=60)
else:
if len(pages) == 1:
await ctx.send(embed=embeds[0])
else:
await menu(ctx, embeds, DEFAULT_CONTROLS, timeout=60)
@commands.Cog.listener()
async def on_message_without_command(self, message: discord.Message):
if not message.guild:
return
if message.author.bot:
return
if not message.channel.permissions_for(message.guild.me).send_messages:
return
if await self.bot.allowed_by_whitelist_blacklist(who=message.author) is False:
return
if await self.bot.cog_disabled_in_guild(self, message.guild):
return
try:
channel_list = self.channel_cache[message.guild.id]
except KeyError:
return
if not channel_list:
return
if message.channel.id not in channel_list:
return
if not message.author.voice or not message.author.voice.channel:
await message.channel.send("You are not connected to a voice channel.")
return
author_data = await self.config.user(message.author).all()
author_voice = author_data["voice"]
author_speed = author_data["speed"]
text = self.decancer_text(message.clean_content)
if text is None:
await message.channel.send("That's not a valid message, sorry.")
return
char_number = len(text)
if char_number > 1000:
await message.channel.send(
f"Sorry, I limit TTS to 1000 characters to avoid abuse. ({char_number}/1000)"
)
return
urls = generate_urls(author_voice, text, author_speed)
try:
await self.play_sfx(message.author.voice.channel, message.channel, urls)
except Exception:
await message.channel.send(
"Oops, an error occured. If this continues please use the contact command to inform the bot owner."
)
async def play_sfx(self, vc, channel, link):
try:
player = lavalink.get_player(vc.guild.id)
except NoLavalinkNode: # Lavalink hasn't been initialised yet
if channel and type != "autotts":
await channel.send(
"Either the Audio cog is not loaded or lavalink has not been initialized yet. If this continues to happen, please contact the bot owner."
)
return
except KeyError:
player = await lavalink.connect(vc)
link = link[0] # could be rewritten to add ALL links
tracks = await player.load_tracks(query=link)
if not tracks.tracks:
await channel.send(
"Something went wrong. Either the SFX is invalid, or the TTS host is down."
)
return
track = tracks.tracks[0]
self.repeat_state[vc.guild.id] = player.repeat
player.repeat = False
if player.current is None and not player.queue:
player.queue.append(track)
self.current_sfx[vc.guild.id] = track
await player.play()
return
try:
csfx = self.current_sfx[vc.guild.id]
except KeyError:
csfx = None
if csfx is not None:
player.queue.insert(0, track)
await player.skip()
self.current_sfx[player.guild.id] = track
return
self.last_track_info[player.guild.id] = (player.current, player.position)
self.current_sfx[player.guild.id] = track
player.queue.insert(0, track)
player.queue.insert(1, player.current)
await player.skip()
async def queue_sfx(self, vc, channel, link):
try:
player = lavalink.get_player(vc.guild.id)
except NoLavalinkNode: # Lavalink hasn't been initialised yet
if channel and type != "autotts":
await channel.send(
"Either the Audio cog is not loaded or lavalink has not been initialized yet. If this continues to happen, please contact the bot owner."
)
return
except KeyError:
player = await lavalink.connect(vc)
link = link[0] # could be rewritten to add ALL links
tracks = await player.load_tracks(query=link)
if not tracks.tracks:
await channel.send(
"Something went wrong. Either the SFX is invalid, or the TTS host is down."
)
return
track = tracks.tracks[0]
if player.current is None and not player.queue:
player.queue.append(track)
self.current_sfx[vc.guild.id] = track
await player.play()
return
player.queue.append(track)
return
async def ll_check(self, player, event, reason):
try:
csfx = self.current_sfx[player.guild.id]
except KeyError:
csfx = None
try:
lti = self.last_track_info[player.guild.id]
except KeyError:
lti = None
if csfx is None and lti is None:
return
if (
event == lavalink.LavalinkEvents.TRACK_EXCEPTION
and csfx is not None
or event == lavalink.LavalinkEvents.TRACK_STUCK
and csfx is not None
):
del self.current_sfx[player.guild.id]
return
if (
event == lavalink.LavalinkEvents.TRACK_END
and player.current is None
and csfx is not None
):
del self.current_sfx[player.guild.id]
return
if (
event == lavalink.LavalinkEvents.TRACK_END
and lti is not None
and player.current is not None
and player.current.track_identifier == lti[0].track_identifier
):
if player.guild.id in self.current_sfx:
del self.current_sfx[player.guild.id]
await player.pause()
await player.seek(lti[1] + self.lag_time)
await player.pause(False)
if player.guild.id in self.last_track_info:
del self.last_track_info[player.guild.id]
if player.guild.id in self.repeat_state:
player.repeat = self.repeat_state[player.guild.id]
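# The interrupt-and-resume trick used by play_sfx/ll_check above, reduced to a
# self-contained sketch with no lavalink dependency. All names here are
# illustrative, not part of the real lavalink API: remember the playing track
# and its position, splice the SFX in front, and seek back once the SFX ends.
class MiniPlayer:
    def __init__(self):
        self.queue = []
        self.current = None
        self.position = 0
        self.resume_info = None

    def play_sfx(self, sfx_track):
        if self.current is not None:
            # Save what was playing so it can be resumed afterwards.
            self.resume_info = (self.current, self.position)
            self.queue.insert(0, self.current)
        self.current, self.position = sfx_track, 0

    def on_track_end(self):
        self.current = self.queue.pop(0) if self.queue else None
        if self.resume_info and self.current is self.resume_info[0]:
            # Seek back to where the interrupted track left off.
            self.position = self.resume_info[1]
            self.resume_info = None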
| 35.327731
| 157
| 0.571259
| 3,584
| 29,428
| 4.607422
| 0.118862
| 0.027615
| 0.038515
| 0.018107
| 0.628475
| 0.58245
| 0.538303
| 0.500696
| 0.486647
| 0.47502
| 0
| 0.006234
| 0.329584
| 29,428
| 832
| 158
| 35.370192
| 0.830757
| 0.012471
| 0
| 0.524409
| 0
| 0.014173
| 0.173608
| 0.018003
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006299
| false
| 0.00315
| 0.023622
| 0
| 0.113386
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f477811434f0fbba0fb2564e885e5ce2cde1027
| 581
|
py
|
Python
|
norm_files.py
|
jncraton/ipeds-data
|
e17b051bac3d4d112d83a85f38dc1422d4fb584b
|
[
"MIT"
] | null | null | null |
norm_files.py
|
jncraton/ipeds-data
|
e17b051bac3d4d112d83a85f38dc1422d4fb584b
|
[
"MIT"
] | null | null | null |
norm_files.py
|
jncraton/ipeds-data
|
e17b051bac3d4d112d83a85f38dc1422d4fb584b
|
[
"MIT"
] | null | null | null |
"""
Normalizes contents for all data files.
- Converts column names to uppercase
- Converts data values to uppercase
- Converts to Unix line endings
- Removes trailing whitespace from all lines
"""
import os
csvs = ['data/' + f for f in os.listdir('data') if f.endswith('.csv')]
for f in csvs:
lf = f.lower()
os.rename(f,lf)
print(lf)
content = ''
with open(lf,'r',encoding='cp1252') as fr:
content = fr.read()
content = '\n'.join([l.strip() for l in content.splitlines()])
with open(lf,'w',encoding='cp1252') as fw:
fw.write(content.upper())
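# Worked example (hypothetical file): a line such as
#   "unitid,instnm  \r\n"  in data/HD2020.csv
# is rewritten as
#   "UNITID,INSTNM\n"      in data/hd2020.csv
# i.e. the file is renamed to lowercase while its contents are uppercased,
# trailing whitespace is stripped, and CRLF endings become Unix newlines.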
| 19.366667
| 70
| 0.650602
| 89
| 581
| 4.247191
| 0.573034
| 0.058201
| 0.100529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017167
| 0.197935
| 581
| 29
| 71
| 20.034483
| 0.793991
| 0.327022
| 0
| 0
| 0
| 0
| 0.075916
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.083333
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f528ea187b09ea162a161716dde9aff8b7b565d
| 1,042
|
py
|
Python
|
examples/chain_mdp.py
|
kngwyu/rlpy
|
329166de28d311d8f87358a62c38f40a7318fe07
|
[
"BSD-3-Clause"
] | 3
|
2019-12-07T13:34:02.000Z
|
2021-03-29T10:20:05.000Z
|
examples/chain_mdp.py
|
kngwyu/rlpy
|
329166de28d311d8f87358a62c38f40a7318fe07
|
[
"BSD-3-Clause"
] | 14
|
2019-09-29T03:09:09.000Z
|
2022-01-13T03:17:48.000Z
|
examples/chain_mdp.py
|
kngwyu/rlpy3
|
329166de28d311d8f87358a62c38f40a7318fe07
|
[
"BSD-3-Clause"
] | null | null | null |
import click
from rlpy.domains import ChainMDP
from rlpy.tools.cli import run_experiment
import methods
def select_domain(chain_size):
return ChainMDP(chain_size=chain_size)
def select_agent(name, domain, max_steps, seed, **kwargs):
if name is None or name == "lspi":
return methods.tabular_lspi(domain, max_steps)
elif name == "nac":
return methods.tabular_nac(domain)
elif name == "tabular-q":
return methods.tabular_q(domain, initial_learn_rate=0.1)
elif name == "ifddk-q":
return methods.ifddk_q(domain, initial_learn_rate=0.1)
elif name == "psrl":
return methods.tabular_psrl(domain, seed=seed)
else:
raise NotImplementedError("Method {} is not supported".format(name))
if __name__ == "__main__":
run_experiment(
select_domain,
select_agent,
default_max_steps=10000,
default_num_policy_checks=10,
default_checks_per_policy=50,
other_options=[click.Option(["--chain-size"], type=int, default=4)],
)
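# A hedged programmatic example using only names defined in this file (the
# equivalent CLI flags come from rlpy's run_experiment and are not shown here):
#   domain = select_domain(chain_size=4)
#   agent = select_agent("tabular-q", domain, max_steps=10000, seed=1)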
| 28.944444
| 76
| 0.681382
| 138
| 1,042
| 4.876812
| 0.442029
| 0.096582
| 0.118871
| 0.056464
| 0.098068
| 0.098068
| 0.098068
| 0.098068
| 0.098068
| 0
| 0
| 0.017032
| 0.211132
| 1,042
| 35
| 77
| 29.771429
| 0.801703
| 0
| 0
| 0
| 0
| 0
| 0.070058
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.142857
| 0.035714
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f5495f299c3cac72ffba7fb46905bf9c811295d
| 694
|
py
|
Python
|
migrations/versions/6e2656ef034b_.py
|
haichungcn/h-ticketbox
|
37d3a3054a92fbb3702cac10f87621762b68bae2
|
[
"Apache-2.0"
] | null | null | null |
migrations/versions/6e2656ef034b_.py
|
haichungcn/h-ticketbox
|
37d3a3054a92fbb3702cac10f87621762b68bae2
|
[
"Apache-2.0"
] | 1
|
2021-06-02T00:42:03.000Z
|
2021-06-02T00:42:03.000Z
|
migrations/versions/6e2656ef034b_.py
|
haichungcn/h-ticketbox
|
37d3a3054a92fbb3702cac10f87621762b68bae2
|
[
"Apache-2.0"
] | null | null | null |
"""empty message
Revision ID: 6e2656ef034b
Revises: f8f949ce4522
Create Date: 2019-11-26 11:05:54.376467
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6e2656ef034b'
down_revision = 'f8f949ce4522'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('tickettypes_name_key', 'tickettypes', type_='unique')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_unique_constraint('tickettypes_name_key', 'tickettypes', ['name'])
# ### end Alembic commands ###
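# Standard Alembic usage for this revision:
#   alembic upgrade 6e2656ef034b     # drops the tickettypes_name_key constraint
#   alembic downgrade f8f949ce4522   # recreates it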
| 23.931034
| 80
| 0.708934
| 82
| 694
| 5.865854
| 0.573171
| 0.056133
| 0.087318
| 0.095634
| 0.345114
| 0.182952
| 0.182952
| 0.182952
| 0
| 0
| 0
| 0.089655
| 0.164265
| 694
| 28
| 81
| 24.785714
| 0.739655
| 0.425072
| 0
| 0
| 0
| 0
| 0.265193
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f54a8fce56dc2266fdcba4960db2d6b32f72f6a
| 1,940
|
py
|
Python
|
python/heterocl/mlir/context.py
|
chhzh123/heterocl
|
856e9b8ad877d11280a7e457e91ca89803c05570
|
[
"Apache-2.0"
] | null | null | null |
python/heterocl/mlir/context.py
|
chhzh123/heterocl
|
856e9b8ad877d11280a7e457e91ca89803c05570
|
[
"Apache-2.0"
] | null | null | null |
python/heterocl/mlir/context.py
|
chhzh123/heterocl
|
856e9b8ad877d11280a7e457e91ca89803c05570
|
[
"Apache-2.0"
] | null | null | null |
from contextvars import ContextVar
from hcl_mlir.dialects import hcl as hcl_d
from hcl_mlir.ir import *
ImperativeLoopNestCount = ContextVar("ImperativeLoopNestCount", default=1)
ImperativeLoopDepth = ContextVar("ImperativeLoopDepth", default=0)
StageName = ContextVar("StageName", default="")
NestedCompute = ContextVar("NestedCompute", default=0)
class UniqueName(object):
scalar_idx = 0
loop_idx = 0
tensor_idx = 0
stage_idx = 0
schedule_idx = 0
reduction_axis_idx = 0
def __init__(self):
pass
@classmethod
def get(cls, case="stage"):
if case == "stage":
# Imperative computing stage
name = "stage_" + str(cls.stage_idx)
cls.stage_idx += 1
elif case == "loop":
name = "loop_" + str(cls.loop_idx)
cls.loop_idx += 1
elif case == "scalar":
name = "scalar_" + str(cls.scalar_idx)
cls.scalar_idx += 1
elif case == "tensor":
name = "compute_" + str(cls.tensor_idx)
cls.tensor_idx += 1
elif case == "schedule":
name = "schedule_" + str(cls.schedule_idx)
cls.schedule_idx += 1
elif case == "reduction_axis":
name = "reduction_axis_" + str(cls.loop_idx)
cls.reduction_axis_idx += 1
else:
raise RuntimeError(f"Unrecognized case in get_unique_name: {case}")
return name
class GlobalContext(object):
def __init__(self):
self.ctx = None
self.loc = None
def get_context(self):
return self.ctx
def set_context(self):
self.ctx = Context()
hcl_d.register_dialect(self.ctx)
self.loc = Location.unknown(self.ctx)
def get_location(self):
return self.loc
global_ctx = GlobalContext()
get_context = global_ctx.get_context
set_context = global_ctx.set_context
get_location = global_ctx.get_location
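# Hedged usage sketch (not part of the original module). UniqueName keeps a
# separate counter per kind, so successive calls yield distinct names:
#
#   UniqueName.get("loop")    # -> "loop_0"
#   UniqueName.get("loop")    # -> "loop_1"
#   UniqueName.get("tensor")  # -> "compute_0"
#
# set_context() must run once before get_context()/get_location() return
# usable MLIR handles; until then both return None.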
| 27.714286
| 79
| 0.619588
| 232
| 1,940
| 4.939655
| 0.267241
| 0.020942
| 0.034904
| 0.052356
| 0.027923
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01073
| 0.279381
| 1,940
| 69
| 80
| 28.115942
| 0.809013
| 0.013402
| 0
| 0.036364
| 0
| 0
| 0.107741
| 0.012029
| 0
| 0
| 0
| 0
| 0
| 1
| 0.109091
| false
| 0.018182
| 0.054545
| 0.036364
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f572ce92d4767535e92d6069a13c0b878ad4d2b
| 1,216
|
py
|
Python
|
378. Kth Smallest Element in a Sorted Matrix.py
|
XinchaoGou/MyLeetCode
|
bba0ab077374f7da2cb1a990266bc59fa7ddf23c
|
[
"MIT"
] | null | null | null |
378. Kth Smallest Element in a Sorted Matrix.py
|
XinchaoGou/MyLeetCode
|
bba0ab077374f7da2cb1a990266bc59fa7ddf23c
|
[
"MIT"
] | null | null | null |
378. Kth Smallest Element in a Sorted Matrix.py
|
XinchaoGou/MyLeetCode
|
bba0ab077374f7da2cb1a990266bc59fa7ddf23c
|
[
"MIT"
] | null | null | null |
from typing import List
import heapq
# Approach 1: flatten everything and sort
class Solution:
def kthSmallest(self, matrix: List[List[int]], k: int) -> int:
res = sorted(sum(matrix,[]))
return res[k-1]
# Approach 2: min-heap merge of the sorted rows
class Solution:
def kthSmallest(self, matrix: List[List[int]], k: int) -> int:
n = len(matrix)
hpq = [(matrix[i][0], i, 0) for i in range(n)]
heapq.heapify(hpq)
for i in range(k-1):
num, x, y = heapq.heappop(hpq)
if y != n-1:
heapq.heappush(hpq, (matrix[x][y+1], x, y+1))
return heapq.heappop(hpq)[0]
# Approach 3: binary search on the value range
class Solution:
def kthSmallest(self, matrix: List[List[int]], k: int) -> int:
def check(mid):
i, j = n-1, 0
num = 0
while i >= 0 and j < n:
if matrix[i][j] <= mid:
num += i + 1
j += 1
else:
i -= 1
return num >= k
n = len(matrix)
left, right = matrix[0][0], matrix[-1][-1]
while left<right:
mid = (left+right)//2
if check(mid):
right = mid
else:
left = mid+1
return left
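# Hedged usage sketch using the example from the problem statement; the
# last-defined Solution (binary search) is exercised, and the other two
# approaches return the same answer:
if __name__ == "__main__":
    matrix = [[1, 5, 9], [10, 11, 13], [12, 13, 15]]
    print(Solution().kthSmallest(matrix, 8))  # expected: 13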
| 27.022222
| 66
| 0.449836
| 162
| 1,216
| 3.376543
| 0.277778
| 0.071298
| 0.087751
| 0.14808
| 0.301645
| 0.301645
| 0.301645
| 0.301645
| 0.301645
| 0.301645
| 0
| 0.029371
| 0.412007
| 1,216
| 44
| 67
| 27.636364
| 0.735664
| 0.013158
| 0
| 0.27027
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108108
| false
| 0
| 0.054054
| 0
| 0.351351
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f5774372518e14045e4add17d37c16fbf360cfe
| 10,289
|
py
|
Python
|
episim/model.py
|
jm-begon/episim
|
705f80b782c5653a0d8b6e53614f34c12917cb43
|
[
"BSD-3-Clause"
] | null | null | null |
episim/model.py
|
jm-begon/episim
|
705f80b782c5653a0d8b6e53614f34c12917cb43
|
[
"BSD-3-Clause"
] | null | null | null |
episim/model.py
|
jm-begon/episim
|
705f80b782c5653a0d8b6e53614f34c12917cb43
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import datetime
from collections import defaultdict
import numpy as np
from scipy import sparse
from episim.ontology import Ontology
from episim.plot.modeling import System, Accumulator
from .data import State
class EulerSimulator(object):
"""
Explicit Euler method
"""
def __init__(self, *dx_dt, step_size=1.):
self.step_size = step_size
self.dx_dt = dx_dt
self.N = len(dx_dt)
def __call__(self, *x, dt=1):
dx = np.zeros(self.N)
h = self.step_size
x = np.array(x)
n_steps_per_dt = int(1. / self.step_size)
for i in range(int(dt)):
for t in range(n_steps_per_dt):
                for j, dxi_dt in enumerate(self.dx_dt):
                    dx[j] = dxi_dt(*x)
x = x + h * dx
yield x
class LinNonLinEulerSimulator(object):
"""
P : p
"""
def __init__(self, dx_dt_lin, dx_dt_dict, step_size=1.):
if hasattr(M, "tocsr"):
dx_dt_lin = dx_dt_lin.tocsr()
self.dx_dt_matrix = dx_dt_lin
self.dx_dt_dict = dx_dt_dict
        self.N = dx_dt_lin.shape[0]
self.step_size = step_size
def __call__(self, *x, dt=1):
dx = np.zeros(self.N)
x = np.array(x)
h = self.step_size
n_steps_per_dt = int(1. / self.step_size)
for i in range(int(dt)):
for t in range(n_steps_per_dt):
dx *= 0
# Linear part
dx[:] = self.dx_dt_matrix.dot(x)
# Non linear
                for idx, f in self.dx_dt_dict.items():
                    dx[idx] += f(*x)
x = x + h * dx
yield x
class F(object):
def __init__(self, callable, label):
self.label = label
self.callable = callable
def __call__(self, *args, **kwargs):
return self.callable(*args, **kwargs)
def __str__(self):
return self.label
class Dynamic(object):
@classmethod
def from_nodes(cls, *node_and_time_deriv):
nodes = []
dx_dt = []
for node, dxi_dt in node_and_time_deriv:
nodes.append(node)
dx_dt.append(dxi_dt)
sorted_nodes = [x for x in nodes]
sorted_nodes.sort(key=lambda n: n.index)
names = [x.name for x in sorted_nodes]
dynamic = cls(*names)
for name, dxi_dt in zip(names, dx_dt):
dynamic[name] = dxi_dt
return dynamic
def __init__(self, *variable_names):
self.variable_names = variable_names
self.var2idx = {s: i for i, s in enumerate(variable_names)}
self.dx_dt = [F(lambda *x: 0, "0") for _ in range(len(variable_names))]
def _idx(self, key):
try:
idx = int(key)
except (TypeError, ValueError):
idx = self.var2idx[key]
return idx
def __setitem__(self, key, value):
self.dx_dt[self._idx(key)] = value
def __getitem__(self, item):
return self.dx_dt[self._idx(item)]
def long_repr(self):
s = ""
for idx, name in enumerate(self.variable_names):
s += "d{}/dt = {}{}".format(name, self.dx_dt[idx], os.linesep)
return s
def __iter__(self):
return iter(self.dx_dt)
class Model(object):
@classmethod
def compute_parameters(cls, virus, population):
return tuple()
@classmethod
def factory(cls, initial_state, virus, population, resolution=0.1):
t = cls.compute_parameters(virus, population)
model = cls(*t, resolution=resolution)
return model.set_state(initial_state)
def __init__(self, resolution=0.1):
self.current_state = None
self.resolution = resolution
self.ontology = Ontology.default_ontology()
def _compute_reproduction_number(self, n_susceptible, n_total):
return 0
def set_state(self, state):
queriable = self.ontology(state)
R = self._compute_reproduction_number(queriable.susceptible,
queriable.population)
state.reproduction_number = R
if state.n_infection is None:
state.n_infection = queriable.infected
self.current_state = state
return self
def _state2variables(self, state):
return tuple()
def _variables2state(self, date, *values):
return State(date)
def run(self, n_steps=1):
variables = self._state2variables(self.current_state)
date = self.current_state.date
plus_one = datetime.timedelta(days=1)
for variables in self.simulator(*variables, dt=n_steps):
date = date + plus_one
state = self._variables2state(date, *variables)
self.set_state(state)
yield state
class SEIRS(Model):
"""
beta: float
        transmission coefficient: average number of contacts per person per unit time,
multiplied by the probability of disease transmission at a contact
between a susceptible person and an infectious person
gamma: float
        1/D, where D is the average infectious time
ksi:
re-susceptibility rate (depends on the fraction of alive, recovered
people will not develop a lasting immunity and depends on the time
before the immunity drops)
"""
@classmethod
def compute_parameters(cls, virus, population):
beta = population.contact_frequency * virus.transmission_rate
kappa = 1. / virus.exposed_duration
gamma = 1. / virus.infectious_duration
ksi = virus.immunity_drop_rate
return beta, kappa, gamma, ksi
def __init__(self, beta=0, kappa=0, gamma=0, ksi=0, resolution=0.1):
        if resolution is None:
            resolution = 0.1
super().__init__(resolution=resolution)
self.beta = beta
self.kappa = kappa
self.gamma = gamma
self.ksi = ksi
self.current_state = None
S, E, I, R = System.new("S", "E", "I", "R")
N = S + E + I + R
N.override_name("N")
S2E = self.beta * S * I / N
S2E_acc = Accumulator(S2E, self.resolution)
E2I = self.kappa * E
I2R = self.gamma * I
R2S = self.ksi * R
dS_dt = -S2E + R2S
dE_dt = S2E_acc - E2I
dI_dt = E2I - I2R
dR_dt = I2R - R2S
self.dynamic = Dynamic.from_nodes((S, dS_dt), (E, dE_dt),
(I, dI_dt), (R, dR_dt))
self.acc_n_infect = S2E_acc
self.simulator = EulerSimulator(*iter(self.dynamic),
step_size=resolution)
def __repr__(self):
s = "{}(beta={}, kappa={}, gamma={}, ksi={}, resolution={})".format(
self.__class__.__name__,
repr(self.beta),
repr(self.kappa),
repr(self.gamma),
repr(self.ksi),
repr(self.resolution),
)
if self.current_state is None:
return s
return s + ".set_state({})".format(repr(self.current_state))
def __str__(self):
return "{}(beta={:.2e}, kappa={:.2e}, gamma={:.2e}, ksi={:.2e})" \
"".format(self.__class__.__name__,
self.beta, self.kappa,
self.gamma, self.ksi)
# def __str__(self):
# return self.dynamic.long_repr()
def _compute_reproduction_number(self, n_susceptible, n_total):
return self.beta / self.gamma * n_susceptible / float(n_total)
def _state2variables(self, state):
zero = lambda x: 0 if x is None else x
S = zero(state.susceptible)
E = zero(state.exposed)
I = zero(state.infectious)
R = zero(state.recovered)
return S, E, I, R
def _variables2state(self, date, *values):
S, E, I, R = values
n_infection = self.current_state.n_infection
n_infection += self.acc_n_infect.value
self.acc_n_infect.reset()
state = State(date)
state.susceptible = S
state.exposed = E
state.infectious = I
state.recovered = R
state.n_infection = n_infection
return state
class SIR(Model):
@classmethod
def compute_parameters(cls, virus, population):
beta = population.contact_frequency * virus.transmission_rate
gamma = 1. / (virus.exposed_duration + virus.infectious_duration)
return beta, gamma
def __init__(self, beta, gamma, resolution=0.1):
super().__init__(resolution)
self.beta = beta
self.gamma = gamma
S, I, R = System.new("S", "I", "R")
N = S + I + R
N.override_name("N")
S2I = self.beta * S * I / N
I2R = self.gamma * I
dS_dt = -S2I
dI_dt = S2I - I2R
dR_dt = I2R
self.dynamic = Dynamic.from_nodes((S, dS_dt), (I, dI_dt), (R, dR_dt))
        self.simulator = EulerSimulator(*iter(self.dynamic), step_size=resolution)
def __repr__(self):
s = "{}(beta={}, gamma={}, resolution={})".format(
self.__class__.__name__,
repr(self.beta),
repr(self.gamma),
repr(self.resolution),
)
if self.current_state is None:
return s
return s + ".set_state({})".format(repr(self.current_state))
def __str__(self):
return "{}(beta={:.2e}, gamma={:.2e})" \
"".format(self.__class__.__name__,
self.beta, self.gamma)
def _compute_reproduction_number(self, n_susceptible, n_total):
return self.beta / self.gamma * n_susceptible / float(n_total)
def _state2variables(self, state):
zero = lambda x: 0 if x is None else x
S = zero(state.susceptible)
I = zero(state.infectious)
R = zero(state.recovered)
return S, I, R
def _variables2state(self, date, *values):
S, I, R = values
n_infection = self.current_state.n_infection
n_infection += (self.current_state.susceptible - S)
state = State(date)
state.susceptible = S
state.infectious = I
state.recovered = R
state.n_infection = n_infection
return state
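# Hedged sanity check (not part of the original module): EulerSimulator on
# the scalar ODE dx/dt = -x with x(0) = 1 should track exp(-t); with
# step_size=0.01 and dt=5 the final value is close to exp(-5) ~ 0.0067.
if __name__ == "__main__":
    sim = EulerSimulator(lambda x: -x, step_size=0.01)
    x = (1.0,)
    for x in sim(*x, dt=5):
        pass
    print(x[0])  # roughly 0.0067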
| 26.180662
| 80
| 0.57197
| 1,297
| 10,289
| 4.298381
| 0.157286
| 0.01722
| 0.018655
| 0.003587
| 0.4287
| 0.357309
| 0.327892
| 0.307085
| 0.259731
| 0.259731
| 0
| 0.009597
| 0.321508
| 10,289
| 392
| 81
| 26.247449
| 0.788999
| 0.055593
| 0
| 0.388
| 0
| 0
| 0.023884
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.144
| false
| 0
| 0.032
| 0.048
| 0.312
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f5af941019a09b58bc8c7a46b832a62890985af
| 2,446
|
py
|
Python
|
db/schema.py
|
aatrubilin/sqlalchemy_sessions
|
8f99c3bf42da7224bbb6622ab23222ee1ebf1627
|
[
"MIT"
] | null | null | null |
db/schema.py
|
aatrubilin/sqlalchemy_sessions
|
8f99c3bf42da7224bbb6622ab23222ee1ebf1627
|
[
"MIT"
] | null | null | null |
db/schema.py
|
aatrubilin/sqlalchemy_sessions
|
8f99c3bf42da7224bbb6622ab23222ee1ebf1627
|
[
"MIT"
] | null | null | null |
import logging
from datetime import datetime
import sqlalchemy as sa
import sqlalchemy.orm as so
from .base import Base, Session
__all__ = ["User", "Message"]
logger = logging.getLogger(__name__)
class User(Base):
__tablename__ = "users"
id = sa.Column(sa.Integer, primary_key=True)
nickname = sa.Column(sa.String, unique=True)
first_name = sa.Column(sa.String, nullable=True)
last_name = sa.Column(sa.String, nullable=True)
utc_created_at = sa.Column(sa.DateTime, default=datetime.utcnow)
messages = so.relationship("Message", lazy='dynamic')
query = Session.query_property()
def __init__(self, nickname, first_name=None, last_name=None):
self.nickname = nickname
self.first_name = first_name
self.last_name = last_name
def __repr__(self):
return "<User({s.id!r}, {s.nickname!r})>".format(s=self)
def __str__(self):
full_name = ""
if self.first_name:
full_name += self.first_name
if self.last_name:
if full_name:
full_name += " "
full_name += self.last_name
return full_name or self.nickname
@classmethod
def get_or_create(cls, nickname, **kwargs):
user = cls.query.filter(cls.nickname == nickname).one_or_none()
if user is None:
user = cls(nickname, **kwargs)
Session.add(user)
Session.flush()
logger.info("Created %r", user)
else:
logger.debug("Got %r", user)
return user
def create_message(self, text):
return Message.create(self.id, str(text))
class Message(Base):
__tablename__ = "messages"
id = sa.Column(sa.Integer, primary_key=True)
user_id = sa.Column(sa.Integer, sa.ForeignKey(User.id, ondelete="CASCADE"), nullable=False)
text = sa.Column(sa.String, default=str)
utc_created_at = sa.Column(sa.DateTime, default=datetime.utcnow)
query = Session.query_property()
def __init__(self, user_id, text):
self.user_id = user_id
self.text = text
def __repr__(self):
return "<Message({s.id!r}, {s.user_id!r}, {s.text!r})>".format(s=self)
def __str__(self):
return self.text
@classmethod
def create(cls, user_id, text):
message = cls(user_id, text)
Session.add(message)
Session.flush()
logger.info("Created %r", message)
return message
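# Hedged usage sketch (not part of the original module); assumes .base
# binds Session to a live engine and the tables have been created:
#
#   user = User.get_or_create("alice", first_name="Alice")
#   message = user.create_message("hello world")
#   Session.commit()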
| 27.795455
| 95
| 0.629191
| 320
| 2,446
| 4.565625
| 0.234375
| 0.049281
| 0.061602
| 0.043806
| 0.292266
| 0.279261
| 0.238193
| 0.11499
| 0.069815
| 0.069815
| 0
| 0
| 0.248978
| 2,446
| 87
| 96
| 28.114943
| 0.795318
| 0
| 0
| 0.215385
| 0
| 0.015385
| 0.061325
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.138462
| false
| 0
| 0.076923
| 0.061538
| 0.569231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f5e5337671f2aa26669d1f985e1feb6f9bb2487
| 3,075
|
py
|
Python
|
app/eventFrameTemplates/forms.py
|
DeschutesBrewery/brewerypi
|
5459dfc6b1ed415920c13a8a7c9a2d3d3c82099f
|
[
"MIT"
] | 27
|
2017-11-27T05:01:05.000Z
|
2020-11-14T19:52:26.000Z
|
app/eventFrameTemplates/forms.py
|
DeschutesBrewery/brewerypi
|
5459dfc6b1ed415920c13a8a7c9a2d3d3c82099f
|
[
"MIT"
] | 259
|
2017-11-23T00:43:26.000Z
|
2020-11-03T01:07:30.000Z
|
app/eventFrameTemplates/forms.py
|
DeschutesBrewery/brewerypi
|
5459dfc6b1ed415920c13a8a7c9a2d3d3c82099f
|
[
"MIT"
] | 8
|
2018-10-29T04:39:29.000Z
|
2020-10-01T22:18:12.000Z
|
from flask_wtf import FlaskForm
from wtforms import HiddenField, IntegerField, SelectField, StringField, SubmitField, ValidationError
from wtforms.validators import Length, Required
from .. models import EventFrameTemplate
class CopyEventFrameTemplateForm(FlaskForm):
name = StringField("Name", validators = [Required(), Length(1, 45)])
description = StringField("Description", validators = [Length(0, 255)])
toElementTemplate = SelectField("To Element Template", validators = [Required()], coerce = int)
requestReferrer = HiddenField()
submit = SubmitField("Save")
def validate_name(self, field):
validationError = False
eventFrameTemplate = EventFrameTemplate.query.filter_by(ElementTemplateId = self.toElementTemplate.data, Name = field.data).first()
if eventFrameTemplate is not None:
# Trying to copy an eventFrameTemplate using a name that already exists.
validationError = True
if validationError:
raise ValidationError('The name "{}" already exists.'.format(field.data))
class EventFrameTemplateForm(FlaskForm):
parentEventFrameTemplateId = HiddenField()
name = StringField("Name", validators = [Required(), Length(1, 45)])
order = IntegerField("Order", validators = [Required()])
description = StringField("Description", validators = [Length(0, 255)])
eventFrameTemplateId = HiddenField()
elementTemplateId = HiddenField()
requestReferrer = HiddenField()
submit = SubmitField("Save")
def validate_name(self, field):
validationError = False
if self.elementTemplateId.data == "":
eventFrameTemplate = EventFrameTemplate.query.filter_by(Name = field.data,
ParentEventFrameTemplateId = self.parentEventFrameTemplateId.data).first()
else:
eventFrameTemplate = EventFrameTemplate.query.filter_by(ElementTemplateId = self.elementTemplateId.data, Name = field.data).first()
if eventFrameTemplate:
if self.eventFrameTemplateId.data == "":
# Trying to add a new eventFrameTemplate using a name that already exists.
validationError = True
else:
if int(self.eventFrameTemplateId.data) != eventFrameTemplate.EventFrameTemplateId:
# Trying to change the name of an eventFrameTemplate to a name that already exists.
validationError = True
if validationError:
raise ValidationError('The name "{}" already exists.'.format(field.data))
def validate_order(self, field):
validationError = False
eventFrameTemplate = EventFrameTemplate.query.filter_by(Order = field.data, ParentEventFrameTemplateId = self.parentEventFrameTemplateId.data).first()
if eventFrameTemplate:
if self.eventFrameTemplateId.data == "":
# Trying to add a new eventFrameTemplate using an order that already exists.
validationError = True
else:
if int(self.eventFrameTemplateId.data) != eventFrameTemplate.EventFrameTemplateId:
# Trying to change the order of an eventFrameTemplate to an order that already exists.
validationError = True
if validationError:
raise ValidationError('The order "{}" already exists.'.format(field.data))
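# Hedged note (not part of the original module): both validators follow the
# same pattern -- look up an existing EventFrameTemplate with the proposed
# name/order, then raise ValidationError only when the match is a different
# record than the one being edited (eventFrameTemplateId mismatch).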
| 45.220588
| 152
| 0.766504
| 309
| 3,075
| 7.601942
| 0.226537
| 0.044274
| 0.036186
| 0.068114
| 0.707961
| 0.675181
| 0.675181
| 0.502767
| 0.463602
| 0.397616
| 0
| 0.005285
| 0.138537
| 3,075
| 67
| 153
| 45.895522
| 0.881465
| 0.125203
| 0
| 0.641509
| 0
| 0
| 0.055887
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056604
| false
| 0
| 0.075472
| 0
| 0.433962
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f605b59e4b42a83b06301dd95460d66a85a140f
| 3,751
|
py
|
Python
|
flask_demo.py
|
tlinc/cyber-ng-18
|
40dd088b5785e75e59afded17f71ea50d64ae77f
|
[
"MIT"
] | null | null | null |
flask_demo.py
|
tlinc/cyber-ng-18
|
40dd088b5785e75e59afded17f71ea50d64ae77f
|
[
"MIT"
] | null | null | null |
flask_demo.py
|
tlinc/cyber-ng-18
|
40dd088b5785e75e59afded17f71ea50d64ae77f
|
[
"MIT"
] | null | null | null |
import os
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from cryptography.hazmat.backends import default_backend
from stegano import lsb
from flask import Flask, render_template, request, redirect, url_for
from werkzeug.utils import secure_filename
UPLOAD_FOLDER = '/home/pi/Desktop/StegyCat/pics'
app = Flask(__name__, template_folder='templates')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def stego_in(ct, mac, nonce, picture):
secret_message = {'msg': ct, 'nc': nonce, 'mc': mac}
secret_message = str(secret_message)
secret_image = lsb.hide('./pics/cat.png', secret_message)
secret_image.save('./secretpics/secret_image.png')
#print(var)
def stego_out(picture):
hidden_ct = lsb.reveal(picture)
#Parse here
dt = eval(hidden_ct)
message = dt['msg']
nonce = dt['nc']
mac = dt['mc']
return message, nonce, mac
def decrypt(message, nonce, mac):
f = open("key.txt", "r")
string = f.read()
dict = eval(string)
key = dict['key']
#ctlength = len(hidden_ct)
#nonce = hidden_ct[ctlength:]
backend = default_backend()
cipher = Cipher(algorithms.AES(key), modes.CTR(nonce), backend=backend)
decryptor = cipher.decryptor()
msg = decryptor.update(message) + decryptor.finalize()
print(msg)
digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
digest.update(msg)
cmpmac = digest.finalize()
if mac != cmpmac:
return 0
else:
return msg
def encrypt(msg, email):
backend = default_backend()
# Salts should be randomly generated
salt = os.urandom(16)
nonce = os.urandom(16)
# derive
kdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
salt=salt,
iterations=100000,
backend=backend
)
key = kdf.derive(email.encode('UTF-8'))
dict = {'key': key}
f = open("key.txt" ,"w")
f.write(str(dict))
# verify
kdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
salt=salt,
iterations=100000,
backend=backend
)
#kdf.verify(b"tim@gmail.com", key)
cipher = Cipher(algorithms.AES(key), modes.CTR(nonce), backend=backend)
encryptor = cipher.encryptor()
ct = encryptor.update(msg.encode('UTF-8')) + encryptor.finalize()
#newct = ct + nonce
digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
digest.update(msg.encode('UTF-8'))
mac = digest.finalize()
return ct, mac, nonce
@app.route('/')
def index():
return render_template('create.html')
@app.route('/get-info', methods=['POST', 'GET'])
def get_info():
if request.method == 'POST':
result = request.form
picture = result.getlist('file')
msg = result.get('message')
email = result.get('email')
#write key(email) to file
msg, mac, nonce = encrypt(msg, email)
stego_in(msg, mac, nonce, picture)
#redirect(url_for('encrypt', msg=msg, email=email))
return render_template("decrypt.html")
@app.route('/get_decrypt', methods=['POST', 'GET'])
def get_decrypt():
if request.method == 'POST':
# picture = request.form['file']
# filename = secure_filename(file.filename)
# file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
message, nonce, mac = stego_out('./secretpics/secret_image.png')
#get key from file
pt = decrypt(message, nonce, mac)
return render_template("display.html", message = pt)
#read key from file
if __name__ == '__main__':
app.run(debug=True)
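# Hedged round-trip sketch (not part of the original script); assumes
# ./pics/cat.png exists and the working directory is writable. Note that
# stego_in() currently ignores its `picture` argument and always hides the
# payload in ./pics/cat.png:
#
#   ct, mac, nonce = encrypt("hello", "user@example.com")
#   stego_in(ct, mac, nonce, None)
#   message, nonce, mac = stego_out('./secretpics/secret_image.png')
#   print(decrypt(message, nonce, mac))  # b'hello' on success, 0 on MAC mismatch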
| 27.379562
| 76
| 0.643295
| 467
| 3,751
| 5.062099
| 0.30621
| 0.029611
| 0.037225
| 0.040609
| 0.197547
| 0.168359
| 0.168359
| 0.168359
| 0.168359
| 0.168359
| 0
| 0.013633
| 0.217809
| 3,751
| 136
| 77
| 27.580882
| 0.792093
| 0.111704
| 0
| 0.222222
| 0
| 0
| 0.084163
| 0.026848
| 0
| 0
| 0
| 0
| 0
| 1
| 0.077778
| false
| 0
| 0.088889
| 0.011111
| 0.244444
| 0.011111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f61bacc0966d711145c05f1a6526934fd3ce1d0
| 1,585
|
py
|
Python
|
ex0095.py
|
EwertonRosendo/PastaDeExercicios
|
68d23194b87ce1c8405c70fcceb3378955815d7d
|
[
"MIT"
] | null | null | null |
ex0095.py
|
EwertonRosendo/PastaDeExercicios
|
68d23194b87ce1c8405c70fcceb3378955815d7d
|
[
"MIT"
] | null | null | null |
ex0095.py
|
EwertonRosendo/PastaDeExercicios
|
68d23194b87ce1c8405c70fcceb3378955815d7d
|
[
"MIT"
] | null | null | null |
jogador = dict()
lista_de_jogadores = []
lista = []
print("_"*38)
contador = 0
while True:
jogador["nome"] = str(input("Informe o nome do jogador: ")).strip()
jogador["partidas"] = int(input("Informe quantas partidas foram jogadas: "))
jogador["gols marcados"] = []
for c in range(0, jogador["partidas"]):
jogador["gols marcados"].append((int(input("Partida {}: ".format(c)))))
lista.append(jogador.copy())
lista_de_jogadores.append(lista[:])
lista.clear()
print("=-" * 20)
print("Ultimo jogador cadastrado:")
for k, v in jogador.items():
print(f"{k}: {v}")
jogador.clear()
print("=-"*20), print()
print(lista_de_jogadores), print()
print("=-" * 20)
continuar = str(input("Deseja continuar? [S/N]")).strip().upper()
while continuar not in "S N NAO SIM NÃO":
continuar = str(input("Informe um valor valido[S/N]: ")).upper().strip()
if continuar in "NAO N NÃO":
break
for cod, j in enumerate(lista_de_jogadores):
print("{} ---- {}".format(cod, j))
while True:
contador = int(input("Mostrar dados de qual jogador[999 PARA PARAR]? "))
if contador == 999:
break
print(f"-- LEVANTAMENTO DO JOGADOR {lista_de_jogadores[contador][0]['nome']}:")
while contador > (len(lista_de_jogadores)-1) or contador < 0:
contador = int(input("Informe um valor válido: "))
for p, g in enumerate(lista_de_jogadores[contador][0]['gols marcados']):
print("No jogo {:>3} fez {:>3} gols".format(p, g))
# print(lista_de_jogadores[contador][0]['gols marcados'])
| 40.641026
| 83
| 0.620189
| 212
| 1,585
| 4.556604
| 0.353774
| 0.057971
| 0.132505
| 0.074534
| 0.141822
| 0.076605
| 0.076605
| 0
| 0
| 0
| 0
| 0.018053
| 0.196215
| 1,585
| 39
| 84
| 40.641026
| 0.740188
| 0.0347
| 0
| 0.157895
| 0
| 0
| 0.284872
| 0.027505
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.263158
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f634bdb1c7a7c3154dda573b13beb16dfe4e289
| 8,568
|
py
|
Python
|
slide/models.py
|
AICAN-Research/learn-pathology
|
663f9c5f125857badf5bb41b6bfa2d9100578e2e
|
[
"MIT"
] | 2
|
2021-09-16T08:38:10.000Z
|
2021-09-16T10:46:53.000Z
|
slide/models.py
|
AICAN-Research/learn-pathology
|
663f9c5f125857badf5bb41b6bfa2d9100578e2e
|
[
"MIT"
] | 6
|
2021-09-20T10:56:21.000Z
|
2022-01-05T08:25:17.000Z
|
slide/models.py
|
AICAN-Research/learn-pathology
|
663f9c5f125857badf5bb41b6bfa2d9100578e2e
|
[
"MIT"
] | null | null | null |
import threading
from io import BytesIO
from django.db import models
import fast
import time
import numpy as np
from PIL import Image
from django.conf import settings
from slide.timing import Timer
from tag.models import Tag
class Slide(models.Model):
"""
Model for whole slide image
"""
name = models.CharField(max_length=255)
path = models.CharField(max_length=1024)
description = models.TextField()
pathology = models.BooleanField(default=False, help_text='Does the slide show pathology or not')
tags = models.ManyToManyField(Tag)
def __str__(self):
return self.name
def load_image(self):
if not hasattr(self, '_image'):
self.timers = {
'import': Timer('Importing WSI'),
'getPatchImage': Timer('getPatchImage function'),
'sharpening': Timer('Tile sharpening'),
'conversion': Timer('Tile FAST->PIL conversion'),
'resize': Timer('Tile resize'),
'jpeg': Timer('JPEG Conversion'),
}
self.timers['import'].start()
importer = fast.WholeSlideImageImporter.create(self.path)
try:
image = importer.runAndGetOutputData()
except:
raise RuntimeError('Failed to load slide image pyramid from ' + self.path)
self._image = image
self.timers['import'].stop()
# Count how many OSD levels we need: OSD requires that every level is downsampled by a factor of 2
# TODO This assumes that every level size of WSI in FAST is a multiple of 2
current_width = image.getFullWidth()
current_height = image.getFullHeight()
levels = image.getNrOfLevels()
smallest_width = image.getLevelWidth(levels-1)
smallest_height = image.getLevelHeight(levels-1)
osd_level = 0
tile_width = 256
tile_height = 256
if self.path.endswith('.vsi'): # TODO Hack for now
tile_width = image.getLevelTileWidth(0)
tile_height = image.getLevelTileHeight(0)
osd_tile_width = {0: tile_width}
osd_tile_height = {0: tile_height}
osd_to_fast_level_map = {0: 0}
print('Smallest width', smallest_width)
while abs(current_width - smallest_width/2) > 1:
print(osd_level, current_width, current_height)
current_width = int(current_width/2)
current_height = int(current_height/2)
if self.path.endswith('.vsi'): # TODO Hack for now
current_width += current_width % tile_width
current_height += current_height % tile_height
osd_level += 1
# If current_width is closer to previous FAST level width, than the next FAST level width, then use that.
if osd_to_fast_level_map[osd_level-1] < levels-1 and abs(current_width - image.getLevelWidth(osd_to_fast_level_map[osd_level-1]+1)) < 1:
osd_tile_width[osd_level] = tile_width
osd_tile_height[osd_level] = tile_height
osd_to_fast_level_map[osd_level] = osd_to_fast_level_map[osd_level - 1] + 1
print('Map to next: ', osd_to_fast_level_map[osd_level])
else:
osd_tile_width[osd_level] = osd_tile_width[osd_level-1]*2
osd_tile_height[osd_level] = osd_tile_height[osd_level-1]*2
osd_to_fast_level_map[osd_level] = osd_to_fast_level_map[osd_level - 1]
print('Map to previous', osd_to_fast_level_map[osd_level])
if current_width < 1024:
break
print('Total OSD levels', osd_level+1)
self._fast_levels = image.getNrOfLevels()
self._osd_levels = osd_level+1
self._width = image.getFullWidth()
self._height = image.getFullHeight()
self._tile_width = tile_width
self._tile_height = tile_height
self._osd_tile_width = osd_tile_width
self._osd_tile_height = osd_tile_height
self._osd_to_fast_level = osd_to_fast_level_map
@property
def image(self):
self.load_image()
return self._image
@property
def width(self):
self.load_image()
return self._width
@property
def height(self):
self.load_image()
return self._height
@property
def osd_levels(self):
self.load_image()
return self._osd_levels
@property
def tile_width(self):
self.load_image()
return self._tile_width
@property
def tile_height(self):
self.load_image()
return self._tile_height
def get_fast_level(self, osd_level):
"""
Get FAST image pyramid level from OSD level
"""
self.load_image()
return self._osd_to_fast_level[osd_level]
def get_osd_tile_size(self, osd_level):
self.load_image()
return self._osd_tile_width[osd_level], self._osd_tile_height[osd_level]
def get_fast_tile_size(self):
self.load_image()
return self._tile_width, self._tile_height
def get_osd_tile_as_buffer(self, osd_level, x, y):
fast_level = self.get_fast_level(osd_level)
width, height = self.get_osd_tile_size(osd_level)
access = self._image.getAccess(fast.ACCESS_READ)
tile_width = width
tile_height = height
if x*width + tile_width >= self._image.getLevelWidth(fast_level):
tile_width = self._image.getLevelWidth(fast_level) - x*width - 1
if y*height + tile_height >= self._image.getLevelHeight(fast_level):
tile_height = self._image.getLevelHeight(fast_level) - y*height - 1
self.timers['getPatchImage'].start()
image = access.getPatchAsImage(fast_level, x*width, y*height, tile_width, tile_height)
self.timers['getPatchImage'].stop()
self.timers['sharpening'].start()
sharpening = fast.ImageSharpening.create(1.5).connect(image)
image = sharpening.runAndGetOutputData()
self.timers['sharpening'].stop()
#tileAccess = image.getImageAccess(fast.ACCESS_READ)
#return Image.frombytes(size=(tile_width, tile_height), data=tileAccess.get(), mode='RGB')
# TODO get rid of asarray conversion, and read directly from bytes instead somehow
self.timers['conversion'].start()
image = np.asarray(image)
tile = Image.fromarray(image, mode='RGB')
self.timers['conversion'].stop()
        if tile.width != self._tile_width:  # TODO What about edge cases here.
self.timers['resize'].start()
tile.thumbnail((self._tile_height, self._tile_width), resample=Image.BICUBIC)
self.timers['resize'].stop()
# Convert PIL image to JPEG byte buffer and send back
self.timers['jpeg'].start()
buffer = BytesIO()
tile.save(buffer, 'jpeg', quality=75) # TODO Set quality
self.timers['jpeg'].stop()
if settings.PRINT_RUNTIME:
print('Runtimes')
print('==============================')
for timer in self.timers.values():
timer.print()
return buffer
class AnnotatedSlide(models.Model):
"""
Model for an annotated slide.
A slide can have multiple annotations.
A task uses an annotated slide.
"""
slide = models.ForeignKey(Slide, on_delete=models.CASCADE)
def get_html(self):
"""
Get HTML for all annotations
"""
html = ''
for pointer in Pointer.objects.filter(annotated_slide=self):
html += f'<div id="pointer-{pointer.id}" class="overlay"> {pointer.text} →</div>'
return html
def get_js(self):
"""
Get JS for all annotations
"""
js = ''
for pointer in Pointer.objects.filter(annotated_slide=self):
js += f"{{id: 'pointer-{pointer.id}', x: {pointer.position_x}, y: {pointer.position_y}, placement: 'RIGHT', checkResize: false }},"
return js
class Pointer(models.Model):
"""
A pointer on a slide consisting of a position (x,y) and a text
"""
annotated_slide = models.ForeignKey(AnnotatedSlide, on_delete=models.CASCADE)
position_x = models.FloatField()
position_y = models.FloatField()
text = models.CharField(max_length=256)
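# Hedged illustration (not part of the original module): a Pointer with
# id=3 and text="tumor" renders through AnnotatedSlide.get_html() as
#   <div id="pointer-3" class="overlay"> tumor →</div>
# and through get_js() as one entry of an overlay list consumed by the
# viewer (the placement/checkResize keys suggest OpenSeadragon-style
# overlays, which is an assumption here).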
| 38.25
| 152
| 0.615079
| 1,043
| 8,568
| 4.819751
| 0.198466
| 0.044559
| 0.021484
| 0.03342
| 0.256018
| 0.193754
| 0.163915
| 0.092699
| 0.059678
| 0.02029
| 0
| 0.009794
| 0.285014
| 8,568
| 223
| 153
| 38.421525
| 0.810806
| 0.108193
| 0
| 0.114458
| 0
| 0.012048
| 0.085109
| 0.015983
| 0
| 0
| 0
| 0.008969
| 0
| 1
| 0.084337
| false
| 0
| 0.090361
| 0.006024
| 0.331325
| 0.048193
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f641a14add400abd8e0ed7c75835db3c0d6d277
| 742
|
py
|
Python
|
xpresso/_utils/endpoint_dependant.py
|
adriangb/xpresso
|
43fcc360f7b19c00e0b78480f96390bcb4d28053
|
[
"MIT"
] | 75
|
2022-01-18T02:17:57.000Z
|
2022-03-24T02:30:04.000Z
|
xpresso/_utils/endpoint_dependant.py
|
adriangb/xpresso
|
43fcc360f7b19c00e0b78480f96390bcb4d28053
|
[
"MIT"
] | 73
|
2022-01-18T03:01:27.000Z
|
2022-03-27T16:41:38.000Z
|
xpresso/_utils/endpoint_dependant.py
|
adriangb/xpresso
|
43fcc360f7b19c00e0b78480f96390bcb4d28053
|
[
"MIT"
] | 3
|
2022-01-18T22:47:06.000Z
|
2022-01-25T02:03:53.000Z
|
from __future__ import annotations
import typing
from di.api.providers import CallableProvider, CoroutineProvider
from di.dependant import Dependant
from xpresso.dependencies._dependencies import Depends, DependsMarker
Endpoint = typing.Union[CallableProvider[typing.Any], CoroutineProvider[typing.Any]]
class EndpointDependant(Dependant[typing.Any]):
def __init__(
self,
endpoint: Endpoint,
sync_to_thread: bool = False,
) -> None:
super().__init__(
call=endpoint,
scope="endpoint",
use_cache=False,
wire=True,
sync_to_thread=sync_to_thread,
)
def get_default_marker(self) -> DependsMarker[None]:
return Depends()
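# Hedged usage sketch (not part of the original module): wrapping a plain
# coroutine endpoint; the dependant is wired with scope="endpoint" and
# caching disabled, exactly as __init__ above specifies.
#
#   async def read_root() -> dict:
#       return {"hello": "world"}
#
#   dependant = EndpointDependant(read_root)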
| 25.586207
| 84
| 0.677898
| 76
| 742
| 6.328947
| 0.526316
| 0.056133
| 0.074844
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.239892
| 742
| 28
| 85
| 26.5
| 0.852837
| 0
| 0
| 0
| 0
| 0
| 0.010782
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.238095
| 0.047619
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f64ad352e9b9691d83fdce5ed744e84a89c5372
| 13,330
|
py
|
Python
|
create_pretraining_data_lm.py
|
twilightdema/ALBERT_Thai
|
2c5612237a6843c4949dd941dbcd01ca91f82f2b
|
[
"Apache-2.0"
] | null | null | null |
create_pretraining_data_lm.py
|
twilightdema/ALBERT_Thai
|
2c5612237a6843c4949dd941dbcd01ca91f82f2b
|
[
"Apache-2.0"
] | 4
|
2020-09-25T22:35:29.000Z
|
2022-02-09T23:37:24.000Z
|
create_pretraining_data_lm.py
|
twilightdema/ALBERT_Thai
|
2c5612237a6843c4949dd941dbcd01ca91f82f2b
|
[
"Apache-2.0"
] | 1
|
2020-10-17T01:36:03.000Z
|
2020-10-17T01:36:03.000Z
|
# coding=utf-8
# Copyright 2018 The Google AI Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Create Language Model TF examples for ALBERT (Decoder-Only)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import tokenization
import numpy as np
import six
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("input_file", None,
"Input raw text file (or comma-separated list of files).")
flags.DEFINE_string(
"output_file", None,
"Output TF example file (or comma-separated list of files).")
flags.DEFINE_string(
"vocab_file", None,
"The vocabulary file that the ALBERT model was trained on.")
flags.DEFINE_string("spm_model_file", None,
"The model file for sentence piece tokenization.")
flags.DEFINE_string("input_file_mode", "r",
"The data format of the input file.")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_bool(
"do_whole_word_mask", True,
"Whether to use whole word masking rather than per-WordPiece masking.")
flags.DEFINE_integer("max_seq_length", 256, "Maximum sequence length.")
flags.DEFINE_integer("random_seed", 12345, "Random seed for data generation.")
flags.DEFINE_float(
"short_seq_prob", 0.1,
"Probability of creating sequences which are shorter than the "
"maximum length.")
class LMTrainingInstance(object):
"""A single training instance."""
def __init__(self, tokens, token_boundary):
self.tokens = tokens
self.token_boundary = token_boundary
def __str__(self):
s = ""
s += "tokens: %s\n" % (" ".join(
[tokenization.printable_text(x) for x in self.tokens]))
s += "token_boundary: %s\n" % (" ".join(
[str(x) for x in self.token_boundary]))
s += "\n"
return s
def __repr__(self):
return self.__str__()
def write_instance_to_example_files(instances, tokenizer, max_seq_length,
output_files):
"""Create TF example files from `LMTrainingInstance`s."""
writers = []
for output_file in output_files:
writers.append(tf.python_io.TFRecordWriter(output_file))
writer_index = 0
total_written = 0
for (inst_index, instance) in enumerate(instances):
print('Saving instance ' + str(inst_index))
input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)
# For LM, input mask is 2D Array with Transformer Decoder masking style.
# In order to save space, we will expand the data to 2D when feeding to model.
    # Here we just need to store the sequence token IDs so we can reconstruct the corresponding 2D map later.
input_mask = [1] * len(input_ids)
token_boundary = list(instance.token_boundary)
assert len(input_ids) <= max_seq_length
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
token_boundary.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(input_ids)
features["input_mask"] = create_int_feature(input_mask)
features["token_boundary"] = create_int_feature(token_boundary)
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writers[writer_index].write(tf_example.SerializeToString())
writer_index = (writer_index + 1) % len(writers)
total_written += 1
if inst_index < 20:
tf.logging.info("*** Example ***")
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in instance.tokens]))
for feature_name in features.keys():
feature = features[feature_name]
values = []
if feature.int64_list.value:
values = feature.int64_list.value
elif feature.float_list.value:
values = feature.float_list.value
tf.logging.info(
"%s: %s" % (feature_name, " ".join([str(x) for x in values])))
for writer in writers:
writer.close()
tf.logging.info("Wrote %d total instances", total_written)
def create_int_feature(values):
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return feature
def create_float_feature(values):
feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
return feature
def create_training_instances(input_files, tokenizer, max_seq_length,
short_seq_prob, rng):
"""Create `TrainingInstance`s from raw text."""
all_documents = [[]]
# Input file format:
# (1) One sentence per line. These should ideally be actual sentences, not
# entire paragraphs or arbitrary spans of text. (Because we use the
# sentence boundaries for the "next sentence prediction" task).
# (2) Blank lines between documents. Document boundaries are needed so
# that the "next sentence prediction" task doesn't span between documents.
for input_file in input_files:
line_num = 0
with tf.gfile.GFile(input_file, FLAGS.input_file_mode) as reader:
while True:
print('Reading line ' + str(line_num))
line = reader.readline()
if not FLAGS.spm_model_file:
line = tokenization.convert_to_unicode(line)
if not line:
break
if FLAGS.spm_model_file:
line = tokenization.preprocess_text(line, lower=FLAGS.do_lower_case)
else:
line = line.strip()
# Empty lines are used as document delimiters
if not line:
all_documents.append([])
tokens = tokenizer.tokenize(line)
if tokens:
all_documents[-1].append(tokens)
line_num = line_num + 1
# Remove empty documents
all_documents = [x for x in all_documents if x]
rng.shuffle(all_documents)
print('all_documents length = ' + str(len(all_documents)))
vocab_words = list(tokenizer.vocab.keys())
instances = []
for document_index in range(len(all_documents)):
print('Creating instance for doc ' + str(document_index))
instances.extend(
create_instances_from_document(
all_documents, document_index, max_seq_length, short_seq_prob,
vocab_words, rng))
rng.shuffle(instances)
return instances
def create_instances_from_document(
all_documents, document_index, max_seq_length, short_seq_prob,
vocab_words, rng):
"""Creates `TrainingInstance`s for a single document."""
document = all_documents[document_index]
# Account for [CLS], [SEP]
  # Note that in LM, [CLS] is at the end of the string (because of the attention constraint)
max_num_tokens = max_seq_length - 2
# We *usually* want to fill up the entire sequence since we are padding
# to `max_seq_length` anyways, so short sequences are generally wasted
# computation. However, we *sometimes*
# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
# sequences to minimize the mismatch between pre-training and fine-tuning.
# The `target_seq_length` is just a rough target however, whereas
# `max_seq_length` is a hard limit.
target_seq_length = max_num_tokens
if rng.random() < short_seq_prob:
target_seq_length = rng.randint(2, max_num_tokens)
# We DON'T just concatenate all of the tokens from a document into a long
# sequence and choose an arbitrary split point because this would make the
# next sentence prediction task too easy. Instead, we split the input into
# segments "A" and "B" based on the actual "sentences" provided by the user
# input.
instances = []
current_chunk = []
current_length = 0
i = 0
while i < len(document):
segment = document[i]
current_chunk.append(segment)
current_length += len(segment)
if i == len(document) - 1 or current_length >= target_seq_length:
if current_chunk:
# In LM, we only have tokens_a
tokens_a = []
for j in range(len(current_chunk)):
tokens_a.extend(current_chunk[j])
truncate_seq(tokens_a, max_num_tokens, rng)
assert len(tokens_a) >= 1
tokens = []
for token in tokens_a:
tokens.append(token)
tokens.append("[SEP]")
tokens.append("[CLS]")
(tokens, token_boundary) = create_lm_predictions(
tokens, vocab_words, rng)
instance = LMTrainingInstance(
tokens=tokens,
token_boundary=token_boundary,
)
instances.append(instance)
current_chunk = []
current_length = 0
i += 1
return instances
def _is_start_piece_sp(piece):
"""Check if the current word piece is the starting piece (sentence piece)."""
special_pieces = set(list('!"#$%&\"()*+,-./:;?@[\\]^_`{|}~'))
special_pieces.add(u"€".encode("utf-8"))
special_pieces.add(u"£".encode("utf-8"))
# Note(mingdachen):
# For foreign characters, we always treat them as a whole piece.
english_chars = set(list("abcdefghijklmnopqrstuvwxyz"))
if (six.ensure_str(piece).startswith("▁") or
six.ensure_str(piece).startswith("<") or piece in special_pieces or
not all([str(i).lower() in english_chars.union(special_pieces)
for i in piece])):
return True
else:
return False
def _is_start_piece_bert(piece):
"""Check if the current word piece is the starting piece (BERT)."""
# When a word has been split into
# WordPieces, the first token does not have any marker and any subsequence
# tokens are prefixed with ##. So whenever we see the ## token, we
# append it to the previous set of word indexes.
return not six.ensure_str(piece).startswith("##")
def is_start_piece(piece):
if FLAGS.spm_model_file:
return _is_start_piece_sp(piece)
else:
return _is_start_piece_bert(piece)
def create_lm_predictions(tokens, vocab_words, rng):
"""Creates the predictions for the masked LM objective."""
# Note(mingdachen): We create a list for recording if the piece is
# the starting piece of current token, where 1 means true, so that
# on-the-fly whole word masking is possible.
token_boundary = [0] * len(tokens)
for (i, token) in enumerate(tokens):
if token == "[CLS]" or token == "[SEP]":
token_boundary[i] = 1
continue
# Whole Word Masking means that if we mask all of the wordpieces
# corresponding to an original word.
#
# Note that Whole Word Masking does *not* change the training code
# at all -- we still predict each WordPiece independently, softmaxed
# over the entire vocabulary.
if (FLAGS.do_whole_word_mask and not is_start_piece(token)):
pass
else:
if is_start_piece(token):
token_boundary[i] = 1
output_tokens = list(tokens)
return (output_tokens, token_boundary)
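# Hedged illustration (not part of the original file): with WordPiece
# tokenization (no spm_model_file) and whole-word masking enabled, the
# boundary vector marks word starts, e.g. tokens
# ["play", "##ing", "[SEP]", "[CLS]"] yield token_boundary == [1, 0, 1, 1].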
def truncate_seq(tokens_a, max_num_tokens, rng):
"""Truncates a sequences to a maximum sequence length."""
while True:
total_length = len(tokens_a)
if total_length <= max_num_tokens:
break
trunc_tokens = tokens_a
assert len(trunc_tokens) >= 1
# We want to sometimes truncate from the front and sometimes from the
# back to add more randomness and avoid biases.
if rng.random() < 0.5:
del trunc_tokens[0]
else:
trunc_tokens.pop()
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
print('Create tokenizer')
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case,
spm_model_file=FLAGS.spm_model_file)
input_files = []
for input_pattern in FLAGS.input_file.split(","):
input_files.extend(tf.gfile.Glob(input_pattern))
print('Start reading input files')
tf.logging.info("*** Reading from input files ***")
for input_file in input_files:
tf.logging.info(" %s", input_file)
rng = random.Random(FLAGS.random_seed)
instances = create_training_instances(
input_files, tokenizer, FLAGS.max_seq_length,
FLAGS.short_seq_prob,
rng)
print('Number of instance = ' + str(len(instances)))
tf.logging.info("number of instances: %i", len(instances))
output_files = FLAGS.output_file.split(",")
tf.logging.info("*** Writing to output files ***")
for output_file in output_files:
tf.logging.info(" %s", output_file)
print('Writing output files')
write_instance_to_example_files(instances, tokenizer, FLAGS.max_seq_length, output_files)
if __name__ == "__main__":
flags.mark_flag_as_required("input_file")
flags.mark_flag_as_required("output_file")
flags.mark_flag_as_required("vocab_file")
tf.app.run()
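# Hedged invocation sketch using only the flags defined above:
#   python create_pretraining_data_lm.py \
#     --input_file=corpus.txt --output_file=train.tfrecord \
#     --vocab_file=vocab.txt --max_seq_length=256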
| 33.076923
| 117
| 0.692798
| 1,866
| 13,330
| 4.755627
| 0.229904
| 0.018256
| 0.018932
| 0.003944
| 0.208249
| 0.146833
| 0.091053
| 0.057471
| 0.04192
| 0.04192
| 0
| 0.006722
| 0.207577
| 13,330
| 402
| 118
| 33.159204
| 0.833097
| 0.25994
| 0
| 0.14
| 0
| 0
| 0.122476
| 0.005842
| 0
| 0
| 0
| 0
| 0.02
| 1
| 0.056
| false
| 0.004
| 0.044
| 0.004
| 0.152
| 0.044
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f6706c7305503eebcfb4dc0e941eec4fd99c3fd
| 3,260
|
py
|
Python
|
src/libcore/tests/test_qmc.py
|
tizian/layer-laboratory
|
008cc94b76127e9eb74227fcd3d0145da8ddec30
|
[
"CNRI-Python"
] | 7
|
2020-07-24T03:19:59.000Z
|
2022-03-30T10:56:12.000Z
|
src/libcore/tests/test_qmc.py
|
tizian/layer-laboratory
|
008cc94b76127e9eb74227fcd3d0145da8ddec30
|
[
"CNRI-Python"
] | 1
|
2021-04-07T22:30:23.000Z
|
2021-04-08T00:55:36.000Z
|
src/libcore/tests/test_qmc.py
|
tizian/layer-laboratory
|
008cc94b76127e9eb74227fcd3d0145da8ddec30
|
[
"CNRI-Python"
] | 2
|
2020-06-08T08:25:09.000Z
|
2021-04-05T22:13:08.000Z
|
import enoki as ek
import pytest
import mitsuba
def r_inv(divisor, index):
factor = 1
value = 0
recip = 1.0 / divisor
while index != 0:
next_val = index // divisor
factor *= recip
value = value * divisor + index - next_val * divisor
index = next_val
return value * factor
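# Example: r_inv(2, 3) == 0.75 -- 3 is 0b11, and mirroring the bits across
# the radix point gives 0.11 in base 2, i.e. 1/2 + 1/4.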
def gen_primes():
# http://code.activestate.com/recipes/117119/
D = {}
q = 2
while True:
if q not in D:
yield q
D[q * q] = [q]
else:
for p in D[q]:
D.setdefault(p + q, []).append(p)
del D[q]
q += 1
def test01_radical_inverse(variant_scalar_rgb):
from mitsuba.core import RadicalInverse
v = RadicalInverse()
assert(v.eval(0, 0) == 0)
assert(v.eval(0, 1) == 0.5)
assert(v.eval(0, 2) == 0.25)
assert(v.eval(0, 3) == 0.75)
for index, prime in enumerate(gen_primes()):
if index >= 1024:
break
for i in range(10):
assert ek.abs(r_inv(prime, i) - v.eval(index, i)) < 1e-7
@pytest.mark.skip(reason="RadicalInverse has no vectorized bindings")
def test02_radical_inverse_vectorized(variant_scalar_rgb):
from mitsuba.core import RadicalInverse
v = RadicalInverse()
for index, prime in enumerate(gen_primes()):
if index >= 1024:
break
result = v.eval(index, ek.arange(10, dtype=ek.uint64))
for i in range(len(result)):
assert ek.abs(r_inv(prime, i) - result[i]) < 1e-7
def test03_faure_permutations(variant_scalar_rgb):
from mitsuba.core import RadicalInverse
p = RadicalInverse()
assert (p.permutation(0) == [0, 1]).all()
assert (p.permutation(1) == [0, 1, 2]).all()
assert (p.permutation(2) == [0, 3, 2, 1, 4]).all()
assert (p.permutation(3) == [0, 2, 5, 3, 1, 4, 6]).all()
def test04_scrambled_radical_inverse(variant_scalar_rgb):
from mitsuba.core import RadicalInverse
from mitsuba.core import math
p = RadicalInverse(10, -1)
assert (p.permutation(0) == [0, 1]).all()
values = [
0.0, 0.5, 0.25, 0.75, 0.125, 0.625, 0.375, 0.875, 0.0625, 0.5625,
0.3125, 0.8125, 0.1875, 0.6875, 0.4375
]
for i in range(len(values)):
assert(p.eval_scrambled(0, i) == values[i])
p = RadicalInverse(10, 3)
assert (p.permutation(0) == [1, 0]).all()
values_scrambled = [
math.OneMinusEpsilon,
0.5, 0.75, 0.25, 0.875, 0.375, 0.625, 0.125, 0.9375, 0.4375,
0.6875, 0.1875, 0.8125, 0.3125, 0.5625
]
for i in range(len(values_scrambled)):
assert(p.eval_scrambled(0, i) == values_scrambled[i])
@pytest.mark.skip(reason="RadicalInverse has no vectorized bindings")
def test05_scrambled_radical_inverse_vectorized(variant_scalar_rgb):
from mitsuba.core import RadicalInverse
try:
from mitsuba.packet_rgb.core.qmc import RadicalInverseP
except ImportError:
pytest.skip("packet_rgb mode not enabled")
v = RadicalInverse()
v_p = RadicalInverseP()
for index in range(1024):
result = v_p.eval_scrambled(index, ek.arange(10, dtype=ek.uint64))
for i in range(len(result)):
assert ek.abs(v.eval_scrambled(index, i) - result[i]) < 1e-7
| 28.347826
| 74
| 0.60184
| 481
| 3,260
| 3.989605
| 0.234927
| 0.029182
| 0.046899
| 0.065659
| 0.4716
| 0.460657
| 0.439812
| 0.369463
| 0.342887
| 0.342887
| 0
| 0.092646
| 0.261656
| 3,260
| 114
| 75
| 28.596491
| 0.704612
| 0.01319
| 0
| 0.252874
| 0
| 0
| 0.033904
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 1
| 0.08046
| false
| 0
| 0.126437
| 0
| 0.218391
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1f69bfc2c5f28e5c08c2ff64bb83de310333e32a
| 14,656
|
py
|
Python
|
train.py
|
ColinWine/Multi-modal-Multi-label-Facial-Action-Unit-Detection-with-Transformer
|
93871bed9078d5bf6b4bb37407c9dce87c569b55
|
[
"MIT"
] | null | null | null |
train.py
|
ColinWine/Multi-modal-Multi-label-Facial-Action-Unit-Detection-with-Transformer
|
93871bed9078d5bf6b4bb37407c9dce87c569b55
|
[
"MIT"
] | null | null | null |
train.py
|
ColinWine/Multi-modal-Multi-label-Facial-Action-Unit-Detection-with-Transformer
|
93871bed9078d5bf6b4bb37407c9dce87c569b55
|
[
"MIT"
] | null | null | null |
import warnings
import torch
from torch.utils.data.dataloader import DataLoader
from torch.optim import lr_scheduler
import numpy as np
from models import *
from dataloader import Aff2CompDataset, SubsetSequentialSampler, SubsetRandomSampler, Prefetcher
from tqdm import tqdm
import os
import time
from sklearn.metrics import f1_score, accuracy_score
from metrics import AccF1Metric, CCCMetric, MultiLabelAccF1
from collections import defaultdict
import opts
from utils import setup_seed, save_checkpoint, AverageMeter
import random
import logging
import matplotlib.pyplot as plt
warnings.filterwarnings("ignore")
class RecorderMeter(object):
"""Computes and stores the minimum loss value and its epoch index"""
def __init__(self, total_epoch):
self.reset(total_epoch)
def reset(self, total_epoch):
self.total_epoch = total_epoch
self.current_epoch = 0
self.epoch_losses = np.zeros((self.total_epoch, 2), dtype=np.float32) # [epoch, train/val]
self.epoch_accuracy = np.zeros((self.total_epoch, 2), dtype=np.float32) # [epoch, train/val]
def update(self, idx, train_loss, train_acc, val_loss, val_acc):
self.epoch_losses[idx, 0] = train_loss * 50
self.epoch_losses[idx, 1] = val_loss * 50
self.epoch_accuracy[idx, 0] = train_acc
self.epoch_accuracy[idx, 1] = val_acc
self.current_epoch = idx + 1
def plot_curve(self, save_path):
title = 'the accuracy/loss curve of train/val'
dpi = 80
width, height = 1600, 800
legend_fontsize = 10
figsize = width / float(dpi), height / float(dpi)
fig = plt.figure(figsize=figsize)
x_axis = np.array([i for i in range(self.total_epoch)]) # epochs
y_axis = np.zeros(self.total_epoch)
plt.xlim(0, self.total_epoch)
plt.ylim(0, 100)
interval_y = 5
interval_x = 1
plt.xticks(np.arange(0, self.total_epoch + interval_x, interval_x))
plt.yticks(np.arange(0, 100 + interval_y, interval_y))
plt.grid()
plt.title(title, fontsize=20)
plt.xlabel('the training epoch', fontsize=16)
plt.ylabel('accuracy', fontsize=16)
y_axis[:] = self.epoch_accuracy[:, 0]
plt.plot(x_axis, y_axis, color='g', linestyle='-', label='train-accuracy', lw=2)
plt.legend(loc=4, fontsize=legend_fontsize)
y_axis[:] = self.epoch_accuracy[:, 1]
plt.plot(x_axis, y_axis, color='y', linestyle='-', label='valid-accuracy', lw=2)
plt.legend(loc=4, fontsize=legend_fontsize)
y_axis[:] = self.epoch_losses[:, 0]
plt.plot(x_axis, y_axis, color='g', linestyle=':', label='train-loss-x50', lw=2)
plt.legend(loc=4, fontsize=legend_fontsize)
y_axis[:] = self.epoch_losses[:, 1]
plt.plot(x_axis, y_axis, color='y', linestyle=':', label='valid-loss-x50', lw=2)
plt.legend(loc=4, fontsize=legend_fontsize)
if save_path is not None:
fig.savefig(save_path, dpi=dpi, bbox_inches='tight')
# print('Curve was saved')
plt.close(fig)
class EarlyStopper(object):
def __init__(self, num_trials, save_path):
self.num_trials = num_trials
self.trial_counter = 0
self.best_accuracy = 0
self.save_path = save_path
os.makedirs(os.path.dirname(self.save_path), exist_ok=True)
def is_continuable(self, model, accuracy):
if accuracy > self.best_accuracy:
self.best_accuracy = accuracy
self.trial_counter = 0
torch.save(model.state_dict(), self.save_path)
return True
elif self.trial_counter + 1 < self.num_trials:
self.trial_counter += 1
return True
else:
return False
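# Hedged usage sketch (not part of the original file):
#   stopper = EarlyStopper(num_trials=5, save_path='checkpoints/best.pth')
#   # inside the epoch loop:
#   #   if not stopper.is_continuable(model, val_accuracy):
#   #       break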
@torch.no_grad()
def evaluate(model, loader, loader_iter, device, num_step=1000):
model.eval()
bar = tqdm(range(int(num_step)), desc=f'Validation, {model.task}', colour='green', position=0, leave=False)
metric_ex = AccF1Metric(ignore_index=7)
metric_va = CCCMetric(ignore_index=-5.0)
metric_au = MultiLabelAccF1(ignore_index=-1)
total_loss = 0
scores = defaultdict()
for step in bar:
t1 = time.time()
try:
data = next(loader_iter)
except StopIteration as e:
print(e)
loader_iter = iter(loader)
break
t2 = time.time()
data_time = t2 - t1
label_ex = data['EX'].long().to(device)
label_ex[label_ex == -1] = 7
labels = {
'VA': data['VA'].float().to(device),
'AU': data['AU'].float().to(device),
'EX': label_ex,
}
x = {}
for modality in data:
x[modality] = data[modality].to(device)
        result = model(x)  # batch x 22 output; AU logits in [:12], EX logits in [12:19], VA in [19:21]
        logits_ex = result[:, 12:19]
        logits_au = result[:, :12]
        logits_va = result[:, 19:21]  # passed through tanh below to map into [-1, 1]
if model.task.lower() == 'ex':
loss = model.get_ex_loss(result, labels['EX'])
elif model.task.lower() == 'au':
loss = model.get_au_loss(result, labels['AU'])
elif model.task.lower() == 'va':
loss = model.get_va_loss(result, labels['VA'])
else:
losses = model.get_mt_loss(result, labels)
loss = losses[0] + losses[1] + losses[2]
total_loss += loss.item()
pred = torch.argmax(logits_ex, dim=1).detach().cpu().numpy().reshape(-1)
label = label_ex.detach().cpu().numpy().reshape(-1)
metric_ex.update(pred, label)
metric_va.update(y_pred=torch.tanh(logits_va).detach().cpu().numpy(), y_true=labels['VA'].detach().cpu().numpy())
metric_au.update(y_pred=np.round(torch.sigmoid(logits_au).detach().cpu().numpy()), y_true=labels['AU'].detach().cpu().numpy())
acc_ex = accuracy_score(y_true=label, y_pred=pred)
bar.set_postfix(data_fetch_time=data_time, batch_loss=loss.item(), avg_loss=total_loss / (step + 1), acc=acc_ex)
    acc_ex, f1_ex = metric_ex.get()
    acc_au, f1_au = metric_au.get()
    ccc = metric_va.get()  # computed once: (ccc_valence, ccc_arousal, mean)
    scores['EX'] = {'EX:acc': acc_ex, 'f1': f1_ex, 'score': 0.67 * f1_ex + 0.33 * acc_ex}
    scores['AU'] = {'AU:acc': acc_au, 'f1': f1_au, 'score': 0.5 * f1_au + 0.5 * acc_au}
    scores['VA'] = {'VA:ccc_v': ccc[0], 'ccc_a': ccc[1], 'score': ccc[2]}
model.train()
metric_va.clear()
metric_au.clear()
metric_ex.clear()
return scores, loader_iter
def train(args, model, dataset, optimizer, epochs, device):
early_stopper = EarlyStopper(num_trials=args['early_stop_step'], save_path=f'{args["checkpoint_path"]}/best.pth')
downsample_rate = args.get('downsample_rate')
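    # Binary mask keeping ~1/downsample_rate of the samples; it is reshuffled
    # every epoch so each epoch trains on a different random subset.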
downsample = np.zeros(len(dataset), dtype=int)
downsample[np.arange(0, len(dataset) - 1, downsample_rate)] = 1
    start_epoch = 0
    if args['resume']:
        start_epoch = args['start_epoch']
    learning_rate = args['learning_rate']  # always needed below for the step decay
    for epoch in range(start_epoch, epochs):
        # Step-decay the LR at epochs 30 and 60; the new value must be written
        # into the optimizer's param groups for the decay to take effect.
        if epoch in (30, 60):
            learning_rate = learning_rate * 0.1
            for param_group in optimizer.param_groups:
                param_group['lr'] = learning_rate
random.shuffle(downsample)
dataset.set_aug(True)
train_sampler = SubsetSequentialSampler(np.nonzero(dataset.train_ids*downsample)[0], shuffle=True)
train_loader = DataLoader(dataset, batch_size=args['batch_size'], sampler=train_sampler, num_workers=0,
pin_memory=False,
drop_last=True)
print('Training set length: ' + str(sum(dataset.train_ids*downsample)))
bar = tqdm(train_loader, desc=f'Training {model.task}, Epoch:{epoch}', colour='blue', position=0, leave=True)
logging.info(f'Training {model.task}, Epoch:{epoch}')
t1 = time.time()
        total_loss, ex_loss_record, au_loss_record, va_loss_record = AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter()
prefetcher = Prefetcher(bar)
data = prefetcher.next()
step = -1
while data is not None:
step += 1
t2 = time.time()
data_time = t2 - t1
optimizer.zero_grad()
label_ex = data['EX'].long().to(device)
label_ex[label_ex == -1] = 7
labels = {
'VA': data['VA'].float().to(device),
'AU': data['AU'].float().to(device),
'EX': label_ex,
}
# ids = data['Index'].long()
x = {}
for modality in data:
x[modality] = data[modality].to(device)
#x['clip'] = data['clip'].to(device)
#x['audio_features'] = data['audio_features'].to(device)
            result = model(x)  # batch x 22 output; AU logits in [:12], EX logits in [12:19], VA in [19:21]
if model.task.lower() == 'ex':
loss = model.get_ex_loss(result, labels['EX'])
elif model.task.lower() == 'au':
loss = model.get_au_loss(result, labels['AU'])
elif model.task.lower() == 'va':
loss = model.get_va_loss(result, labels['VA'])
else:
                losses = model.get_mt_loss(result, labels, normalize=False)
                loss = 3 * losses[0] + losses[1] + losses[2]
ex_loss_record.update(losses[0].item())
au_loss_record.update(losses[1].item())
va_loss_record.update(losses[2].item())
loss.backward()
optimizer.step()
total_loss.update(loss.item())
if model.task.lower() == 'all':
bar.set_postfix(total = total_loss.avg, ex=ex_loss_record.avg, au=au_loss_record.avg, va=va_loss_record.avg)
else:
bar.set_postfix(data_fetch_time=data_time, batch_loss=loss.item(), avg_loss=total_loss.avg)
t1 = time.time()
data = prefetcher.next()
logging.info(f'Total Loss,{total_loss.avg}, Ex:{ex_loss_record.avg}, AU:{au_loss_record.avg}, VA:{va_loss_record.avg}')
save_checkpoint(state=model.state_dict(), filepath=args["checkpoint_path"], filename='latest.pth')
#if step % eval_step == 0 and step != 0:
dataset.set_aug(False)
val_sampler = SubsetSequentialSampler(np.nonzero(dataset.val_ids*downsample)[0], shuffle=True)
val_loader = DataLoader(dataset, batch_size=args['batch_size'] * 4, sampler=val_sampler, num_workers=0,
pin_memory=False,
drop_last=True)
print('Validation set length: ' + str(sum(dataset.val_ids*downsample)))
val_loader_iter = iter(val_loader)
scores, val_loader_iter = evaluate(model, val_loader, val_loader_iter, device,
num_step=int(sum(dataset.val_ids*downsample)/(args['batch_size']*4)))
score_str = ''
        if model.task.lower() == 'all':
total_score = 0
for task in ['EX','AU','VA']:
score_dict = scores[task]
for k, v in score_dict.items():
score_str += f'{k}:{v:.3},'
total_score = total_score + score_dict["score"]
else:
            score_dict = scores[model.task.upper()]
for k, v in score_dict.items():
score_str += f'{k}:{v:.3}, '
total_score = score_dict["score"]
print(f'Training,{args["task"]}, Epoch:{epoch}, {score_str}')
logging.info(f'Training,{args["task"]}, Epoch:{epoch}, {score_str}')
if not early_stopper.is_continuable(model, total_score):
print(f'validation: best score: {early_stopper.best_accuracy}')
logging.info(f'validation: best score: {early_stopper.best_accuracy}')
break
def main(args):
setup_seed(args.get('seed'))
task = args.get('task')
print(f'Task: {task}')
    print('Model:', args['model_name'])
    print('Modality:', args['modality'])
    print('Clip size:', args['n_frames'], args['image_size'])
    log_file_name = args['model_name'] + '_' + args['modality'] + '_log.txt'
    logging.basicConfig(filename=os.path.join(args['exp_dir'], log_file_name), level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    # model: select the architecture by name; every constructor shares the
    # (modality, task) signature except the ResNet fallback.
    model_registry = {
        'avformer': TwoStreamAuralVisualFormer,
        'vformer': VisualFormer,
        'vggformer': VGGVisualFormer,
        'emonet': ImageEmoNetModel,
        'tformer': SpatialTemporalFormer,
        'sformer': SpatialFormer,
        'dsformer': DualSpatialFormer,
        'i3d': VisualI3DModel,
        'mc3d': VisualMC3DModel,
        'van': SpatialVAN,
        'audio': Audio_only,
    }
    model_cls = model_registry.get(args['model_name'])
    model = model_cls(modality=args['modality'], task=task) if model_cls is not None else ImageResNetModel(task)
modes = model.modes
model = model.to(torch.cuda.current_device())
args['checkpoint_path'] = os.path.join(args['exp_dir'], 'pretrain')
if args['resume'] and os.path.exists(f'{args["checkpoint_path"]}/latest.pth'):
print('Loading weight from:{}'.format(f'{args["checkpoint_path"]}/latest.pth'))
pretrained_dict = torch.load(f'{args["checkpoint_path"]}/latest.pth')
        model.load_state_dict(pretrained_dict, strict=False)
model.train()
# load dataset (first time this takes longer)
dataset = Aff2CompDataset(args)
dataset.set_modes(modes)
optimizer = torch.optim.Adam(params=model.parameters(), lr=args['learning_rate'], weight_decay=args['weight_decay'])
#train(args, model, train_loader, val_loader, optimizer, epochs=args['epochs'], device=torch.cuda.current_device())
train(args, model, dataset, optimizer, epochs=args['epochs'], device=torch.cuda.current_device())
if __name__ == '__main__':
opt = opts.parse_opt()
torch.cuda.set_device(opt.gpu_id)
opt = vars(opt)
main(opt)
[per-file statistics and quality-signal columns omitted]

hexsha: 1f6f98de6468e928dedff399ac6db135e5b7f2ec | size: 18,002 | lang: Python | path: src/agent.py | repo: Lukeeeeee/DataCenterJobSchedulingSolution @ 9c62c0039b2dd9e0a1ca5474dc46c8be98a972b3 | license: MIT
import numpy as np
import tensorflow as tf
import tensorlayer as tl
import datetime
from log import LOG_PATH
import os
import src.visualization as vis
from src.config import Config as con
import tensorflow.contrib as tfcontrib
server_count = con.server_count
server_state_dim = con.server_state_dim
total_server_state_dim = con.total_server_state_dim
server_feature_dim = con.server_feature_dim
job_state_dim = con.job_state_dim
dc_state_dim = con.dc_state_dim
action_dim = con.action_dim
# NET SIZE
server_feature_layer1_size = con.server_feature_layer1_size
q_net_layer1_size = con.q_net_layer1_size
q_net_layer2_size = con.q_net_layer2_size
# TRAIN PARAMETERS
gamma = con.gamma
learning_rate = con.learning_rate
batch_size = con.batch_size
epsilon = con.epsilon
update_target_q_every_iter = con.update_target_q_every_iter
ti = datetime.datetime.now()
log_dir = (LOG_PATH + '/' + str(ti.month) + '-' + str(ti.day) + '-' + str(ti.hour) + '-' + str(ti.minute) + '-' + str(
ti.second) + '/')
if not os.path.exists(log_dir):
    os.mkdir(log_dir)
def variable_summaries(var, name):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope(name):
tf.summary.scalar('value', var)
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
# tf.summary.scalar('max', tf.reduce_max(var))
# tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
class Agent(object):
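    """DQN-style scheduler agent: an online Q-network plus a periodically
    synchronized target network, with a shared per-server feature extractor."""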
def __init__(self):
self.sess = tf.InteractiveSession()
self.server_state_input = tf.placeholder(tf.float32, shape=[None, server_count, server_state_dim])
# self.server_state_input_flatten = contrib.layers.flatten(inputs=self.server_state_input)
self.job_state_input = tf.placeholder(tf.float32, shape=[None, job_state_dim])
self.dc_state_input = tf.placeholder(tf.float32, shape=[None, dc_state_dim])
self.action_input = tf.placeholder(tf.uint8, shape=[None])
self.reward_input = tf.placeholder(tf.float32, shape=[None, server_count])
self.action_is_valid = tf.placeholder(tf.float32, shape=[None, server_count])
self.target_q_off_by_action_input = tf.placeholder(tf.float32, shape=[None, server_count])
self.action_one_hot = tf.one_hot(indices=self.action_input, depth=server_count)
self.q_net = self.create_q_network()
self.q = self.q_net.outputs
self.target_q_net = self.create_q_network(prefix='TARGET_')
self.target_q = self.target_q_net.outputs
self.update_target_q_op = self.create_target_update_op_list()
# Define greedy policy to choose a valid action
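        # The +1000 offset added to valid actions is an additive mask: after the
        # shift, argmax can only select an action whose validity flag is 1.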
temp = tf.multiply(x=self.action_is_valid,
y=tf.constant(1000.0, shape=[batch_size, server_count]))
self.temp = tf.add(x=self.q, y=temp)
self.greedy_policy_action = tf.argmax(self.temp, axis=1)
# Define op for q and target q with corresponding action
self.q_off_by_action = tf.multiply(self.q, tf.cast(self.action_one_hot, tf.float32))
# self.q_off_by_action = self.q
self.target_q_off_by_action = tf.multiply(self.reward_input + gamma * self.q,
tf.cast(self.action_one_hot, tf.float32))
# self.target_q_off_by_action = self.reward_input + gamma * self.target_q,
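        # NOTE: the target above bootstraps from the *online* network's Q-values;
        # the commented line shows the target-network variant that
        # update_target_q_op keeps in sync.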
self.loss, self.optimizer, self.optimize_op, self.compute_gradients_op = self.create_training_method(
target_q_off_by_action=self.target_q_off_by_action_input)
self.gradients = self.optimizer.compute_gradients(loss=self.loss)
# Some op for test and visualization
self.max_q = tf.reduce_max(self.q, axis=1)
self.action = tf.argmax(self.q, axis=1)
self.mean_max_q = tf.reduce_mean(self.max_q)
variable_summaries(self.mean_max_q, 'mean_q')
# variable_summaries(self.compute_gradients_op, 'gradients')
# variable_summaries(self.loss, 'loss')
self.merged_summary = tf.summary.merge_all()
self.file_writer = tf.summary.FileWriter(log_dir, self.sess.graph)
# Init op
tl.layers.initialize_global_variables(sess=self.sess)
self.q_net.print_params()
self.q_net.print_layers()
# def eplison_greedy_action_selection(self):
# temp = tf.multiply(x=self.action_is_valid,
# y=tf.constant(1000.0, shape=[batch_size, server_count]))
# self.temp = tf.add(x=self.q, y=temp)
# unpacked_q = tf.unstack(self.temp, axis=0)
#
# greedy_policy_action_list = []
#
# for tensor in unpacked_q:
# if np.random.uniform(0, 1.0) < epsilon:
# greedy_policy_action_list.append(tf.argmax(tensor, axis=1))
# else:
# k = np.random.randint(0, server_count)
# greedy_policy_action_list.append(k)
# self.greedy_policy_action = tf.argmax(self.temp, axis=1)
def define_server_feature_extraction_net(self, input, reuse=False, prefix=''):
with tf.variable_scope("SEVER_STATE", reuse=reuse):
tl.layers.set_name_reuse(reuse)
server_feature_extraction_net = tl.layers.InputLayer(inputs=input,
name=prefix + 'SERVER_STATE_INPUT')
server_feature_extraction_net = tl.layers.DenseLayer(layer=server_feature_extraction_net,
n_units=server_feature_layer1_size,
act=tf.nn.leaky_relu,
name=prefix + 'SERVER_STATE_LAYER_1')
server_feature_extraction_net = tl.layers.DenseLayer(layer=server_feature_extraction_net,
n_units=server_feature_dim,
name=prefix + 'SERVER_STATE_LAYER_2')
return server_feature_extraction_net
def create_q_network(self, prefix=''):
server_state_tensor_list = tf.split(self.server_state_input, server_count, axis=1)
server_feature_tensor_layer_list = []
for i in range(server_count):
tensor = tf.reshape(server_state_tensor_list[i], shape=(-1, server_state_dim))
if i == 0:
reuse = False
else:
reuse = True
server_feature_tensor_layer_list.append(self.define_server_feature_extraction_net(input=tensor,
reuse=reuse,
prefix=prefix))
job_input_layer = tl.layers.InputLayer(inputs=self.job_state_input,
name=prefix + 'JOB_STATE_INPUT')
dc_input_layer = tl.layers.InputLayer(inputs=self.dc_state_input,
name=prefix + 'DC_STATE_INPUT')
all_state_layer = tl.layers.ConcatLayer(
layer=server_feature_tensor_layer_list + [job_input_layer, dc_input_layer],
concat_dim=1,
name=prefix + 'SERVER_FEATURE')
q_net = tl.layers.DenseLayer(layer=all_state_layer,
n_units=q_net_layer1_size,
act=tf.nn.leaky_relu,
name=prefix + 'Q_NET_LAYER_1')
q_net = tl.layers.DenseLayer(layer=q_net,
n_units=q_net_layer2_size,
act=tf.nn.leaky_relu,
name=prefix + 'Q_NET_LAYER_2')
q_net = tl.layers.DenseLayer(layer=q_net,
n_units=server_count,
name=prefix + 'Q_NET_LAYER_3')
return q_net
    def create_training_method(self, target_q_off_by_action):
        loss = tf.reduce_mean(tf.squared_difference(target_q_off_by_action, self.q_off_by_action))
        # Apply the L1/L2 penalty before building the optimize op, so the
        # minimized loss actually includes the regularization term.
        regularizer = tfcontrib.layers.l1_l2_regularizer()
        loss = loss + tfcontrib.layers.apply_regularization(regularizer, weights_list=self.q_net.all_params)
        optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate, momentum=0.3)
        optimize = optimizer.minimize(loss=loss, var_list=self.q_net.all_params)
        compute_gradients = optimizer.compute_gradients(loss=loss, var_list=self.q_net.all_params)
        return loss, optimizer, optimize, compute_gradients
def create_target_update_op_list(self):
op = []
for (q_para, target_q_para) in zip(self.q_net.all_params, self.target_q_net.all_params):
op.append(target_q_para.assign(q_para))
return op
def eval_some_tensor(self, tensor, mini_batch):
# For test and visual
res = self.sess.run(fetches=[tensor],
feed_dict={
self.server_state_input: mini_batch['STATE']['SERVER_STATE'],
self.job_state_input: mini_batch['STATE']['JOB_STATE'],
self.dc_state_input: mini_batch['STATE']['DC'],
self.action_input: mini_batch['ACTION'],
})
return res
def eval_q_off_by_action(self, state_dict, action):
return self.sess.run(fetches=[self.q_off_by_action],
feed_dict={
self.server_state_input: state_dict['SERVER_STATE'],
self.job_state_input: state_dict['JOB_STATE'],
self.dc_state_input: state_dict['DC'],
self.action_input: action
})
def eval_greedy_policy_action(self, state_dict):
res, temp = self.sess.run(fetches=[self.greedy_policy_action, self.temp],
feed_dict={
self.server_state_input: state_dict['SERVER_STATE'],
self.job_state_input: state_dict['JOB_STATE'],
self.dc_state_input: state_dict['DC'],
self.action_is_valid: state_dict['VALID_ACTION']
})
return np.reshape(np.array(res), [-1])
def eval_action(self, state_dict):
# For test and visual
res = self.sess.run(fetches=[self.action],
feed_dict={
self.server_state_input: state_dict['SERVER_STATE'],
self.job_state_input: state_dict['JOB_STATE'],
self.dc_state_input: state_dict['DC'],
self.action_is_valid: state_dict['VALID_ACTION']
})
return np.reshape(np.array(res), [-1])
def eval_target_q_off_by_action(self, next_state_dict, next_action, reward):
res = self.sess.run(fetches=[self.target_q_off_by_action],
feed_dict={
self.reward_input: reward,
self.server_state_input: next_state_dict['SERVER_STATE'],
self.job_state_input: next_state_dict['JOB_STATE'],
self.dc_state_input: next_state_dict['DC'],
self.action_input: next_action
})
return np.reshape(np.array(res), newshape=[-1, server_count])
def eval_gradients(self, mini_batch):
next_action = self.eval_greedy_policy_action(state_dict=mini_batch['NEXT_STATE'])
target_q_off_by_action = self.eval_target_q_off_by_action(next_state_dict=mini_batch['NEXT_STATE'],
next_action=next_action,
reward=mini_batch['REWARD'])
gradients = self.sess.run(fetches=[self.compute_gradients_op],
feed_dict={
self.server_state_input: mini_batch['STATE']['SERVER_STATE'],
self.job_state_input: mini_batch['STATE']['JOB_STATE'],
self.dc_state_input: mini_batch['STATE']['DC'],
self.action_input: mini_batch['ACTION'],
self.target_q_off_by_action_input: target_q_off_by_action
})
return gradients
def train(self, mini_batch):
next_action = self.eval_greedy_policy_action(state_dict=mini_batch['NEXT_STATE'])
target_q_off_by_action = self.eval_target_q_off_by_action(next_state_dict=mini_batch['NEXT_STATE'],
next_action=next_action,
reward=mini_batch['REWARD'])
_, loss = self.sess.run(fetches=[self.optimize_op, self.loss],
feed_dict={
self.server_state_input: mini_batch['STATE']['SERVER_STATE'],
self.job_state_input: mini_batch['STATE']['JOB_STATE'],
self.dc_state_input: mini_batch['STATE']['DC'],
self.action_input: mini_batch['ACTION'],
self.target_q_off_by_action_input: target_q_off_by_action
})
# gradients = self.sess.run(fetches=[self.compute_gradients_op],
# feed_dict={
# self.server_state_input: mini_batch['STATE']['SERVER_STATE'],
# self.job_state_input: mini_batch['STATE']['JOB_STATE'],
# self.dc_state_input: mini_batch['STATE']['DC'],
# self.action_input: mini_batch['ACTION'],
# self.target_q_off_by_action_input: target_q_off_by_action
# })
# print(target_q_off_by_action)
# print(self.eval_some_tensor(tensor=self.q_off_by_action, mini_batch=mini_batch))
# print(self.eval_some_tensor(tensor=self.reward_input, mini_batch=mini_batch))
# print(self.eval_some_tensor(tensor=self.target_q_off_by_action))
# print (gradients)
return loss
def update_target_net(self):
res = self.sess.run(self.update_target_q_op)
# res = self.sess.run(self.target_q_net.all_params[0])
# print(res)
def do_summary(self, mini_batch, epoch):
summary = self.sess.run(fetches=[self.merged_summary, self.max_q, self.action],
feed_dict={
self.server_state_input: mini_batch['STATE']['SERVER_STATE'],
self.job_state_input: mini_batch['STATE']['JOB_STATE'],
self.dc_state_input: mini_batch['STATE']['DC'],
self.action_input: mini_batch['ACTION']
})
self.file_writer.add_summary(summary=summary[0], global_step=epoch)
training_data_list = []
def do_print(test_batch, epoch, iter, print_flag=False):
    global training_data_list
server_state = np.array(test_batch['STATE']['SERVER_STATE'])
action = a.eval_action(state_dict=test_batch['STATE'])
q = a.eval_some_tensor(a.q, mini_batch=test_batch)[0]
q_off_by_action = a.eval_some_tensor(tensor=a.q_off_by_action, mini_batch=test_batch)
next_action = a.eval_greedy_policy_action(state_dict=test_batch['NEXT_STATE'])
target_q_off_by_action = a.eval_target_q_off_by_action(next_state_dict=test_batch['NEXT_STATE'],
next_action=next_action,
reward=test_batch['REWARD'])
grad = a.eval_gradients(test_batch)
    if print_flag is True:
        print("chosen action", action)
        print("Q", q)
        print("Input Action", test_batch['ACTION'])
        print("Q off by action", q_off_by_action)
        print("target Q off by action", target_q_off_by_action)
    record = {
        'EPOCH': epoch,
        'ITER': iter,
        'SERVER_STATE': server_state,
        'ACTION': action,
        'Q': q,
    }
    training_data_list.append(record)
if __name__ == '__main__':
from src.environment import Environment
a = Agent()
env = Environment(file_name="1-21-1-21-57.data")
batch_iter = con.batch_iter
epoch = con.epoch
for T in range(epoch):
print("Epoch %d" % T)
total_loss = 0.0
for i in range(batch_iter):
if i % update_target_q_every_iter == 0:
a.update_target_net()
data_batch = env.return_mini_batch(i, batch_size)
loss = a.train(mini_batch=data_batch)
total_loss = total_loss + loss
if T % con.save_data_every_epoch == 0:
do_print(test_batch=data_batch, epoch=T, iter=i, print_flag=True)
print("Aver loss = %f" % (total_loss / batch_iter))
res = np.array(training_data_list)
np.save(file=log_dir + '/training_data', arr=res)
vis.visual(res)
[per-file statistics and quality-signal columns omitted]

hexsha: 1f74a2f22700c0cecd865a836091b95cf438f84d | size: 536 | lang: Python | path: ch06/data.py | repo: stoneflyop1/py_machine_learning @ 18fd635d312f957ca4fcc23d856a1bcd4cf95f48 | license: MIT
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split

#####################
# Load the Wisconsin Diagnostic Breast Cancer dataset
# https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data
df = pd.read_csv('../data/wdbc.data', header=None)

X = df.loc[:, 2:].values  # feature columns
y = df.loc[:, 1].values   # diagnosis label: 'M' (malignant) / 'B' (benign)
le = LabelEncoder()
y = le.fit_transform(y)   # alphabetical encoding: 'B' -> 0, 'M' -> 1
print(repr(le.transform(['M', 'B'])))

X_train, X_test, y_train, y_test = \
    train_test_split(X, y, test_size=0.20, random_state=1)
[per-file statistics and quality-signal columns omitted]

hexsha: 1f75dc40de3440e94dfc62ec31434b5e0206507e | size: 733 | lang: Python | path: src/tga_to_jpg.py | repo: NicolasGrosjean/HoI4_Stats @ b2b6341e8a0b400255302b277407ea33c1a9833f | license: MIT
import argparse
import os
from PIL import Image
def get_args():
parser = argparse.ArgumentParser(description='Transform tga files to jpg')
parser.add_argument('input_dir', type=str, help='Path of input directory containing tga files')
parser.add_argument('output_dir', type=str, help='Path of output directory containing jpg files')
return parser.parse_args()
if __name__ == '__main__':
args = get_args()
os.makedirs(args.output_dir, exist_ok=True)
for file in os.listdir(args.input_dir):
if file.endswith('.tga'):
im = Image.open(os.path.join(args.input_dir, file))
rgb_im = im.convert('RGB')
rgb_im.save(os.path.join(args.output_dir, file[:-4] + '.jpg'))
[per-file statistics and quality-signal columns omitted]

hexsha: 1f78bf747e413822fce9fdf17d1c1fc1b0c7a165 | size: 3,052 | lang: Python | path: src/construction_finder/coderack.py | repo: juliakzn/construction_finder @ 92e9f044163fbe8bde3a6c5f9ec125a7ecf96de8 | license: MIT
import logging
import random
from typing import Dict, List, Tuple, Union
from construction_finder import codelets, frame
logger = logging.getLogger(f"{__name__}")
class SpinResult:
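    """Outcome of one codelet run: a temperature delta plus optional workspace
    modifiers and newly activated frames."""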
def __init__(
self,
temp_modifier: float,
workspace_modifiers: Union[List[codelets.WorkSpaceModifier], None] = None,
new_active_frames: Union[Tuple[str, frame.Frame], None] = None,
):
self.temp_modifier = temp_modifier
self.workspace_modifiers = workspace_modifiers
self.new_active_frames = new_active_frames
def __str__(self):
return f"""<SpinResult>: temp_modifier={self.temp_modifier}, workspace_modifiers={self.workspace_modifiers}"""
class CodeRack:
def __init__(self, urgency_levels: List = [1, 2, 3, 4, 5]):
self.urgency_levels = urgency_levels
self.urgency_bins: Dict = dict()
for urgency_level in urgency_levels:
            self.urgency_bins[urgency_level] = []
def add_codelet(self, codelet):
urgency_level = min(codelet.urgency_level, max(self.urgency_levels))
self.urgency_bins[urgency_level].append(codelet)
def assess_urgency(self):
urgency = list()
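        # Build a roulette wheel: each urgency level appears (bin size * level)
        # times, so random.choice over this list favors more urgent bins.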
for urgency_level in self.urgency_levels:
n = len(self.urgency_bins[urgency_level])
urgency.extend([urgency_level] * n * urgency_level)
return urgency
def empty(self):
total_codelets = 0
for urgency_level in self.urgency_levels:
n = len(self.urgency_bins[urgency_level])
total_codelets += n
return total_codelets == 0
def __contains__(self, codelet):
result = False
for urgency_level in self.urgency_levels:
if codelet in self.urgency_bins[urgency_level]:
result = True
return result
def spin_codelet(self):
logger.info("Spinning a new codelet")
urgency = self.assess_urgency()
logger.info(f"Current urgency = {urgency}")
workspace_modifiers = None
new_active_frames = None
if len(urgency) > 0:
chosen_bin = random.choice(urgency)
random_codelet_index = random.randint(
0, len(self.urgency_bins[chosen_bin]) - 1
)
chosen_codelet = self.urgency_bins[chosen_bin].pop(random_codelet_index)
logger.info(f"Chose codelet {chosen_codelet} from urgency bin {chosen_bin}")
codelet_result = chosen_codelet.run()
temp_modifier = codelet_result.temp_modifier
for new_codelet in codelet_result.new_codelets:
self.add_codelet(new_codelet)
if hasattr(codelet_result, "workspace_modifiers"):
workspace_modifiers = codelet_result.workspace_modifiers
if hasattr(codelet_result, "new_active_codelets"):
new_active_frames = codelet_result.new_active_frames
else:
temp_modifier = 0
return SpinResult(temp_modifier, workspace_modifiers, new_active_frames)
[per-file statistics and quality-signal columns omitted]

hexsha: 1f7babebb7eb438c1f113d421ddd85e8d4dce5ed | size: 1,713 | lang: Python | path: configuration.py | repo: ewellchen/STIN @ 0612a0b56d8caf1f8771ce13a3d8827d26a38f30 | license: MIT
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Default configurations of model configuration, training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path as osp
from typing import Dict
CONFIG = {
'is_train': True,
'src_train_set_path': './train_data_source',
'tgt_train_set_path': './train_data_target',
'test_set_small_path': './test_data/low_resolution/P2-100',
'test_set_large_path': './test_data/high_resolution/P2-100',
'test_size_small': [72,88],
'test_size_large': [512, 512],
'checkpoint_dir': './checkpoint',
'result_dir_small': './results/STIN-small',
'result_dir_large': './results/STIN-large',
'resume': True,
'train_config': {'epoch': 5,
'batch_size': 4,
'device': 'cuda:0',
'learning_rate': 0.0005,},
'train_config_adv': {'epoch': 5,
'batch_size': 2,
'device': 'cuda:0',
'learning_rate': 0.0005, },
'test_config': {'batch_size': 1,
'device': 'cuda:0', },
}
CONFIG_NONLOCAL = {
'test_set_path': './test_data/low_resolution/P2-100',
'test_size': [72,88],
'result_dir': './result/non-local-small',
'test_config': {'batch_size': 1,
'device': 'cuda:0', },
}
CONFIG_UNETPP = {
'test_set_path': './test_data/low_resolution/P2-100',
'test_size': [72,88],
'result_dir': './result/unetpp-small',
'test_config': {'batch_size': 1,
'device': 'cuda:0', },
}
[per-file statistics and quality-signal columns omitted]

hexsha: 1f7d838dc8f88dc8eef76ebba1d92fdbf66fdaf5 | size: 54,959 | lang: Python | path: util/configurejson2cmake.py | repo: chentoz/occQt @ 9738c26a18ac7757201342a69f95483d435a39fa | license: MIT
#!/usr/bin/env python3
#############################################################################
##
## Copyright (C) 2018 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the plugins of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
import json_parser
import posixpath
import re
import sys
from typing import Optional, Set
from textwrap import dedent
import os
from special_case_helper import SpecialCaseHandler
from helper import (
map_qt_library,
featureName,
map_platform,
find_3rd_party_library_mapping,
generate_find_package_info,
get_compile_test_dependent_library_mapping,
)
knownTests = set() # type: Set[str]
class LibraryMapping:
def __init__(self, package: str, resultVariable: str, appendFoundSuffix: bool = True) -> None:
self.package = package
self.resultVariable = resultVariable
self.appendFoundSuffix = appendFoundSuffix
def map_tests(test: str) -> Optional[str]:
testmap = {
"c99": "c_std_99 IN_LIST CMAKE_C_COMPILE_FEATURES",
"c11": "c_std_11 IN_LIST CMAKE_C_COMPILE_FEATURES",
"x86SimdAlways": "ON", # FIXME: Make this actually do a compile test.
"aesni": "TEST_subarch_aesni",
"avx": "TEST_subarch_avx",
"avx2": "TEST_subarch_avx2",
"avx512f": "TEST_subarch_avx512f",
"avx512cd": "TEST_subarch_avx512cd",
"avx512dq": "TEST_subarch_avx512dq",
"avx512bw": "TEST_subarch_avx512bw",
"avx512er": "TEST_subarch_avx512er",
"avx512pf": "TEST_subarch_avx512pf",
"avx512vl": "TEST_subarch_avx512vl",
"avx512ifma": "TEST_subarch_avx512ifma",
"avx512vbmi": "TEST_subarch_avx512vbmi",
"avx512vbmi2": "TEST_subarch_avx512vbmi2",
"avx512vpopcntdq": "TEST_subarch_avx512vpopcntdq",
"avx5124fmaps": "TEST_subarch_avx5124fmaps",
"avx5124vnniw": "TEST_subarch_avx5124vnniw",
"bmi": "TEST_subarch_bmi",
"bmi2": "TEST_subarch_bmi2",
"cx16": "TEST_subarch_cx16",
"f16c": "TEST_subarch_f16c",
"fma": "TEST_subarch_fma",
"fma4": "TEST_subarch_fma4",
"fsgsbase": "TEST_subarch_fsgsbase",
"gfni": "TEST_subarch_gfni",
"ibt": "TEST_subarch_ibt",
"libclang": "TEST_libclang",
"lwp": "TEST_subarch_lwp",
"lzcnt": "TEST_subarch_lzcnt",
"mmx": "TEST_subarch_mmx",
"movbe": "TEST_subarch_movbe",
"mpx": "TEST_subarch_mpx",
"no-sahf": "TEST_subarch_no_shaf",
"pclmul": "TEST_subarch_pclmul",
"popcnt": "TEST_subarch_popcnt",
"prefetchwt1": "TEST_subarch_prefetchwt1",
"prfchw": "TEST_subarch_prfchw",
"pdpid": "TEST_subarch_rdpid",
"rdpid": "TEST_subarch_rdpid",
"rdseed": "TEST_subarch_rdseed",
"rdrnd": "TEST_subarch_rdrnd",
"rtm": "TEST_subarch_rtm",
"shani": "TEST_subarch_shani",
"shstk": "TEST_subarch_shstk",
"sse2": "TEST_subarch_sse2",
"sse3": "TEST_subarch_sse3",
"ssse3": "TEST_subarch_ssse3",
"sse4a": "TEST_subarch_sse4a",
"sse4_1": "TEST_subarch_sse4_1",
"sse4_2": "TEST_subarch_sse4_2",
"tbm": "TEST_subarch_tbm",
"xop": "TEST_subarch_xop",
"neon": "TEST_subarch_neon",
"iwmmxt": "TEST_subarch_iwmmxt",
"crc32": "TEST_subarch_crc32",
"vis": "TEST_subarch_vis",
"vis2": "TEST_subarch_vis2",
"vis3": "TEST_subarch_vis3",
"dsp": "TEST_subarch_dsp",
"dspr2": "TEST_subarch_dspr2",
"altivec": "TEST_subarch_altivec",
"spe": "TEST_subarch_spe",
"vsx": "TEST_subarch_vsx",
"openssl11": '(OPENSSL_VERSION VERSION_GREATER_EQUAL "1.1.0")',
"libinput_axis_api": "ON",
"xlib": "X11_FOUND",
"wayland-scanner": "WaylandScanner_FOUND",
"3rdparty-hunspell": "VKB_HAVE_3RDPARTY_HUNSPELL",
"t9write-alphabetic": "VKB_HAVE_T9WRITE_ALPHA",
"t9write-cjk": "VKB_HAVE_T9WRITE_CJK",
}
    if test in testmap:
        return testmap[test]
if test in knownTests:
return f"TEST_{featureName(test)}"
return None
def cm(ctx, *output):
txt = ctx["output"]
if txt != "" and not txt.endswith("\n"):
txt += "\n"
txt += "\n".join(output)
ctx["output"] = txt
return ctx
def readJsonFromDir(path: str) -> str:
path = posixpath.join(path, "configure.json")
print(f"Reading {path}...")
assert posixpath.exists(path)
parser = json_parser.QMakeSpecificJSONParser()
return parser.parse(path)
def processFiles(ctx, data):
print(" files:")
if "files" in data:
for (k, v) in data["files"].items():
ctx[k] = v
return ctx
def parseLib(ctx, lib, data, cm_fh, cmake_find_packages_set):
newlib = find_3rd_party_library_mapping(lib)
if not newlib:
print(f' XXXX Unknown library "{lib}".')
return
if newlib.packageName is None:
print(f' **** Skipping library "{lib}" -- was masked.')
return
print(f" mapped library {lib} to {newlib.targetName}.")
# Avoid duplicate find_package calls.
if newlib.targetName in cmake_find_packages_set:
return
# If certain libraries are used within a feature, but the feature
# is only emitted conditionally with a simple condition (like
# 'on Windows' or 'on Linux'), we should enclose the find_package
# call for the library into the same condition.
emit_if = newlib.emit_if
# Only look through features if a custom emit_if wasn't provided.
if not emit_if:
for feature in data["features"]:
feature_data = data["features"][feature]
if (
"condition" in feature_data
and f"libs.{lib}" in feature_data["condition"]
and "emitIf" in feature_data
and "config." in feature_data["emitIf"]
):
emit_if = feature_data["emitIf"]
break
if emit_if:
emit_if = map_condition(emit_if)
cmake_find_packages_set.add(newlib.targetName)
find_package_kwargs = {"emit_if": emit_if}
if newlib.is_bundled_with_qt:
# If a library is bundled with Qt, it has 2 FindFoo.cmake
# modules: WrapFoo and WrapSystemFoo.
# FindWrapSystemFoo.cmake will try to find the 'Foo' library in
# the usual CMake locations, and will create a
# WrapSystemFoo::WrapSystemFoo target pointing to the library.
#
# FindWrapFoo.cmake will create a WrapFoo::WrapFoo target which
# will link either against the WrapSystemFoo or QtBundledFoo
# target depending on certain feature values.
#
# Because the following qt_find_package call is for
# configure.cmake consumption, we make the assumption that
# configure.cmake is interested in finding the system library
# for the purpose of enabling or disabling a system_foo feature.
find_package_kwargs["use_system_package_name"] = True
find_package_kwargs["module"] = ctx["module"]
cm_fh.write(generate_find_package_info(newlib, **find_package_kwargs))
if "use" in data["libraries"][lib]:
use_entry = data["libraries"][lib]["use"]
if isinstance(use_entry, str):
print(f"1use: {use_entry}")
cm_fh.write(f"qt_add_qmake_lib_dependency({newlib.soName} {use_entry})\n")
else:
for use in use_entry:
print(f"2use: {use}")
indentation = ""
has_condition = False
if "condition" in use:
has_condition = True
indentation = " "
condition = map_condition(use["condition"])
cm_fh.write(f"if({condition})\n")
cm_fh.write(
f"{indentation}qt_add_qmake_lib_dependency({newlib.soName} {use['lib']})\n"
)
if has_condition:
cm_fh.write("endif()\n")
run_library_test = False
mapped_library = find_3rd_party_library_mapping(lib)
if mapped_library:
run_library_test = mapped_library.run_library_test
if run_library_test and "test" in data["libraries"][lib]:
test = data["libraries"][lib]["test"]
write_compile_test(
ctx, lib, test, data, cm_fh, manual_library_list=[lib], is_library_test=True
)
def lineify(label, value, quote=True):
if value:
if quote:
escaped_value = value.replace('"', '\\"')
return f' {label} "{escaped_value}"\n'
return f" {label} {value}\n"
return ""
def map_condition(condition):
    # A list of conditions is implicitly AND-combined:
if isinstance(condition, list):
condition = "(" + ") AND (".join(condition) + ")"
if isinstance(condition, bool):
if condition:
return "ON"
else:
return "OFF"
assert isinstance(condition, str)
mapped_features = {"gbm": "gbm_FOUND"}
# Turn foo != "bar" into (NOT foo STREQUAL 'bar')
condition = re.sub(r"([^ ]+)\s*!=\s*('.*?')", "(! \\1 == \\2)", condition)
# Turn foo != 156 into (NOT foo EQUAL 156)
condition = re.sub(r"([^ ]+)\s*!=\s*([0-9]?)", "(! \\1 EQUAL \\2)", condition)
condition = condition.replace("!", "NOT ")
condition = condition.replace("&&", " AND ")
condition = condition.replace("||", " OR ")
condition = condition.replace("==", " STREQUAL ")
# explicitly handle input.sdk == '':
condition = re.sub(r"input\.sdk\s*==\s*''", "NOT INPUT_SDK", condition)
last_pos = 0
mapped_condition = ""
has_failed = False
for match in re.finditer(r"([a-zA-Z0-9_]+)\.([a-zA-Z0-9_+-]+)", condition):
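        # Each dotted qmake reference (libs.x, features.x, tests.x, config.x,
        # input.x, ...) is mapped to the corresponding CMake variable or expression.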
substitution = None
# appendFoundSuffix = True
if match.group(1) == "libs":
libmapping = find_3rd_party_library_mapping(match.group(2))
if libmapping and libmapping.packageName:
substitution = libmapping.packageName
if libmapping.resultVariable:
substitution = libmapping.resultVariable
if libmapping.appendFoundSuffix:
substitution += "_FOUND"
# Assume that feature conditions are interested whether
# a system library is found, rather than the bundled one
# which we always know we can build.
if libmapping.is_bundled_with_qt:
substitution = substitution.replace("Wrap", "WrapSystem")
elif match.group(1) == "features":
feature = match.group(2)
if feature in mapped_features:
substitution = mapped_features.get(feature)
else:
substitution = f"QT_FEATURE_{featureName(match.group(2))}"
elif match.group(1) == "subarch":
substitution = f"TEST_arch_{'${TEST_architecture_arch}'}_subarch_{match.group(2)}"
elif match.group(1) == "call":
if match.group(2) == "crossCompile":
substitution = "CMAKE_CROSSCOMPILING"
elif match.group(1) == "tests":
substitution = map_tests(match.group(2))
elif match.group(1) == "input":
substitution = f"INPUT_{featureName(match.group(2))}"
elif match.group(1) == "config":
substitution = map_platform(match.group(2))
elif match.group(1) == "module":
substitution = f"TARGET {map_qt_library(match.group(2))}"
elif match.group(1) == "arch":
if match.group(2) == "i386":
# FIXME: Does this make sense?
substitution = "(TEST_architecture_arch STREQUAL i386)"
elif match.group(2) == "x86_64":
substitution = "(TEST_architecture_arch STREQUAL x86_64)"
elif match.group(2) == "arm":
# FIXME: Does this make sense?
substitution = "(TEST_architecture_arch STREQUAL arm)"
elif match.group(2) == "arm64":
# FIXME: Does this make sense?
substitution = "(TEST_architecture_arch STREQUAL arm64)"
elif match.group(2) == "mips":
# FIXME: Does this make sense?
substitution = "(TEST_architecture_arch STREQUAL mips)"
if substitution is None:
print(f' XXXX Unknown condition "{match.group(0)}"')
has_failed = True
else:
mapped_condition += condition[last_pos : match.start(1)] + substitution
last_pos = match.end(2)
mapped_condition += condition[last_pos:]
# Space out '(' and ')':
mapped_condition = mapped_condition.replace("(", " ( ")
mapped_condition = mapped_condition.replace(")", " ) ")
# Prettify:
condition = re.sub("\\s+", " ", mapped_condition)
condition = condition.strip()
# Special case for WrapLibClang in qttools
condition = condition.replace("TEST_libclang.has_clangcpp", "TEST_libclang")
if has_failed:
condition += " OR FIXME"
return condition
def parseInput(ctx, sinput, data, cm_fh):
skip_inputs = {
"prefix",
"hostprefix",
"extprefix",
"archdatadir",
"bindir",
"datadir",
"docdir",
"examplesdir",
"external-hostbindir",
"headerdir",
"hostbindir",
"hostdatadir",
"hostlibdir",
"importdir",
"libdir",
"libexecdir",
"plugindir",
"qmldir",
"settingsdir",
"sysconfdir",
"testsdir",
"translationdir",
"android-arch",
"android-ndk",
"android-ndk-host",
"android-ndk-platform",
"android-sdk",
"android-toolchain-version",
"android-style-assets",
"appstore-compliant",
"avx",
"avx2",
"avx512",
"c++std",
"ccache",
"commercial",
"confirm-license",
"dbus",
"dbus-runtime",
"debug",
"debug-and-release",
"developer-build",
"device",
"device-option",
"f16c",
"force-asserts",
"force-debug-info",
"force-pkg-config",
"framework",
"gc-binaries",
"gdb-index",
"gcc-sysroot",
"gcov",
"gnumake",
"gui",
"headersclean",
"incredibuild-xge",
"libudev",
"ltcg",
"make",
"make-tool",
"mips_dsp",
"mips_dspr2",
"mp",
"nomake",
"opensource",
"optimize-debug",
"optimize-size",
"optimized-qmake",
"optimized-tools",
"pch",
"pkg-config",
"platform",
"plugin-manifests",
"profile",
"qreal",
"reduce-exports",
"reduce-relocations",
"release",
"rpath",
"sanitize",
"sdk",
"separate-debug-info",
"shared",
"silent",
"qdbus",
"sse2",
"sse3",
"sse4.1",
"sse4.2",
"ssse3",
"static",
"static-runtime",
"strip",
"syncqt",
"sysroot",
"testcocoon",
"use-gold-linker",
"warnings-are-errors",
"Werror",
"widgets",
"xplatform",
"zlib",
"eventfd",
"glib",
"icu",
"inotify",
"journald",
"pcre",
"posix-ipc",
"pps",
"slog2",
"syslog",
}
if sinput in skip_inputs:
print(f" **** Skipping input {sinput}: masked.")
return
dtype = data
if isinstance(data, dict):
dtype = data["type"]
if dtype == "boolean":
print(f" **** Skipping boolean input {sinput}: masked.")
return
if dtype == "enum":
values_line = " ".join(data["values"])
cm_fh.write(f"# input {sinput}\n")
cm_fh.write(f'set(INPUT_{featureName(sinput)} "undefined" CACHE STRING "")\n')
cm_fh.write(
f"set_property(CACHE INPUT_{featureName(sinput)} PROPERTY STRINGS undefined {values_line})\n\n"
)
return
print(f" XXXX UNHANDLED INPUT TYPE {dtype} in input description")
return
def get_library_usage_for_compile_test(library):
result = {}
mapped_library = find_3rd_party_library_mapping(library)
if not mapped_library:
result["fixme"] = f"# FIXME: use: unmapped library: {library}\n"
return result
if mapped_library.test_library_overwrite:
target_name = mapped_library.test_library_overwrite
else:
target_name = mapped_library.targetName
result["target_name"] = target_name
result["package_name"] = mapped_library.packageName
result["extra"] = mapped_library.extra
return result
# Handles config.test/foo/foo.pro projects.
def write_standalone_compile_test(cm_fh, ctx, data, config_test_name, is_library_test):
rel_test_project_path = f"{ctx['test_dir']}/{config_test_name}"
if posixpath.exists(f"{ctx['project_dir']}/{rel_test_project_path}/CMakeLists.txt"):
label = ""
libraries = []
packages = []
if "label" in data:
label = data["label"]
if is_library_test and config_test_name in data["libraries"]:
if "label" in data["libraries"][config_test_name]:
label = data["libraries"][config_test_name]["label"]
# If a library entry in configure.json has a test, and
# the test uses a config.tests standalone project, we
# need to get the package and target info for the
# library, and pass it to the test so compiling and
# linking succeeds.
library_usage = get_library_usage_for_compile_test(config_test_name)
if "target_name" in library_usage:
libraries.append(library_usage["target_name"])
if "package_name" in library_usage:
find_package_arguments = []
find_package_arguments.append(library_usage["package_name"])
if "extra" in library_usage:
find_package_arguments.extend(library_usage["extra"])
package_line = "PACKAGE " + " ".join(find_package_arguments)
packages.append(package_line)
cm_fh.write(
f"""
qt_config_compile_test("{config_test_name}"
LABEL "{label}"
PROJECT_PATH "${{CMAKE_CURRENT_SOURCE_DIR}}/{rel_test_project_path}"
"""
)
if libraries:
libraries_string = " ".join(libraries)
cm_fh.write(f" LIBRARIES {libraries_string}\n")
if packages:
packages_string = " ".join(packages)
cm_fh.write(f" PACKAGES {packages_string}")
cm_fh.write(")\n")
def write_compile_test(
ctx, name, details, data, cm_fh, manual_library_list=None, is_library_test=False
):
if manual_library_list is None:
manual_library_list = []
inherited_test_name = details["inherit"] if "inherit" in details else None
inherit_details = None
if inherited_test_name and is_library_test:
inherit_details = data["libraries"][inherited_test_name]["test"]
if not inherit_details:
print(f" XXXX Failed to locate inherited library test {inherited_test_name}")
if isinstance(details, str):
write_standalone_compile_test(cm_fh, ctx, data, details, is_library_test)
return
def resolve_head(detail):
head = detail.get("head", "")
if isinstance(head, list):
head = "\n".join(head)
return head
head = ""
if inherit_details:
head += resolve_head(inherit_details)
head += resolve_head(details)
sourceCode = head + "\n"
def resolve_include(detail, keyword):
include = detail.get(keyword, "")
if isinstance(include, list):
include = "#include <" + ">\n#include <".join(include) + ">"
elif include:
include = f"#include <{include}>"
return include
include = ""
if is_library_test:
if inherit_details:
inherited_lib_data = data["libraries"][inherited_test_name]
include += resolve_include(inherited_lib_data, "headers")
this_lib_data = data["libraries"][name]
include += resolve_include(this_lib_data, "headers")
else:
if inherit_details:
include += resolve_include(inherit_details, "include")
include += resolve_include(details, "include")
sourceCode += include + "\n"
def resolve_tail(detail):
tail = detail.get("tail", "")
if isinstance(tail, list):
tail = "\n".join(tail)
return tail
tail = ""
if inherit_details:
tail += resolve_tail(inherit_details)
tail += resolve_tail(details)
sourceCode += tail + "\n"
sourceCode += "int main(int argc, char **argv)\n"
sourceCode += "{\n"
sourceCode += " (void)argc; (void)argv;\n"
sourceCode += " /* BEGIN TEST: */\n"
def resolve_main(detail):
main = detail.get("main", "")
if isinstance(main, list):
main = "\n".join(main)
return main
main = ""
if inherit_details:
main += resolve_main(inherit_details)
main += resolve_main(details)
sourceCode += main + "\n"
sourceCode += " /* END TEST: */\n"
sourceCode += " return 0;\n"
sourceCode += "}\n"
sourceCode = sourceCode.replace('"', '\\"')
librariesCmakeName = ""
languageStandard = ""
compileOptions = ""
qmakeFixme = ""
cm_fh.write(f"# {name}\n")
if "qmake" in details: # We don't really have many so we can just enumerate them all
if details["qmake"] == "unix:LIBS += -lpthread":
librariesCmakeName = format(featureName(name)) + "_TEST_LIBRARIES"
cm_fh.write("if (UNIX)\n")
cm_fh.write(" set(" + librariesCmakeName + " pthread)\n")
cm_fh.write("endif()\n")
elif details["qmake"] == "linux: LIBS += -lpthread -lrt":
librariesCmakeName = format(featureName(name)) + "_TEST_LIBRARIES"
cm_fh.write("if (LINUX)\n")
cm_fh.write(" set(" + librariesCmakeName + " pthread rt)\n")
cm_fh.write("endif()\n")
elif details["qmake"] == "!winrt: LIBS += runtimeobject.lib":
librariesCmakeName = format(featureName(name)) + "_TEST_LIBRARIES"
cm_fh.write("if (NOT WINRT)\n")
cm_fh.write(" set(" + librariesCmakeName + " runtimeobject)\n")
cm_fh.write("endif()\n")
elif details["qmake"] == "CONFIG += c++11":
            # Do nothing: we always compile in C++11 mode anyway.
            pass
elif details["qmake"] == "CONFIG += c++11 c++14":
languageStandard = "CXX_STANDARD 14"
elif details["qmake"] == "CONFIG += c++11 c++14 c++17":
languageStandard = "CXX_STANDARD 17"
elif details["qmake"] == "CONFIG += c++11 c++14 c++17 c++2a":
languageStandard = "CXX_STANDARD 20"
elif details["qmake"] == "QMAKE_CXXFLAGS += -fstack-protector-strong":
compileOptions = details["qmake"][18:]
else:
qmakeFixme = f"# FIXME: qmake: {details['qmake']}\n"
library_list = []
test_libraries = manual_library_list
if "use" in data:
test_libraries += data["use"].split(" ")
for library in test_libraries:
if len(library) == 0:
continue
adjusted_library = get_compile_test_dependent_library_mapping(name, library)
library_usage = get_library_usage_for_compile_test(adjusted_library)
if "fixme" in library_usage:
qmakeFixme += library_usage["fixme"]
continue
else:
library_list.append(library_usage["target_name"])
cm_fh.write(f"qt_config_compile_test({featureName(name)}\n")
cm_fh.write(lineify("LABEL", data.get("label", "")))
if librariesCmakeName != "" or len(library_list) != 0:
cm_fh.write(" LIBRARIES\n")
if librariesCmakeName != "":
cm_fh.write(lineify("", "${" + librariesCmakeName + "}"))
if len(library_list) != 0:
cm_fh.write(" ")
cm_fh.write("\n ".join(library_list))
cm_fh.write("\n")
if compileOptions != "":
cm_fh.write(f" COMPILE_OPTIONS {compileOptions}\n")
cm_fh.write(" CODE\n")
cm_fh.write('"' + sourceCode + '"')
if qmakeFixme != "":
cm_fh.write(qmakeFixme)
if languageStandard != "":
cm_fh.write(f"\n {languageStandard}\n")
cm_fh.write(")\n\n")
# "tests": {
# "cxx11_future": {
# "label": "C++11 <future>",
# "type": "compile",
# "test": {
# "include": "future",
# "main": [
# "std::future<int> f = std::async([]() { return 42; });",
# "(void)f.get();"
# ],
# "qmake": "unix:LIBS += -lpthread"
# }
# },
def write_compiler_supports_flag_test(
ctx, name, details, data, cm_fh, manual_library_list=None, is_library_test=False
):
cm_fh.write(f"qt_config_compiler_supports_flag_test({featureName(name)}\n")
cm_fh.write(lineify("LABEL", data.get("label", "")))
cm_fh.write(lineify("FLAG", data.get("flag", "")))
cm_fh.write(")\n\n")
def write_linker_supports_flag_test(
ctx, name, details, data, cm_fh, manual_library_list=None, is_library_test=False
):
cm_fh.write(f"qt_config_linker_supports_flag_test({featureName(name)}\n")
cm_fh.write(lineify("LABEL", data.get("label", "")))
cm_fh.write(lineify("FLAG", data.get("flag", "")))
cm_fh.write(")\n\n")
def parseTest(ctx, test, data, cm_fh):
skip_tests = {
"c11",
"c99",
"gc_binaries",
"precomile_header",
"reduce_exports",
"gc_binaries",
"libinput_axis_api",
"wayland-scanner",
"xlib",
}
if test in skip_tests:
print(f" **** Skipping features {test}: masked.")
return
if data["type"] == "compile":
knownTests.add(test)
if "test" in data:
details = data["test"]
else:
details = test
write_compile_test(ctx, test, details, data, cm_fh)
if data["type"] == "compilerSupportsFlag":
knownTests.add(test)
if "test" in data:
details = data["test"]
else:
details = test
write_compiler_supports_flag_test(ctx, test, details, data, cm_fh)
if data["type"] == "linkerSupportsFlag":
knownTests.add(test)
if "test" in data:
details = data["test"]
else:
details = test
write_linker_supports_flag_test(ctx, test, details, data, cm_fh)
elif data["type"] == "libclang":
knownTests.add(test)
cm_fh.write(f"# {test}\n")
lib_clang_lib = find_3rd_party_library_mapping("libclang")
cm_fh.write(generate_find_package_info(lib_clang_lib))
cm_fh.write(
dedent(
"""
if(TARGET WrapLibClang::WrapLibClang)
set(TEST_libclang "ON" CACHE BOOL "Required libclang version found." FORCE)
endif()
"""
)
)
cm_fh.write("\n")
elif data["type"] == "x86Simd":
knownTests.add(test)
label = data["label"]
cm_fh.write(f"# {test}\n")
cm_fh.write(f'qt_config_compile_test_x86simd({test} "{label}")\n')
cm_fh.write("\n")
elif data["type"] == "machineTuple":
knownTests.add(test)
label = data["label"]
cm_fh.write(f"# {test}\n")
cm_fh.write(f'qt_config_compile_test_machine_tuple("{label}")\n')
cm_fh.write("\n")
# "features": {
# "android-style-assets": {
# "label": "Android Style Assets",
# "condition": "config.android",
# "output": [ "privateFeature" ],
# "comment": "This belongs into gui, but the license check needs it here already."
# },
else:
print(f" XXXX UNHANDLED TEST TYPE {data['type']} in test description")
def get_feature_mapping():
# This is *before* the feature name gets normalized! So keep - and + chars, etc.
feature_mapping = {
"alloc_h": None, # handled by alloc target
"alloc_malloc_h": None,
"alloc_stdlib_h": None,
"build_all": None,
"ccache": {"autoDetect": "1", "condition": "QT_USE_CCACHE"},
"compiler-flags": None,
"cross_compile": {"condition": "CMAKE_CROSSCOMPILING"},
"debug_and_release": {
"autoDetect": "1", # Setting this to None has weird effects...
"condition": "QT_GENERATOR_IS_MULTI_CONFIG",
},
"debug": {
"autoDetect": "ON",
"condition": "CMAKE_BUILD_TYPE STREQUAL Debug OR Debug IN_LIST CMAKE_CONFIGURATION_TYPES",
},
"dlopen": {"condition": "UNIX"},
"force_debug_info": {
"autoDetect": "CMAKE_BUILD_TYPE STREQUAL RelWithDebInfo OR RelWithDebInfo IN_LIST CMAKE_CONFIGURATION_TYPES"
},
"framework": {
"condition": "APPLE AND BUILD_SHARED_LIBS AND NOT CMAKE_BUILD_TYPE STREQUAL Debug"
},
"gc_binaries": {"condition": "NOT QT_FEATURE_shared"},
"gcc-sysroot": None,
"gcov": None,
"GNUmake": None,
"host-dbus": None,
"iconv": {
"condition": "NOT QT_FEATURE_icu AND QT_FEATURE_textcodec AND NOT WIN32 AND NOT QNX AND NOT ANDROID AND NOT APPLE AND WrapIconv_FOUND",
},
"incredibuild_xge": None,
"ltcg": {
"autoDetect": "ON",
"cmakePrelude": """set(__qt_ltcg_detected FALSE)
if(CMAKE_INTERPROCEDURAL_OPTIMIZATION)
set(__qt_ltcg_detected TRUE)
else()
foreach(config ${CMAKE_BUILD_TYPE} ${CMAKE_CONFIGURATION_TYPES})
string(TOUPPER "${config}" __qt_uc_config)
if(CMAKE_INTERPROCEDURAL_OPTIMIZATION_${__qt_uc_config})
set(__qt_ltcg_detected TRUE)
break()
endif()
endforeach()
unset(__qt_uc_config)
endif()""",
"condition": "__qt_ltcg_detected",
},
"msvc_mp": None,
"simulator_and_device": {"condition": "UIKIT AND NOT QT_UIKIT_SDK"},
"pkg-config": {"condition": "PKG_CONFIG_FOUND"},
"precompile_header": {"condition": "BUILD_WITH_PCH"},
"profile": None,
"qmakeargs": None,
"qpa_default_platform": None, # Not a bool!
"qreal": {
"condition": 'DEFINED QT_COORD_TYPE AND NOT QT_COORD_TYPE STREQUAL "double"',
"output": [
{"type": "define", "name": "QT_COORD_TYPE", "value": "${QT_COORD_TYPE}",},
{
"type": "define",
"name": "QT_COORD_TYPE_STRING",
"value": '\\"${QT_COORD_TYPE}\\"',
},
],
},
"reduce_exports": {"condition": "NOT MSVC",},
"release": None,
"release_tools": None,
"rpath": {
"autoDetect": "1",
"condition": "BUILD_SHARED_LIBS AND UNIX AND NOT WIN32 AND NOT ANDROID",
},
"shared": {
"condition": "BUILD_SHARED_LIBS",
"output": [
"publicFeature",
"publicQtConfig",
"publicConfig",
{
"type": "define",
"name": "QT_STATIC",
"prerequisite": "!defined(QT_SHARED) && !defined(QT_STATIC)",
"negative": True,
},
],
},
"silent": None,
"sql-sqlite": {"condition": "QT_FEATURE_datestring"},
"stl": None, # Do we really need to test for this in 2018?!
"strip": None,
"verifyspec": None, # qmake specific...
"warnings_are_errors": None, # FIXME: Do we need these?
"xkbcommon-system": None, # another system library, just named a bit different from the rest
}
return feature_mapping
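# Illustrative notes (not part of the original script): in the mapping above, a value of
# None masks a feature entirely, while a dict selectively overrides keys from configure.json:
#     get_feature_mapping()["gcov"] is None                   # -> feature is skipped
#     get_feature_mapping()["dlopen"]["condition"] == "UNIX"  # -> condition is replaced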
def parseFeature(ctx, feature, data, cm_fh):
feature_mapping = get_feature_mapping()
mapping = feature_mapping.get(feature, {})
if mapping is None:
print(f" **** Skipping features {feature}: masked.")
return
handled = {
"autoDetect",
"comment",
"condition",
"description",
"disable",
"emitIf",
"enable",
"label",
"output",
"purpose",
"section",
}
label = mapping.get("label", data.get("label", ""))
purpose = mapping.get("purpose", data.get("purpose", data.get("description", label)))
autoDetect = map_condition(mapping.get("autoDetect", data.get("autoDetect", "")))
condition = map_condition(mapping.get("condition", data.get("condition", "")))
output = mapping.get("output", data.get("output", []))
comment = mapping.get("comment", data.get("comment", ""))
section = mapping.get("section", data.get("section", ""))
enable = map_condition(mapping.get("enable", data.get("enable", "")))
disable = map_condition(mapping.get("disable", data.get("disable", "")))
emitIf = map_condition(mapping.get("emitIf", data.get("emitIf", "")))
cmakePrelude = mapping.get("cmakePrelude", None)
cmakeEpilogue = mapping.get("cmakeEpilogue", None)
for k in [k for k in data.keys() if k not in handled]:
print(f" XXXX UNHANDLED KEY {k} in feature description")
if not output:
# feature that is only used in the conditions of other features
output = ["internalFeature"]
publicFeature = False # #define QT_FEATURE_featurename in public header
privateFeature = False # #define QT_FEATURE_featurename in private header
negativeFeature = False # #define QT_NO_featurename in public header
internalFeature = False # No custom or QT_FEATURE_ defines
publicDefine = False # #define MY_CUSTOM_DEFINE in public header
publicConfig = False # add to CONFIG in public pri file
privateConfig = False # add to CONFIG in private pri file
publicQtConfig = False # add to QT_CONFIG in public pri file
for o in output:
outputType = o
if isinstance(o, dict):
outputType = o["type"]
if outputType in [
"varAssign",
"varAppend",
"varRemove",
"useBFDLinker",
"useGoldLinker",
"useLLDLinker",
]:
continue
elif outputType == "define":
publicDefine = True
elif outputType == "feature":
negativeFeature = True
elif outputType == "publicFeature":
publicFeature = True
elif outputType == "privateFeature":
privateFeature = True
elif outputType == "internalFeature":
internalFeature = True
elif outputType == "publicConfig":
publicConfig = True
elif outputType == "privateConfig":
privateConfig = True
elif outputType == "publicQtConfig":
publicQtConfig = True
else:
print(f" XXXX UNHANDLED OUTPUT TYPE {outputType} in feature {feature}.")
continue
if not any(
[
publicFeature,
privateFeature,
internalFeature,
publicDefine,
negativeFeature,
publicConfig,
privateConfig,
publicQtConfig,
]
):
print(f" **** Skipping feature {feature}: Not relevant for C++.")
return
normalized_feature_name = featureName(feature)
def writeFeature(
name,
publicFeature=False,
privateFeature=False,
labelAppend="",
superFeature=None,
autoDetect="",
cmakePrelude=None,
cmakeEpilogue=None,
):
if comment:
cm_fh.write(f"# {comment}\n")
if cmakePrelude is not None:
cm_fh.write(cmakePrelude)
cm_fh.write("\n")
cm_fh.write(f'qt_feature("{name}"')
if publicFeature:
cm_fh.write(" PUBLIC")
if privateFeature:
cm_fh.write(" PRIVATE")
cm_fh.write("\n")
cm_fh.write(lineify("SECTION", section))
cm_fh.write(lineify("LABEL", label + labelAppend))
if purpose != label:
cm_fh.write(lineify("PURPOSE", purpose))
cm_fh.write(lineify("AUTODETECT", autoDetect, quote=False))
if superFeature:
feature_condition = f"QT_FEATURE_{superFeature}"
else:
feature_condition = condition
cm_fh.write(lineify("CONDITION", feature_condition, quote=False))
cm_fh.write(lineify("ENABLE", enable, quote=False))
cm_fh.write(lineify("DISABLE", disable, quote=False))
cm_fh.write(lineify("EMIT_IF", emitIf, quote=False))
cm_fh.write(")\n")
if cmakeEpilogue is not None:
cm_fh.write(cmakeEpilogue)
cm_fh.write("\n")
# Write qt_feature() calls before any qt_feature_definition() calls
# Default internal feature case.
featureCalls = {}
featureCalls[feature] = {
"name": feature,
"labelAppend": "",
"autoDetect": autoDetect,
"cmakePrelude": cmakePrelude,
"cmakeEpilogue": cmakeEpilogue,
}
# Go over all outputs to compute the number of features that have to be declared
for o in output:
outputType = o
name = feature
# The label append is to provide a unique label for features that have more than one output
# with different names.
labelAppend = ""
if isinstance(o, dict):
outputType = o["type"]
if "name" in o:
name = o["name"]
labelAppend = f": {o['name']}"
if outputType not in ["feature", "publicFeature", "privateFeature"]:
continue
if name not in featureCalls:
featureCalls[name] = {"name": name, "labelAppend": labelAppend}
if name != feature:
featureCalls[name]["superFeature"] = normalized_feature_name
if outputType in ["feature", "publicFeature"]:
featureCalls[name]["publicFeature"] = True
elif outputType == "privateFeature":
featureCalls[name]["privateFeature"] = True
elif outputType == "publicConfig":
featureCalls[name]["publicConfig"] = True
elif outputType == "privateConfig":
featureCalls[name]["privateConfig"] = True
elif outputType == "publicQtConfig":
featureCalls[name]["publicQtConfig"] = True
# Write the qt_feature() calls from the computed feature map
for _, args in featureCalls.items():
writeFeature(**args)
# Write qt_feature_definition() calls
for o in output:
outputType = o
outputArgs = {}
if isinstance(o, dict):
outputType = o["type"]
outputArgs = o
# Map negative feature to define:
if outputType == "feature":
outputType = "define"
outputArgs = {
"name": f"QT_NO_{normalized_feature_name.upper()}",
"negative": True,
"value": 1,
"type": "define",
}
if outputType != "define":
continue
if outputArgs.get("name") is None:
print(f" XXXX DEFINE output without name in feature {feature}.")
continue
out_name = outputArgs.get("name")
cm_fh.write(f'qt_feature_definition("{feature}" "{out_name}"')
if outputArgs.get("negative", False):
cm_fh.write(" NEGATE")
if outputArgs.get("value") is not None:
cm_fh.write(f' VALUE "{outputArgs.get("value")}"')
if outputArgs.get("prerequisite") is not None:
cm_fh.write(f' PREREQUISITE "{outputArgs.get("prerequisite")}"')
cm_fh.write(")\n")
# Write qt_feature_config() calls
for o in output:
outputType = o
name = feature
modified_name = name
outputArgs = {}
if isinstance(o, dict):
outputType = o["type"]
outputArgs = o
if "name" in o:
modified_name = o["name"]
if outputType not in ["publicConfig", "privateConfig", "publicQtConfig"]:
continue
config_type = ""
if outputType == "publicConfig":
config_type = "QMAKE_PUBLIC_CONFIG"
elif outputType == "privateConfig":
config_type = "QMAKE_PRIVATE_CONFIG"
elif outputType == "publicQtConfig":
config_type = "QMAKE_PUBLIC_QT_CONFIG"
        if not config_type:
            print(f" XXXX config output without type in feature {feature}.")
            continue
        cm_fh.write(f'qt_feature_config("{name}" {config_type}')
if outputArgs.get("negative", False):
cm_fh.write("\n NEGATE")
if modified_name != name:
cm_fh.write("\n")
cm_fh.write(lineify("NAME", modified_name, quote=True))
cm_fh.write(")\n")
def processSummaryHelper(ctx, entries, cm_fh):
for entry in entries:
if isinstance(entry, str):
name = entry
cm_fh.write(f'qt_configure_add_summary_entry(ARGS "{name}")\n')
elif "type" in entry and entry["type"] in [
"feature",
"firstAvailableFeature",
"featureList",
]:
function_args = []
entry_type = entry["type"]
if entry_type in ["firstAvailableFeature", "featureList"]:
feature_mapping = get_feature_mapping()
unhandled_feature = False
for feature_name, value in feature_mapping.items():
# Skip entries that mention a feature which is
# skipped by configurejson2cmake in the feature
# mapping. This is not ideal, but prevents errors at
# CMake configuration time.
                    if not value and feature_name in entry["args"]:
unhandled_feature = True
break
if unhandled_feature:
print(f" XXXX UNHANDLED FEATURE in SUMMARY TYPE {entry}.")
continue
if entry_type != "feature":
function_args.append(lineify("TYPE", entry_type))
if "args" in entry:
args = entry["args"]
function_args.append(lineify("ARGS", args))
if "message" in entry:
message = entry["message"]
function_args.append(lineify("MESSAGE", message))
if "condition" in entry:
condition = map_condition(entry["condition"])
function_args.append(lineify("CONDITION", condition, quote=False))
entry_args_string = "".join(function_args)
cm_fh.write(f"qt_configure_add_summary_entry(\n{entry_args_string})\n")
elif "type" in entry and entry["type"] == "buildTypeAndConfig":
cm_fh.write("qt_configure_add_summary_build_type_and_config()\n")
elif "type" in entry and entry["type"] == "buildMode":
message = entry["message"]
cm_fh.write(f"qt_configure_add_summary_build_mode({message})\n")
elif "type" in entry and entry["type"] == "buildParts":
message = entry["message"]
cm_fh.write(f'qt_configure_add_summary_build_parts("{message}")\n')
elif "section" in entry:
section = entry["section"]
cm_fh.write(f'qt_configure_add_summary_section(NAME "{section}")\n')
processSummaryHelper(ctx, entry["entries"], cm_fh)
cm_fh.write(f'qt_configure_end_summary_section() # end of "{section}" section\n')
else:
print(f" XXXX UNHANDLED SUMMARY TYPE {entry}.")
report_condition_mapping = {
"(features.rpath || features.rpath_dir) && !features.shared": "(features.rpath || QT_EXTRA_RPATHS) && !features.shared",
"(features.rpath || features.rpath_dir) && var.QMAKE_LFLAGS_RPATH == ''": None,
}
def processReportHelper(ctx, entries, cm_fh):
feature_mapping = get_feature_mapping()
for entry in entries:
if isinstance(entry, dict):
entry_args = []
if "type" not in entry:
print(f" XXXX UNHANDLED REPORT TYPE missing type in {entry}.")
continue
report_type = entry["type"]
if report_type not in ["note", "warning", "error"]:
print(f" XXXX UNHANDLED REPORT TYPE unknown type in {entry}.")
continue
report_type = report_type.upper()
entry_args.append(lineify("TYPE", report_type, quote=False))
message = entry["message"]
# Replace semicolons, qt_parse_all_arguments can't handle
# them due to an escaping bug in CMake regarding escaping
# macro arguments.
# https://gitlab.kitware.com/cmake/cmake/issues/19972
message = message.replace(";", ",")
entry_args.append(lineify("MESSAGE", message))
# Need to overhaul everything to fix conditions.
if "condition" in entry:
condition = entry["condition"]
unhandled_condition = False
for feature_name, value in feature_mapping.items():
# Skip reports that mention a feature which is
# skipped by configurejson2cmake in the feature
# mapping. This is not ideal, but prevents errors at
# CMake configuration time.
if not value and f"features.{feature_name}" in condition:
unhandled_condition = True
break
if unhandled_condition:
print(f" XXXX UNHANDLED CONDITION in REPORT TYPE {entry}.")
continue
if isinstance(condition, str) and condition in report_condition_mapping:
new_condition = report_condition_mapping[condition]
if new_condition is None:
continue
else:
condition = new_condition
condition = map_condition(condition)
entry_args.append(lineify("CONDITION", condition, quote=False))
entry_args_string = "".join(entry_args)
cm_fh.write(f"qt_configure_add_report_entry(\n{entry_args_string})\n")
else:
print(f" XXXX UNHANDLED REPORT TYPE {entry}.")
def parseCommandLineCustomHandler(ctx, data, cm_fh):
cm_fh.write(f"qt_commandline_custom({data})\n")
def parseCommandLineOptions(ctx, data, cm_fh):
for key in data:
args = [key]
option = data[key]
if isinstance(option, str):
args += ["TYPE", option]
else:
if "type" in option:
args += ["TYPE", option["type"]]
if "name" in option:
args += ["NAME", option["name"]]
if "value" in option:
args += ["VALUE", option["value"]]
if "values" in option:
values = option["values"]
if isinstance(values, list):
args += ["VALUES", " ".join(option["values"])]
else:
args += ["MAPPING"]
for lhs in values:
args += [lhs, values[lhs]]
cm_fh.write(f"qt_commandline_option({' '.join(args)})\n")
def parseCommandLinePrefixes(ctx, data, cm_fh):
for key in data:
cm_fh.write(f"qt_commandline_prefix({key} {data[key]})\n")
def parseCommandLineAssignments(ctx, data, cm_fh):
for key in data:
cm_fh.write(f"qt_commandline_assignment({key} {data[key]})\n")
def processCommandLine(ctx, data, cm_fh):
print(" commandline:")
if "subconfigs" in data:
for subconf in data["subconfigs"]:
cm_fh.write(f"qt_commandline_subconfig({subconf})\n")
if "commandline" not in data:
return
commandLine = data["commandline"]
if "custom" in commandLine:
print(" custom:")
parseCommandLineCustomHandler(ctx, commandLine["custom"], cm_fh)
if "options" in commandLine:
print(" options:")
parseCommandLineOptions(ctx, commandLine["options"], cm_fh)
if "prefix" in commandLine:
print(" prefix:")
parseCommandLinePrefixes(ctx, commandLine["prefix"], cm_fh)
if "assignments" in commandLine:
print(" assignments:")
parseCommandLineAssignments(ctx, commandLine["assignments"], cm_fh)
def processInputs(ctx, data, cm_fh):
print(" inputs:")
if "commandline" not in data:
return
commandLine = data["commandline"]
if "options" not in commandLine:
return
for input_option in commandLine["options"]:
parseInput(ctx, input_option, commandLine["options"][input_option], cm_fh)
def processTests(ctx, data, cm_fh):
print(" tests:")
if "tests" not in data:
return
for test in data["tests"]:
parseTest(ctx, test, data["tests"][test], cm_fh)
def processFeatures(ctx, data, cm_fh):
print(" features:")
if "features" not in data:
return
for feature in data["features"]:
parseFeature(ctx, feature, data["features"][feature], cm_fh)
def processLibraries(ctx, data, cm_fh):
cmake_find_packages_set = set()
print(" libraries:")
if "libraries" not in data:
return
for lib in data["libraries"]:
parseLib(ctx, lib, data, cm_fh, cmake_find_packages_set)
def processReports(ctx, data, cm_fh):
if "summary" in data:
print(" summary:")
processSummaryHelper(ctx, data["summary"], cm_fh)
if "report" in data:
print(" report:")
processReportHelper(ctx, data["report"], cm_fh)
if "earlyReport" in data:
print(" earlyReport:")
processReportHelper(ctx, data["earlyReport"], cm_fh)
def processSubconfigs(path, ctx, data):
assert ctx is not None
if "subconfigs" in data:
for subconf in data["subconfigs"]:
subconfDir = posixpath.join(path, subconf)
subconfData = readJsonFromDir(subconfDir)
subconfCtx = ctx
processJson(subconfDir, subconfCtx, subconfData)
class special_cased_file:
def __init__(self, base_dir: str, file_name: str, skip_special_case_preservation: bool):
self.base_dir = base_dir
self.file_path = posixpath.join(base_dir, file_name)
self.gen_file_path = self.file_path + ".gen"
self.preserve_special_cases = not skip_special_case_preservation
def __enter__(self):
self.file = open(self.gen_file_path, "w")
if self.preserve_special_cases:
self.sc_handler = SpecialCaseHandler(
os.path.abspath(self.file_path),
os.path.abspath(self.gen_file_path),
os.path.abspath(self.base_dir),
debug=False,
)
return self.file
    def __exit__(self, type, value, trace_back):
        self.file.close()
        if self.preserve_special_cases:
            # handle_special_cases() merges manually preserved edits into the generated
            # file; the generated file replaces the original either way, as before
            self.sc_handler.handle_special_cases()
        os.replace(self.gen_file_path, self.file_path)
def processJson(path, ctx, data, skip_special_case_preservation=False):
ctx["project_dir"] = path
ctx["module"] = data.get("module", "global")
ctx["test_dir"] = data.get("testDir", "config.tests")
ctx = processFiles(ctx, data)
with special_cased_file(path, "qt_cmdline.cmake", skip_special_case_preservation) as cm_fh:
processCommandLine(ctx, data, cm_fh)
with special_cased_file(path, "configure.cmake", skip_special_case_preservation) as cm_fh:
cm_fh.write("\n\n#### Inputs\n\n")
processInputs(ctx, data, cm_fh)
cm_fh.write("\n\n#### Libraries\n\n")
processLibraries(ctx, data, cm_fh)
cm_fh.write("\n\n#### Tests\n\n")
processTests(ctx, data, cm_fh)
cm_fh.write("\n\n#### Features\n\n")
processFeatures(ctx, data, cm_fh)
processReports(ctx, data, cm_fh)
if ctx.get("module") == "global":
cm_fh.write(
'\nqt_extra_definition("QT_VERSION_STR" "\\"${PROJECT_VERSION}\\"" PUBLIC)\n'
)
cm_fh.write('qt_extra_definition("QT_VERSION_MAJOR" ${PROJECT_VERSION_MAJOR} PUBLIC)\n')
cm_fh.write('qt_extra_definition("QT_VERSION_MINOR" ${PROJECT_VERSION_MINOR} PUBLIC)\n')
cm_fh.write('qt_extra_definition("QT_VERSION_PATCH" ${PROJECT_VERSION_PATCH} PUBLIC)\n')
# do this late:
processSubconfigs(path, ctx, data)
def main():
if len(sys.argv) < 2:
print("This scripts needs one directory to process!")
quit(1)
skip_special_case_preservation = False
if len(sys.argv) > 2 and sys.argv[2] == "-s":
skip_special_case_preservation = True
directory = sys.argv[1]
print(f"Processing: {directory}.")
data = readJsonFromDir(directory)
processJson(directory, {}, data, skip_special_case_preservation=skip_special_case_preservation)
if __name__ == "__main__":
main()
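# Example invocation (the script file name is assumed from its usage here):
#     python3 configurejson2cmake.py /path/to/module [-s]
# where "-s" skips special-case preservation when regenerating the .cmake files.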
| 34.608942 | 147 | 0.580251 | 5,997 | 54,959 | 5.121227 | 0.142571 | 0.019146 | 0.029891 | 0.012047 | 0.274518 | 0.196243 | 0.155639 | 0.128582 | 0.098496 | 0.084039 | 0 | 0.008092 | 0.293965 | 54,959 | 1,587 | 148 | 34.63075 | 0.783393 | 0.097236 | 0 | 0.171149 | 0 | 0 | 0.265549 | 0.058592 | 0.002445 | 0 | 0 | 0.00126 | 0.00326 | 1 | 0.0326 | false | 0.000815 | 0.00815 | 0 | 0.072535 | 0.0326 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1f7e10137722c6fcc224fdac359159dee3d532fc | 819 | py | Python | easy_scrapy/2_beautifulsoup/bs4_3_regex.py | cyfu/web_scrapying | b59a75d3db289032bb9005f062470e8ce745539a | ["MIT"] | null | null | null | easy_scrapy/2_beautifulsoup/bs4_3_regex.py | cyfu/web_scrapying | b59a75d3db289032bb9005f062470e8ce745539a | ["MIT"] | null | null | null | easy_scrapy/2_beautifulsoup/bs4_3_regex.py | cyfu/web_scrapying | b59a75d3db289032bb9005f062470e8ce745539a | ["MIT"] | null | null | null |
from bs4 import BeautifulSoup
from urllib.request import urlopen
import re
# open and read web page, decode it if it contains Chinese
html = urlopen('https://mofanpy.com/static/scraping/table.html').read().decode('utf-8')
print(html)
# 'lxml' is parser name
soup = BeautifulSoup(html, features='lxml')
# search by tag name and attribute name (src); use a regex to match the src value
img_list = soup.find_all('img', {'src': re.compile(r'.*?\.jpg')})
print( [img['src'] for img in img_list] )
# another example
course_links = soup.find_all('a', {'href': re.compile(r'/tutorials.*')})
for link in course_links:
print(link['href'])
# another example
tables = soup.find_all('table', {'id': 'course-list'})
for table in tables:
courses = table.find_all('tr', {'class': 'ml'})
print([course['id'] for course in courses])
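# A further hedged example (not in the original tutorial): compiled patterns can be reused,
# and BeautifulSoup applies them with re.search semantics against each attribute value.
jpg_pattern = re.compile(r'\.jpg$')
print([img['src'] for img in soup.find_all('img', {'src': jpg_pattern})])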
| 32.76 | 87 | 0.693529 | 125 | 819 | 4.48 | 0.52 | 0.05 | 0.058929 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002829 | 0.136752 | 819 | 25 | 88 | 32.76 | 0.78925 | 0.218559 | 0 | 0 | 0 | 0 | 0.193701 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.266667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1f8f15b75dc5ee4ca1fc697ef1e5c0863cf598a7 | 1,893 | py | Python | easyTCP/CLIENT/backend/Protocol.py | dsal3389/easyTCP | 0a11ffe4726bfd0461c24fa459e417fd2fe3cd7f | ["MIT"] | 4 | 2018-12-09T13:57:59.000Z | 2019-10-19T19:34:28.000Z | easyTCP/CLIENT/backend/Protocol.py | dsal3389/easyTCP | 0a11ffe4726bfd0461c24fa459e417fd2fe3cd7f | ["MIT"] | null | null | null | easyTCP/CLIENT/backend/Protocol.py | dsal3389/easyTCP | 0a11ffe4726bfd0461c24fa459e417fd2fe3cd7f | ["MIT"] | null | null | null |
import asyncio
import json
from ..utils import DEFAULT_SETTINGS
from ..utils.DEFAULT_ENCRYPTION import SERVER_encryption, CLIENT_encryption
def json_dumper(data):
return bytes(json.dumps(data), encoding=DEFAULT_SETTINGS.ENCODING)
def json_loader(data):
return json.loads(str(data, encoding=DEFAULT_SETTINGS.ENCODING))
class Protocol(object):
def __init__(self, reader=None, writer=None, *, loop=None, client_encryption=None):
self.reader=reader
self.writer=writer
self.loop=loop or asyncio.get_event_loop()
self.server_encryption = SERVER_encryption(DEFAULT_SETTINGS.ENCODING)
self.client_encryption = client_encryption or CLIENT_encryption(encoding=DEFAULT_SETTINGS.ENCODING)
self.jload = json_loader
self.jdump = json_dumper
@asyncio.coroutine
def send(self, method, *, drain=False, encrypt=True, **kwargs):
data = self.jdump({'method':method.upper(), **kwargs})
if encrypt: # we don't need to encrypt the data when we want to send the public key
data = self.server_encryption.encrypt(data) # the client wont be able to read the encrypted packet
self.writer.write(data)
if drain:
yield from self.writer.drain()
@asyncio.coroutine
def recv(self, dencrypt=True):
data = yield from self.reader.read(DEFAULT_SETTINGS.READ_SIZE)
if dencrypt:
data = self.client_encryption.dencrypt(data)
data = self.jload(data)
return data['method'], {k:i for k, i in data.items() if k != 'method'}
@asyncio.coroutine
    def expected(self, *args, dencrypt=True):
        method, payload = yield from self.recv(dencrypt)
        if args and method not in args:
            raise ValueError('expected %s, received %s' % (args, method))
        return method, payload
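# Hedged usage sketch (illustrative, matching the generator-based asyncio style above;
# the host/port and method names are assumptions, not part of easyTCP):
#
#     reader, writer = yield from asyncio.open_connection('127.0.0.1', 8080)
#     proto = Protocol(reader, writer)
#     yield from proto.send('PING', drain=True, encrypt=False)
#     method, payload = yield from proto.expected('PONG', dencrypt=False)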
| 36.403846 | 111 | 0.661912 | 238 | 1,893 | 5.138655 | 0.327731 | 0.07359 | 0.075225 | 0.076043 | 0.057236 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.242472 | 1,893 | 51 | 112 | 37.117647 | 0.852859 | 0.064976 | 0 | 0.078947 | 0 | 0 | 0.022727 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.157895 | false | 0 | 0.105263 | 0.052632 | 0.394737 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1f8f9e391109c41227336b2bb762cb77a40123c1 | 6,413 | py | Python | src/harvester.py | bmoxon/azfinsim | 3e203855410abd6c9636377b93ed5d33ac896c41 | ["MIT"] | 5 | 2021-02-24T19:10:34.000Z | 2022-02-24T21:11:24.000Z | src/harvester.py | bmoxon/azfinsim | 3e203855410abd6c9636377b93ed5d33ac896c41 | ["MIT"] | null | null | null | src/harvester.py | bmoxon/azfinsim | 3e203855410abd6c9636377b93ed5d33ac896c41 | ["MIT"] | 2 | 2021-05-03T11:57:31.000Z | 2021-12-09T10:24:29.000Z |
#! /usr/bin/env python3
#-- harvest scheduler that runs on the compute pool nodes
import argparse
import time
import sys
import logging
import os
import psutil
from applicationinsights import TelemetryClient
from applicationinsights.logging import LoggingHandler
from getargs import getargs
import azlog
azlog.color=False
#-- Timeout between polling the harvest #cores api/file
HARVESTPOLLTIMEOUT = 30
#-- Executable to launch per cpu slot
#ENGINE="burn.sh" # (for testing)
ENGINE="/azfinsim/azfinsim.py"
#KVP_MONITOR="/var/lib/hyperv/.kvp_pool_0"
#-- mounted via: sudo docker run -v /var/lib/hyperv:/kvp -it mkharvestazcr.azurecr.io/azfinsim/azfinsimub1804
KVP_MONITOR="/kvp/.kvp_pool_0"
def read_harvest_cores() :
vcores = psutil.cpu_count(logical=True)
pcores = psutil.cpu_count(logical=False)
log.info("Polling Harvester: Physical Cores: %d Logical Cores: %d" % (pcores,vcores))
kvp=KVP_MONITOR
    try:
        with open(kvp, "r") as f:
            contents = f.read()
        if len(contents) > 0:
            contents = contents.replace("CurrentCoreCount", "")
            contents = contents.replace('\0', '')
            ncores = int(contents.split('.')[0])
            log.info("Harvest file %s has current physical core count: %d" % (kvp,ncores))
        else:
            ncores = vcores
            log.warn("Harvest file %s is empty; using static vcore count: %d" % (kvp,ncores))
    except OSError:
        ncores = vcores
        log.warn("Harvest file %s doesn't exist; using static vcore count: %d" % (kvp,ncores))
tc.track_metric('HARVESTCORES', ncores)
tc.flush()
return ncores
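#-- Worked example of the KVP parsing above (format assumed from the code): a file
#-- containing "CurrentCoreCount8." strips to "8." and yields ncores == 8.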
def spawn(ncores) :
env = {"PATH":"."}
args = ("null","null")
log.info("spawning %d processes" % ncores)
for i in range(ncores):
pid = os.fork()
if not pid:
try:
os.execvpe("burn.sh", args, env)
except OSError as e:
log.error("Exec failed: %s\n" % (e.strerror))
os._exit(1)
else:
pid = os.waitpid(pid,0)
def spawn_one(start_trade,trade_window,inputargs):
#path = os.environ['PATH']
argtup = tuple(inputargs)
pid = os.fork()
if not pid:
#-- child process
log.info("spawning new process %s: pid %d: start_trade=%d, ntrades=%d" % (ENGINE,os.getpid(),start_trade,trade_window))
#logging.info(argtup)
try:
os.execve(ENGINE, argtup, os.environ.copy())
except OSError as e:
log.error("Exec failed: %s\n" % (e.strerror))
os._exit(1)
#else:
#pid = os.waitpid(pid,0)
def replace_args(start_trade,trade_window,inputargs):
    #-- main() rebuilds argv with a "--" prefix and "_" replaced by "-",
    #-- so match the dashed option names here
    result = []
    skip = False
    for arg in inputargs:
        if skip:
            skip = False
            continue
        if arg == '--start-trade':
            result.append('--start-trade')
            result.append(str(start_trade))
            skip = True
        elif arg == '--trade-window':
            result.append('--trade-window')
            result.append(str(trade_window))
            skip = True
        else:
            result.append(arg)
    return result
#-- register the absolute start time
#launch=time.time_ns() #-- python3.8 only
launch=time.time()
log = azlog.getLogger(__name__)
if __name__ == "__main__":
#-- grab cli args: will be passed through to child processes
args = getargs("harvester")
#-- reformat args into a list of strings for execvpe
inputargs = []
inputargs.append(ENGINE) #-- first arg to execvpe() should be progname
for arg in vars(args):
#print(arg, getattr(args,arg))
val = str(getattr(args,arg))
arg=arg.replace("_","-")
inputargs.append(str("--" + arg)) #-- re-add the stripped "--" prefix
inputargs.append(val)
#print(inputargs)
#-- setup azure application insights handle for telemetry
tc = TelemetryClient("%s" % args.appinsights_key)
# set up logging - STDOUT & Azure AppInsights EventLog
#handler = LoggingHandler(args.appinsights_key)
#logging.basicConfig(
# format="%(asctime)s harvester: %(name)s %(threadName)-10.10s %(levelname)-5.5s %(message)s",
# handlers=[
# LoggingHandler(args.appinsights_key), #-- send to AZURE
# logging.StreamHandler(stream=sys.stdout) #-- send to STDOUT
# ],level=args.loglevel)
#-- log start time
log.info("TRADE %10d: LAUNCH : %d" % (args.start_trade,launch))
tc.track_metric('STARTTIME', launch)
tc.flush()
#-- get initial harvest core count
slots = read_harvest_cores()
log.info("%d x Cores available." % slots)
#-- calculate number of trades per process/batch/cpu
max_batch_size = 10
total_trades = args.trade_window
lastbatch = total_trades % max_batch_size
nbatchesfl = total_trades / max_batch_size
nbatches = int(nbatchesfl)
offset = args.start_trade
log.info("%d trades to process in this task (%.2f batches of %d)" % (total_trades,nbatchesfl,max_batch_size))
#-- Main loop: monitor harvest api/file & dispatch processes to available cores
batchesdone=0
trades_processed=0
while (batchesdone <= nbatches):
procs = psutil.Process().children()
gone, alive = psutil.wait_procs(procs,timeout=1,callback=None)
nprocs = len(alive)
freeslots = slots - nprocs
log.info("%d processes running on %d total slots: %d slots available." % (nprocs,slots,freeslots))
if (nprocs < slots):
for i in range(freeslots):
if (batchesdone == nbatches): batch_size = lastbatch
else: batch_size = max_batch_size
inputargs = replace_args(offset,batch_size,inputargs) # substitute the command line args
spawn_one(offset,batch_size,inputargs)
trades_processed += batch_size
offset += batch_size
batchesdone+=1
if (batch_size == lastbatch):
break
time.sleep(HARVESTPOLLTIMEOUT)
#-- re-read the harvest file - check if #slots has changed
slots = read_harvest_cores()
log.info("%d trades processed. No trades left to process; relinquishing cores" % trades_processed)
# flush all un-sent telemetry items
tc.flush()
#logging.shutdown()
#-- when all work done, exit and allow orchestration to recover node.
exit(0)
| 34.478495 | 127 | 0.626072 | 802 | 6,413 | 4.905237 | 0.34414 | 0.027453 | 0.015252 | 0.011439 | 0.11998 | 0.093035 | 0.084392 | 0.038129 | 0.038129 | 0.038129 | 0 | 0.007097 | 0.252924 | 6,413 | 186 | 128 | 34.478495 | 0.814026 | 0.24388 | 0 | 0.220472 | 0 | 0 | 0.151301 | 0.00437 | 0 | 0 | 0 | 0 | 0 | 1 | 0.031496 | false | 0 | 0.07874 | 0 | 0.11811 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1f8faaab50ba1792d26b495c5cba37135b67c989 | 7,758 | py | Python | old/model.py | samhippie/shallow-red | 5690cdf380c6e138e25d88e85093738951438298 | ["MIT"] | null | null | null | old/model.py | samhippie/shallow-red | 5690cdf380c6e138e25d88e85093738951438298 | ["MIT"] | null | null | null | old/model.py | samhippie/shallow-red | 5690cdf380c6e138e25d88e85093738951438298 | ["MIT"] | 1 | 2020-03-13T12:53:35.000Z | 2020-03-13T12:53:35.000Z |
#!/usr/bin/env python3
#loading tf is slow, so don't do it unless we're using it
USE_TENSORFLOW = False
import collections
import numpy as np
import os
import pickle
if USE_TENSORFLOW:
import tensorflow as tf
from tensorflow import keras
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import modelInput
#used to compare a trained model to a basic model for the same inputs
#can also be used if we want to train a model using the behavior of a basic model
class CombinedModel:
def __init__(self, trainedModel, basicModel):
self.trainedModel = trainedModel
self.basicModel = basicModel
#t controls output of getExpValue
#0 for basic model, 1 for trained, in between for weighted average
self.t = 0
self.compare = False
self.compPointsBasic = []
self.compPointsTrained = []
def getExpValue(self, stateHash=None, stateObj=None, action1=None, action2=None, bulk_input=None):
basicValue = self.basicModel.getExpValue(stateHash, stateObj, action1, action2, bulk_input)
trainedValue = self.trainedModel.getExpValue(stateHash, stateObj, action1, action2, bulk_input)
        if isinstance(basicValue, list):
            value = []
            for i in range(len(basicValue)):
                value.append([None if basicValue[i][0] is None else basicValue[i][0] * (1-self.t) + trainedValue[i][0] * self.t])
        else:
            value = None if basicValue is None else basicValue * (1-self.t) + trainedValue * self.t
if self.compare:
            if isinstance(basicValue, list):
                for i in range(len(basicValue)):
                    #None means basic has never seen it, so we have no good data
                    if basicValue[i][0] is not None:
self.compPointsBasic.append(basicValue[i][0])
self.compPointsTrained.append(trainedValue[i][0])
else:
self.compPointsBasic.append(basicValue)
self.compPointsTrained.append(trainedValue)
return value
def addReward(self, *args):
self.basicModel.addReward(*args)
self.trainedModel.addReward(*args)
def train(self, epochs=1, batch_size=None):
self.trainedModel.train(epochs, batch_size)
def purge(self, seenStates):
self.basicModel.purge(seenStates)
self.trainedModel.purge(seenStates)
def getMSE(self, clear=False):
sum = 0
count = 0
for i in range(len(self.compPointsBasic)):
b = self.compPointsBasic[i]
t = self.compPointsTrained[i]
sum += (b - t) ** 2
count += 1
if clear:
self.compPointsBasic = []
self.compPointsTrained = []
self.compare = False
if count == 0:
return 0
else:
return sum / count
class TrainedModel:
def __init__(self, alpha=0.001, model=None, width=256):
self.alpha = alpha
        if model is None:
#simple feedforward
inputs = keras.Input(modelInput.inputShape)
x = keras.layers.Dense(width, activation='relu')(inputs)
y = keras.layers.Dense(width, activation='relu')(x)
prediction = keras.layers.Dense(1, activation='sigmoid')(y)
self.model = keras.Model(inputs=inputs, outputs=prediction)
self._compile()
else:
self.model = model
#used for training
self.training = True
self.savedInputs = []
self.savedLabels = []
self.expValueCache = {}
def _compile(self):
self.model.compile(
optimizer=tf.train.AdamOptimizer(self.alpha),
loss='logcosh')
#uses the cached expValue if possible
#otherwise generates it, adds it to cache
def OLDgetExpValue(self, stateHash=None, stateObj=None, action1=None, action2=None, bulk_input=None):
if (stateHash, action1, action2) in self.expValueCache:
return self.expValueCache[(stateHash, action1, action2)]
value = self.genExpValue(stateHash, stateObj, action1, action2)
self.expValueCache[(stateHash, action1, action2)] = value
return value
#returns the expected value from the network
def getExpValue(self, stateHash=None, stateObj=None, action1=None, action2=None, bulk_input=None):
if bulk_input:
data = [modelInput.toInput(so, a1, a2) for _, so, a1, a2 in bulk_input]
return self.model.predict(np.array(data))
else:
data = modelInput.toInput(stateObj, action1, action2)
return self.model.predict(np.array([data]))[0][0]
#saves the data-label pair for training later
def addReward(self, stateHash, stateObj, action1, action2, reward):
if not self.training:
return
data = modelInput.toInput(stateObj, action1, action2)
self.savedInputs.append(data)
self.savedLabels.append(np.array([reward]))
#trains on all the saved data-label pairs, then removing
def train(self, epochs=1, batch_size=None):
self.model.fit(np.array(self.savedInputs),
np.array(self.savedLabels),
verbose=0,
epochs=epochs,
batch_size=batch_size)
self.savedInputs = []
self.savedLabels = []
self.expValueCache = {}
#this doesn't need to purge, as memory usage doesn't grow much
def purge(self, seenStates):
pass
#Save and load, also saves/loads the idMap from modeInput
#dir should not include a trailing /
def saveModel(self, dir, name):
self.model.save(dir + '/' + name + '-model.h5', include_optimizer=False)
idMapData = pickle.dumps(modelInput.idMap)
with open(dir + '/' + name + '-map.pickle', 'wb') as mapFile:
mapFile.write(idMapData)
def loadModel(self, dir, name):
self.model = keras.models.load_model(dir + '/' + name + '-model.h5', compile=False)
self._compile()
with open(dir + '/' + name + '-map.pickle', 'rb') as mapFile:
idMapData = mapFile.read()
modelInput.idMap = pickle.loads(idMapData)
class BasicModel:
def __init__(self):
self.rewardTable = collections.defaultdict(int)
self.countTable = collections.defaultdict(int)
#log holds a list of (stateHash, stateObj, action1, action2, reward) tuples
#so these can be written out at some point an analyzed
self.shouldLog = False
self.log = []
#returns the actual average reward for the (s,a,a) tuple
def getExpValue(self, stateHash=None, stateObj=None, action1=None, action2=None, bulk_input=None):
if bulk_input:
#have to make this look like it came out of tf
return [[self.getExpValue(*b, bulk_input=None)] for b in bulk_input]
        cumReward = self.rewardTable[(stateHash, action1, action2)]
        count = self.countTable[(stateHash, action1, action2)]
        value = None if count == 0 else cumReward / count
        if self.shouldLog:
            #log the computed value ('reward' was not defined in this scope in the original)
            self.log.append((stateHash, stateObj, action1, action2, value))
        return value
#adds the count and reward for the (s,a,a) tuple
def addReward(self, stateHash, stateObj, action1, action2, reward):
self.rewardTable[(stateHash, action1, action2)] += reward
self.countTable[(stateHash, action1, action2)] += 1
#removes information on states that haven't been seen
def purge(self, seenStates):
keys = list(self.rewardTable)
for key in keys:
stateHash = key[0]
if not stateHash in seenStates:
del self.rewardTable[key]
del self.countTable[key]
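# Hedged usage sketch (illustrative; TrainedModel requires USE_TENSORFLOW = True):
#     model = CombinedModel(TrainedModel(), BasicModel())
#     model.t = 0.5                       # blend: 0 -> purely basic, 1 -> purely trained
#     model.addReward(sHash, sObj, a1, a2, 1.0)
#     model.train(epochs=10)
#     model.getExpValue(sHash, sObj, a1, a2)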
| 37.298077 | 130 | 0.620907 | 921 | 7,758 | 5.186754 | 0.256243 | 0.046891 | 0.041449 | 0.045426 | 0.305212 | 0.239481 | 0.14821 | 0.113042 | 0.082479 | 0.067406 | 0 | 0.014469 | 0.278422 | 7,758 | 207 | 131 | 37.478261 | 0.838871 | 0.143336 | 0 | 0.267123 | 0 | 0 | 0.01374 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130137 | false | 0.006849 | 0.047945 | 0 | 0.267123 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1f9508b579771bc7e41b7b6de9c4a49ddf05f51e | 3,368 | py | Python | models/generatorUnet.py | ctyler9/cartoon-gan | 48ec80cfcf23c6f30c5d1c446c12ff6f9c81afc8 | ["MIT"] | 177 | 2020-01-31T08:32:07.000Z | 2022-03-28T02:20:29.000Z | models/generatorUnet.py | ctyler9/cartoon-gan | 48ec80cfcf23c6f30c5d1c446c12ff6f9c81afc8 | ["MIT"] | 10 | 2020-06-26T04:46:26.000Z | 2022-02-01T18:17:10.000Z | models/generatorUnet.py | ctyler9/cartoon-gan | 48ec80cfcf23c6f30c5d1c446c12ff6f9c81afc8 | ["MIT"] | 44 | 2020-03-11T17:21:51.000Z | 2022-03-16T16:09:22.000Z |
import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
def __init__(self, in_channels, out_channels):
super(Bottleneck, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channels, in_channels, 1, padding=0, bias=False),
nn.ReLU(inplace=True),
single_conv(in_channels, out_channels, 3),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, 1, padding=0, bias=False),
)
def forward(self, x):
return F.relu(self.conv(x) + x, inplace=True)
class Up(nn.Module):
def __init__(self, in_channels, out_channels, bilinear=True):
super().__init__()
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
else:
self.up = nn.ConvTranspose2d(in_channels // 2, in_channels // 2, kernel_size=2, stride=2)
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 3, padding=1),
Bottleneck(out_channels, out_channels)
)
def forward(self, x1, x2):
x1 = self.up(x1)
        # input is CHW; plain ints avoid tensor/int issues in F.pad on newer torch
        diffY = x2.size()[2] - x1.size()[2]
        diffX = x2.size()[3] - x1.size()[3]
        x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
                        diffY // 2, diffY - diffY // 2])
x = torch.cat([x2, x1], dim=1)
x = self.conv(x)
return x
class Down(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.pool = nn.Sequential(
nn.AvgPool2d(2, 1),
nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1, stride=2, bias=False),
nn.BatchNorm2d(in_channels),
nn.ReLU(inplace=True),
single_conv(in_channels, out_channels)
)
def forward(self, x):
return self.pool(x)
def single_conv(in_channels, out_channels, ks=3):
    return nn.Sequential(
        nn.ReflectionPad2d(ks//2),
        nn.Conv2d(in_channels, out_channels, ks, bias=False),  # kernel size must match the reflection padding
        nn.ReLU(inplace=True)
    )
class UNet(nn.Module):
def __init__(self, n_channels, n_classes, bilinear=True):
super(UNet, self).__init__()
self.n_channels = n_channels
self.n_classes = n_classes
self.bilinear = bilinear
self.inc = single_conv(n_channels, 64)
self.down1 = Down(64, 128)
self.down2 = Down(128, 256)
self.down3 = Down(256, 512)
self.down4 = Down(512, 512)
self.res = nn.Sequential(
Bottleneck(512, 512),
Bottleneck(512, 512),
Bottleneck(512, 512),
)
self.up1 = Up(1024, 256, bilinear)
self.up2 = Up(512, 128, bilinear)
self.up3 = Up(256, 64, bilinear)
self.up4 = Up(128, 64, bilinear)
self.outc = nn.Conv2d(64, n_classes, 1, padding=0)
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x5 = self.res(x5)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
x = self.outc(x)
return x
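# Minimal smoke-test sketch (an assumption, not part of the original module): 3-channel
# input, 3-class output; spatial size is preserved by the pad-and-concat in Up.forward.
if __name__ == "__main__":
    net = UNet(n_channels=3, n_classes=3)
    dummy = torch.randn(1, 3, 64, 64)  # NCHW batch of one
    out = net(dummy)
    print(out.shape)  # expected: torch.Size([1, 3, 64, 64])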
| 32.384615 | 101 | 0.561758 | 451 | 3,368 | 4.026608 | 0.190687 | 0.082599 | 0.104626 | 0.092511 | 0.378304 | 0.318282 | 0.189427 | 0.1663 | 0.124449 | 0.102423 | 0 | 0.065896 | 0.306116 | 3,368 | 104 | 102 | 32.384615 | 0.711168 | 0.003563 | 0 | 0.193182 | 0 | 0 | 0.002385 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102273 | false | 0 | 0.034091 | 0.034091 | 0.238636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1f964a207f38c7145c92fc77855d4848bb25de63 | 1,716 | py | Python | app/calc/utility.py | sajeeshen/WebCalculatorAPI | d951e688e84741cc594877914d292fbddb4e9542 | ["MIT"] | null | null | null | app/calc/utility.py | sajeeshen/WebCalculatorAPI | d951e688e84741cc594877914d292fbddb4e9542 | ["MIT"] | null | null | null | app/calc/utility.py | sajeeshen/WebCalculatorAPI | d951e688e84741cc594877914d292fbddb4e9542 | ["MIT"] | null | null | null |
import math
from datetime import datetime
AVAILABLE_ACTIONS = [{'action': 'add', 'admin_required': False,
'operator': '+'},
{'action': 'subtract', 'admin_required': False,
'operator': '-'},
{'action': 'multiply', 'admin_required': False,
'operator': '*'},
{'action': 'divide', 'admin_required': False,
'operator': '/'},
{'action': 'power', 'admin_required': True,
'operator': '**'},
{'action': 'sqrt', 'admin_required': True,
'operator': 'sqrt'},
]
def get_available_options(action):
"""
Go through the available options and find it, then return that object
:param action: string
:return: list
"""
return [obj for obj in AVAILABLE_ACTIONS
if obj['action'] == action.lower()]
def do_calculation(action, x, y):
"""
    This function performs the requested calculation
:param action: string
:param x: int
:param y: int
:return: int ( the result )
"""
    operator = get_available_options(action)[0]['operator']
ops = {
'+': lambda x, y: x + y,
'-': lambda x, y: x - y,
'*': lambda x, y: x * y,
'/': lambda x, y: x / y if y else 0,
'**': lambda x, y: x ** y,
'sqrt': lambda x, y: math.sqrt(int(x))
}
return ops[operator](int(x), int(y))
def get_current_month():
now = datetime.now()
return now.month
def get_current_year():
now = datetime.now()
return now.year
def get_current_date():
return datetime.now().date()
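# Illustrative examples (assumed inputs, derived from the lambdas above):
#     do_calculation('add', 2, 3)    -> 5
#     do_calculation('divide', 7, 0) -> 0    (division by zero is mapped to 0)
#     do_calculation('sqrt', 9, 0)   -> 3.0  (y is ignored for sqrt)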
| 28.131148 | 73 | 0.501166 | 185 | 1,716 | 4.545946 | 0.318919 | 0.028537 | 0.057075 | 0.053508 | 0.26635 | 0.047562 | 0.047562 | 0.047562 | 0.047562 | 0.047562 | 0 | 0.001781 | 0.345571 | 1,716 | 60 | 74 | 28.6 | 0.747106 | 0.132284 | 0 | 0.054054 | 0 | 0 | 0.164575 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135135 | false | 0 | 0.054054 | 0.027027 | 0.324324 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2f06bad44169797de0c1276f26ece53ea110fad2 | 6,009 | py | Python | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/commerce/api/v1/models.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | ["MIT"] | 3 | 2021-12-15T04:58:18.000Z | 2022-02-06T12:15:37.000Z | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/commerce/api/v1/models.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | ["MIT"] | null | null | null | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/commerce/api/v1/models.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | ["MIT"] | 1 | 2019-01-02T14:38:50.000Z | 2019-01-02T14:38:50.000Z |
""" API v1 models. """
import logging
from itertools import groupby
from django.db import transaction
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from common.djangoapps.course_modes.models import CourseMode
from lms.djangoapps.verify_student.models import VerificationDeadline
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
log = logging.getLogger(__name__)
UNDEFINED = object()
class Course:
""" Pseudo-course model used to group CourseMode objects. """
id = None # pylint: disable=invalid-name
modes = None
_deleted_modes = None
def __init__(self, id, modes, **kwargs): # pylint: disable=redefined-builtin
self.id = CourseKey.from_string(str(id)) # pylint: disable=invalid-name
self.modes = list(modes)
self.verification_deadline = UNDEFINED
if 'verification_deadline' in kwargs:
self.verification_deadline = kwargs['verification_deadline']
self._deleted_modes = []
@property
def name(self):
""" Return course name. """
course_id = CourseKey.from_string(str(self.id))
try:
return CourseOverview.get_from_id(course_id).display_name
except CourseOverview.DoesNotExist:
# NOTE (CCB): Ideally, the course modes table should only contain data for courses that exist in
# modulestore. If that is not the case, say for local development/testing, carry on without failure.
log.warning('Failed to retrieve CourseOverview for [%s]. Using empty course name.', course_id)
return None
def get_mode_display_name(self, mode):
""" Returns display name for the given mode. """
slug = mode.mode_slug.strip().lower()
if slug == 'credit':
return 'Credit'
if 'professional' in slug:
return 'Professional Education'
elif slug == 'verified':
return 'Verified Certificate'
elif slug == 'honor':
return 'Honor Certificate'
elif slug == 'audit':
return 'Audit'
return mode.mode_slug
@transaction.atomic
def save(self, *args, **kwargs): # pylint: disable=unused-argument
""" Save the CourseMode objects to the database. """
if self.verification_deadline is not UNDEFINED:
# Override the verification deadline for the course (not the individual modes)
# This will delete verification deadlines for the course if self.verification_deadline is null
VerificationDeadline.set_deadline(self.id, self.verification_deadline, is_explicit=True)
for mode in self.modes:
mode.course_id = self.id
mode.mode_display_name = self.get_mode_display_name(mode)
mode.save()
deleted_mode_ids = [mode.id for mode in self._deleted_modes]
CourseMode.objects.filter(id__in=deleted_mode_ids).delete()
self._deleted_modes = []
def update(self, attrs):
""" Update the model with external data (usually passed via API call). """
# There are possible downstream effects of settings self.verification_deadline to null,
# so don't assign it a value here unless it is specifically included in attrs.
if 'verification_deadline' in attrs:
self.verification_deadline = attrs.get('verification_deadline')
existing_modes = {mode.mode_slug: mode for mode in self.modes}
merged_modes = set()
merged_mode_keys = set()
for posted_mode in attrs.get('modes', []):
merged_mode = existing_modes.get(posted_mode.mode_slug, CourseMode())
merged_mode.course_id = self.id
merged_mode.mode_slug = posted_mode.mode_slug
merged_mode.mode_display_name = posted_mode.mode_slug
merged_mode.min_price = posted_mode.min_price
merged_mode.currency = posted_mode.currency
merged_mode.sku = posted_mode.sku
merged_mode.bulk_sku = posted_mode.bulk_sku
merged_mode.expiration_datetime = posted_mode.expiration_datetime
merged_mode.save()
merged_modes.add(merged_mode)
merged_mode_keys.add(merged_mode.mode_slug)
        # Masters degrees are not sold through the eCommerce site.
        # So, Masters course modes are not included in PUT calls to this API,
        # and their omission would normally cause them to be deleted.
        # We don't want that to happen, but for the time being,
        # we cannot include Masters modes in the PUT calls from eCommerce.
        # So, here's a hack to handle Masters course modes, along with any other
        # modes that end up in that boat.
MODES_TO_NOT_DELETE = {
CourseMode.MASTERS,
}
modes_to_delete = set(existing_modes.keys()) - merged_mode_keys
modes_to_delete -= MODES_TO_NOT_DELETE
self._deleted_modes = [existing_modes[mode] for mode in modes_to_delete]
self.modes = list(merged_modes)
@classmethod
def get(cls, course_id):
""" Retrieve a single course. """
try:
course_id = CourseKey.from_string(str(course_id))
except InvalidKeyError:
log.debug('[%s] is not a valid course key.', course_id)
raise ValueError # lint-amnesty, pylint: disable=raise-missing-from
course_modes = CourseMode.objects.filter(course_id=course_id)
if course_modes:
verification_deadline = VerificationDeadline.deadline_for_course(course_id)
return cls(course_id, list(course_modes), verification_deadline=verification_deadline)
return None
@classmethod
def iterator(cls):
""" Generator that yields all courses. """
course_modes = CourseMode.objects.order_by('course_id')
for course_id, modes in groupby(course_modes, lambda o: o.course_id):
yield cls(course_id, list(modes))
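# Hedged usage sketch (the course id below is illustrative):
#     course = Course.get('course-v1:edX+DemoX+Demo_Course')
#     if course:
#         course.update({'modes': [...], 'verification_deadline': None})
#         course.save()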
| 40.328859 | 112 | 0.669496 | 748 | 6,009 | 5.183155 | 0.295455 | 0.035079 | 0.024761 | 0.01625 | 0.069126 | 0.02992 | 0 | 0 | 0 | 0 | 0 | 0.000222 | 0.251955 | 6,009 | 148 | 113 | 40.601351 | 0.862291 | 0.237144 | 0 | 0.083333 | 0 | 0 | 0.067065 | 0.018592 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072917 | false | 0 | 0.083333 | 0 | 0.302083 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2f0914ec0565214e9bbc4b09ca688ebda76940dd | 3,428 | py | Python | training_v1_backup/training/PPO/run_ppo.py | prasoonpatidar/multiagentRL-resource-sharing | e63ba7fc3c7ab019e9fd109cd45b739e3322152f | ["MIT"] | null | null | null | training_v1_backup/training/PPO/run_ppo.py | prasoonpatidar/multiagentRL-resource-sharing | e63ba7fc3c7ab019e9fd109cd45b739e3322152f | ["MIT"] | null | null | null | training_v1_backup/training/PPO/run_ppo.py | prasoonpatidar/multiagentRL-resource-sharing | e63ba7fc3c7ab019e9fd109cd45b739e3322152f | ["MIT"] | null | null | null |
'''
Wrapper function to run PPO algorithm for training
'''
import numpy as np
import matplotlib.pyplot as plt
import time
import math
import logging
from scipy.optimize import minimize, LinearConstraint
# custom libraries
from training.PPO.run_helper import buyerPenaltiesCalculator, buyerUtilitiesCalculator, evaluation
from training.PPO.run_helper import logger_handle, initialize_agent, get_ys, choose_prob, cumlativeBuyerExp, getPurchases
def learn_policy(run_config, seller_info, buyer_info, train_config, logger_pass):
# Initialize the logger
logger = logger_handle(logger_pass)
    # get required parameters for the PPO training run
aux_price_min = 1 / seller_info.max_price
aux_price_max = 1 / seller_info.min_price
logger.info("Fetched raw market information..")
# initialize seller agents
sellers, logger = initialize_agent(seller_info, buyer_info, train_config, logger)
    # Containers to record history (appending to a Python list is amortized O(1))
price_history = []
purchase_history = []
provided_resource_history = []
seller_utility_history = []
seller_penalty_history = []
buyer_utility_history = []
buyer_penalty_history = []
# Start Loop for training
logger.info("Starting training iterations...")
start_time = time.time()
for train_iter in range(0, train_config.iterations):
if train_iter % 1000 == 0:
logger.info("Finished %d training iterations in %.3f secs..." % (train_iter, time.time() - start_time))
# get the prices for all seller agents
ys = get_ys(sellers, train_config, seller_info)
# print(ys, '==', train_iter)
probAll, yAll = choose_prob(ys, compare=False, yAll=None)
# Save prices in history
prices = 1 / ys
price_history.append(prices)
cumulativeBuyerExperience = cumlativeBuyerExp(buyer_info, sellers)
X = getPurchases(buyer_info, cumulativeBuyerExperience, ys, probAll)
# Save purchased history
purchases = X.sum(axis=0)
purchase_history.append(purchases)
# Get Buyer utilities and penalties in history
buyerUtilities = buyerUtilitiesCalculator(X, ys, buyer_info.V, buyer_info.a_val, probAll,
buyer_info.count,
cumulativeBuyerExperience, buyer_info.unfinished_task_penalty)
buyer_utility_history.append(buyerUtilities)
buyerPenalties = buyerPenaltiesCalculator(X, ys, buyer_info.V, buyer_info.a_val, buyer_info.count,
cumulativeBuyerExperience, buyer_info.unfinished_task_penalty)
buyer_penalty_history.append(buyerPenalties)
# loop parameters
lr = 1 / (20 + train_iter)
seller_utilities, seller_penalties, seller_provided_resources = evaluation(sellers, train_config, yAll, X, lr, train=True)
# Get seller utilties and penalties in history
seller_utilities = np.array(seller_utilities)
seller_penalties = np.array(seller_penalties)
seller_utility_history.append(seller_utilities)
seller_penalty_history.append(seller_penalties)
# update provided resources history
seller_provided_resources = np.array(seller_provided_resources)
provided_resource_history.append(seller_provided_resources)
...
| 38.516854 | 130 | 0.698658 | 388 | 3,428 | 5.927835 | 0.322165 | 0.046957 | 0.04 | 0.015652 | 0.144348 | 0.144348 | 0.118261 | 0.086957 | 0.086957 | 0.064348 | 0 | 0.005678 | 0.229288 | 3,428 | 88 | 131 | 38.954545 | 0.864875 | 0.151984 | 0 | 0.04 | 0 | 0 | 0.038141 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02 | false | 0.04 | 0.16 | 0 | 0.18 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2f0957f3db94b5ef71452361a51b110a5a627030 | 14,927 | py | Python | mlprogram/entrypoint/train.py | HiroakiMikami/mlprogram | 573e94c567064705fa65267dd83946bf183197de | ["MIT"] | 9 | 2020-05-24T11:25:01.000Z | 2022-03-28T15:32:10.000Z | mlprogram/entrypoint/train.py | HiroakiMikami/mlprogram | 573e94c567064705fa65267dd83946bf183197de | ["MIT"] | 87 | 2020-05-09T08:56:55.000Z | 2022-03-31T14:46:45.000Z | mlprogram/entrypoint/train.py | HiroakiMikami/NL2Prog | 573e94c567064705fa65267dd83946bf183197de | ["MIT"] | 3 | 2021-02-22T20:38:29.000Z | 2021-11-11T18:48:44.000Z |
import os
import traceback
from dataclasses import dataclass
from typing import Any, Callable, List, Optional, Union
import pytorch_pfn_extras as ppe
import torch
from pytorch_pfn_extras.training import extension, extensions
from torch import nn
from torch.utils.data import DataLoader
from mlprogram import distributed, logging
from mlprogram.builtins import Environment
from mlprogram.pytorch_pfn_extras import SaveTopKModel, StopByThreshold
from mlprogram.synthesizers import Synthesizer
logger = logging.Logger(__name__)
@dataclass
class Epoch:
n: int
def n_iter(self, iter_per_epoch: int) -> int:
return self.n * iter_per_epoch
@dataclass
class Iteration:
n: int
def n_iter(self, iter_per_epoch: int) -> int:
return self.n
Length = Union[Epoch, Iteration]
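# e.g. with iter_per_epoch == 500: Epoch(2).n_iter(500) -> 1000, Iteration(300).n_iter(500) -> 300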
class Trigger:
def __init__(self, interval: int, n_iter: int):
self.interval = interval
self.n_iter = n_iter
def __call__(self, manager):
return (manager.iteration == self.n_iter) or \
(manager.iteration % self.interval == 0)
class Call(extension.Extension):
def __init__(self, f: Callable[[], None]):
super().__init__()
self.f = f
def __call__(self, manager):
self.f()
def create_extensions_manager(n_iter: int, evaluation_interval_iter: int,
snapshot_interval_iter: int,
iter_per_epoch: int,
model: nn.Module,
optimizer: torch.optim.Optimizer,
evaluate: Optional[Callable[[], None]],
metric: str, maximize: bool,
threshold: Optional[float],
output_dir: str,
report_metrics: Optional[List[str]] = None):
model_dir = os.path.join(output_dir, "model")
logger.info("Prepare pytorch-pfn-extras")
manager = ppe.training.ExtensionsManager(
model, optimizer, n_iter / iter_per_epoch,
out_dir=os.path.join(output_dir),
extensions=[],
iters_per_epoch=iter_per_epoch,
)
manager.extend(
extensions.FailOnNonNumber(),
trigger=Trigger(evaluation_interval_iter, n_iter)
)
if evaluate is not None:
manager.extend(
Call(evaluate),
trigger=Trigger(evaluation_interval_iter, n_iter),
)
if distributed.is_main_process():
manager.extend(
extensions.LogReport(
trigger=Trigger(100, n_iter),
filename="log.json",
)
)
manager.extend(extensions.ProgressBar())
manager.extend(
SaveTopKModel(model_dir, 1, metric, model, maximize=maximize),
trigger=Trigger(evaluation_interval_iter, n_iter),
)
metrics = report_metrics or []
manager.extend(
extensions.PrintReport(entries=[
"loss", *metrics,
"iteration", "epoch",
"time.iteration", "gpu.time.iteration", "elapsed_time"
]),
trigger=Trigger(100, n_iter),
)
if threshold is not None:
manager.extend(
StopByThreshold(metric, threshold, maximize=maximize),
trigger=Trigger(evaluation_interval_iter, n_iter),
)
if distributed.is_initialized():
snapshot = extensions.snapshot(autoload=True, n_retains=1,
saver_rank=0)
snapshot._rank = distributed.rank()
snapshot._size = distributed.size()
snapshot._local_rank = distributed.rank()
else:
snapshot = extensions.snapshot(autoload=True, n_retains=1)
manager.extend(snapshot, trigger=Trigger(snapshot_interval_iter, n_iter))
return manager
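# Map-style datasets (those with __len__) are reshuffled each epoch; iterable
# datasets cannot be shuffled by DataLoader, so shuffling is disabled for them.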
def create_dataloader(dataset: torch.utils.data.Dataset,
batch_size: int, n_worker: int, collate_fn: Callable) \
-> torch.utils.data.DataLoader:
if hasattr(dataset, "__len__"):
is_iterable = False
else:
is_iterable = True
if is_iterable:
return DataLoader(dataset, batch_size=batch_size,
shuffle=False, num_workers=n_worker,
collate_fn=collate_fn)
else:
return DataLoader(dataset, batch_size=batch_size,
shuffle=True, num_workers=n_worker,
collate_fn=collate_fn)
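# NCCL is used for GPU collectives and Gloo for CPU-only training; the named
# groups are assumed to be registered by mlprogram.distributed at init time.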
def get_world_process_group(device: torch.device) \
-> Optional[torch.distributed.group]:
if not distributed.is_initialized():
return None
else:
if device.type == "cuda":
return distributed.groups["world_nccl"]
else:
return distributed.groups["world_gloo"]
def setup_distributed_training(
model: nn.Module,
loss: nn.Module,
group: torch.distributed.group
):
class TrainModule(nn.Module):
def __init__(self, model: nn.Module, loss: nn.Module):
super().__init__()
self.model = model
self.loss = loss
def forward(self, *args, **kwargs):
return self.loss(self.model(*args, **kwargs))
model = TrainModule(model, loss)
if group is None:
return model
else:
return ppe.nn.parallel.distributed.DistributedDataParallel(
module=model,
process_group=group,
)
def save_results(output_dir: str,
model: nn.Module, optimizer: torch.optim.Optimizer) -> None:
if distributed.is_main_process():
logger.info("Dump the last model")
torch.save(model.state_dict(), os.path.join(output_dir, "model.pt"))
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
def train_supervised(output_dir: str,
dataset: torch.utils.data.Dataset,
model: nn.Module,
optimizer: torch.optim.Optimizer,
loss: Callable[[Any], torch.Tensor],
evaluate: Optional[Callable[[], None]],
metric: str,
collate: Callable[[List[Any]], Any],
batch_size: int,
length: Length,
evaluation_interval: Optional[Length] = None,
snapshot_interval: Optional[Length] = None,
maximize: bool = True,
threshold: Optional[float] = None,
n_dataloader_worker: int = 1,
device: torch.device = torch.device("cpu")) \
-> None:
logger.info("Prepare model")
model.to(device)
model.train()
group = get_world_process_group(device)
global_batch_size = batch_size * distributed.size(group)
if hasattr(dataset, "__len__"):
iter_per_epoch = len(dataset) // global_batch_size
else:
iter_per_epoch = 1
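    # Iterable datasets have no length, so one "epoch" degenerates to a single
    # iteration and Epoch-based lengths effectively count iterations.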
evaluation_interval = evaluation_interval or Epoch(1)
snapshot_interval = snapshot_interval or Epoch(1)
n_iter = length.n_iter(iter_per_epoch)
evaluation_interval_iter = evaluation_interval.n_iter(iter_per_epoch)
snapshot_interval_iter = snapshot_interval.n_iter(iter_per_epoch)
# Initialize extensions manager
manager = \
create_extensions_manager(
n_iter, evaluation_interval_iter, snapshot_interval_iter,
iter_per_epoch,
model, optimizer,
evaluate, metric, maximize, threshold, output_dir)
train_model = setup_distributed_training(model, loss, group)
logger.info("Start training")
try:
while manager.iteration < n_iter:
loader = create_dataloader(dataset, batch_size, n_dataloader_worker,
collate)
for batch in logger.iterable_block("iteration", loader, True):
if manager.iteration >= n_iter:
break
if len(batch.to_dict()) == 0:
logger.warning(f"Skip {manager.iteration} th batch")
continue
with manager.run_iteration():
train_model.train()
with logger.block("to"):
batch.to(device=device)
with logger.block("forward"):
bloss = train_model(batch)
with logger.block("backward"):
optimizer.zero_grad(set_to_none=True)
bloss.backward()
with logger.block("optimizer.step"):
optimizer.step()
ppe.reporting.report({"loss": bloss.item()})
logger.dump_elapsed_time_log()
if device.type == "cuda":
ppe.reporting.report({
"gpu.max_memory_allocated":
torch.cuda.max_memory_allocated(device)
})
    except RuntimeError:
logger.critical(traceback.format_exc())
save_results(output_dir, model, optimizer)
def train_REINFORCE(input_dir: str, output_dir: str,
dataset: torch.utils.data.Dataset,
synthesizer: Synthesizer,
model: nn.Module,
optimizer: torch.optim.Optimizer,
loss: Callable[[Any], torch.Tensor],
evaluate: Optional[Callable[[], None]],
metric: str,
reward: Callable[[Environment, Any], float],
collate: Callable[[List[Any]], Any],
batch_size: int,
n_rollout: int,
length: Length,
evaluation_interval: Optional[Length] = None,
snapshot_interval: Optional[Length] = None,
maximize: bool = True,
threshold: Optional[float] = None,
use_pretrained_model: bool = False,
use_pretrained_optimizer: bool = False,
n_dataloader_worker: int = 2,
device: torch.device = torch.device("cpu")) \
-> None:
logger.info("Prepare model")
model.to(device)
model.train()
group = get_world_process_group(device)
if hasattr(dataset, "__len__"):
iter_per_epoch = len(dataset) // batch_size
else:
iter_per_epoch = 1
evaluation_interval = evaluation_interval or Epoch(1)
snapshot_interval = snapshot_interval or Epoch(1)
n_iter = length.n_iter(iter_per_epoch)
evaluation_interval_iter = evaluation_interval.n_iter(iter_per_epoch)
snapshot_interval_iter = snapshot_interval.n_iter(iter_per_epoch)
if use_pretrained_model:
logger.info("Load pretrained model")
pretrained_model = os.path.join(input_dir, "model.pt")
state_dict = torch.load(pretrained_model,
map_location=torch.device("cpu"))
model.load_state_dict(state_dict)
if use_pretrained_optimizer:
logger.info("Load pretrained optimizer")
pretrained_optimizer = os.path.join(input_dir, "optimizer.pt")
state_dict = torch.load(pretrained_optimizer,
map_location=torch.device("cpu"))
optimizer.load_state_dict(state_dict)
# Initialize extensions manager
manager = \
create_extensions_manager(
n_iter, evaluation_interval_iter, snapshot_interval_iter,
iter_per_epoch,
model, optimizer,
evaluate, metric, maximize, threshold, output_dir,
report_metrics=["reward"])
train_model = setup_distributed_training(model, loss, group)
logger.info("Start training")
try:
while manager.iteration < n_iter:
loader = create_dataloader(dataset, batch_size, n_dataloader_worker,
lambda x: x)
for samples in logger.iterable_block("iteration", loader, True):
if manager.iteration >= n_iter:
break
# Rollout
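                # Sample up to n_rollout outputs from the synthesizer without
                # gradients; each finished rollout is scored with `reward` and
                # turned into a supervised example for the REINFORCE update.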
rollouts = []
train_model.train()
with torch.no_grad():
for sample in logger.iterable_block("rollout", samples):
sample_inputs = sample.clone_without_supervision()
sample_inputs.to(device)
for rollout in logger.iterable_block(
"sample",
synthesizer(sample_inputs,
n_required_output=n_rollout)):
if not rollout.is_finished:
continue
for _ in range(rollout.num):
output = sample.clone()
output["ground_truth"] = rollout.output
output.mark_as_supervision("ground_truth")
output["reward"] = \
torch.tensor(reward(sample.clone(), rollout.output))
rollouts.append(output)
if len(rollouts) == 0:
logger.warning("No rollout")
continue
if len(rollouts) != n_rollout:
logger.warning(
"#rollout is unexpected: "
f"expected={n_rollout} actual={len(rollouts)}")
with manager.run_iteration():
model.train()
with logger.block("collate"):
batch2 = collate(rollouts)
with logger.block("to"):
batch2.to(device)
with logger.block("forward"):
train_model.train()
bloss = train_model(batch2)
with logger.block("backward"):
optimizer.zero_grad(set_to_none=True)
bloss.backward()
with logger.block("optimizer.step"):
optimizer.step()
ppe.reporting.report({"loss": bloss.item()})
ppe.reporting.report({
"reward": batch2["reward"].float().mean().item()
})
logger.dump_elapsed_time_log()
if device.type == "cuda":
ppe.reporting.report({
"gpu.max_memory_allocated":
torch.cuda.max_memory_allocated(device)
})
    except RuntimeError:
logger.critical(traceback.format_exc())
save_results(output_dir, model, optimizer)
| 37.599496
| 88
| 0.554231
| 1,469
| 14,927
| 5.397549
| 0.1484
| 0.018918
| 0.027242
| 0.018161
| 0.53752
| 0.479506
| 0.454156
| 0.436751
| 0.379493
| 0.359945
| 0
| 0.002602
| 0.356401
| 14,927
| 396
| 89
| 37.694444
| 0.822733
| 0.005158
| 0
| 0.486647
| 0
| 0
| 0.043048
| 0.004716
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04451
| false
| 0
| 0.038576
| 0.011869
| 0.139466
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f09b816cae5d16accf1cca62376da23fd995e52
| 3,381
|
py
|
Python
|
visualization.py
|
aditya-srikanth/Data-Mining-Assignment-3
|
7dc44d7ca8884680130db9b52a75e3036cf2f8a7
|
[
"MIT"
] | null | null | null |
visualization.py
|
aditya-srikanth/Data-Mining-Assignment-3
|
7dc44d7ca8884680130db9b52a75e3036cf2f8a7
|
[
"MIT"
] | null | null | null |
visualization.py
|
aditya-srikanth/Data-Mining-Assignment-3
|
7dc44d7ca8884680130db9b52a75e3036cf2f8a7
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import math
import numpy as np
class Visualization:
"""
    This class contains methods for reducing the dimensions of the points to 2-D
    and for visualizing the reduced points.
Attributes
----------
OUTLIERS : list
List of points marked as outliers.
NON_OUTLIERS : list
List of points that are not marked as outliers.
"""
def __init__(self):
self.OUTLIERS = []
self.NON_OUTLIERS = []
self.K = 1
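        # K is only used for the plot's x-axis label; callers are expected to
        # overwrite it (e.g. with the k of a k-NN based outlier detector).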
def dimension_reduction(self, point):
"""
This method is used for reducing the dimensions of the given point to 2-D.
Parameters
----------
point : list
A list of coordinates representing an n-dimensional vector.
Returns
-------
type list
A list representing a 2-D point in the x-y plane.
"""
temp_point = []
reduced_point = [0,0]
index = 1
for element in point:
if not math.isnan(element % index):
# Using modulo operation to spread values of coordinates.
temp_point.append(element % index)
index = index + 1
for element in temp_point:
# The modulo results are distributed among the two coordinates according to
            # their divisibility by 2.
if element % 2 == 0:
reduced_point[1] = reduced_point[1] + element
else:
reduced_point[0] = reduced_point[0] + element
reduced_point[0] = round(reduced_point[0], 2)
reduced_point[1] = round(reduced_point[1], 2)
return reduced_point
def outlier_plot(self,save_path=None):
"""
        This method takes the points marked as outliers and non-outliers and plots them as
a scatter plot.
Returns
-------
None
The result of this method is a matplotlib scatter plot.
"""
for element in self.OUTLIERS:
            plt.scatter(element[0], element[1], facecolors='none', edgecolors='r', marker='o')
for element in self.NON_OUTLIERS:
            plt.scatter(element[0], element[1], facecolors='none', edgecolors='b', marker='o')
plt.xlabel("K = " + str(self.K))
        if save_path is not None:
plt.savefig(save_path+'.png')
else:
plt.show()
def outlier_plot_numpy(self,save_path=None):
"""
        This method takes the points marked as outliers and non-outliers and plots them as
a scatter plot.
Returns
-------
None
The result of this method is a matplotlib scatter plot.
"""
if len(self.OUTLIERS) > 0:
self.OUTLIERS = np.array(self.OUTLIERS)
            plt.scatter(self.OUTLIERS[:, 0], self.OUTLIERS[:, 1], facecolors='none', edgecolors='r', marker='o')
if len(self.NON_OUTLIERS) > 0:
self.NON_OUTLIERS = np.array(self.NON_OUTLIERS)
            plt.scatter(self.NON_OUTLIERS[:, 0], self.NON_OUTLIERS[:, 1], facecolors='none', edgecolors='b', marker='o')
# plt.xlabel("K = " + str(self.K))
        if save_path is not None:
plt.savefig(save_path+'.png')
else:
plt.show()
| 34.85567
| 121
| 0.55102
| 405
| 3,381
| 4.51358
| 0.259259
| 0.060175
| 0.05744
| 0.036105
| 0.501094
| 0.410284
| 0.342451
| 0.272429
| 0.272429
| 0.272429
| 0
| 0.014072
| 0.348418
| 3,381
| 97
| 122
| 34.85567
| 0.815706
| 0.317658
| 0
| 0.2
| 0
| 0
| 0.018443
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088889
| false
| 0
| 0.066667
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f0df6e28987fcaa913b236b22575fcae954bfe4
| 3,639
|
py
|
Python
|
robotidy/transformers/ext_ExtraIndentForKeywordArguments.py
|
josflorap/robotframework-tidy
|
9d4e1ccc6a50c415187468305235830f80f3373b
|
[
"Apache-2.0"
] | null | null | null |
robotidy/transformers/ext_ExtraIndentForKeywordArguments.py
|
josflorap/robotframework-tidy
|
9d4e1ccc6a50c415187468305235830f80f3373b
|
[
"Apache-2.0"
] | null | null | null |
robotidy/transformers/ext_ExtraIndentForKeywordArguments.py
|
josflorap/robotframework-tidy
|
9d4e1ccc6a50c415187468305235830f80f3373b
|
[
"Apache-2.0"
] | null | null | null |
from robot.api.parsing import ModelTransformer, get_model, ModelVisitor, Token
import os
import sys
keywordlist = []
other_keywords = []
used_keywords = []
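# keywordlist: keyword names found in resource files and the Keywords section;
# other_keywords: def names harvested from installed Python libraries;
# used_keywords: roughly [token, lineno, is_keyword_token, extra_indent_level].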
class ext_ExtraIndentForKeywordArguments(ModelTransformer):
def __init__(self):
self.cont = 0
def visit_File(self, node):
# Get keywords in python libraries
        goodpath = ''
        for path in sys.path:
            if 'site-packages' in path:
                goodpath = path
for path, subdirs, files in os.walk(goodpath.replace('\\', '\\\\')):
for name in files:
if '.py' in name and '.pyc' not in name and '_init_' not in name and ('robot' in path or 'wslw' in path or 'gurux' in path):
# print(os.path.join(path, name))
with open(os.path.join(path, name), 'r', errors='ignore') as f:
for line in f.readlines():
if 'def' == line.lstrip()[0:3] and '__init__' not in line:
# print(line.split('def')[1].split('(')[0].lstrip().rstrip())
other_keywords.append(line.split('def')[1].split('(')[0].lstrip().rstrip().lower().replace('_', ' '))
# Get keywords in resource files
for path, subdirs, files in os.walk(os.getcwd().replace('in_dev', 'keywords').replace('\\', '\\\\')):
for name in files:
if('.robot' in name):
# print(os.path.join(path, name))
model = get_model(os.path.join(path, name))
printer = TestNamePrinter()
printer.visit(model)
# Get keywords in the Keywords section
model = get_model(node.source)
printer = TestNamePrinter()
printer.visit(model)
# Get keywords used in the test
model = get_model(node.source)
printer = KeywordsNamePrinter()
printer.visit(model)
self.generic_visit(node)
def visit_KeywordCall(self, node):
keywords_name = [sec[0].value for sec in used_keywords]
for token in node.data_tokens:
for i, sec in enumerate(used_keywords[:-1]):
if token.lineno >= sec[1] and token.lineno < used_keywords[i + 1][1]:
                    # print(repr(token) + ' goes with section: ' + sec[0].value + ' and indent_level: ' + str(sec[3]))
if token.type == Token.ARGUMENT and token.value in keywords_name:
token.value = ' ' * 4*(sec[3] - 1) + token.value
elif token.type == Token.ARGUMENT and token.value not in keywords_name:
token.value = ' ' * 4*(sec[3]) + token.value
return node
class TestNamePrinter(ModelVisitor):
def visit_KeywordName(self, node):
# print(node.name)
keywordlist.append(node.name.lower())
class KeywordsNamePrinter(ModelVisitor):
def visit_KeywordCall(self, node):
for token in node.data_tokens:
if((token.value.lower() in keywordlist or token.value.lower() in other_keywords) and token.type == Token.KEYWORD):
used_keywords.append([token, token.lineno, True, 0])
                # print(repr(token) + ' IS A RECOGNIZED KEYWORD')
elif((token.value.lower() in keywordlist or token.value.lower() in other_keywords) and token.type == Token.ARGUMENT):
extra_indent_level = used_keywords[-1][3] + 1
used_keywords.append([token, token.lineno, False, extra_indent_level])
                # print(repr(token) + ' IS AN UNRECOGNIZED KEYWORD' + ' extra_indent_level: ' + str(used_keywords[-1][3]))
| 50.541667
| 140
| 0.569387
| 437
| 3,639
| 4.636156
| 0.242563
| 0.049358
| 0.019743
| 0.027641
| 0.441757
| 0.37463
| 0.245805
| 0.135242
| 0.076012
| 0.076012
| 0
| 0.009858
| 0.303105
| 3,639
| 71
| 141
| 51.253521
| 0.789038
| 0.141797
| 0
| 0.245283
| 0
| 0
| 0.031501
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09434
| false
| 0
| 0.037736
| 0
| 0.207547
| 0.113208
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f0e2ccc0b7fb78f69f72c37d56b7289930132ef
| 6,581
|
py
|
Python
|
Common/Strategies/TechIndicators/MacdStrategy.py
|
enriqueescobar-askida/Kinito.Finance
|
5308748b64829ac798a858161f9b4a9e5829db44
|
[
"MIT"
] | 2
|
2020-03-04T11:18:38.000Z
|
2020-05-10T15:36:42.000Z
|
Common/Strategies/TechIndicators/MacdStrategy.py
|
enriqueescobar-askida/Kinito.Finance
|
5308748b64829ac798a858161f9b4a9e5829db44
|
[
"MIT"
] | 6
|
2020-03-30T16:42:47.000Z
|
2021-12-13T20:37:21.000Z
|
Common/Strategies/TechIndicators/MacdStrategy.py
|
enriqueescobar-askida/Kinito.Finance
|
5308748b64829ac798a858161f9b4a9e5829db44
|
[
"MIT"
] | 1
|
2020-04-14T11:26:16.000Z
|
2020-04-14T11:26:16.000Z
|
from typing import Tuple
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from Common.Strategies.TechIndicators.AbstractTechStrategy import AbstractTechStrategy
from Common.TechIndicators.MacdIndicator import MacdIndicator
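# Plots buy/sell markers for a MACD crossover strategy: a buy signal is emitted
# when the lower-indexed indicator column crosses above the upper one (apparently
# the MACD and signal lines), and a sell signal on the opposite crossing.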
class MacdStrategy(AbstractTechStrategy):
_macd_indicator: MacdIndicator
_summary: pd.DataFrame
def __init__(self, macd_indicator: MacdIndicator):
self._macd_indicator = macd_indicator
a_df: pd.DataFrame = self._macd_indicator.GetData()
self._col = self._macd_indicator.Column
self._lower_label = a_df.columns[self._macd_indicator.LowMedHighTuple[0]]
#
self._upper_label = a_df.columns[self._macd_indicator.LowMedHighTuple[1]]
self._data = a_df[self._macd_indicator.Column].to_frame()
self._data[self._lower_label] = a_df[self._lower_label]
#
self._data[self._upper_label] = a_df[self._upper_label]
self._buy_label = self._macd_indicator.Label + self._buy_label
self._sell_label = self._macd_indicator.Label + self._sell_label
buyNsellTuple = self._buyNsell()
self._data[self._buy_label] = buyNsellTuple[0]
self._data[self._sell_label] = buyNsellTuple[1]
print('DATA', self._data.columns)
self._setSummary()
@property
def Summary(self):
return self._summary
def PlotAx(self, ax: object) -> object:
for a_ind, col in enumerate(self._data.columns[0:1]):
an_alpha: float = 1.0 if a_ind == 0 else 0.3
self._data[col].plot(alpha=an_alpha, ax=ax)
ax.scatter(self._macd_indicator.GetData().index, self._data[self._buy_label], label=self._buy_label, marker='^',
color='green')
ax.scatter(self._macd_indicator.GetData().index, self._data[self._sell_label], label=self._sell_label,
marker='v', color='red')
return ax
def Plot(self) -> plt:
plt.figure(figsize=self._macd_indicator.FigSizeTuple)
plt.style.use(self._macd_indicator.FigStyle)
for a_ind, col in enumerate(self._data.columns[0:1]):
an_alpha: float = 1.0 if a_ind == 0 else 0.3
self._data[col].plot(alpha=an_alpha)
print('i', an_alpha)
plt.scatter(self._macd_indicator.GetData().index, self._data[self._buy_label], label=self._buy_label,
marker='^', color='green')
plt.scatter(self._macd_indicator.GetData().index, self._data[self._sell_label], label=self._sell_label,
marker='v', color='red')
plt.title(self._macd_indicator.LabelMain)
plt.xlabel(self._macd_indicator.LabelX)
plt.xticks(rotation=self._macd_indicator.LabelXangle)
plt.ylabel(self._macd_indicator.LabelY)
plt.legend(loc=self._macd_indicator.LegendPlace)
plt.tight_layout()
return plt
def PlotAll(self) -> plt:
n_col: int = 1
n_row: int = 3
a_title: str = self._macd_indicator.LabelMain
x_title: str = self._macd_indicator.LabelX
y_title: str = self._macd_indicator.LabelY
f_size: Tuple[float, float] = (self._macd_indicator.FigSizeTuple[0], self._macd_indicator.FigSizeTuple[0])
fig, ax = plt.subplots(n_row, n_col, figsize=f_size, sharex=True)
plt.style.use(self._macd_indicator.FigStyle)
# ax0 strategy
for a_ind, col in enumerate(self._data.columns[0:1]):
an_alpha: float = 1.0 if a_ind == 0 else 0.3
ax[0].plot(self._data[col], alpha=an_alpha, label=col)
ax[0].scatter(self._macd_indicator.GetData().index, self._data[self._buy_label], marker='^', color='green',
label=self._buy_label)
ax[0].scatter(self._macd_indicator.GetData().index, self._data[self._sell_label], marker='v', color='red',
label=self._sell_label)
ax[0].set(ylabel=y_title, title=a_title)
ax[0].legend(loc=self._macd_indicator.LegendPlace)
# ax1 index
for a_ind, col in enumerate(self._macd_indicator.GetData().columns[-2:self._macd_indicator.GetData().columns.size]):
an_alpha: float = 0.5 if a_ind != 0 else 1.0
ax[1].plot(self._macd_indicator.GetData()[col], alpha=an_alpha, label=col)
#ax[1].xaxis.set_tick_params(rotation=self._macd_indicator.LabelXangle)
ax[1].set(ylabel='Index')
ax[1].legend(loc=self._macd_indicator.LegendPlace)
# ax2
ax[2].plot(self._summary, alpha=an_alpha)
ax[2].legend(loc=self._macd_indicator.LegendPlace)
ax[2].xaxis.set_tick_params(rotation=self._macd_indicator.LabelXangle)
ax[2].set(ylabel='Buy & Sell', xlabel=x_title)
plt.tight_layout()
return plt
def _buyNsell(self):
buySignal = []
sellSignal = []
flag = -1
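        # flag tracks the current regime: 1 after a buy signal, 0 after a sell
        # signal, -1 before any signal; a marker is emitted only on a crossover.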
for i in range(len(self._data)):
if self._data[self._lower_label][i] > self._data[self._upper_label][i]:
sellSignal.append(np.nan)
if flag != 1:
buySignal.append(self._data[self._col][i])
flag = 1
else:
buySignal.append(np.nan)
elif self._data[self._lower_label][i] < self._data[self._upper_label][i]:
buySignal.append(np.nan)
if flag != 0:
sellSignal.append(self._data[self._col][i])
flag = 0
else:
sellSignal.append(np.nan)
else:
buySignal.append(np.nan)
sellSignal.append(np.nan)
return buySignal, sellSignal
def _setSummary(self):
self._summary = pd.DataFrame(index=self._data.index)
self._summary['Buy'] = self._data[self._buy_label].replace(np.nan, 0)
        self._summary.loc[self._summary['Buy'] > 0, 'Buy'] = 1
self._summary['Sell'] = self._data[self._sell_label].replace(np.nan, 0)
        self._summary.loc[self._summary['Sell'] > 0, 'Sell'] = 1
self._summary['BuyAndSell'] = 0
last_float: float = 0.0
        for ind in self._summary.index:
            if self._summary['Buy'][ind] > self._summary['Sell'][ind]:
                self._summary.loc[ind, 'BuyAndSell'] = 1.0
                last_float = 1.0
            elif self._summary['Buy'][ind] < self._summary['Sell'][ind]:
                self._summary.loc[ind, 'BuyAndSell'] = -1.0
                last_float = -1.0
            else:  # row['Buy'] == row['Sell']
                self._summary.loc[ind, 'BuyAndSell'] = last_float
| 46.34507
| 124
| 0.621942
| 856
| 6,581
| 4.502336
| 0.146028
| 0.128179
| 0.158796
| 0.062273
| 0.573949
| 0.447846
| 0.375714
| 0.305656
| 0.281266
| 0.281266
| 0
| 0.01481
| 0.251026
| 6,581
| 141
| 125
| 46.673759
| 0.767093
| 0.018538
| 0
| 0.193548
| 0
| 0
| 0.019377
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056452
| false
| 0
| 0.048387
| 0.008065
| 0.169355
| 0.016129
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f1305b235214a028b433be662b9539aa5ea50e7
| 7,572
|
py
|
Python
|
dayu_widgets/wizard.py
|
xiaonuoAndy/dayu_widgets
|
0a87e40b5b3b10e9f1f3f98c17a252c107118257
|
[
"MIT"
] | null | null | null |
dayu_widgets/wizard.py
|
xiaonuoAndy/dayu_widgets
|
0a87e40b5b3b10e9f1f3f98c17a252c107118257
|
[
"MIT"
] | null | null | null |
dayu_widgets/wizard.py
|
xiaonuoAndy/dayu_widgets
|
0a87e40b5b3b10e9f1f3f98c17a252c107118257
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: Mu yanru
# Date : 2018.5
# Email : muyanru345@163.com
###################################################################
from collections import defaultdict
import utils
from qt import *
from separator import DayuHSeparator
from field_mixin import MFieldMixin
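# MWizardPage holds per-page fields, MStepLabel renders one step in the header,
# and MWizard (below) stitches pages together with a QStackedLayout.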
class MWizardPage(QWidget, MFieldMixin):
sig_complete_changed = Signal()
def __init__(self, subtitle=None, parent=None):
super(MWizardPage, self).__init__(parent)
self.field_dict = defaultdict(None)
self.wizard = parent
self.initialized = False
self.subtitle = subtitle
def init_page(self):
pass
def _is_complete(self):
for name, f_obj in self.field_dict.items():
if f_obj.required:
if not self.field(name):
return False
return True
def callback(self, *args, **kwargs):
pass
class MStepLabel(QLabel, MFieldMixin):
def __init__(self, parent=None):
super(MStepLabel, self).__init__(parent)
self.setProperty('status', 'waiting')
self.register_field('my_index', -1)
self.register_field('parent_index', -1)
self.register_field('title', '')
self.register_field('title_text', self.computed_title_text)
self.register_field('current_status', self.computed_status)
self.register_field('enable', self.computed_enable)
self.setObjectName('wizard-step')
self.setAlignment(Qt.AlignCenter)
self.bind('title_text', self, 'text')
self.bind('enable', self, 'enabled')
self.bind('current_status', self, 'status', callback=self.polish_qss)
def polish_qss(self):
self.style().polish(self)
def computed_title_text(self):
return '<span style="font-size:13pt;font-weight:bold;">Step {}</span><br/>{}'.format(
self.field('my_index') + 1,
self.field('title'))
def computed_enable(self):
return self.field('current_status') == 'waiting'
def computed_status(self):
if self.field('parent_index') == self.field('my_index'):
return 'current'
elif self.field('parent_index') < self.field('my_index'):
return 'waiting'
else:
return 'passed'
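# The wizard drives everything through data binding: current_index, subtitle
# and button states are registered fields, and widget properties are bound to
# them instead of being updated imperatively.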
class MWizard(QDialog, MFieldMixin):
@utils.dayu_css()
def __init__(self, parent=None):
super(MWizard, self).__init__(parent)
self.field_dict = defaultdict(None)
title_label = QLabel()
title_label.setObjectName('wizard-title')
title_label.setAlignment(Qt.AlignCenter)
step_frame = QFrame()
step_frame.setObjectName('wizard-frame')
self.step_lay = QHBoxLayout()
self.step_lay.setContentsMargins(0, 0, 0, 0)
self.step_lay.setSpacing(0)
step_frame.setLayout(self.step_lay)
subtitle_label = QLabel()
subtitle_label.setObjectName('wizard-subtitle')
self.stacked_lay = QStackedLayout()
self.next_button = QPushButton('Next')
self.previous_button = QPushButton('Previous')
self.previous_button.clicked.connect(self.slot_back)
self.next_button.clicked.connect(self.slot_next)
button_lay = QHBoxLayout()
button_lay.addStretch()
button_lay.addWidget(self.previous_button)
button_lay.addWidget(self.next_button)
main_lay = QVBoxLayout()
main_lay.addWidget(title_label)
main_lay.addWidget(step_frame)
main_lay.addSpacing(20)
main_lay.addWidget(subtitle_label)
main_lay.addWidget(DayuHSeparator())
main_lay.addLayout(self.stacked_lay)
main_lay.addWidget(DayuHSeparator())
main_lay.addLayout(button_lay)
self.setLayout(main_lay)
self.register_field('current_index', 1)
self.register_field('current_subtitle', '')
self.register_field('window_title', '')
self.register_field('next_button_text', self.computed_next_button_text)
self.register_field('previous_visible', self.computed_previous_visible)
self.register_field('next_button_enable', self.computed_next_button_enable)
self.bind('window_title', title_label, 'text')
self.bind('current_index', self.stacked_lay, 'currentIndex')
self.bind('window_title', self, 'windowTitle')
self.bind('current_subtitle', subtitle_label, 'text')
self.bind('next_button_text', self.next_button, 'text')
self.bind('previous_visible', self.previous_button, 'visible')
self.bind('next_button_enable', self.next_button, 'enabled')
def computed_next_button_text(self):
return 'Finish' if self.field('current_index') >= (self.stacked_lay.count() - 1) else 'Next'
def computed_previous_visible(self):
return self.field('current_index') != 0
def computed_next_button_enable(self):
current_widget = self.stacked_lay.currentWidget()
if current_widget:
return current_widget._is_complete()
else:
return False
def add_page(self, page):
index = self.stacked_lay.addWidget(page)
page.wizard = self
# page.sig_complete_changed.connect(self._update_button_states)
# for f in page.field_dict.values():
# self.combine_field(f)
label = MStepLabel()
label.set_field('my_index', index)
label.set_field('title', page.subtitle)
self.bind('current_index', label, 'parent_index')
self.step_lay.addWidget(label)
return index
def combine_field(self, field):
if field.name in self.fields():
raise Exception('Field name {} already exists'.format(field.name))
self.field_dict.update({field.name: field})
if field.required and field.signal:
field.signal.connect(field.page.sig_complete_changed)
def set_title(self, text):
self.set_field('window_title', text)
@Slot()
def slot_back(self):
self.go_to(self.field('current_index') - 1)
@Slot()
def slot_next(self):
        if self.field('next_button_text') == 'Finish':
            self.accept()
            return
        self.go_to(self.field('current_index') + 1)
def go_to(self, index):
self.set_field('current_index', index)
page = self.stacked_lay.currentWidget()
self.set_field('current_subtitle', page.subtitle)
if not page.initialized:
try:
page.init_page()
except Exception:
import traceback
error_detail = traceback.format_exc()
self.set_field('current_subtitle', error_detail)
self.next_button.setEnabled(False)
self.previous_button.setEnabled(False)
page.initialized = True
return
page.initialized = True
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
test = MWizard()
test.register_field('formats', [])
test.register_field('type_group', 'element')
test.register_field('current_step', 'prep')
test.set_title('Publish Element')
page0 = MWizardPage('Select Publish Type')
page1 = MWizardPage('Write Comment')
page2 = MWizardPage('Upload Thumbnail')
page3 = MWizardPage('Quality Check')
test.add_page(page0)
test.add_page(page3)
test.add_page(page1)
test.add_page(page2)
test.go_to(0)
test.show()
sys.exit(app.exec_())
| 34.108108
| 100
| 0.633386
| 876
| 7,572
| 5.221461
| 0.207763
| 0.035418
| 0.0446
| 0.019676
| 0.183865
| 0.081329
| 0.069961
| 0.049847
| 0.018365
| 0
| 0
| 0.006554
| 0.234284
| 7,572
| 221
| 101
| 34.262443
| 0.782339
| 0.029583
| 0
| 0.093567
| 0
| 0.005848
| 0.1267
| 0.006245
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0.017544
| 0.040936
| 0.023392
| 0.251462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f14ec3187ef5944e2d523b10e6eabf13148caae
| 897
|
py
|
Python
|
examples/TechChangeModel.py
|
timkittel/PyViability
|
63b628df47ab506e9317a908a63a49a556232137
|
[
"BSD-2-Clause"
] | null | null | null |
examples/TechChangeModel.py
|
timkittel/PyViability
|
63b628df47ab506e9317a908a63a49a556232137
|
[
"BSD-2-Clause"
] | null | null | null |
examples/TechChangeModel.py
|
timkittel/PyViability
|
63b628df47ab506e9317a908a63a49a556232137
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import division, print_function, generators
import numpy as np
pi = np.pi
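# Right-hand side of a technological-change ODE; reading the variable names,
# uB appears to be the adoption share of the new ('B') technology and pB its
# price, with rvar/pBmin/pE/delta/smax/sBmax as model parameters.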
def techChange_rhs(uB_pB, t, rvar, pBmin, pE, delta, smax, sBmax):
uB, pB = uB_pB
if sBmax == 0.:
p = pE
else:
if smax < sBmax * uB:
p = pE + smax / uB
else:
p = sBmax + pE
duB = rvar * uB * (1 - uB) * (p - pB)
dpB = -(pB - pBmin) * ((pB - pBmin) * uB - delta)
return np.array([duB, dpB])
def techChange_sunny(p):
"""sunny constraint for techChangeModel"""
return p[:, 0] > 0.325
def techChange_rhsPS(uB_pB, t, rvar, pBmin, pE, delta, smax, sBmax):
uB, pB = uB_pB
p = np.zeros_like(pB)
p[:] = sBmax + pE
mask = (smax < sBmax * uB)
p[mask] = (pE + smax / uB[mask])
duB = rvar * uB * (1 - uB) * (p - pB)
dpB = -(pB - pBmin) * ((pB - pBmin) * uB - delta)
return np.array([duB, dpB])
| 20.860465
| 68
| 0.528428
| 133
| 897
| 3.466165
| 0.308271
| 0.052061
| 0.095445
| 0.039046
| 0.416486
| 0.416486
| 0.416486
| 0.416486
| 0.416486
| 0.416486
| 0
| 0.013051
| 0.316611
| 897
| 42
| 69
| 21.357143
| 0.738989
| 0.040134
| 0
| 0.384615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115385
| false
| 0
| 0.076923
| 0
| 0.307692
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f1545a93541c971b7ff89f3c71a62f913a542c9
| 2,502
|
py
|
Python
|
tests/test_heif.py
|
Cykooz/cykooz.heif
|
cfd60687406763503a57fe949bdf01fb9997cae8
|
[
"MIT"
] | 5
|
2020-03-05T20:31:23.000Z
|
2021-11-24T00:22:18.000Z
|
tests/test_heif.py
|
Cykooz/cykooz.heif
|
cfd60687406763503a57fe949bdf01fb9997cae8
|
[
"MIT"
] | 3
|
2021-01-14T15:23:04.000Z
|
2021-11-24T00:30:37.000Z
|
tests/test_heif.py
|
Cykooz/cykooz.heif
|
cfd60687406763503a57fe949bdf01fb9997cae8
|
[
"MIT"
] | 1
|
2020-06-12T01:29:10.000Z
|
2020-06-12T01:29:10.000Z
|
# -*- coding: utf-8 -*-
"""
:Authors: cykooz
:Date: 23.06.2019
"""
from pathlib import Path
import piexif
import pytest
from PIL import Image
from cykooz.heif.errors import HeifError
from cykooz.heif.image import RawHeifImage
from cykooz.heif.pil import register_heif_opener
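# The session-scoped fixture below registers the HEIF opener with Pillow once,
# so plain Image.open() calls in the tests can decode .heic files.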
@pytest.fixture(scope='session', autouse=True)
def reg_pil_opener():
register_heif_opener()
@pytest.fixture(name='data_path')
def data_path_fixture() -> Path:
return Path(__file__).parent / 'data'
def test_raw_heif_image_form_path(data_path):
img = RawHeifImage.from_path(data_path / 'test.heic')
assert img.width == 3024
assert img.height == 4032
assert img.mode == 'RGB'
assert len(img.data) == 36578304
assert img.stride == 9072
assert len(img.exif) == 2026
def test_raw_heif_image_form_reader(data_path):
img_path = data_path / 'test.heic'
with img_path.open('rb') as f:
img = RawHeifImage.from_stream(f)
assert img.width == 3024
assert img.height == 4032
assert img.mode == 'RGB'
assert len(img.data) == 36578304
assert img.stride == 9072
assert len(img.exif) == 2026
def test_raw_heif_image_form_reader_errors(data_path):
img_path = data_path / 'test.heic'
with img_path.open('rb') as f:
img = RawHeifImage.from_stream(f)
assert img.width == 3024
assert img.height == 4032
# File is closed
with pytest.raises(HeifError):
_ = img.data
@pytest.mark.parametrize(
['source_type'],
[
('path',),
('stream',),
]
)
@pytest.mark.parametrize(
['file_name'],
[
('test.heic',),
('heic_as.jpg',),
]
)
def test_open_pillow_image(data_path, source_type, file_name):
fp = data_path / file_name
if source_type == 'stream':
fp = open(str(fp), 'rb')
img: Image.Image = Image.open(fp)
assert img.size == (3024, 4032)
assert img.mode == 'RGB'
assert 'exif' in img.info
exif = piexif.load(img.info['exif'])
assert exif['Exif'][42035] == b'Apple'
assert exif['Exif'][42036] == b'iPhone 7 Plus back dual camera 6.6mm f/2.8'
pixel = img.getpixel((100, 100))
assert pixel == (73, 74, 69)
def test_open_png_as_heif(data_path):
fp = data_path / 'png_as.heif'
img: Image.Image = Image.open(fp)
assert img.size == (1280, 720)
assert img.mode == 'RGB'
assert 'exif' not in img.info
pixel = img.getpixel((100, 100))
assert pixel == (132, 185, 255)
| 24.529412
| 79
| 0.63709
| 355
| 2,502
| 4.315493
| 0.287324
| 0.082245
| 0.031332
| 0.041775
| 0.510444
| 0.456919
| 0.405352
| 0.362272
| 0.362272
| 0.313969
| 0
| 0.062984
| 0.225819
| 2,502
| 101
| 80
| 24.772277
| 0.72793
| 0.028777
| 0
| 0.378378
| 0
| 0
| 0.082197
| 0
| 0
| 0
| 0
| 0
| 0.324324
| 1
| 0.094595
| false
| 0
| 0.094595
| 0.013514
| 0.202703
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f15770186ad88ae65932854e1cbbe4f54f58e9d
| 3,960
|
py
|
Python
|
ambari-agent/src/main/python/ambari_agent/StatusCommandsExecutor.py
|
risdenk/ambari
|
3809bdc6d5fe367c2c3207812ee42856214db8de
|
[
"Apache-2.0"
] | null | null | null |
ambari-agent/src/main/python/ambari_agent/StatusCommandsExecutor.py
|
risdenk/ambari
|
3809bdc6d5fe367c2c3207812ee42856214db8de
|
[
"Apache-2.0"
] | 1
|
2018-10-22T17:50:00.000Z
|
2018-10-22T17:50:00.000Z
|
ambari-agent/src/main/python/ambari_agent/StatusCommandsExecutor.py
|
risdenk/ambari
|
3809bdc6d5fe367c2c3207812ee42856214db8de
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import signal
import threading
import logging
import multiprocessing
from ambari_agent.PythonReflectiveExecutor import PythonReflectiveExecutor
from ambari_agent.RemoteDebugUtils import bind_debug_signal_handlers
from ambari_agent.ExitHelper import ExitHelper
logger = logging.getLogger(__name__)
class StatusCommandsExecutor(multiprocessing.Process):
"""
A process which executes status/security status commands.
  It dies and respawns itself on command timeout, which is the most graceful way to end the currently running status command.
"""
def __init__(self, config, actionQueue):
multiprocessing.Process.__init__(self)
self.config = config
self.actionQueue = actionQueue
self.status_command_timeout = int(self.config.get('agent', 'status_command_timeout', 5)) # in seconds
self.hasTimeoutedEvent = multiprocessing.Event()
ExitHelper().register(self.kill)
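    # A threading.Timer armed per command calls respawn() on timeout; the
    # parent agent is expected to restart this process when that happens.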
def run(self):
try:
bind_debug_signal_handlers()
logger.info("StatusCommandsExecutor starting")
while True:
        command = self.actionQueue.statusCommandQueue.get(True)  # blocks until a status command appears
logger.debug("Running status command for {0}".format(command['componentName']))
timeout_timer = threading.Timer( self.status_command_timeout, self.respawn, [command])
timeout_timer.start()
self.process_status_command(command)
timeout_timer.cancel()
logger.debug("Completed status command for {0}".format(command['componentName']))
    except Exception:
logger.exception("StatusCommandsExecutor process failed with exception:")
raise
logger.warn("StatusCommandsExecutor process has finished")
def process_status_command(self, command):
component_status_result = self.actionQueue.customServiceOrchestrator.requestComponentStatus(command)
component_security_status_result = self.actionQueue.customServiceOrchestrator.requestComponentSecurityState(command)
result = (command, component_status_result, component_security_status_result)
self.actionQueue.statusCommandResultQueue.put(result)
def respawn(self, command):
try:
if hasattr(PythonReflectiveExecutor, "last_context"):
# Force context to reset to normal. By context we mean sys.path, imports, etc. They are set by specific status command, and are not relevant to ambari-agent.
PythonReflectiveExecutor.last_context.revert()
logger.warn("Command {0} for {1} is running for more than {2} seconds. Terminating it due to timeout.".format(command['commandType'], command['componentName'], self.status_command_timeout))
self.hasTimeoutedEvent.set()
    except Exception:
logger.exception("StatusCommandsExecutor.finish thread failed with exception:")
raise
def kill(self):
os.kill(self.pid, signal.SIGKILL)
    # Prevent the queue from ending up with non-freed semaphores/locks during put, which would dead-lock a process executing get.
self.actionQueue.statusCommandResultQueue.close()
self.actionQueue.statusCommandResultQueue.join_thread()
self.actionQueue.statusCommandResultQueue = multiprocessing.Queue()
| 41.684211
| 195
| 0.769192
| 479
| 3,960
| 6.256785
| 0.415449
| 0.047714
| 0.026693
| 0.024024
| 0.102436
| 0.058058
| 0.028695
| 0
| 0
| 0
| 0
| 0.002991
| 0.155808
| 3,960
| 94
| 196
| 42.12766
| 0.893509
| 0.330303
| 0
| 0.115385
| 0
| 0.019231
| 0.161782
| 0.044538
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096154
| false
| 0
| 0.153846
| 0
| 0.269231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f16819a3d5eb873ef8eef277cfd895042d5e5d1
| 5,630
|
py
|
Python
|
blender/addons/2.8/mifth_tools/mifth_tools_ui.py
|
feynmanliang/mifthtools
|
cf99bc5811215a8747c43d84895ba4fa806812b7
|
[
"BSD-3-Clause"
] | null | null | null |
blender/addons/2.8/mifth_tools/mifth_tools_ui.py
|
feynmanliang/mifthtools
|
cf99bc5811215a8747c43d84895ba4fa806812b7
|
[
"BSD-3-Clause"
] | null | null | null |
blender/addons/2.8/mifth_tools/mifth_tools_ui.py
|
feynmanliang/mifthtools
|
cf99bc5811215a8747c43d84895ba4fa806812b7
|
[
"BSD-3-Clause"
] | null | null | null |
import bpy
from bpy.props import *
from bpy.types import Operator, AddonPreferences
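# Each Panel subclass below registers a tab in the 'Mifth' category of an
# editor sidebar; draw() only declares operators and properties, while the
# actual logic lives in the mft.* operators registered elsewhere in the addon.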
class MFT_PT_PanelPose(bpy.types.Panel):
bl_label = "Bones"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_context = "posemode"
bl_category = 'Mifth'
# bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
layout = self.layout
mifthTools = context.scene.mifthTools
op = layout.operator("mft.copy_bones_transform", text="CopyBonesTransform")
op.mode = 'Copy'
op = layout.operator("mft.copy_bones_transform", text="PasteBonesTransform")
op.mode = 'Paste'
class MFT_PT_PanelAnimation(bpy.types.Panel):
bl_label = "Animations"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_context = "objectmode"
bl_category = 'Mifth'
# bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
layout = self.layout
mifthTools = context.scene.mifthTools
layout.operator("mft.curveanimator", text="Curve Animator")
layout.prop(mifthTools, "doUseSceneFrames", text='UseSceneFrames')
row = layout.row()
row.prop(mifthTools, "curveAniStartFrame", text='Start')
row.prop(mifthTools, "curveAniEndFrame", text='End')
row = layout.row()
row.prop(mifthTools, "curveAniStepFrame", text='Steps')
row.prop(mifthTools, "curveAniInterpolation", text='Interpolation')
layout.separator()
layout.separator()
layout.operator("mft.morfcreator", text="Morfer")
layout.prop(mifthTools, "morfCreatorNames")
layout.prop(mifthTools, "morfUseWorldMatrix", text='useWorldMatrix')
layout.prop(mifthTools, "morfApplyModifiers", text='applyModifiers')
class MFT_PT_PanelPlaykot(bpy.types.Panel):
bl_label = "PlaykotTools"
bl_space_type = 'NODE_EDITOR'
bl_region_type = 'UI'
bl_context = "objectmode"
bl_category = 'Mifth'
# bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
layout = self.layout
mifthTools = context.scene.mifthTools
layout.operator("mft.render_scene_2x", text="ScaleCrop")
layout.operator("mft.cropnoderegion", text="CropNodeRegion")
layout.operator("mft.crop_to_viewport", text="CropToViewport")
layout.separator()
layout.operator("mft.outputcreator", text="Create Output")
layout.prop(mifthTools, "outputFolder")
row = layout.row()
row.prop(mifthTools, "outputSubFolder")
row.prop(mifthTools, "doOutputSubFolder", text='')
layout.prop(mifthTools, "outputSequence")
layout.prop(mifthTools, "outputSequenceSize")
class MFT_PT_PanelCloning(bpy.types.Panel):
bl_label = "Cloning"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_context = "objectmode"
bl_category = 'Mifth'
# bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
layout = self.layout
mifthTools = bpy.context.scene.mifthTools
mifthCloneTools = bpy.context.scene.mifthCloneTools
layout.label(text="Draw Clones:")
layout.operator("mft.draw_clones", text="DrawClones")
layout.operator("mft.pick_obj_to_clone_draw", text="PickObjects")
layout.prop(mifthCloneTools, "drawClonesDirectionRotate", text='DirectionRotate')
layout.prop(mifthCloneTools, "drawClonesRadialRotate", text='RadialRotate')
layout.prop(mifthCloneTools, "drawClonesNormalRotate", text='NormalRotate')
#layout.prop(mifthCloneTools, "drawClonesOptimize", text='Optimize')
layout.prop(mifthCloneTools, "drawStrokeLength", text='Stroke')
layout.prop(mifthCloneTools, "drawRandomStrokeScatter", text='Scatter')
layout.prop(mifthCloneTools, "randNormalRotateClone", text='RandNormal')
layout.prop(mifthCloneTools, "randDirectionRotateClone", text='RandDirection')
layout.prop(mifthCloneTools, "randScaleClone", text='RandScale')
layout.prop(mifthCloneTools, "drawPressure", text='DrawPressure')
row = layout.row()
row.prop(mifthCloneTools, "drawPressureRelativeStroke", text='S')
row.prop(mifthCloneTools, "drawPressureScale", text='S')
row.prop(mifthCloneTools, "drawPressureScatter", text='S')
layout.prop(mifthCloneTools, "drawClonesAxis", text='Axis')
layout.separator()
layout.label(text="Clone Selected:")
layout.operator("mft.clonetoselected", text="CloneToSelected")
layout.separator()
layout.label(text="Radial Clone:")
layout.operator("mft.radialclone", text="Radial Clone")
# layout.prop(mifthTools, "radialClonesNumber", text='')
row = layout.row()
row.prop(mifthCloneTools, "radialClonesAxis", text='')
row.prop(mifthCloneTools, "radialClonesAxisType", text='')
layout.separator()
layout.label(text="Position Group:")
layout.operator("mft.group_instance_to_cursor", text="Position Group")
layout.prop(mifthCloneTools, "getGroupsLst", text='')
layout.separator()
layout.operator("mft.group_to_mesh", text="Groups To Mesh")
class MFT_PT_PanelVertexPaint(bpy.types.Panel):
bl_label = "Vertex Paint"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_context = "vertexpaint"
bl_category = 'Mifth'
# bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
layout = self.layout
mifthTools = bpy.context.scene.mifthTools
layout.operator("mftv.set_colors_to_selected", text="Set Colors")
layout.operator("mftv.invert_colors", text="Invert Colors")
| 37.533333
| 89
| 0.675311
| 574
| 5,630
| 6.480836
| 0.250871
| 0.053763
| 0.063978
| 0.020161
| 0.365323
| 0.270161
| 0.228495
| 0.228495
| 0.206452
| 0.206452
| 0
| 0.001107
| 0.197513
| 5,630
| 149
| 90
| 37.785235
| 0.822266
| 0.049911
| 0
| 0.392857
| 0
| 0
| 0.27111
| 0.058603
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044643
| false
| 0
| 0.026786
| 0
| 0.339286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|