text string | size int64 | token_count int64 |
|---|---|---|
import redis
# 测试连接redis
"""
if __name__ == '__main__':
conn = redis.Redis(host='127.0.0.1', port=6379)
if conn:
print('ok')
else:
print('error')
"""
# 测试session是否保存在redis中
"""
if __name__ == '__main__':
conn = redis.Redis(host='127.0.0.1', port=6379, password='foobared')
print(conn.keys())
"""
"""
[
b'session:eyJjc3JmX3Rva2VuIjoiNWNhMTdiYzJlOGFkZjc5NDZjODA0ODkzNzk4NTY2NzIxYjBkNWVmNiIsInVzZXIiOiJlcmljcyIsInVzZXJfaWQiOjF9.Xw2bFQ.tYod_F02MDofFCLR3aUs5BBEKQs',
b'session:eyJjc3JmX3Rva2VuIjoiNWNhMTdiYzJlOGFkZjc5NDZjODA0ODkzNzk4NTY2NzIxYjBkNWVmNiIsInVzZXJfaWQiOjF9.Xwt53A.ucQMG-hSprAb8YMhWep-lY2RqCE'
]
"""
| 652 | 407 |
"""Helper functions useful for all wrapper classes."""
import re
from typing import Callable, Optional, Union
from urllib.parse import quote_plus
from requests import exceptions, Response
from .output_format import OUTPUT_FORMAT
def get(nest: Union[dict, list, str], *args, default=None):
    """Safely retrieve a value from nested mappings/sequences.

    Args:
        nest: The object that contains nested mappings and lists.
        *args: Chain of keys/indices to follow, in order.
        default: Value returned when a key is missing, an index is out of
            range, or no args/nest is given. Defaults to `None`.

    Returns:
        The value reached by following the whole args-chain in `nest`,
        or `default` otherwise.

    Examples:
        >>> utils.get({"foo": {"bar": [1,2,3]}}, "foo", "bar", 2)
        3
        >>> utils.get("foobar", 3)
        'b'
        >>> utils.get({"foo": [1,2,3]}, "bar", default=-1)
        -1
        >>> utils.get([1,2,3], 4, default=-1)
        -1
        >>> utils.get({"foo": {"bar": [1,2,3]}}, default=-1)
        -1
    """
    if not (nest and args):
        return default
    current = nest
    for key in args:
        try:
            current = current[key]
        except (TypeError, IndexError, KeyError):
            return default
    return current
def build_group(items: list, match: str, match_pad: str = " ", negater: str = "NOT ") -> str:
    """Build and return a search group by inserting <match> between each of the items.

    Args:
        items: List of strings that should be connected.
        match: The connection between the items. Has to be one of ["AND", "OR", "NOT"].
            When using "NOT", the items are connected with "OR" and then negated.
        match_pad: The padding characters around match.
        negater: The characters that are used to negate a group.

    Returns:
        The created search group.

    Raises:
        ValueError: When given match is unknown.

    Examples:
        >>> print(build_group(["foo", "bar", "baz"], "AND", match_pad="_"))
        (foo_AND_bar_AND_baz)
        >>> print(build_group(["foo", "bar", "baz"], "NOT", negater="-"))
        -(foo OR bar OR baz)
    """
    # BUG FIX: the annotation was `items: [str]`, a literal list -- not a
    # valid type hint; use the plain `list` type instead.
    if match not in ["AND", "OR", "NOT"]:
        raise ValueError("Unknown match.")
    group = "("
    # A "NOT" group is expressed as a negated "OR" group.
    if match == "NOT":
        group = negater + group
        match = "OR"
    # Insert and combine
    group += (match_pad + match + match_pad).join(items)
    group += ")"
    return group
def clean_output(out: dict, format_dict: dict = OUTPUT_FORMAT):
    """Delete fields of `out` that are not defined in `format_dict`.

    Args:
        out: The returned JSON. Modified in place.
        format_dict: Override the output format.
    """
    # Collect the keys up front: deleting while iterating the live dict
    # view would raise "dictionary changed size during iteration".
    unwanted = [key for key in out if key not in format_dict]
    for key in unwanted:
        del out[key]
def invalid_output(
        query: dict, db_query: Union[str, dict], api_key: str, error: str, start_record: int,
        page_length: int) -> dict:
    """Create and return the output for a failed request.

    Args:
        query: The query in format as defined in wrapper/input_format.py.
        db_query: The query that was sent to the API in its language.
        api_key: The key used for the request.
        error: The error message returned.
        start_record: The index of the first record requested.
        page_length: The page length requested.

    Returns:
        A dict containing the passed values and "-1" as index where necessary
        to be compliant with wrapper/output_format.
    """
    # Build the whole structure in one literal; key order matches the
    # output format.
    return {
        "query": query,
        "dbQuery": db_query,
        "apiKey": api_key,
        "error": error,
        "result": {
            "total": "-1",
            "start": str(start_record),
            "pageLength": str(page_length),
            "recordsDisplayed": "0",
        },
        "records": [],
    }
def request_error_handling(req_func: Callable[..., Response], req_kwargs: dict, max_retries: int,
                           invalid: dict) -> Optional[Response]:
    """Make an HTTP request and handle errors that possibly occur.

    Args:
        req_func: The function that makes the HTTP request.
            For example `requests.put`.
        req_kwargs: The arguments that will be unpacked and passed to `req_func`.
        max_retries: Number of additional attempts made after a timeout.
            Only timeouts are retried; every other error aborts immediately.
        invalid: A dictionary conforming to wrapper/output_format.py. It will be modified if an
            error occurs ("error" field will be set).

    Returns:
        If no errors occur, the return of `req_func` will be returned. Otherwise `None` will be
        returned and `invalid` modified.
    """
    for i in range(max_retries + 1):
        try:
            response = req_func(**req_kwargs)
            # Raise an HTTP error if there were any
            response.raise_for_status()
        # NOTE: the specific exception types must stay before
        # RequestException below, since they are subclasses of it.
        except exceptions.HTTPError as err:
            invalid["error"] = "HTTP error: " + str(err)
            return None
        except exceptions.ConnectionError as err:
            # A fixed, generic message is reported; `err` is intentionally
            # not included.
            invalid["error"] = "Connection error: Failed to establish a connection: " \
                "Name or service not known."
            return None
        except exceptions.Timeout as err:
            if i < max_retries:
                # Try again
                continue
            # Too many failed attempts
            invalid["error"] = "Connection error: Failed to establish a connection: Timeout."
            return None
        except exceptions.RequestException as err:
            invalid["error"] = "Request error: " + str(err)
            return None
        # request successful
        break
    return response
def translate_get_query(query: dict, match_pad: str, negater: str, connector: str) -> str:
    """Translate a GET query.

    Translate a query in format `wrapper/input_format.py` into a string that can
    be used in the query part of the url of GET requests.

    Args:
        query: The query complying to `wrapper/input_format.py`. This is modified.
        match_pad: The padding around the match values.
        negater: The negater used for negating a search group.
        connector: The connector between the different parameters.

    Returns:
        The translated query.

    Raises:
        ValueError: When a search group uses "NOT" while the query-level
            match is "OR" (only AND NOT is supported).
    """
    # NOTE(review): a previous comment here claimed a deep copy is made,
    # but none is -- the search terms inside `query` are url-encoded in
    # place (consistent with "This is modified." above).
    groups = query.get("search_groups", [])
    for i in range(len(groups)):
        if groups[i].get("match") == "NOT" and query["match"] == "OR":
            raise ValueError("Only AND NOT supported.")
        for j in range(len(groups[i].get("search_terms", []))):
            term = groups[i].get("search_terms")[j]
            # Enclose search term in quotes if it contains a space and is not
            # quoted already to prevent splitting.
            if " " in term:
                if term[0] != '"':
                    term = '"' + term
                if term[-1] != '"':
                    term += '"'
            # Urlencode search term
            groups[i].get("search_terms")[j] = quote_plus(term)
        # Collapse this group's terms into one "(a MATCH b ...)" string.
        groups[i] = build_group(
            groups[i].get("search_terms", []), groups[i].get("match"), match_pad, negater
        )
    # Combine all groups with the query-level match.
    search_terms = build_group(groups, query.get("match"), match_pad, negater)
    query_str = ""
    for field in query.get("fields") or []:
        query_str += field + search_terms + connector
    # Drop the trailing connector.
    return query_str[:-len(connector)]
def build_get_query(params: dict, delim: str, connector: str) -> str:
    """Build a manual GET query from set parameters.

    Build a string that can be used in the query part of the url of a GET
    request from a dictionary containing the search parameters.

    Args:
        params: Dictionary of key, value pairs.
        delim: Delimiter between key and value.
        connector: Connector between different pairs.

    Returns:
        Built query.
    """
    parts = []
    for key, value in params.items():
        # Quote values containing a space (unless already quoted) so the
        # receiving API does not split them.
        if " " in value:
            if not value.startswith('"'):
                value = '"' + value
            if not value.endswith('"'):
                value = value + '"'
        # Url encode the value and join it with its key.
        parts.append(key + delim + quote_plus(value))
    return connector.join(parts)
# List of stop words based on (with "did" added):
# http://ir.dcs.gla.ac.uk/resources/linguistic_utils/stop_words
# Used by titles_to_keywords() to filter out non-keyword words.
STOP_WORDS = [
    'a', 'about', 'above', 'across', 'after', 'afterwards', 'again', 'against',
    'all', 'almost', 'alone', 'along', 'already', 'also', 'although', 'always',
    'am', 'among', 'amongst', 'amoungst', 'amount', 'an', 'and', 'another',
    'any', 'anyhow', 'anyone', 'anything', 'anyway', 'anywhere', 'are',
    'around', 'as', 'at', 'back', 'be', 'became', 'because', 'become',
    'becomes', 'becoming', 'been', 'before', 'beforehand', 'behind', 'being',
    'below', 'beside', 'besides', 'between', 'beyond', 'bill', 'both', 'bottom',
    'but', 'by', 'call', 'can', 'cannot', 'cant', 'co', 'computer', 'con',
    'could', 'couldnt', 'cry', 'de', 'describe', 'detail', 'did', 'do', 'done',
    'down', 'due', 'during', 'each', 'eg', 'eight', 'either', 'eleven', 'else',
    'elsewhere', 'empty', 'enough', 'etc', 'even', 'ever', 'every', 'everyone',
    'everything', 'everywhere', 'except', 'few', 'fifteen', 'fify', 'fill',
    'find', 'fire', 'first', 'five', 'for', 'former', 'formerly', 'forty',
    'found', 'four', 'from', 'front', 'full', 'further', 'get', 'give', 'go',
    'had', 'has', 'hasnt', 'have', 'he', 'hence', 'her', 'here', 'hereafter',
    'hereby', 'herein', 'hereupon', 'hers', 'herself', 'him', 'himself', 'his',
    'how', 'however', 'hundred', 'i', 'ie', 'if', 'in', 'inc', 'indeed',
    'interest', 'into', 'is', 'it', 'its', 'itself', 'keep', 'last', 'latter',
    'latterly', 'least', 'less', 'ltd', 'made', 'many', 'may', 'me',
    'meanwhile', 'might', 'mill', 'mine', 'more', 'moreover', 'most', 'mostly',
    'move', 'much', 'must', 'my', 'myself', 'name', 'namely', 'neither',
    'never', 'nevertheless', 'next', 'nine', 'no', 'nobody', 'none', 'noone',
    'nor', 'not', 'nothing', 'now', 'nowhere', 'of', 'off', 'often', 'on',
    'once', 'one', 'only', 'onto', 'or', 'other', 'others', 'otherwise', 'our',
    'ours', 'ourselves', 'out', 'over', 'own', 'part', 'per', 'perhaps',
    'please', 'put', 'rather', 're', 'same', 'see', 'seem', 'seemed', 'seeming',
    'seems', 'serious', 'several', 'she', 'should', 'show', 'side', 'since',
    'sincere', 'six', 'sixty', 'so', 'some', 'somehow', 'someone', 'something',
    'sometime', 'sometimes', 'somewhere', 'still', 'such', 'system', 'take',
    'ten', 'than', 'that', 'the', 'their', 'them', 'themselves', 'then',
    'thence', 'there', 'thereafter', 'thereby', 'therefore', 'therein',
    'thereupon', 'these', 'they', 'thick', 'thin', 'third', 'this', 'those',
    'though', 'three', 'through', 'throughout', 'thru', 'thus', 'to',
    'together', 'too', 'top', 'toward', 'towards', 'twelve', 'twenty', 'two',
    'un', 'under', 'until', 'up', 'upon', 'us', 'very', 'via', 'was', 'we',
    'well', 'were', 'what', 'whatever', 'when', 'whence', 'whenever', 'where',
    'whereafter', 'whereas', 'whereby', 'wherein', 'whereupon', 'wherever',
    'whether', 'which', 'while', 'whither', 'who', 'whoever', 'whole', 'whom',
    'whose', 'why', 'will', 'with', 'within', 'without', 'would', 'yet', 'you',
    'your', 'yours', 'yourself', 'yourselves',
]
def into_keywords_format(keywords: dict) -> list:
    """Convert a dictionary of keyword, counter pairs into a list of dicts.

    Args:
        keywords: A dictionary that contains a counter for every keyword.

    Returns:
        The keywords in the format specified in wrapper/output_format.py.
    """
    return [{"text": word, "value": count} for word, count in keywords.items()]
def from_keywords_format(keywords: list) -> dict:
    """Convert a list of keywords in a specific format into a dictionary.

    Args:
        keywords: A list in the format specified in wrapper/output_format.py

    Returns:
        The keywords as a dictionary with the keyword as key and its counter as
        value.
    """
    # Missing fields fall back to "Unknown"/0, matching the output format.
    return {entry.get("text", "Unknown"): entry.get("value", 0) for entry in keywords}
def titles_to_keywords(titles: str) -> list:
    """Count words and format that data.

    Args:
        titles: A string containing all titles concatenated.

    Returns:
        A list in the format specified in ["facets"]["keywords"] in
        wrapper.output_format.py
    """
    # Delete everything except alphanumeric characters and spaces, then
    # lowercase and tokenize.
    pat = re.compile("[^a-zA-Z0-9 ]+")
    # BUG FIX: split() instead of split(" ") -- the latter yields empty
    # strings for consecutive/leading/trailing spaces, and those empty
    # strings were then counted as keywords.
    words = pat.sub("", titles).lower().split()
    freqs = {}
    for word in words:
        # Kick out stop words
        if word in STOP_WORDS:
            continue
        # Add to counter/init if new word
        freqs[word] = freqs.get(word, 0) + 1
    # Convert into right format
    return into_keywords_format(freqs)
def combine_facets(facets: list):
    """Combine the facet counters of different wrappers.

    Args:
        facets: List of the facets dictionaries.
            NOTE: The first element will be modified!

    Returns:
        The combined facets.
    """
    # BUG FIX: the final "return total" line was corrupted by trailing
    # junk ("| 14,928 | 4,559 |"), which made the module unparseable.
    # Also fixed the invalid literal-list annotation `facets: [dict]`.
    total = {
        "countries": {},
        "keywords": {},
    }
    # Save one iteration.
    if len(facets) == 0:
        return total
    # Seed the totals with the first wrapper's facets (this aliases the
    # first element's "countries" dict -- hence the NOTE above).
    total["countries"] = get(facets, 0, "countries", default={})
    total["keywords"] = from_keywords_format(get(facets, 0, "keywords", default=[]))
    # Combine the rest.
    for i in range(1, len(facets)):
        if not isinstance(facets[i], dict):
            continue
        for category in facets[i]:
            if category not in total:
                continue
            for facet in get(facets, i, category, default=[]):
                if category == "countries":
                    key = facet
                    value = get(facets, i, category, facet, default=1)
                elif category == "keywords":
                    # Bring the single keyword entry into (key, value) form.
                    key, value = list(from_keywords_format([facet]).items())[0]
                else:
                    continue
                if key in total[category]:
                    total[category][key] += int(value)
                else:
                    total[category][key] = int(value)
    # Convert the keyword counters back into the output format.
    total["keywords"] = into_keywords_format(total["keywords"])
    return total
import numpy as np
parameters_svr_1 = [
{'kernel': ['rbf'], 'gamma': [0.1, 0.5, 0.9, 1],
'C': np.logspace(-4, 4, 5)},
]
parameters_svr_2 = [
{'kernel': ['rbf'], 'gamma': [1e-4, 0.1, 0.3,
0.5, 0.7, 0.9, 1], 'C': np.logspace(-4, 4, 10)},
{'kernel': ['linear'], 'gamma': [1e-4, 0.1, 0.3,
0.5, 0.7, 0.9, 1], 'C': np.logspace(-4, 4, 10)},
{'kernel': ['poly'], 'gamma': [1e-4, 0.1, 0.3,
0.5, 0.7, 0.9, 1], 'C': np.logspace(-4, 4, 10)},
]
parameters_svr_3 = [
{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4, 0.1, 0.2, 0.3, 0.4,
0.5, 0.6, 0.7, 0.8, 0.9], 'C': np.logspace(-4, 4, 20)},
{'kernel': ['linear'], 'gamma': [1e-3, 1e-4, 0.1, 0.2, 0.3,
0.4, 0.5, 0.6, 0.7, 0.8, 0.9], 'C': np.logspace(-4, 4, 20)},
{'kernel': ['poly'], 'gamma': [1e-3, 1e-4, 0.1, 0.2, 0.3,
0.4, 0.5, 0.6, 0.7, 0.8, 0.9], 'C': np.logspace(-4, 4, 20)},
{'kernel': ['sigmoid'], 'gamma': [1e-3, 1e-4, 0.1, 0.2, 0.3,
0.4, 0.5, 0.6, 0.7, 0.8, 0.9], 'C': np.logspace(-4, 4, 20)},
]
parameters_knr_1 = [{
'n_neighbors': list(range(1, 11)),
'weights': ['uniform', 'distance'],
'algorithm': ['auto', 'kd_tree', 'brute'],
}]
parameters_knr_2 = [{
'n_neighbors': list(range(1, 21)),
'weights': ['uniform', 'distance'],
'algorithm': ['auto', 'ball_tree', 'kd_tree', 'brute'],
}]
parameters_knr_3 = [{
'n_neighbors': list(range(1, 31)),
'weights': ['uniform', 'distance'],
'algorithm': ['auto', 'ball_tree', 'kd_tree', 'brute'],
}]
parameters_dt_1 = [{
'criterion': ['mse', 'mae'],
"min_samples_leaf": [20, 40],
"max_leaf_nodes": [5, 20],
'max_depth': [4, 6, 8, 10, 12, 20, 40, 70],
}]
parameters_dt_2 = [{
'criterion': ['mse', 'friedman_mse', 'mae'],
"min_samples_leaf": [20, 40, 100],
"max_leaf_nodes": [5, 20, 100],
'max_features': [2, 3],
'max_depth': [4, 6, 7, 9, 10, 12, 20, 40, 50, 90, 120],
}]
parameters_dt_3 = [{
'criterion': ['mse', 'friedman_mse', 'mae', 'poisson'],
"min_samples_leaf": [20, 40, 100, 150],
"max_leaf_nodes": [5, 20, 100, 150],
'max_features': [2, 3],
'max_depth': [4, 5, 6, 7, 8, 9, 10, 11, 12, 15, 20, 30, 40, 50, 70, 90, 120, 150],
}]
parameters_rfr_1 = [{
'n_estimators': [100, 200, 300, 400, 500, 750, 1000],
'max_depth': [4, 6, 8, 10, 12, 20, 40, 70],
'max_features': [2, 3],
'min_samples_leaf': [3, 4, 5],
}]
parameters_rfr_2 = [{
'criterion': ['mse', 'mae'],
'n_estimators': [50, 100, 150, 200, 250, 300, 400, 500, 700, 900, 1000],
'bootstrap': [True, False],
'max_depth': [4, 6, 7, 9, 10, 12, 20, 40, 50, 90, 120],
'max_features': [2, 3],
'min_samples_leaf': [4, 5],
'min_samples_split': [10, 12],
}]
parameters_rfr_3 = [{
'criterion': ['mse', 'mae'],
'n_estimators': [50, 100, 150, 200, 250, 300, 400, 500, 600, 700, 800, 900, 1000],
'bootstrap': [True, False],
'max_depth': [4, 5, 6, 7, 8, 9, 10, 11, 12, 15, 20, 30, 40, 50, 70, 90, 120, 150],
'max_features': [2, 3],
'min_samples_leaf': [3, 4, 5],
'min_samples_split': [8, 10, 12],
}]
parameters_gbr_1 = [{
'learning_rate': [0.02,0.04],
'subsample' : [ 0.5, 0.2, 0.1],
'n_estimators': [100, 200, 300, 400, 500, 750, 1000],
'max_depth' : [4,6],
'loss': ['ls', 'lad'],
'criterion' : ['mse', 'mae'],
'min_samples_split': [8, 10],
'min_samples_leaf': [3, 4],
}]
parameters_gbr_2 = [{
'learning_rate': [0.02,0.03,0.04],
'subsample' : [0.5, 0.2, 0.1],
'n_estimators': [50, 100, 150, 200, 250, 300, 400, 500, 700, 900, 1000],
'max_depth' : [4,6,8],
'loss': ['ls', 'lad', 'huber'],
'criterion' :['friedman_mse', 'mse'],
'min_samples_split': [8, 10, 12],
'min_samples_leaf': [3, 4],
}]
parameters_gbr_3 = [{
'learning_rate': [0.01,0.02,0.03,0.04],
'subsample' : [0.9, 0.5, 0.2, 0.1],
'n_estimators': [50, 100, 150, 200, 250, 300, 400, 500, 600, 700, 800, 900, 1000],
'max_depth' : [4,6,8,10],
'loss': ['ls', 'lad', 'huber', 'quantile'],
'criterion' : ['friedman_mse', 'mse', 'mae'],
'min_samples_split': [8, 10, 12],
'min_samples_leaf': [3, 4, 5],
}]
parameters_ann_1 = [{'batch_size': [20, 50, 32],
'nb_epoch': [200, 100, 300],
'input_units': [5, 6, 10, ],
}]
parameters_ann_2 = [{'batch_size': [20, 50, 25, 32],
'nb_epoch': [200, 100, 300, 350],
'input_units': [5, 6, 10, 11, 12, ],
}]
parameters_ann_3 = [{'batch_size': [100, 20, 50, 25, 32],
'nb_epoch': [200, 100, 300, 400],
'input_units': [5, 6, 10, 11, 12, 15],
}]
parameters_lin = [{
"fit_intercept": [True, False],
"positive":[True, False]
}]
parameters_sgd_1 = [{
'penalty': ['l1', 'l2'],
'loss': ['squared_loss', 'huber'],
'alpha': [0.1, 0.5, 0.9, 1],
'learning_rate': ['constant', 'optimal'],
}]
parameters_sgd_2 = [{
'penalty': ['l1', 'l2', 'elasticnet', ],
'loss': ['squared_loss', 'huber', 'epsilon_insensitive'],
'alpha': [1e-4, 0.1, 0.3,
0.5, 0.7, 0.9, 1],
"fit_intercept": [True, False],
'learning_rate': ['constant', 'optimal', 'invscaling'],
'eta0': [10, 100],
}]
parameters_sgd_3 = [{
'penalty': ['l1', 'l2', 'elasticnet' ],
'loss': ['squared_loss', 'huber', 'epsilon_insensitive','squared_epsilon_insensitive'],
"fit_intercept": [True, False],
'alpha': [1e-3, 1e-4, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
'learning_rate': ['constant', 'optimal', 'invscaling', 'adaptive'],
'eta0': [1, 10, 100],
}]
parameters_ker_1 = [{
'alpha': [0.1, 0.5, 0.9, 1],
'gamma': [0.1, 0.5, 0.9, 1],
}]
parameters_ker_2 = [{
'alpha': [1e-4, 0.1, 0.3,
0.5, 0.7, 0.9, 1],
'gamma': [1e-4, 0.1, 0.3,
0.5, 0.7, 0.9, 1],
}]
parameters_ker_3 = [{
'alpha': [1e-3, 1e-4, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
'gamma': [1e-3, 1e-4, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
}]
parameters_elas = [{
'alpha': [0.1, 0.5, 0.9, 1],
'l1_ratio': [0, 0.25, 0.5, 0.75, 1],
}]
# Bayesian Ridge search grid.
# BUG FIX: 'lambda_1' appeared twice; the second entry silently overwrote
# the first, leaving 'lambda_2' unsearched. The duplicate was clearly
# meant to be 'lambda_2'.
parameters_br = [{
    'alpha_1': [1e-3, 1e-4, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
    'alpha_2': [1e-3, 1e-4, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
    'lambda_1': [1e-3, 1e-4, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
    'lambda_2': [1e-3, 1e-4, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
}]
parameters_lgbm_1 = [{
'n_estimators': [100, 200, 300, 400, 500, 750, 1000],
'min_child_weight': [1, 5, 10],
'colsample_bytree': [0.8, 1.0],
'reg_alpha': [0, 1, 2, 5, 7, 10],
'reg_lambda': [0, 1, 2, 5, 7, 10],
}]
parameters_lgbm_2 = [{
'n_estimators': [50, 100, 150, 200, 250, 300, 400, 500, 700, 900, 1000],
'min_child_weight': [1, 5, 10],
'subsample': [ 0.8, 1.0],
'colsample_bytree': [ 0.8, 1.0],
'reg_alpha': [0, 1e-1, 1, 2, 5, 7, 10, 50],
'reg_lambda': [0, 1e-1, 1, 5, 10, 20, 50],
}]
parameters_lgbm_3 = [{
'n_estimators': [50, 100, 150, 200, 250, 300, 400, 500, 600, 700, 800, 900, 1000],
'min_child_weight': [1e-5, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2, 1e3, 1e4],
'subsample': [0.6, 0.8, 1.0],
'colsample_bytree': [0.6, 0.8, 1.0],
'reg_alpha': [0, 1e-1, 1, 2, 5, 7, 10, 50, 100],
'reg_lambda': [0, 1e-1, 1, 5, 10, 20, 50, 100],
}]
parameters_xgb_1 = [{
'min_child_weight': [1, 5, 10],
'n_estimators': [100, 200, 300, 400, 500, 750, 1000],
'gamma': [0.1, 0.5, 0.9, 1],
'max_depth': [4, 6, 8, 10, 12, 20, 40, 70],
'learning_rate': [0.3, 0.1],
}]
parameters_xgb_2 = [{
'min_child_weight': [1e-3, 1e-2, 1e-1, 1, 1e1, 1e2],
'n_estimators': [50, 100, 150, 200, 250, 300, 400, 500, 700, 900, 1000],
'gamma': [1e-4, 0.1, 0.3,
0.5, 0.7, 0.9, 1],
'subsample': [0.6, 0.8],
'colsample_bytree': [0.6, 0.8, 1.0],
'max_depth': [4, 6, 7, 9, 10, 12, 20, 40, 50, 90, 120],
'learning_rate': [0.3, 0.1, 0.01],
}]
parameters_xgb_3 = [{
'min_child_weight': [1e-3, 1e-2, 1e-1, 1, 1e1, 1e2],
'n_estimators': [50, 100, 150, 200, 250, 300, 400, 500, 600, 700, 800, 900, 1000],
'gamma': [1e-3, 1e-4, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
'subsample': [ 0.8, 1.0],
'colsample_bytree': [ 0.8, 1.0],
'max_depth': [4, 5, 6, 7, 8, 9, 10, 11, 12, 15, 20, 30, 40, 50, 70, 90, 120, 150],
'learning_rate': [0.3, 0.1, 0.03],
}]
# CatBoost search grid.
# BUG FIX: 'depth' was listed twice; the second entry ([2, 4, 6, 8])
# silently overwrote the first ([6, 8, 10]). The effective value is kept
# and the dead duplicate removed.
parameters_cat = [{
    'depth': [2, 4, 6, 8],
    'learning_rate': [0.01, 0.05, 0.1],
    'iterations': [30, 50, 100],
    'l2_leaf_reg': [0.2, 0.5, 1, 3]
}]
| 8,982 | 5,262 |
# Redo CHALLENGE 35 (triangles), additionally reporting which type of
# triangle the sides form:
#
# - EQUILATERAL (EQUILÁTERO): all sides equal
#
# - ISOSCELES (ISÓSCELES): two sides equal, one different
#
# - SCALENE (ESCALENO): all sides different
def classify_triangle(a, b, c):
    """Return the triangle type name for sides a, b, c, or None when the
    sides violate the triangle inequality and form no triangle."""
    if not (a < b + c and b < a + c and c < a + b):
        return None
    if a == b == c:
        return 'EQUILÁTERO'
    # BUG FIX: the original tested only side1 against the other two, so
    # e.g. (2, 3, 3) was reported as ESCALENO; all three pairs must differ.
    if a != b and b != c and a != c:
        return 'ESCALENO'
    return 'ISÓSCELES'


if __name__ == '__main__':
    side1 = int(input('Primeiro lado: '))
    side2 = int(input('Segundo lado: '))
    side3 = int(input('Terceiro lado: '))
    type_ = classify_triangle(side1, side2, side3)
    if type_ is None:
        print('\033[31mEstes lados não podem formar um triângulo.\033[m')
    else:
        print('Tipo do triângulo: \033[32m{}\033[m'.format(type_))
| 757 | 315 |
"""Main WSGI server runner for sd-proxy
"""
import os
import logging
from sys import argv, path, stderr, exit
from gevent.wsgi import WSGIServer
class VersionedWSGIServer(WSGIServer):
    """WSGIServer that advertises a custom server version string.

    NOTE(review): `base_env` looks like a class-level attribute on
    gevent's WSGIServer, so mutating it before calling the parent
    constructor would modify the dict shared by all instances -- confirm
    this is intentional (harmless while one server runs per process).
    """
    def __init__(self, server_version, *args, **kwargs):
        # Exposed to applications as the SERVER_SOFTWARE environ value.
        self.base_env['SERVER_SOFTWARE'] = server_version
        super(VersionedWSGIServer, self).__init__(*args, **kwargs)
def run(app, port=8889, listener=None):
    """Serve `app` forever; blocks until the process is interrupted.

    Args:
        app: WSGI application; must expose a `_version` attribute.
        port: Port to bind when no explicit `listener` is given.
        listener: Optional (host, port) tuple overriding `port`.
    """
    if listener is None:
        # Bind all interfaces on the given port.
        listener = ('', port)
    # Advertised server identity, e.g. "sd-proxy/1.2.3".
    version = 'sd-proxy/%s' % (app._version,)
    http_server = VersionedWSGIServer(version, listener, app)
    http_server.serve_forever()
def main():
    """Entry point: load the config named in argv[1] and start the proxy.

    Returns:
        Process exit code: 0 on success, 1 on usage error.
    """
    # BUG FIX: argv[0] is the script name, so the original check
    # "len(argv) < 1" could never trigger and accessing argv[1] raised
    # IndexError instead of printing the usage hint.
    if len(argv) < 2:
        print >> stderr, 'Please provide a path to your config file.'
        return 1
    # The settings module reads this environment variable on import below.
    os.environ['SD_PROXY_CONFIG'] = argv[1]
    from serverdensity.proxy import settings, setup_logging
    from serverdensity.proxy.app import app
    setup_logging(app)
    app.debug = settings.debug
    app.logger.info('Starting sd-proxy on port %s..' % (settings.port,))
    run(app, settings.port)
    return 0
if __name__ == '__main__':
    # Make the package importable when run directly from a source checkout
    # (three levels up from this file).
    path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
    exit(main())
| 1,179 | 403 |
import citationsCommon
def countByIdAndYear(rdd):
    """Count how often each referenced document is cited per year.

    Args:
        rdd: RDD of rows shaped (id, references, year) -- assumed from the
            usage in citationCountsE2E; TODO confirm.

    Returns:
        RDD of ('<referenced id>.<citing year>', count) pairs.
    """
    # Emit one ('ref.year', 1) pair per reference of each citing document,
    # then sum the ones per key.
    pairs = rdd.flatMap(
        lambda row: [('{}.{}'.format(reference, row[2]), 1)
                     for reference in row[1]])
    return pairs.reduceByKey(lambda a, b: a + b)
def joinIdYearAge(idYearCount, ddpairs):
    """Join citation counts with publication years to get citation ages.

    Args:
        idYearCount: RDD of ('<id>.<year cited>', count) pairs as produced
            by countByIdAndYear.
        ddpairs: RDD of (id, year published) pairs.

    Returns:
        RDD of ('<id>.<year cited>', year cited - year published) pairs,
        keeping only entries with age >= -2.
    """
    # idYear: id, year cited
    # NOTE(review): the [:-5]/[-4:] slicing assumes every key ends in a
    # one-char separator plus a 4-digit year ("<id>.YYYY") -- confirm ids
    # can never end in such a suffix themselves.
    idYear = idYearCount.map(lambda row: (row[0][:-5], int(row[0][-4:])))
    # ddpairs is expected to be: id, year published
    # idYearAge: id, year cited - year published
    # Slightly negative ages (>= -2) are kept, presumably to tolerate
    # citations recorded before the official publication year -- confirm.
    return idYear.join(ddpairs).filter(lambda row: (row[1][0] - row[1][1] >= -2)).map(
        lambda row: ('{}.{}'.format(row[0], row[1][0]), (row[1][0] - row[1][1])))
def citationCountArrays(idYearAge, idYearCount):
    """Collect each document's (age, count) pairs into a single array.

    Args:
        idYearAge: RDD of ('<id>.<year>', age) pairs.
        idYearCount: RDD of ('<id>.<year>', count) pairs.

    Returns:
        RDD of (id, array) pairs, where the array is produced by
        citationsCommon.pairsToArrayHelper.pairsToArray from the list of
        the document's (age, count) pairs.
    """
    p2Afunc = citationsCommon.pairsToArrayHelper.pairsToArray
    # Strip the ".YYYY" suffix (same assumption as joinIdYearAge) to
    # regroup by document id, concatenate the per-year pairs, then convert
    # each list into the final array.
    return idYearAge.join(idYearCount).map(
        lambda row: (row[0][:-5], [(row[1][0], row[1][1])])).reduceByKey(
        lambda c, d: c + d).mapValues(lambda x: p2Afunc(x))
def citationCountsE2E(df, partitionCount=34):
    """Run the full citation-count pipeline over `df`.

    Args:
        df: DataFrame read from json before rows with NULL references have
            been filtered out; must provide "id", "references" and "year".
        partitionCount: How many partitions to coalesce the intermediate
            data to.

    Returns:
        RDD of per-document citation count arrays.
    """
    with_refs = df.select("id", "references", "year") \
        .filter("references is not NULL").rdd
    idYearCount = countByIdAndYear(with_refs)
    # For publication dates, include publications with no references.
    idYearAge = joinIdYearAge(idYearCount, df.select("id", "year").rdd)
    return citationCountArrays(idYearAge.coalesce(partitionCount),
                               idYearCount)
| 1,553 | 529 |
# Copyright (c) 2009, Tomohiro Kusumi
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division
import os
import re
import sys
from .. import extension
from .. import filebytes
from .. import kernel
from .. import libc
from .. import path
from .. import screen
from .. import setting
from .. import util
def I(x):
    """Return an indentation string of `x` levels, 4 spaces per level."""
    return " " * (4 * x)
class _node (object):
    """Base class for cstruct nodes: a named type with a size and a
    textual representation; subclasses override both accessors."""

    def __init__(self, type):
        # Type name, e.g. "u16le", "string4" or a struct name.
        self.type = type

    def get_size(self):
        """Byte size of this node; 0 for the abstract base."""
        return 0

    def get_repr(self, buf, name, indent):
        """Lines representing `buf` as this type; empty for the base."""
        return []
class _builtin (_node):
    """Base for fixed-size builtin scalar types (u8, s16le, x32be, ...);
    concrete subclasses are generated by __create_builtin_class()."""

    def __init__(self):
        # The type name is the concrete class name, e.g. "u16le".
        super(_builtin, self).__init__(util.get_class_name(self))

    def get_repr(self, buf, name, indent):
        """Return the C-like declaration line; when `buf` matches this
        type's size exactly, append value, hex dump and printable dump."""
        s = "{0}{1} {2};".format(I(indent), self.type, name)
        if len(buf) == self.get_size():
            v = self.__get_value_expr(buf)
            # Hex dump of the raw bytes.
            a = ''.join(["\\x{0:02X}".format(x) for x in
                filebytes.iter_ords(buf)])
            # Printable-character dump.
            b = ''.join([screen.chr_repr[x] for x in filebytes.iter_ords(buf)])
            s += " {0} {1} [{2}]".format(v, a, b)
        return [s]

    def __get_value_expr(self, buf):
        """Format the decoded integer: zero-padded hex for x-types,
        plain decimal otherwise."""
        n = self.to_int(buf)
        m = _builtin_xtype_regex.match(self.type)
        if m:
            siz = builtin_int(m.group(1))
            siz //= 4 # string size in hex: bit count -> hex digit count
            fmt = "0x{0:0" + str(siz) + "X}"
            return fmt.format(n)
        else:
            return str(n)
# Matches one top-level "struct <name> { ... };" definition.
_toplevel_regex = re.compile(r"\s*struct\s+(\S+)\s*{([\s\S]+?)}\s*;")
# Matches an array member declaration, e.g. "buf[16]".
_struct_member_regex = re.compile(r"^(\S+)\[([0-9]+)\]$")
# Matches builtin scalar names with explicit endianness, e.g. "s32le".
_builtin_type_regex = re.compile(r"^(u|s|x)(8|16|32|64)(le|be)$")
_builtin_xtype_regex = re.compile(r"^x(8|16|32|64)") # only to detect x
# XXX
# This is necessary as this module uses int()
# while __create_builtin_class() overwrites int.
builtin_int = util.get_builtin("int")
# All builtin node classes registered by __create_builtin_class().
_classes = []
def __create_builtin_class(name, size):
    """Generate a builtin scalar node class called `name` of `size` bytes
    and register it in _classes and as an attribute of this module."""
    def get_size(self):
        return size
    # Signed iff the type name starts with 's'.
    sign = (name[0] == 's')
    m = _builtin_type_regex.match(name)
    if not m:
        # No explicit endianness suffix: decode in host byte order.
        def to_int(self, b):
            return util.host_to_int(b, sign)
    elif m.group(3) == "le":
        def to_int(self, b):
            return util.le_to_int(b, sign)
    elif m.group(3) == "be":
        def to_int(self, b):
            return util.be_to_int(b, sign)
    else:
        assert False, m.group(0)
    cls = type(name, (_builtin,), dict(get_size=get_size, to_int=to_int,),)
    assert cls not in _classes
    _classes.append(cls)
    # Expose the class as a module attribute, e.g. cstruct.u16le -- note
    # this shadows builtins for names like "int" from libc definitions.
    setattr(sys.modules[__name__], name, cls)
def __init_class():
    """Register all builtin scalar classes: sizes 1/2/4/8 bytes, kinds
    u/s/x, each in host/le/be byte order, plus libc-defined types."""
    for x in util.get_xrange(4):
        size = 2 ** x
        for sign in "usx":
            for suffix in ("", "le", "be"):
                # e.g. "u8", "s16le", "x64be".
                name = "{0}{1}{2}".format(sign, size * 8, suffix)
                __create_builtin_class(name, size)
    # Platform-specific typedefs provided by the libc helper module.
    for name, func_name, fn in libc.iter_defined_type():
        __create_builtin_class(name, fn())
# A node for this class can't be added on import
class _string (_node):
    """Fixed-size string node ("stringN"); instances are registered
    lazily by _struct when a string member of that size is first seen."""

    def __init__(self, size):
        # Byte length of the string, including any trailing NUL.
        self.__size = size
        super(_string, self).__init__(_string_type(self.__size))

    def get_size(self):
        return self.__size

    def get_repr(self, buf, name, indent):
        # NOTE(review): if buf contains no NUL byte, find() returns -1 and
        # buf[:i] silently drops the final byte -- confirm buffers here
        # are always NUL-terminated.
        i = buf.find(filebytes.ZERO)
        b = filebytes.str(buf[:i])
        s = "{0}string {1}; \"{2}\"".format(I(indent), name, b)
        return [s]
def _string_type(n):
return "string{0}".format(n)
class _struct (_node):
    """Composite node built from the body of a "struct <name> { ... }"
    definition; members reference previously registered nodes."""

    def __init__(self, type, defs):
        super(_struct, self).__init__(type)
        self.__member = []
        # Resolve each member declaration to an already-registered node;
        # forward references are a hard failure.
        for type, name in self.__iter_member(defs):
            o = get_node(type)
            if not o:
                extension.fail(type + " not defined yet")
            self.__member.append((o, name))

    def get_size(self):
        """Total struct size: the sum of all member sizes (no padding)."""
        return sum(_[0].get_size() for _ in self.__member)

    def get_repr(self, buf, name, indent):
        """Render the struct recursively, consuming `buf` one member at
        a time."""
        l = ["{0}struct {1} {{".format(I(indent), self.type)]
        for _ in self.__member:
            n = _[0].get_size()
            l.extend(_[0].get_repr(buf[:n], _[1], indent+1))
            buf = buf[n:]
        x = " " + name
        l.append("{0}}}{1};".format(I(indent), x.rstrip()))
        return l

    def __iter_member(self, defs):
        """Yield (type, name) pairs for each member declaration in
        `defs`, expanding array members into one entry per element."""
        for s in [x.strip() for x in defs.split(';')]:
            l = s.split()
            if l:
                # Drop a leading "struct" keyword in member declarations.
                if l[0] == "struct":
                    l = l[1:]
                if len(l) != 2:
                    extension.fail("Invalid syntax: {0}".format(l))
                type, name = l
                if type == "string":
                    yield self.__scan_string_type(type, name)
                else:
                    # anything but string, including struct
                    m = _struct_member_regex.match(name)
                    if m:
                        var = m.group(1)
                        num = builtin_int(m.group(2))
                        # Expand "name[N]" into N scalar members.
                        for i in util.get_xrange(num):
                            yield type, "{0}[{1}]".format(var, i)
                    else:
                        yield type, name

    def __scan_string_type(self, type, name):
        """Map a "string name[N]" member onto a "stringN" node,
        registering that node on first use."""
        m = _struct_member_regex.match(name)
        if m:
            var = m.group(1)
            num = builtin_int(m.group(2))
        else:
            var = name
            num = 1 # force "[1]"
        type = _string_type(num)
        if not get_node(type):
            add_node(_string(num))
        return type, "{0}[{1}]".format(var, num)
# Live node registry; rebuilt from _classes on every :cstruct invocation.
_nodes = []

def init_node():
    """Reset the registry to one fresh instance of each builtin class,
    discarding struct/string nodes from previous invocations."""
    global _nodes
    _nodes = [cls() for cls in _classes]
def get_node(s):
    """Return the registered node whose type name equals `s`, else None."""
    return next((candidate for candidate in _nodes if candidate.type == s), None)
def add_node(o):
    """Register node `o`, first removing any node(s) of the same type."""
    existing = get_node(o.type)
    while existing is not None:
        _nodes.remove(existing)
        existing = get_node(o.type)
    _nodes.append(o)
def get_text(co, fo, args):
    """Implement the :cstruct command: parse struct definitions and
    render the bytes at the current position as the named structs.

    Args:
        co: Console/context object (unused here).
        fo: File object; read via fo.read(pos, size).
        args: Command arguments with the current buffer position appended
            as the last element; optionally starts with a path to a
            definition file, followed by struct names.

    Returns:
        A list of output lines, or an error message string.
    """
    # The caller appends the current position as the last argument.
    pos = args.pop()
    if not args:
        return "No struct name"
    # The first argument may optionally name a definition file.
    f = path.get_path(args[0])
    if os.path.exists(f):
        args = args[1:]
        if not args:
            return "No struct name"
    else:
        # Fall back to the configured default definition file.
        f = setting.get_ext_path("cstruct")
        if path.is_noent(f):
            return "Need {0} with struct definition".format(f)
    if not os.path.isfile(f):
        return "Can not read " + f
    try:
        l = kernel.fopen_text(f).readlines()
    except Exception as e:
        return str(e)
    l = [x.strip() for x in l] # strip whitespaces and tabs first
    l = [x for x in l if not x.startswith('#')] # then ignore comments
    s = ''.join([x for x in l if x])
    # Collapse whitespace runs so the regexes can assume single spaces.
    s = re.sub(r"\s{1,}", ' ', s)
    init_node()
    # Parse and register every struct definition, in file order.
    while True:
        m = _toplevel_regex.match(s)
        if m:
            s = s[m.end():]
            add_node(_struct(*m.groups()))
        else:
            break
    l = []
    for x in args:
        o = get_node(x)
        if o:
            # Read exactly the struct's size at the current position.
            buf = fo.read(pos, o.get_size())
            l.extend(o.get_repr(buf, '', 0))
        else:
            l.append("struct {0} is not defined in {1}".format(x, f))
        # Blank separator line after each struct.
        l.append('')
    return l
def init():
    """Register the :cstruct path setting, build the builtin classes and
    make sure a (possibly empty) definition file exists."""
    setting.ext_add_name("path_cstruct", "cstruct",
        "Set configuration file path for :cstruct. "
        "Defaults to ~/.fileobj/cstruct if undefined.")
    __init_class()
    # create an empty file
    f = setting.get_ext_path("cstruct")
    if not os.path.exists(f):
        try:
            kernel.fcreat_text(f)
        except Exception:
            pass # ignore: a missing file is reported later by get_text()
def cleanup():
    """Unregister the :cstruct extension setting."""
    setting.ext_delete("path_cstruct")

# Register the extension on import.
init()
| 8,727 | 3,019 |
# Generated by Django 2.2.19 on 2021-03-11 18:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the vw_published_dabs_toptier_agency DB view plus an
    unmanaged Django model that maps onto it."""

    dependencies = [
        ("references", "0051_auto_20210223_1404"),
        ("submissions", "0015_submissionattributes_history"),
    ]

    operations = [
        # Unmanaged model: Django creates/drops no table for it; queries
        # are served by the view created below.
        migrations.CreateModel(
            name="ToptierAgencyPublishedDABSView",
            fields=[],
            options={
                "db_table": "vw_published_dabs_toptier_agency",
                "managed": False,
            },
        ),
        # One row per toptier agency that has at least one submission
        # whose DABS submission window has been revealed.
        migrations.RunSQL(
            sql=[
                """
                CREATE VIEW vw_published_dabs_toptier_agency AS
                SELECT
                    DISTINCT ON (ta.toptier_code)
                    ta.toptier_code,
                    ta.name,
                    ta.abbreviation,
                    ta.toptier_agency_id,
                    a.id AS agency_id,
                    a.user_selectable
                FROM toptier_agency ta
                INNER JOIN agency a ON (a.toptier_agency_id = ta.toptier_agency_id AND a.toptier_flag = TRUE)
                INNER JOIN submission_attributes sa USING (toptier_code)
                INNER JOIN dabs_submission_window_schedule schedule ON sa.submission_window_id = schedule.id
                WHERE schedule.submission_reveal_date <= now();
                """
            ],
            reverse_sql=["DROP VIEW IF EXISTS vw_published_dabs_toptier_agency;"],
        ),
    ]
| 1,532 | 472 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_scatter import scatter_mean
from torch.nn import Linear, ReLU
from torch_geometric.nn import GraphConv
class GraphConvNet(nn.Module):
    """Three-layer GraphConv encoder with mean pooling per graph.

    Args:
        n_feat: number of input node features.
        n_hid: hidden width of the first convolution (doubled in the second).
        n_out: per-graph output embedding size.
    """

    def __init__(self, n_feat, n_hid, n_out):
        # Bug fix: the original called super(GraphConvNet).__init__(), which
        # omits the second argument and therefore never runs
        # nn.Module.__init__ — submodule registration breaks.
        super().__init__()
        self.conv1 = GraphConv(n_feat, n_hid)
        self.conv2 = GraphConv(n_hid, n_hid * 2)
        self.conv3 = GraphConv(n_hid * 2, n_out)

    def forward(self, data):
        """Encode `data.x` over `data.edge_index`; returns one row per graph."""
        data.x = F.elu(self.conv1(data.x, data.edge_index))
        data.x = F.elu(self.conv2(data.x, data.edge_index))
        data.x = F.elu(self.conv3(data.x, data.edge_index))
        # Mean-pool node embeddings within each graph of the batch.
        return scatter_mean(data.x, data.batch, dim=0)
class DoubleGraphConvNet(nn.Module):
    """Joint encoder: two GraphConvNet branches plus extra point features,
    fused by a three-layer MLP.
    """

    def __init__(self, graph, subgraph, point):
        # Bug fix: super(DoubleGraphConvNet).__init__() skipped
        # nn.Module initialization; use the zero-argument form.
        super().__init__()
        self.graph_conv = GraphConvNet(graph.n_feat, graph.n_feat * 2, graph.n_feat * 3)
        self.subgraph_conv = GraphConvNet(subgraph.n_feat, subgraph.n_feat * 2, subgraph.n_feat * 3)
        # l1's in_features equals the concatenated feature widths below.
        self.l1 = Linear(graph.n_feat * 3 + subgraph.n_feat * 3 + point, 600)
        self.l2 = Linear(600, 256)
        self.l3 = Linear(256, graph.n_feat)

    def forward(self, graph, subgraph, point):
        x1 = self.graph_conv(graph)
        x2 = self.subgraph_conv(subgraph)
        # Bug fix: concatenate along the feature (last) dimension so the
        # width matches l1.in_features; the original cat along dim 0
        # stacked rows instead of widening features.
        x = torch.cat([x1, x2, point], dim=-1)
        # Bug fix: the original called the nn.ReLU *class* on the tensor
        # (ReLU(t) constructs a module with t as its inplace flag and never
        # applies the activation); use the functional form.
        x = F.relu(self.l1(x))
        x = F.relu(self.l2(x))
        return self.l3(x)
| 1,493 | 616 |
# Generated by Django 2.0 on 2017-12-28 13:22
from django.db import migrations, models
class Migration(migrations.Migration):
    # Makes Tutor.salary optional (nullable and blankable).

    dependencies = [
        ('user', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='tutor',
            name='salary',
            # NOTE(review): max_length has no effect on FloatField (it is
            # only honored by character-based fields) — harmless here, but
            # misleading; kept as generated since migrations are historical.
            field=models.FloatField(blank=True, max_length=5, null=True),
        ),
    ]
| 392 | 135 |
import json
import httpx
import requests
import urllib
import ssl
from urllib3 import poolmanager
from bs4 import BeautifulSoup
from unilogin import Unilogin
class TLSAdapter(requests.adapters.HTTPAdapter): #https://stackoverflow.com/questions/61631955/python-requests-ssl-error-during-requests
    # Transport adapter that lowers OpenSSL's security level so handshakes
    # succeed against servers using legacy/weak TLS parameters.

    def init_poolmanager(self, connections, maxsize, block=False):
        """Create and initialize the urllib3 PoolManager."""
        ctx = ssl.create_default_context()
        # SECLEVEL=1 re-enables ciphers/key sizes rejected by the modern
        # OpenSSL default (SECLEVEL=2).
        ctx.set_ciphers('DEFAULT@SECLEVEL=1')
        self.poolmanager = poolmanager.PoolManager(
            num_pools=connections,
            maxsize=maxsize,
            block=block,
            ssl_version=ssl.PROTOCOL_TLS,
            ssl_context=ctx)
class Skoleintra:
    """Client for a Danish "Skoleintra" school portal.

    Construction performs the full UNI-Login SAML sign-in flow and stores
    the resulting cookies in ``self.cookies``; ``self.success`` stays False
    unless the whole flow completes (only the "elev"/student role is
    implemented — other roles leave the client unauthenticated).
    """

    def __init__(self, url, type="elev", brugernavn="", adgangskode=""):
        # url: portal address, with or without scheme / trailing slash.
        # type: account role; only "elev" (student) is handled.
        # brugernavn / adgangskode: UNI-Login username and password.
        self.success = False
        self.session = requests.session()
        # Some portal hosts need a lowered TLS security level (see TLSAdapter).
        self.session.mount('https://', TLSAdapter())
        self.uniloginClient = Unilogin(brugernavn=brugernavn, adgangskode=adgangskode)
        self.defaultHeaders = {
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "accept-encoding": "gzip, deflate, br",
            "accept-language": "da-DK,da;q=0.9,en-US;q=0.8,en;q=0.7",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36"
        }
        # Normalize the URL: strip a trailing slash, default scheme to https.
        if url[-1] == "/":
            url = url[:-1]
        if "https://" not in url and "http://" not in url:
            url = "https://" + url
        baseUrl = url.split("://")[1].split("/")[0]
        if type == "elev":
            # Step 1: open the IdP login page to obtain the initial cookies.
            url = f"{url}/Account/IdpLogin?role=Student&partnerSp=urn%3Aitslearning%3Ansi%3Asaml%3A2.0%3A{baseUrl}"
            resp = self.session.get(url, headers=self.defaultHeaders, allow_redirects=False)
            cookies = {"Pool": resp.cookies["Pool"], "SsoSessionId": resp.cookies["SsoSessionId"], "__RequestVerificationToken": resp.cookies["__RequestVerificationToken"]} #, "HasPendingSSO": resp.cookies["HasPendingSSO"]
            # Step 2: follow the UNI-Login button link scraped from the page.
            href = f"https://{baseUrl}" + BeautifulSoup(resp.text, 'html.parser').find("a", {"class": "ccl-button sk-button-light-green sk-font-icon sk-button-text-only sk-uni-login-button"}).get("href")
            # NOTE(review): headers = self.defaultHeaders aliases (does not
            # copy) the dict, so the cookie keys added below persist on
            # self.defaultHeaders — presumably tolerated; confirm.
            headers = self.defaultHeaders
            headers["cookie"] = f"Pool={cookies['Pool']}; SsoSessionId={cookies['SsoSessionId']}; __RequestVerificationToken={cookies['__RequestVerificationToken']}"
            resp = self.session.get(href, headers=headers, allow_redirects=False)
            location = resp.headers["location"]
            # Step 3: authenticate at UNI-Login; returns the portal auth URL.
            authUrl = self.uniloginClient.login(href=location, referer=baseUrl)
            resp = self.session.get(authUrl, headers=self.defaultHeaders, allow_redirects=False)
            cookies["SsoSelectedSchool"] = resp.cookies["SsoSelectedSchool"]
            cookies["UserRole"] = resp.cookies["UserRole"]
            cookies["Language"] = resp.cookies["Language"]
            cookies[".AspNet.SSO.ApplicationCookie"] = resp.cookies[".AspNet.SSO.ApplicationCookie"]
            location = resp.headers["location"]
            headers = self.defaultHeaders
            headers["cookie"] = f"SsoSelectedSchool={cookies['SsoSelectedSchool']}; Language={cookies['Language']}; .AspNet.SSO.ApplicationCookie={cookies['.AspNet.SSO.ApplicationCookie']}"
            resp = self.session.get(location, headers=headers, allow_redirects=False)
            # Step 4: relay the SAML response form back to the service
            # provider to obtain the final application cookie.
            html = BeautifulSoup(resp.text, 'html.parser')
            href = html.find('form').get('action')
            samlResponse = [html.find("input", {"name": "SAMLResponse"}).get("name"), html.find("input", {"name": "SAMLResponse"}).get("value")]
            replayState = [html.find("input", {"name": "RelayState"}).get("name"), html.find("input", {"name": "RelayState"}).get("value")]
            payload = f"{samlResponse[0]}={urllib.parse.quote_plus(samlResponse[1])}&{replayState[0]}={urllib.parse.quote_plus(replayState[1])}"
            headers = self.defaultHeaders
            headers["content-length"] = str(len(payload))
            headers["content-type"] = "application/x-www-form-urlencoded"
            headers["cookie"] = f"Pool={cookies['Pool']}; SsoSessionId={cookies['SsoSessionId']}; __RequestVerificationToken={cookies['__RequestVerificationToken']}; SsoSelectedSchool={cookies['SsoSelectedSchool']}; UserRole={cookies['UserRole']}; Language={cookies['Language']}; .AspNet.SSO.ApplicationCookie={cookies['.AspNet.SSO.ApplicationCookie']}"
            resp = self.session.post(href, headers=headers, data=payload, allow_redirects=False)
            cookies[".AspNet.ApplicationCookie"] = resp.cookies[".AspNet.ApplicationCookie"]
            self.cookies = cookies
            self.success = True

    def getWeeklyplans(self, week, year):
        """Fetch the weekly-plan JSON for *week*/*year* (synchronous).

        NOTE(review): unlike getWeeklyplansAsync below, this does not
        zero-pad single-digit week numbers — confirm the server accepts
        e.g. "5-2022" before relying on it.
        """
        headers = {
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "accept-encoding": "gzip, deflate, br",
            "accept-language": "da-DK,da;q=0.9,en-US;q=0.8,en;q=0.7",
            "cookie": f"Pool={self.cookies['Pool']}; SsoSessionId={self.cookies['SsoSessionId']}; __RequestVerificationToken={self.cookies['__RequestVerificationToken']}; SsoSelectedSchool={self.cookies['SsoSelectedSchool']}; UserRole={self.cookies['UserRole']}; Language={self.cookies['Language']}; .AspNet.SSO.ApplicationCookie={self.cookies['.AspNet.SSO.ApplicationCookie']}; .AspNet.ApplicationCookie={self.cookies['.AspNet.ApplicationCookie']}",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36",
        }
        resp = self.session.get(f"https://{self.cookies['SsoSelectedSchool']}/student/weeklyplans/list/item/class/{week}-{year}", headers=headers)
        # The plan JSON is embedded in a data attribute of the page's root div.
        weeklyplan = json.loads(BeautifulSoup(resp.text, 'html.parser').find("div", {"id": "root"}).get("data-clientlogic-settings-weeklyplansapp"))
        return weeklyplan

    async def getWeeklyplansAsync(self, week, year):
        """Fetch the weekly-plan JSON for *week*/*year* using httpx (async)."""
        # Zero-pad single-digit weeks ("5" -> "05") for the URL.
        if len(str(week)) == 1:
            week = f"0{week}"
        headers = {
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "accept-encoding": "gzip, deflate, br",
            "accept-language": "da-DK,da;q=0.9,en-US;q=0.8,en;q=0.7",
            "cookie": f"Pool={self.cookies['Pool']}; SsoSessionId={self.cookies['SsoSessionId']}; __RequestVerificationToken={self.cookies['__RequestVerificationToken']}; SsoSelectedSchool={self.cookies['SsoSelectedSchool']}; UserRole={self.cookies['UserRole']}; Language={self.cookies['Language']}; .AspNet.SSO.ApplicationCookie={self.cookies['.AspNet.SSO.ApplicationCookie']}; .AspNet.ApplicationCookie={self.cookies['.AspNet.ApplicationCookie']}",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36",
        }
        async with httpx.AsyncClient() as client:
            resp = await client.get(f"https://{self.cookies['SsoSelectedSchool']}/student/weeklyplans/list/item/class/{week}-{year}", headers=headers)
            weeklyplan = json.loads(BeautifulSoup(resp.text, 'html.parser').find("div", {"id": "root"}).get("data-clientlogic-settings-weeklyplansapp"))
            return weeklyplan
from valor import Valor
from discord.ext.commands import Context
from util import ErrorEmbed, LongTextEmbed, LongFieldEmbed, guild_name_from_tag
import random
from datetime import datetime
import requests
from sql import ValorSQL
from commands.common import get_uuid, from_uuid
async def _register_leaderboard(valor: Valor):
    """Register the leaderboard command and its help entry on the bot."""
    desc = "The leaderboard"

    @valor.command()
    async def leaderboard(ctx: Context):
        # Top 50 players by galleons_graveyard; names come from the
        # uuid_name join, falling back to a from_uuid lookup (and finally a
        # placeholder string) when the join has no name.
        res = await ValorSQL._execute("SELECT uuid_name.name, uuid_name.uuid, player_stats.galleons_graveyard FROM player_stats LEFT JOIN uuid_name ON uuid_name.uuid=player_stats.uuid ORDER BY galleons_graveyard DESC LIMIT 50")
        stats = []
        for m in res:
            if not m[0] and m[1]:
                stats.append((await from_uuid(m[1]), m[2]))
            else:
                stats.append((m[0] if m[0] else "can't find name", m[2]))
        # Fixed-width monospace rows: rank, name, score.
        table = "```\n"+'\n'.join("%3d. %24s %5d" % (i+1, stats[i][0], stats[i][1]) for i in range(len(stats)))+"\n```"
        await LongTextEmbed.send_message(valor, ctx, "Galleon's Graveyard", content=table, color=0x11FFBB)

    @leaderboard.error
    async def cmd_error(ctx, error: Exception):
        # Report a generic embed to the user, then re-raise for logging.
        await ctx.send(embed=ErrorEmbed())
        raise error

    # Help override intentionally reuses the command name "leaderboard".
    @valor.help_override.command()
    async def leaderboard(ctx: Context):
        await LongTextEmbed.send_message(valor, ctx, "Leaderboard", desc, color=0xFF00)
| 1,433 | 475 |
from deduplication import simhash, lsimhash
from pathlib import Path
from scipy.spatial.distance import hamming
import numpy as np
import textwrap
def test_main():
    """Smoke-test lsimhash on the bundled wiki_nlp sample document."""
    sample = Path(__file__).resolve().parent / 'data' / 'wiki_nlp.txt'
    with open(sample, 'r') as fh:
        text = fh.read()
    print(lsimhash.lsimhash(text))
def test_cmp():
    # Hash two near-duplicate paragraphs; simhash values of similar texts
    # should be close in Hamming distance (inspected via the printout).
    h1 = simhash.simhash('Hyperparameter Tuning is one of the most computationally expensive tasks when creating deep learning networks. Luckily, you can use Google Colab to speed up the process significantly. In this post, I will show you how you can tune the hyperparameters of your existing keras models using Hyperas and run everything in a Google Colab Notebook.')
    h2 = simhash.simhash('Hyperparameter tuning is one of the most computationally expensive tasks for deep learning networks. we can use Google Colab to speed up the process significantly. In this post, I will show you how to tune the hyper parameters of keras models using Hyperas and run everything.')
    print('\n', h1, '\n', h2)
| 1,057 | 291 |
import numpy as np
def rmsle(y_pred, y_true):
    """Root mean squared logarithmic error between predictions and targets."""
    assert len(y_pred) == len(y_true)
    log_diff = np.log1p(y_pred) - np.log1p(y_true)
    return np.sqrt(np.mean(np.square(log_diff)))
| 164 | 77 |
from os import listdir, makedirs
from os.path import isfile, join, exists
from __main__ import *
print '\033[32m~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\033[0m\n'
print '\033[94m'+'||===== '+'\033[92m' + ' ====== '+'\033[93m'+' ====='
print '\033[94m'+'|| \\\\ '+'\033[92m' + ' // '+'\033[93m'+' //'
print '\033[94m'+'|| || ' + '\033[92m'+'|| '+'\033[93m'+'||'
print '\033[94m'+'|| // '+'\033[92m' + '\\\\ '+'\033[93m'+'||'
print '\033[94m'+'||===== '+'\033[92m' + ' ===== '+'\033[93m'+'||'
print '\033[94m'+'|| \\\\ '+'\033[92m' + ' \\\\ '+'\033[93m'+'||'
print '\033[94m'+'|| || '+'\033[92m' + ' || '+'\033[93m'+'||'
print '\033[94m'+'|| || ' + '\033[92m'+' // '+'\033[93m'+' \\\\'
print '\033[94m'+'|| \\\\'+'\033[92m' + ' ====== '+'\033[93m'+' =====\033[0m'
print 'Spring \033[94mR\033[0mepository-\033[92mS\033[0mervice-\033[93mC\033[0montroller generator'
print 'v 1.0.0 \n\033[100mcopyrights : Nilanka manoj\033[0m'
print 'Repository : https://github.com/nilankamanoj/spring-rsc-generator'
print '\033[32m~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\033[0m\n'
# common imports for repository, service and controllers
# Maps a short key to the fully-qualified Spring/Java import it expands to
# in the generated .java files.
IMPORT_MAP = {
    'jpa_repo': 'org.springframework.data.jpa.repository.JpaRepository',
    'autowire': 'org.springframework.beans.factory.annotation.Autowired',
    'service': 'org.springframework.stereotype.Service',
    'list': 'java.util.List',
    'http_status': 'org.springframework.http.HttpStatus',
    'request_mapping': 'org.springframework.web.bind.annotation.RequestMapping',
    'response_entity': 'org.springframework.http.ResponseEntity',
    'controller': 'org.springframework.stereotype.Controller',
    'request_method': 'org.springframework.web.bind.annotation.RequestMethod',
    'path_variable': 'org.springframework.web.bind.annotation.PathVariable',
    'request_body': 'org.springframework.web.bind.annotation.RequestBody'
}
# exception cases for plurals
ABERRANT_PLURAL_MAP = {
    'appendix': 'appendices',
    'barracks': 'barracks',
    'cactus': 'cacti',
    'child': 'children',
    'criterion': 'criteria',
    'deer': 'deer',
    'echo': 'echoes',
    'elf': 'elves',
    'embargo': 'embargoes',
    'focus': 'foci',
    'fungus': 'fungi',
    'goose': 'geese',
    'hero': 'heroes',
    'hoof': 'hooves',
    'index': 'indices',
    'knife': 'knives',
    'leaf': 'leaves',
    'life': 'lives',
    'man': 'men',
    'mouse': 'mice',
    'nucleus': 'nuclei',
    'person': 'people',
    'phenomenon': 'phenomena',
    'potato': 'potatoes',
    'self': 'selves',
    'syllabus': 'syllabi',
    'tomato': 'tomatoes',
    'torpedo': 'torpedoes',
    'veto': 'vetoes',
    'woman': 'women',
}

# vowel set for pluralize
VOWELS = set('aeiou')


def pluralize(singular):
    """Return the English plural of *singular* using simple spelling rules.

    Known irregular nouns come from ABERRANT_PLURAL_MAP; everything else is
    pluralized by suffix heuristics (-y/-ies, -s/-ses, -ch/-sh/-es, else -s).
    An empty string pluralizes to ''.
    """
    if not singular:
        return ''
    irregular = ABERRANT_PLURAL_MAP.get(singular)
    if irregular:
        return irregular
    root, suffix = singular, 's'
    try:
        if singular[-1] == 'y' and singular[-2] not in VOWELS:
            root, suffix = singular[:-1], 'ies'
        elif singular[-1] == 's':
            if singular[-2] in VOWELS:
                if singular[-3:] == 'ius':
                    root, suffix = singular[:-2], 'i'
                else:
                    root, suffix = singular[:-1], 'ses'
            else:
                suffix = 'es'
        elif singular[-2:] in ('ch', 'sh'):
            suffix = 'es'
    except IndexError:
        # Word too short for the look-behind rules: fall back to plain -s.
        root, suffix = singular, 's'
    return root + suffix
# load model class names from model package
def loadModels():
    # Lists the .java files under src/main/java/<modelPkg>/ and strips the
    # ".java" extension (f[:-5]) to obtain class names.
    # NOTE(review): relies on the module-level `modelPkg` set from user
    # input in the __main__ block.
    models = [f[:-5] for f in listdir('src/main/java/'+modelPkg.replace(
        '.', '/')+'/') if isfile(join('src/main/java/'+modelPkg.replace('.', '/')+'/', f))]
    return models
# class creation data generation for each model
def createModelData(model):
    """Build the naming/path dictionary used to generate the repository,
    service and controller files for one *model* class.

    Uses the module-level package/prefix/suffix settings collected from
    user input in the __main__ block.
    """
    attribute = model[0].lower() + model[1:]
    repo_cls = reposPrefx + model + reposSufx
    service_cls = servicesPrefx + model + servicesSufx
    controller_cls = controllersPrefx + model + controllersSufx
    repo_dir = 'src/main/java/' + repoPkg.replace('.', '/') + '/'
    service_dir = 'src/main/java/' + servicePkg.replace('.', '/') + '/'
    controller_dir = 'src/main/java/' + controllerPkg.replace('.', '/') + '/'
    return {
        'model': model,
        'attribute_single': attribute,
        'attribute_plural': pluralize(attribute),
        'class_repository': repo_cls,
        'attribute_repository': repo_cls[0].lower() + repo_cls[1:],
        'class_service': service_cls,
        'attribute_service': service_cls[0].lower() + service_cls[1:],
        'class_controller': controller_cls,
        'model_location': modelPkg + '.' + model,
        'repository_location': repoPkg + '.' + repo_cls,
        'service_location': servicePkg + '.' + service_cls,
        'folder_repository': repo_dir,
        'file_repository': repo_dir + repo_cls + '.java',
        'folder_service': service_dir,
        'file_service': service_dir + service_cls + '.java',
        'folder_controller': controller_dir,
        'file_controller': controller_dir + controller_cls + '.java',
    }
# create new files/ foldes
def createFiles(models):
    # For every model: ensure the repository/service/controller package
    # directories exist, then write one .java file of each kind from the
    # inline templates below. Progress is echoed in ANSI colors.
    for model in models:
        data = createModelData(model)
        if not exists(data.get('folder_repository')):
            makedirs(data.get('folder_repository'))
            print '\033[94m' + 'create '+'\033[0m' + \
                ': ' + data.get('folder_repository')
        if not exists(data.get('folder_service')):
            makedirs(data.get('folder_service'))
            print '\033[94m' + 'create '+'\033[0m' + \
                ': ' + data.get('folder_service')
        if not exists(data.get('folder_controller')):
            makedirs(data.get('folder_controller'))
            print '\033[94m' + 'create '+'\033[0m' + \
                ': ' + data.get('folder_controller')
        # Repository: a JpaRepository<Model, idType> interface.
        with open(data.get('file_repository'), 'w') as f:
            f.write('package '+repoPkg+';\n\nimport '+IMPORT_MAP.get('jpa_repo')+';\nimport '+data.get('model_location') +
                    ';\n\npublic interface '+data.get('class_repository')+' extends JpaRepository<'+model+', '+idType+'> {\n\n}')
        print '\033[92m' + 'create'+'\033[0m' + \
            ' : ' + data.get('file_repository')
        # Service: @Service class delegating findAll/findOne/save to the repository.
        with open(data.get('file_service'), 'w') as f2:
            f2.write('package '+servicePkg+';\n\nimport '+IMPORT_MAP.get('autowire')+';\nimport '+IMPORT_MAP.get('service')+';\nimport '+data.get('model_location')+';\nimport '+data.get('repository_location')+';\nimport '+IMPORT_MAP.get('list')+';\n\n@Service\npublic class '+data.get('class_service')+' {\n\t@Autowired\n\tprivate '+data.get('class_repository')+' '+data.get('attribute_repository')+';\n\n\tpublic List<'+model+'> findAll() {\n\t\tList<'+model+'> '+data.get('attribute_plural')+' = '+data.get('attribute_repository')+'.findAll();\n\t\treturn '+data.get(
                'attribute_plural')+';\n\t}\n\n\tpublic '+model+' findOne('+idType+' id) {\n\t\t'+model+' '+data.get('attribute_single')+' = '+data.get('attribute_repository')+'.findOne(id);\n\t\treturn '+data.get('attribute_single')+';\n\t}\n\n\tpublic '+model+' save('+model + ' ' + data.get('attribute_single')+') {\n\t\t'+data.get('attribute_repository')+'.save('+data.get('attribute_single')+');\n\t\t'+model+' new'+model+' = '+data.get('attribute_repository')+'.findOne('+data.get('attribute_single')+'.getId());\n\t\treturn new'+model+';\n\t}\n\n}')
        print '\033[92m' + 'create'+'\033[0m' + \
            ' : ' + data.get('file_service')
        # Controller: @Controller with GET /, GET /{id} and POST / endpoints.
        with open(data.get('file_controller'), 'w') as f3:
            f3.write('package '+controllerPkg+';\n\nimport '+IMPORT_MAP.get('autowire')+';\nimport '+IMPORT_MAP.get('http_status')+';\nimport '+IMPORT_MAP.get('request_method')+';\nimport '+IMPORT_MAP.get('request_mapping')+';\nimport '+IMPORT_MAP.get('list')+';\nimport '+IMPORT_MAP.get('response_entity')+';\nimport '+IMPORT_MAP.get('path_variable')+';\nimport '+IMPORT_MAP.get('request_body')+';\nimport '+IMPORT_MAP.get('controller')+';\nimport '+data.get(
                'model_location')+';\nimport '+data.get('service_location')+';\n\n@Controller\n@RequestMapping(path = "/'+data.get('attribute_single')+'")\npublic class '+data.get('class_controller')+'{\n\t@Autowired\n\tprivate '+data.get('class_service')+' '+data.get('attribute_service')+';\n\n\t@RequestMapping(path = "/", method = RequestMethod.GET)\n\tpublic ResponseEntity<?> findAll() {\n\t\tList<'+model+'> '+data.get('attribute_plural')+' = '+data.get('attribute_service')+'.findAll();\n\t\treturn new ResponseEntity<>('+data.get('attribute_plural')+', HttpStatus.OK);\n\t}\n\n\t@RequestMapping(path = "/{'+'id}", method = RequestMethod.GET)\n\tpublic ResponseEntity<?> findOne(@PathVariable Integer id) {\n\t\t'+model+' '+data.get('attribute_single')+' = '+data.get('attribute_service')+'.findOne(id);\n\t\treturn new ResponseEntity<>('+data.get('attribute_single')+', HttpStatus.OK);\n\t}\n\n\t@RequestMapping(path = "/", method = RequestMethod.POST)\n\tpublic ResponseEntity<?> save(@RequestBody '+model+' '+data.get('attribute_single')+') {\n\t\t'+model+' new'+model+' = '+data.get('attribute_service')+'.save('+data.get('attribute_single')+');\n\t\treturn new ResponseEntity<>( new'+model+', HttpStatus.OK);\n\t}\n}')
        print '\033[92m' + 'create'+'\033[0m' + \
            ' : ' + data.get('file_controller')
if __name__ == '__main__':
    # Interactively collect the generation settings, echo them back, and
    # loop until the user confirms with 'y'; then generate one repository,
    # service and controller per model found in the model package.
    confirm = 'n'
    while(confirm != 'y'):
        modelPkg = raw_input("model package : ")
        repoPkg = raw_input("repository package : ")
        servicePkg = raw_input("service package : ")
        controllerPkg = raw_input("controller package : ")
        idType = raw_input("model id type : ")
        reposPrefx = raw_input("class name prefix for repositories : ")
        reposSufx = raw_input("class name sufix for repositories : ")
        servicesPrefx = raw_input("class name prefix for services : ")
        servicesSufx = raw_input("class name sufix for services : ")
        controllersPrefx = raw_input("class name prefix for controllers : ")
        controllersSufx = raw_input("class name sufix for controllers : ")
        print '\033[93m' + \
            '======================= input data =============================' + \
            '\033[0m'
        print 'models : '+modelPkg + '\nrepository : '+repoPkg + '\nservice : ' + servicePkg + '\ncontroller : '+controllerPkg + '\nid type : '+idType + '\nrepository prefix : '+reposPrefx + \
            '\nrepository suffix : '+reposSufx + '\nservice prefix : '+servicesPrefx + '\nservice sufix : ' + \
            servicesSufx + '\ncontroller prefix : '+controllersPrefx + \
            '\ncontroller sufix : '+controllersSufx
        print '\033[93m' + \
            '======================= confirmation =============================' + \
            '\033[0m'
        # Only 'y' or 'n' are accepted; 'n' restarts the questionnaire.
        while(True):
            confirm = raw_input('confirm ? y/n : ')
            if(confirm in ['y', 'n']):
                break
    models = loadModels()
    createFiles(models)
    print '\033[93m' + \
        '======================= done =============================' + \
        '\033[0m'
| 11,581 | 4,117 |
from django.contrib.auth.models import User
from django.db import models
from django.core.validators import RegexValidator
class Profile(models.Model):
    """Profile model.

    A one-to-one extension of the built-in User that stores extra
    contact details, a picture, and denormalized activity counters.
    """

    user = models.OneToOneField(User, on_delete=models.CASCADE)

    # Phone validation: optional "+", then 9-15 digits.
    phone_regex = RegexValidator(
        regex=r'\+?1?\d{9,15}$',
        # Bug fix: user-facing message typo "in th format" -> "in the format".
        message="Phone number must be entered in the format: +999999999999. Up to 15 digits allowed."
    )
    phone_number = models.CharField(
        validators=[phone_regex],
        max_length=15,
        blank=True
    )
    picture = models.ImageField(
        upload_to='users/pictures',
        blank=True,
        null=True
    )
    # Denormalized counters maintained by application code.
    followers = models.IntegerField(default=0)
    following = models.IntegerField(default=0)
    totalTweets = models.IntegerField(default=0)
    about_me = models.TextField(max_length=250, blank=True)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    def __str__(self):
        """Return username."""
        return self.user.username
from game.sequence import Sequence
max_value = 999888777666555
class AlfaBeta:
    """Alpha-beta minimax move chooser over a flat board state.

    `state` is a flat list where 0 marks an empty cell and `player`/`enemy`
    mark occupied cells; `values` holds per-cell values fed to Sequence for
    position evaluation; `k` is the sequence length handed to Sequence.
    The search mutates `state` in place and restores it after each probe.
    """

    def __init__(self, values, state, k, player, enemy, max_deepth = None):
        # Search depth defaults to k plies when max_deepth is not given.
        self.values = values
        self.state = state
        self.k = k
        self.player = player
        self.enemy = enemy
        if max_deepth is None:
            self.max_deepth = k
        else:
            self.max_deepth = max_deepth

    def get_move(self):
        """Return the index of the best move for `player`, or None if no empty cell."""
        alfa = float('-inf')
        beta = float('inf')
        move = None
        for i in range(0, len(self.state)):
            if self.state[i] == 0:
                # Try the move, evaluate the opponent's reply, then undo.
                self.state[i] = self.player
                child_alfa = self.alfa_beta(self.max_deepth - 1, alfa, beta, self.enemy)
                if alfa < child_alfa:
                    alfa = child_alfa
                    move = i
                self.state[i] = 0
                if alfa >= beta:
                    break
        return move

    def alfa_beta(self, deepth, alfa, beta, current_player):
        """Recursive alpha-beta search; returns the value of the current node."""
        terminal_value = self.calculate_terminal_node_value(current_player)
        if terminal_value is not None:
            return terminal_value
        if deepth == 0:
            return self.evaluate_node(current_player)
        if current_player != self.player:
            return self.enemy_visits_children(deepth, alfa, beta, current_player)
        else:
            return self.visit_children(deepth, alfa, beta, current_player)

    def visit_children(self, deepth, alfa, beta, current_player):
        # Maximizing node: our player to move; prune when alfa >= beta.
        for i in range(0, len(self.state)):
            if self.state[i] == 0:
                self.state[i] = current_player
                child_alfa = self.alfa_beta(deepth - 1, alfa, beta, self.enemy)
                alfa = max(alfa, child_alfa)
                self.state[i] = 0
                if alfa >= beta:
                    break
        return alfa

    def enemy_visits_children(self, deepth, alfa, beta, current_player):
        # Minimizing node: opponent to move; prune when alfa >= beta.
        for i in range(0, len(self.state)):
            if self.state[i] == 0:
                self.state[i] = current_player
                child_beta = self.alfa_beta(deepth - 1, alfa, beta, self.player)
                beta = min(beta, child_beta)
                self.state[i] = 0
                if alfa >= beta:
                    break
        return beta

    def evaluate_node(self, current_player):
        # Heuristic leaf value from a Sequence over current_player's cells
        # plus empties; negated when scoring from the opponent's side.
        sequence = self.create_evalute_sequence(current_player)
        value = sequence.evaluate()
        if current_player == self.player:
            return value
        else:
            return -value

    def calculate_terminal_node_value(self, current_player):
        """Return +/-max_value for a terminal position, else None.

        NOTE(review): this checks for a completed sequence belonging to
        *current_player* (the side about to move), yet a win would normally
        have been completed by the previous mover — confirm against
        Sequence.is_term semantics.
        """
        sequence = self.create_terminal_sequence(current_player)
        is_term = sequence.is_term()
        if is_term:
            if current_player == self.player:
                return max_value
            else:
                return -max_value
        return None

    def create_terminal_sequence(self, player):
        # Sequence over cells already owned by `player`.
        return self.create_sequence(lambda el: el == player)

    def create_evalute_sequence(self, player):
        # Sequence over cells owned by `player` or still empty.
        return self.create_sequence(lambda el: el == player or el == 0)

    def create_sequence(self, element_pred):
        # Collect the `values` entries whose board cell satisfies the predicate.
        elements = []
        for i in range(0, len(self.state)):
            if element_pred(self.state[i]):
                elements.append(self.values[i])
        return Sequence(elements, self.k)
| 3,388 | 1,015 |
#----------------------------------------------------------------------------#
# Imports
#----------------------------------------------------------------------------#
from flask import Flask, render_template, request,jsonify
import random
# from flask.ext.sqlalchemy import SQLAlchemy
import logging
from logging import Formatter, FileHandler
import os
import requests
from bs4 import BeautifulSoup
#----------------------------------------------------------------------------#
# App Config.
#----------------------------------------------------------------------------#
app = Flask(__name__)
app.config.from_object('config')
#db = SQLAlchemy(app)
# Automatically tear down SQLAlchemy.
'''
@app.teardown_request
def shutdown_session(exception=None):
db_session.remove()
'''
# Login required decorator.
'''
def login_required(test):
@wraps(test)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return test(*args, **kwargs)
else:
flash('You need to login first.')
return redirect(url_for('login'))
return wrap
'''
#----------------------------------------------------------------------------#
# Controllers.
#----------------------------------------------------------------------------#
# @app.route('/')
# def home():
# return render_template('pages/placeholder.home.html')
@app.route('/')
def news():
    """Render the news landing page."""
    return render_template('pages/placeholder.news.html')
@app.route('/randomnews')
def randomnews():
    """Scrape dawn.com story headlines and return one at random as JSON."""
    page = requests.get('http://www.dawn.com')
    soup = BeautifulSoup(page.text, 'html5lib')
    headlines = []
    for story in soup.find_all('h2', {'data-layout': 'story'}):
        anchor = BeautifulSoup(str(story), 'html.parser').find('a', href=True)
        # Each entry is a [title, url] pair.
        headlines.append([str(anchor.find(text=True)), str(anchor['href'])])
    response = jsonify({
        'data': random.choice(headlines),
        'status': 'awesome'
    })
    # Allow cross-origin consumption of this endpoint.
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response, 200
@app.route('/fortune', methods=['GET'])
def fortune():
    """Return one random fortune-cookie message as JSON (CORS-enabled)."""
    fortunes = [
        'A feather in the hand is better than a bird in the air. ',
        'A golden egg of opportunity falls into your lap this month.',
        'Bide your time, for success is near.',
        'Curiosity kills boredom. Nothing can kill curiosity.',
        'Disbelief destroys the magic.',
        'Dont just spend time. Invest it.',
        'Every wise man started out by asking many questions.',
        'Fortune Not Found: Abort, Retry, Ignore?',
        'Good to begin well, better to end well.',
        'How many of you believe in psycho-kinesis? Raise my hand.',
        'Imagination rules the world.',
        'Keep your face to the sunshine and you will never see shadows.',
        'Listen to everyone. Ideas come from everywhere.',
        'Man is born to live and not prepared to live.',
        'No one can walk backwards into the future.',
        'One of the first things you should look for in a problem is its positive side.',
        'Pick battles big enough to matter, small enough to win.',
        'Remember the birthday but never the age.',
        'Success is failure turned inside out.',
        'The harder you work, the luckier you get.',
        'Use your eloquence where it will do the most good.',
        'What is hidden in an empty box?',
        'Your reputation is your wealth.'
    ]
    payload = {'data': random.choice(fortunes), 'status': 'awesome'}
    response = jsonify(payload)
    # Allow cross-origin consumption of this endpoint.
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response, 200
# Error handlers.
@app.errorhandler(500)
def internal_error(error):
    """Render the generic 500 page (DB rollback is currently disabled)."""
    #db_session.rollback()
    return render_template('errors/500.html'), 500
@app.errorhandler(404)
def not_found_error(error):
    """Render the 404 page."""
    return render_template('errors/404.html'), 404
if not app.debug:
    # Production logging: INFO and above go to error.log with timestamp,
    # level, message, and the source file/line.
    file_handler = FileHandler('error.log')
    file_handler.setFormatter(
        Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')
    )
    app.logger.setLevel(logging.INFO)
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)
    app.logger.info('errors')
#----------------------------------------------------------------------------#
# Launch.
#----------------------------------------------------------------------------#
# Default port:
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
# Or specify port manually:
'''
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
'''
| 4,620 | 1,339 |
#!/usr/bin/env python3
# Written by Antonio Galea - 2010/11/18
# Distributed under Gnu LGPL 3.0
# see http://www.gnu.org/licenses/lgpl-3.0.txt
import binascii
import os
import struct
import sys
import zlib
from optparse import OptionParser
try:
from intelhex import IntelHex
except ImportError:
IntelHex = None
# Default USB vendor:product pair written into the DFU suffix
# (0x0483 is STMicroelectronics; 0xdf11 its DFU-mode product ID).
DEFAULT_DEVICE = "0x0483:0xdf11"
# Default content for a target's 255-byte name field.
DEFAULT_NAME = b'ST...'
def named(tuple, names):
    """Map whitespace-separated *names* onto the values of *tuple* as a dict."""
    return dict(zip(names.split(), tuple))


def consume(fmt, data, names):
    """Unpack one struct record (*fmt*) from the head of *data*.

    Returns a (field-name -> value dict, remaining bytes) pair.
    """
    size = struct.calcsize(fmt)
    fields = named(struct.unpack(fmt, data[:size]), names)
    return fields, data[size:]
def cstring(bytestring):
    """Return *bytestring* truncated at the first NUL byte (C-string style)."""
    head, _sep, _tail = bytestring.partition(b'\0')
    return head
def compute_crc(data):
    """Return the bitwise complement of CRC-32 of *data*, masked to 32 bits.

    DFU/DfuSe files store the one's-complement of the standard CRC-32;
    ~crc equals -crc - 1, matching the original arithmetic form.
    """
    return ~zlib.crc32(data) & 0xFFFFFFFF
def parse(file, dump_images=False):
    """Parse and print the structure of a DfuSe (.dfu) *file*.

    Walks the DfuSe prefix, each target prefix with its image elements,
    then the 16-byte DFU suffix, and verifies the trailing CRC. When
    *dump_images* is true, each image payload is also written out to
    "<file>.target<t>.image<e>.bin" in the current directory.
    """
    print('File: "%s"' % file)
    data = open(file, 'rb').read()
    # The stored CRC covers the whole file except its final 4 CRC bytes.
    crc = compute_crc(data[:-4])
    prefix, data = consume('<5sBIB', data, 'signature version size targets')
    print('%(signature)s v%(version)d, image size: %(size)d, targets: %(targets)d' % prefix)
    for t in range(prefix['targets']):
        tprefix, data = consume(
            '<6sBI255s2I', data, 'signature altsetting named name size elements'
        )
        tprefix['num'] = t
        if tprefix['named']:
            # Name is a NUL-padded 255-byte field; keep up to the first NUL.
            tprefix['name'] = cstring(tprefix['name'])
        else:
            tprefix['name'] = ''
        print(
            '%(signature)s %(num)d, alt setting: %(altsetting)s, name: "%(name)s", size: %(size)d, elements: %(elements)d'
            % tprefix
        )
        tsize = tprefix['size']
        # Slice off this target's payload and walk its image elements.
        target, data = data[:tsize], data[tsize:]
        for e in range(tprefix['elements']):
            eprefix, target = consume('<2I', target, 'address size')
            eprefix['num'] = e
            print(' %(num)d, address: 0x%(address)08x, size: %(size)d' % eprefix)
            esize = eprefix['size']
            image, target = target[:esize], target[esize:]
            if dump_images:
                out = '%s.target%d.image%d.bin' % (file, t, e)
                open(out, 'wb').write(image)
                print(' DUMPED IMAGE TO "%s"' % out)
        # Leftover bytes inside the target mean the sizes were inconsistent.
        if len(target):
            print("target %d: PARSE ERROR" % t)
    # The remaining data should be exactly the 16-byte DFU suffix.
    suffix = named(struct.unpack('<4H3sBI', data[:16]), 'device product vendor dfu ufd len crc')
    print(
        'usb: %(vendor)04x:%(product)04x, device: 0x%(device)04x, dfu: 0x%(dfu)04x, %(ufd)s, %(len)d, 0x%(crc)08x'
        % suffix
    )
    if crc != suffix['crc']:
        print("CRC ERROR: computed crc32 is 0x%08x" % crc)
    data = data[16:]
    if data:
        print("PARSE ERROR")
def checkbin(binfile):
    """Exit with status 1 if *binfile* already ends in a DFU suffix.

    Raw images used for building must be suffix-free. A suffix is detected
    by its 'UFD' signature together with a matching trailing CRC; files
    shorter than a suffix (16 bytes) are trivially fine.
    """
    data = open(binfile, 'rb').read()
    if (len(data) < 16):
        return
    crc = compute_crc(data[:-4])
    suffix = named(struct.unpack('<4H3sBI', data[-16:]), 'device product vendor dfu ufd len crc')
    if crc == suffix['crc'] and suffix['ufd'] == b'UFD':
        print(
            'usb: %(vendor)04x:%(product)04x, device: 0x%(device)04x, dfu: 0x%(dfu)04x, %(ufd)s, %(len)d, 0x%(crc)08x'
            % suffix
        )
        print("It looks like the file %s has a DFU suffix!" % binfile)
        print("Please remove any DFU suffix and retry.")
        sys.exit(1)
def build(file, targets, name=DEFAULT_NAME, device=DEFAULT_DEVICE):
    """Build a DfuSe file at ``file`` from ``targets``.

    targets: list of targets, each a list of {'address': int, 'data': bytes}
             image dicts.
    name:    255-byte target name packed into each target prefix.
    device:  "vendor:device" string packed into the suffix (validated by int()).
    """
    data = b''
    for target in targets:
        tdata = b''
        for image in target:
            # Element prefix: target address and data length, then the raw image.
            tdata += struct.pack('<2I', image['address'], len(image['data'])) + image['data']
        # Target prefix: alt setting 0, "named" flag 1, name, size, element count.
        tdata = struct.pack('<6sBI255s2I', b'Target', 0, 1, name, len(tdata), len(target)) + tdata
        data += tdata
    # DfuSe prefix: signature, version 1, total size incl. this 11-byte prefix.
    data = struct.pack('<5sBIB', b'DfuSe', 1, len(data) + 11, len(targets)) + data
    v, d = [int(x, 0) & 0xFFFF for x in device.split(':', 1)]
    # Suffix (sans CRC): bcdDevice 0, idProduct, idVendor, bcdDFU 0x011A, 'UFD', length 16.
    data += struct.pack('<4H3sB', 0, d, v, 0x011a, b'UFD', 16)
    crc = compute_crc(data)
    data += struct.pack('<I', crc)
    with open(file, 'wb') as out:  # fix: close the output file deterministically
        out.write(data)
if __name__ == "__main__":
    usage = """
%prog [-d|--dump] infile.dfu
%prog {-b|--build} address:file.bin [-b address:file.bin ...] [{-D|--device}=vendor:device] outfile.dfu
%prog {-s|--build-s19} file.s19 [{-D|--device}=vendor:device] outfile.dfu
%prog {-i|--build-ihex} file.hex [-i file.hex ...] [{-D|--device}=vendor:device] outfile.dfu"""
    parser = OptionParser(usage=usage)
    parser.add_option(
        "-b", "--build", action="append", dest="binfiles", metavar="BINFILES",
        help="build a DFU file from given BINFILES. Note that the BINFILES must not have any DFU suffix!"
    )
    parser.add_option(
        "-i", "--build-ihex", action="append", dest="hexfiles", metavar="HEXFILES",
        help="build a DFU file from given Intel HEX HEXFILES"
    )
    parser.add_option(
        "-s", "--build-s19", type="string", dest="s19files", metavar="S19FILE",
        help="build a DFU file from given S19 S-record S19FILE"
    )
    parser.add_option(
        "-D", "--device", action="store", dest="device", metavar="DEVICE",
        help="build for DEVICE, defaults to %s" % DEFAULT_DEVICE
    )
    parser.add_option(
        "-d", "--dump", action="store_true", dest="dump_images", default=False,
        help="dump contained images to current directory"
    )
    (options, args) = parser.parse_args()

    def check_device(device):
        # Validate a "vendor:device" spec; exit with a message on bad input.
        # Fix: the original used a bare ``except:`` (twice, duplicated), which
        # also swallowed SystemExit/KeyboardInterrupt.
        try:
            v, d = [int(x, 0) & 0xFFFF for x in device.split(':', 1)]
        except ValueError:
            print("Invalid device '%s'." % device)
            sys.exit(1)

    def parse_srecord_payload(line, addr_end):
        # Split one S1/S2/S3 record: the address field spans columns 4..addr_end,
        # the payload runs up to the trailing 2-character checksum.
        try:
            record_address = int(line[4:addr_end], 16) & 0xFFFFFFFF
        except ValueError:
            # Fix: report the offending field (the original printed the previous
            # segment's ``address`` variable instead).
            print("Address %s invalid." % line[4:addr_end])
            sys.exit(1)
        return record_address, binascii.unhexlify(line[addr_end:-2])

    if (options.binfiles or options.hexfiles) and len(args) == 1:
        target = []
        if options.binfiles:
            for arg in options.binfiles:
                try:
                    address, binfile = arg.split(':', 1)
                except ValueError:
                    print("Address:file couple '%s' invalid." % arg)
                    sys.exit(1)
                try:
                    address = int(address, 0) & 0xFFFFFFFF
                except ValueError:
                    print("Address %s invalid." % address)
                    sys.exit(1)
                if not os.path.isfile(binfile):
                    print("Unreadable file '%s'." % binfile)
                    sys.exit(1)
                checkbin(binfile)
                with open(binfile, 'rb') as f:  # fix: close the input file
                    target.append({'address': address, 'data': f.read()})
        if options.hexfiles:
            if not IntelHex:
                print("Error: IntelHex python module could not be found")
                sys.exit(1)
            for hexfile in options.hexfiles:  # fix: don't shadow builtin ``hex``
                ih = IntelHex(hexfile)
                for (address, end) in ih.segments():
                    # Segment addresses come from IntelHex; just mask to 32 bits
                    # (the original wrapped this in a try/except that could
                    # never fire).
                    address &= 0xFFFFFFFF
                    target.append({
                        'address': address,
                        'data': ih.tobinstr(start=address, end=end - 1)
                    })
        outfile = args[0]
        device = options.device if options.device else DEFAULT_DEVICE
        check_device(device)
        build(outfile, [target], DEFAULT_NAME, device)
    elif options.s19files and len(args) == 1:
        address = 0
        data = b""  # fix: accumulate bytes, not str (str + bytes raised TypeError)
        target = []
        name = DEFAULT_NAME
        with open(options.s19files) as f:
            lines = f.readlines()
        for line in lines:
            curaddress = 0
            curdata = b""
            line = line.rstrip()
            if line.startswith("S0"):
                # Header record: hex-encoded module name; strip any ".s19" suffix.
                # Fix: operate on bytes, matching a2b_hex's return type.
                name = binascii.a2b_hex(line[8:len(line) - 2]).replace(b".s19", b"")
            elif line.startswith("S3"):
                curaddress, curdata = parse_srecord_payload(line, 12)  # 32-bit address
            elif line.startswith("S2"):
                curaddress, curdata = parse_srecord_payload(line, 10)  # 24-bit address
            elif line.startswith("S1"):
                curaddress, curdata = parse_srecord_payload(line, 8)   # 16-bit address
            if address == 0:
                # No segment in progress yet: start one at this record.
                address = curaddress
                data = curdata
            elif address + len(data) != curaddress:
                # Discontiguous record: flush the current segment, start anew.
                target.append({'address': address, 'data': data})
                address = curaddress
                data = curdata
            else:
                data += curdata
        if data:
            # Fix: flush the final segment. It was silently dropped when the
            # file had no terminator (S7/S8/S9) record to trigger the flush.
            target.append({'address': address, 'data': data})
        outfile = args[0]
        device = options.device if options.device else DEFAULT_DEVICE
        check_device(device)
        build(outfile, [target], name, device)
    elif len(args) == 1:
        infile = args[0]
        if not os.path.isfile(infile):
            print("Unreadable file '%s'." % infile)
            sys.exit(1)
        parse(infile, dump_images=options.dump_images)
    else:
        parser.print_help()
        if not IntelHex:
            print("Note: Intel hex files support requires the IntelHex python module")
        sys.exit(1)
| 9,836 | 3,200 |
from setuptools import setup, find_packages

# Read the long description up front with an explicit encoding and a context
# manager (fix: the original leaked the file handle and relied on the platform
# default encoding, which breaks on non-UTF-8 locales).
with open('README.md', encoding='utf-8') as readme:
    long_description = readme.read()

setup(
    name='predict_functions_package',
    version='0.1',
    packages=find_packages(exclude=['tests*']),
    license='MIT',
    description='A list of python Data Analysis Functions',
    long_description=long_description,
    install_requires=['numpy', 'pandas'],
    url='https://github.com/bmsimang/Analyse_Function_Predict',
    author='Bongani Msimanga, Azukile Kobe, Vinita Maharaj, Shraddha Rajcoomar, Akshar Jadoonandan',
    author_email='bonganimsimanga0@gmail.com, dhrtirajcoomar@gmail.com, azukilekobe11@gmail.com, vinita.maharaj@gmail.com, aksharj003@gmail.com'
)
import numpy as np
import os
import os.path as op
import matplotlib.pyplot as plt
import pickle
from tqdm import tqdm
from Board import Board
from func import opposite_color, loc2int, int2color, is_last_rank
LETTERS = [chr(i) for i in range(65, 65+26)]  # 'A'..'Z', used to build genome names
BOARD_DIM = 8    # squares per board side
N_PIECES = 6     # distinct chess piece types
MAX_MOVES = 100  # hard cap on moves per training/tournament game
def logistic(x):
    """Squash ``x`` into the open interval (-1, 1).

    Shifted/scaled logistic sigmoid: 2 / (1 + e^-x) - 1, which is 0 at x = 0
    and saturates toward ±1 for large |x|. Works elementwise on arrays.
    """
    return 2. / (1 + np.exp(-x)) - 1
class AI:
    """Chess agent that plays moves via a genome-evolved ``Network``.

    If no pickled network named ``name`` exists under ``networks/``, a fresh
    one is first evolved by a random-genome knockout tournament.
    """
    def __init__(self, color, name='rock', show=False):
        # color: 'white' or 'black'; show: plot network activity per decision.
        self.color = color
        self.name = name
        if not op.isfile(op.join('networks', name + 'net.pkl')):
            self.train_random()
        self.network = load_network(name)
        self.network.show = show
    def make_decision(self, board):
        """Choose and play this AI's next move on ``board``."""
        self.network.make_decision(board, self.color)
    def get_promotion(self, board, loc):
        """Return the piece name to promote a pawn to at square ``loc``."""
        return self.network.get_promotion(board, loc)
    def train_random(self, exp_n=2):
        """Evolve a network via a knockout tournament of 2**exp_n random genomes.

        The winner is renamed to this AI's name and saved to disk.
        """
        # NOTE(review): the name expression is a constant (built from the set
        # bits of 1023), so every genome shares one name and differs only by
        # seed — confirm this is intended.
        genomes = [Genome(name=''.join([LETTERS[i] for i, b in enumerate(format(1023, '026b')) if b == '1']),
                          seed=(4334 * i)) for i in tqdm(range(2**exp_n))]
        while len(genomes) > 1:
            genomes = self.genome_tournament(genomes)
        genomes[0].network.name = self.name
        genomes[0].name = self.name
        genomes[0].network.save()
        genomes[0].save()
    def train_offspring(self, exp_n=8):
        # Placeholder for a future evolutionary training scheme.
        pass
    def genome_tournament(self, genomes):
        """Play genome i against genome (len - i - 1); return the winners.

        Games are capped at MAX_MOVES and decided by checkmate or, failing
        that, by the white-minus-black position score.
        """
        if len(genomes) % 2:
            raise ValueError('Must have an even number of genomes for tournament')
        keep_indices = []
        for i in tqdm(range(int(len(genomes)/2))):
            board = Board()
            order = [i, len(genomes) - i - 1]
            np.random.shuffle(order)  # randomize which genome plays white
            players = {'white': genomes[order[0]], 'black': genomes[order[1]]}
            while not board.game_over and board.move < MAX_MOVES:
                color = int2color(board.move)
                players[color].network.make_decision(board, color)
            outcome = board.check_check_mate()
            position_difference = board.score_position('white') - board.score_position('black')
            print(outcome, position_difference)
            if (outcome and 'Draw' in outcome) or position_difference == 0:
                # NOTE(review): choice([i, -i]) picks between index i and its
                # mirror-from-the-end; ``np.random.choice(order)`` looks like
                # the intent — confirm.
                keep_indices.append(np.random.choice([i, -i]))
            elif outcome == 'Check mate white' or position_difference > 0:
                keep_indices.append(order[0])
            elif outcome == 'Check mate black' or position_difference < 0:
                keep_indices.append(order[1])
            else:
                raise ValueError('Unrecognized outcome %s' % outcome)
        return [genomes[i] for i in keep_indices]
class ConnectionWeight:
    """Mutable box around one connection weight.

    Both endpoints of a connection hold a reference to the same instance, so
    updating ``weight`` is visible from either side.
    """
    def __init__(self, weight):
        # Raw scalar weight of the connection.
        self.weight = weight
class Node:
    """Single network unit identified by its flat index within a layer.

    ``next_nodes``/``previous_nodes`` map the peer node's flat index to the
    shared ``ConnectionWeight`` of that connection.
    """
    def __init__(self, loc):
        self.loc = loc          # flat index of this node within its layer
        self.activity = 0       # current activation value
        self.next_nodes = {}       # loc -> ConnectionWeight (outgoing)
        self.previous_nodes = {}   # loc -> ConnectionWeight (incoming)

    def connect(self, node, weight):
        """Wire this node to ``node`` with a shared weight object."""
        shared = ConnectionWeight(weight)
        self.next_nodes[node.loc] = shared
        node.previous_nodes[self.loc] = shared
class Network:
    """Feed-forward network of explicit ``Node`` objects that scores chess moves.

    The input layer encodes own/enemy piece occupancy; the output layer holds a
    score per (piece type, from-square, to-square) combination (see
    ``pieces2activity_mat`` and ``activity_mat2move``).
    """
    # Plane index of each piece type in the activity tensors.
    piece_dict = {'pawn': 0, 'rook': 1, 'knight': 2, 'bishop': 3,
                  'queen': 4, 'king': 5}
    # Pieces a pawn may be promoted to.
    promotion_pieces = ['rook', 'knight', 'bishop', 'queen']
    def __init__(self, layer_dims, tms, name='rock', seed=12, delta=0.1,
                 show=True):
        """Build the layers and wire them with transition matrices ``tms``.

        layer_dims: sequence of layer shapes (tuples), input first, output last.
        tms:        one weight array per connected layer pair.
        delta:      backpropagation step size (deprecated, per original note).
        show:       when True, plot layer activity after each propagation.
        """
        np.random.seed(seed)
        self.name = name
        self.delta = delta  # for backpropagation (deprecated)
        self.show = show
        self.input_layer = self.make_layer(layer_dims[0])
        self.hidden_layers = []
        if len(layer_dims) > 2:
            hidden_layer = self.make_layer(layer_dims[1])
            self.connect_layers(self.input_layer, hidden_layer, tms[0])
            self.hidden_layers.append(hidden_layer)
            for i, (hidden_dim, tm) in enumerate(zip(layer_dims[2:-1], tms[1:-1])):
                hidden_layer = self.make_layer(hidden_dim)
                # NOTE(review): this condition is always true for the indices
                # enumerate() produces here — confirm the intended bound.
                if i < len(layer_dims) - 1:
                    self.connect_layers(self.hidden_layers[-1], hidden_layer, tm)
                    self.hidden_layers.append(hidden_layer)
            self.output_layer = self.make_layer(layer_dims[-1])
            self.connect_layers(hidden_layer, self.output_layer, tms[-1])
        else:
            self.hidden_layers = []
            self.output_layer = self.make_layer(layer_dims[-1])
            self.connect_layers(self.input_layer, self.output_layer, tms[-1])
    def make_layer(self, shape):
        """Return an object array of ``shape`` filled with fresh ``Node``s."""
        layer = np.empty(shape=shape, dtype=object).flatten()
        for i in range(layer.size):
            layer[i] = Node(loc=i)
        return layer.reshape(shape)
    def connect_layers(self, layer, next_layer, tm):
        """Densely connect ``layer`` to ``next_layer`` with weights from ``tm``."""
        tm = tm.flatten()
        for i, node in enumerate(layer.flatten()):
            for j, next_node in enumerate(next_layer.flatten()):
                # NOTE(review): ``tm[i * j]`` collapses to index 0 whenever
                # i == 0 or j == 0 and collides for many (i, j) pairs; a full
                # matrix lookup would be ``tm[i * next_layer.size + j]`` —
                # confirm intent (changing it invalidates saved networks).
                node.connect(next_node, tm[i * j])
    def save(self):
        """Pickle this network to ``networks/<name>net.pkl``."""
        with open(op.join('networks', self.name + 'net.pkl'), 'wb') as f:
            pickle.dump(self, f)
    def propagate(self, input_activity):
        """Load ``input_activity`` into the input layer and propagate forward."""
        if input_activity.shape != self.input_layer.shape:
            raise ValueError('Input activity dimension mismatch')
        input_activity = input_activity.flatten()
        for i, node in enumerate(self.input_layer.flatten()):
            node.activity = input_activity[i]
        if self.hidden_layers:
            self.propagate_layer(self.input_layer, self.hidden_layers[0])
            for i, hidden_layer in enumerate(self.hidden_layers[1:]):
                self.propagate_layer(self.hidden_layers[i], hidden_layer)
            self.propagate_layer(self.hidden_layers[-1], self.output_layer)
        else:
            self.propagate_layer(self.input_layer, self.output_layer)
        if self.show:
            self.show_activity()
    def propagate_layer(self, layer, next_layer):
        """Weighted-sum ``layer``'s activities into ``next_layer``.

        Each target node's input sum is squashed with ``logistic`` into (-1, 1).
        """
        update_mat = np.zeros(shape=next_layer.shape).flatten()
        for node in layer.flatten():
            for loc, weight in node.next_nodes.items():
                update_mat[loc] += node.activity * weight.weight
        for i, node in enumerate(next_layer.flatten()):
            node.activity = logistic(update_mat[i])
    def show_activity(self):
        """Plot the activity of every layer with matplotlib (blocking)."""
        input_fig, input_axes = plt.subplots(self.input_layer.shape[0])
        for section, ax in zip(self.input_layer, input_axes):
            self.plot_section(section, ax)
        if self.hidden_layers:
            hidden_fig, hidden_axes = plt.subplots(len(self.hidden_layers))
            # subplots() returns a bare Axes (not an array) when count == 1.
            hidden_axes = hidden_axes if isinstance(hidden_axes, np.ndarray) else np.array([hidden_axes])
            for hidden_layer, ax in zip(self.hidden_layers, hidden_axes):
                self.plot_section(hidden_layer, ax)
        output_fig, output_axes = plt.subplots(self.output_layer.shape[0])
        for section, ax in zip(self.output_layer, output_axes):
            self.plot_section(section, ax)
        plt.show()
    def plot_section(self, section, ax):
        """Render one node sub-array's activities as an image on ``ax``."""
        section_shape = section.shape
        ax.axis('off')
        activity_mat = np.zeros(section_shape).flatten()
        for i, node in enumerate(section.flatten()):
            activity_mat[i] = node.activity
        ax.imshow(activity_mat.reshape(section_shape))
    def train_king_hunt(self, n_games=1000):
        """Self-play training loop: play games, backpropagating position score."""
        for n in tqdm(range(n_games)):
            board = Board()
            while not board.game_over and board.move < MAX_MOVES:
                color = int2color(board.move)
                activity_mat = self.pieces2activity_mat(board.pieces[color], board.pieces[opposite_color(color)])
                self.propagate(activity_mat)
                output_activity_mat = self.layer2activity_mat(self.output_layer)
                piece, move = self.activity_mat2move(output_activity_mat, board, board.pieces[color])
                print(piece.name, piece.square.loc, move)
                board.make_move(piece, move)
                score = board.score_position(color)
                output_loc = self.piece2output_layer(piece)
                # NOTE(review): output_loc has only 3 of the output layer's 5
                # indices, so this selects a sub-array rather than a single
                # Node — confirm back_propagate's expected input.
                self.back_propagate(self.output_layer[output_loc], score, 0)
    def back_propagate(self, node, score, i):
        """Recursively nudge the incoming weights of ``node`` by the score.

        The score is halved at each level; recursion stops at score 0 or after
        len(hidden_layers) + 2 levels.
        """
        if score == 0 or i == len(self.hidden_layers) + 2:
            return
        if self.hidden_layers:
            layer = self.hidden_layers[-i] if i < len(self.hidden_layers) else self.input_layer
        else:
            layer = self.input_layer
        for loc, weight in node.previous_nodes.items():
            node.previous_nodes[loc].weight = weight.weight + logistic(score)*self.delta
            self.back_propagate(layer.flatten()[loc], score / 2, i + 1)
    def make_decision(self, board, color):
        """Pick and play the highest-scored legal move for ``color``."""
        activity_mat = self.pieces2activity_mat(board.pieces[color], board.pieces[opposite_color(color)])
        self.propagate(activity_mat)
        output_activity_mat = self.layer2activity_mat(self.output_layer)
        piece, move = self.activity_mat2move(output_activity_mat, board, board.pieces[color])
        board.make_move(piece, move)
        self.check_promotion_or_game_end(board)
    def get_promotion(self, board, loc):
        """Return the piece name to promote to for the pawn at ``loc``."""
        output_activity_mat = self.layer2activity_mat(self.output_layer)
        return self.activity_mat2promotion(output_activity_mat, loc)
    def check_promotion_or_game_end(self, board):
        """Handle pawn promotion for the last move, then test for mate/draw."""
        piece, loc = board.moves[-1]
        if piece.name == 'pawn' and is_last_rank(int2color(board.move - 1), loc):
            name = self.get_promotion(board, loc)
            board.take_piece(piece)
            board.make_piece(name, piece.color, loc)
        board.check_check_mate()
    def pieces2activity_mat(self, my_pieces, other_pieces):
        """Encode piece positions as a one-hot input tensor.

        Plane 0 holds ``my_pieces``, plane 1 ``other_pieces``; within each,
        axis order is (piece type, column, row).
        """
        activity_mat = np.zeros(self.input_layer.shape)
        for i, pieces in enumerate([my_pieces, other_pieces]):
            for name in pieces:
                for piece in pieces[name]:
                    column, row = piece.square.loc
                    column, row = loc2int(column, row)
                    activity_mat[i, self.piece_dict[name], column, row] = 1  # one-hot occupancy
        return activity_mat
    def layer2activity_mat(self, layer):
        """Extract the node activities of ``layer`` into a plain ndarray."""
        activity_mat = np.zeros(layer.shape).flatten()
        for i, node in enumerate(layer.flatten()):
            activity_mat[i] = node.activity
        return activity_mat.reshape(layer.shape)
    def activity_mat2move(self, activity_mat, board, pieces):
        """Return the legal (piece, move) pair with the highest output score."""
        best_move, best_score = None, -1
        for name in pieces:
            for piece in pieces[name]:
                column, row = piece.square.loc
                start_column, start_row = loc2int(column, row)
                for move in board.get_moves(piece):
                    column, row = move
                    end_column, end_row = loc2int(column, row)
                    score = activity_mat[self.piece_dict[piece.name], start_column, start_row, end_column, end_row]
                    if score > best_score:
                        best_score = score
                        best_move = (piece, move)
        return best_move
    def activity_mat2promotion(self, activity_mat, loc):
        """Pick a promotion piece from the output activity at square ``loc``.

        NOTE(review): the slice [1:4] covers three piece planes while
        ``promotion_pieces`` has four entries, so 'queen' can never be chosen
        — confirm whether [1:5] was intended.
        """
        column, row = loc
        column, row = loc2int(column, row)
        return self.promotion_pieces[int(np.argmax(activity_mat[1:4, column, row, column, row]))]
    def piece2output_layer(self, piece):
        """Return the (piece-plane, column, row) index of ``piece``."""
        column, row = piece.square.loc
        column, row = loc2int(column, row)
        return self.piece_dict[piece.name], column, row
def load_network(name):
    """Unpickle and return the network saved as ``networks/<name>net.pkl``.

    Raises ValueError when no file with that name exists.
    """
    path = op.join('networks', name + 'net.pkl')
    if not op.isfile(path):
        raise ValueError('%s network does not exist' % name)
    with open(path, 'rb') as fh:
        return pickle.load(fh)
class Genome:
    """Random bit-string genome that deterministically encodes a ``Network``.

    The genome is one long '0'/'1' string; successive ``DEPTH``-bit reads via
    ``next_int`` drive every architectural and weight choice in
    ``make_network``.
    """
    DEPTH = 8            # bits consumed per next_int() read
    LENGTH = int(1e6)    # number of DEPTH-bit words in a fresh genome
    MAX_LAYERS = 10      # upper bound used when choosing the layer count
    MAX_DIMS = 5         # upper bound on dimensions per hidden layer
    def __init__(self, name='rock', seed=12):
        '''
        name: String
            for versioning; an existing genome/network file with this name is
            loaded from disk instead of being regenerated
        seed: int
            seed for numpy random number generator (used only when a fresh
            genome has to be generated)
        '''
        np.random.seed(seed)
        self.name = name
        self.i = 0  # read cursor into the genome bit string
        if op.isfile(op.join('genomes', self.name + 'gen.txt')):
            self.load()
        else:
            # Fresh genome: LENGTH random DEPTH-bit words as zero-padded binary.
            self.genome = ''.join([format(np.random.randint(2**self.DEPTH), '0%ib' % self.DEPTH)
                                   for _ in range(self.LENGTH)])
        if op.isfile(op.join('networks', self.name + 'net.pkl')):
            self.load_network()
        else:
            self.make_network()
    def save(self):
        """Write the genome bit string to ``genomes/<name>gen.txt``."""
        with open(op.join('genomes', self.name + 'gen.txt'), 'w') as f:
            f.write(self.genome)
    def load(self):
        """Read the genome bit string back from disk."""
        with open(op.join('genomes', self.name + 'gen.txt'), 'r') as f:
            self.genome = f.readline()
    def delete(self):
        """Remove this genome's file from disk."""
        os.remove(op.join('genomes', self.name + 'gen.txt'))
    def make_network(self):
        """Decode the genome into a concrete ``Network`` (at least 3 layers)."""
        n_layers = max([(self.next_int() % self.MAX_LAYERS) + 1, 3])
        tms = []  # transition matrices
        input_dim = (2, N_PIECES, BOARD_DIM, BOARD_DIM)
        layer_dims = [input_dim]
        for n in range(n_layers - 2):
            layer_dims.append(self.new_layer())
            tms.append(self.generate_tm(layer_dims[-2], layer_dims[-1]))
        output_dim = (N_PIECES, BOARD_DIM, BOARD_DIM, BOARD_DIM, BOARD_DIM)
        layer_dims.append(output_dim)
        tms.append(self.generate_tm(layer_dims[-2], layer_dims[-1]))
        self.network = Network(layer_dims, tms, name=self.name, show=False)
    def load_network(self):
        """Load the previously saved network matching this genome's name."""
        self.network = load_network(self.name)
    def next_int(self):
        """Consume the next DEPTH bits of the genome; return them as an int."""
        self.i += self.DEPTH
        if self.i >= len(self.genome):
            raise ValueError('Genome length exceeded')
        return int(self.genome[self.i-self.DEPTH: self.i], base=2)
    def generate_tm(self, dim0, dim1):
        """Genome-derived weight array of shape dim0 + dim1, values in [-1, 1)."""
        tm = np.zeros(dim0 + dim1).flatten()
        for i in range(tm.size):
            tm[i] = (self.next_int() - 2**(self.DEPTH - 1)) / 2**(self.DEPTH - 1)
        return tm.reshape(dim0 + dim1)
    def new_layer(self):
        """Genome-derived hidden layer shape: 1-5 dims, each of size 1-8."""
        n_dims = (self.next_int() % self.MAX_DIMS) + 1
        layer_dim = tuple((self.next_int() % BOARD_DIM) + 1 for _ in range(n_dims))
        return layer_dim
# Script entry point: construct (and, if no saved network exists, train)
# the white-side AI.
if __name__ == '__main__':
    ai = AI('white')
| 12,313 | 5,073 |
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import traceback
import numpy as np
from caffe._caffe import log as LOG
from caffe._caffe import Layer as BaseLayer
class AdaptiveWeightingLossLayer(BaseLayer):
    """Layer for adaptive weighting between the input losses.

    Each bottom blob holds one scalar loss L_i. A learnable parameter s_i
    (stored in ``self.blobs[0]``) rescales it to
    ``w_i * (s_i + scale * exp(-s_i) * L_i)``; the first top blob receives the
    sum over all positive losses, and optional extra top blobs mirror the raw
    bottom losses for monitoring.
    """
    def _load_params(self, param_str, num_variables):
        """Loads layer parameters.

        :param param_str: Input str of parameters
        :param num_variables: Number of bottom loss blobs (one weight each)
        """
        # SECURITY: eval() executes arbitrary Python taken from the prototxt
        # string. If the params are plain literals, ast.literal_eval is safer.
        layer_params = eval(param_str)

        self._scale = float(layer_params['scale']) if 'scale' in layer_params else 1.0
        self._init = layer_params['init'] if 'init' in layer_params else 0.0
        self._weights = layer_params['weights'] if 'weights' in layer_params else None
        if self._weights is None:
            self._weights = np.ones([num_variables], dtype=np.float32)
        else:
            assert len(self._weights) == num_variables
            assert np.all([w > 0.0 for w in self._weights])

    def _create_variables(self, num_params, init_value):
        """Creates the blob of learnable per-loss parameters."""
        self.blobs.add_blob(num_params)
        self.blobs[0].data[...] = init_value

    def setup(self, bottom, top):
        """Initializes layer.

        :param bottom: List of bottom blobs
        :param top: List of top blobs
        """
        try:
            self._load_params(self.param_str, num_variables=len(bottom))
            num_variables = len(bottom)
            self._create_variables(num_variables, self._init)
            # Fix: initialize here so backward() is safe even if it is invoked
            # before the first forward() pass (previously an AttributeError).
            self._samples = []
        except Exception:
            LOG('AdaptiveWeightingLossLayer setup exception: {}'.format(traceback.format_exc()))
            exit()

    def forward(self, bottom, top):
        """Carry out forward pass.

        :param bottom: List of bottom blobs
        :param top: List of top blobs
        """
        try:
            num_variables = len(bottom)
            assert num_variables > 0
            assert len(top) == 1 or len(top) == 1 + num_variables

            samples = []
            losses = []
            # Fix: range() instead of Python-2-only xrange() (same semantics).
            for i in range(num_variables):
                loss_value = np.array(bottom[i].data, dtype=np.float32).reshape([-1])
                assert len(loss_value) == 1
                loss_value = loss_value[0]

                # Only positive losses participate; zero losses contribute no
                # gradient and are skipped entirely.
                if loss_value > 0.0:
                    param_value = self.blobs[0].data[i]
                    loss_factor = np.exp(-param_value)
                    # Adaptive weighting: s_i + scale * exp(-s_i) * L_i.
                    new_loss_value = param_value + self._scale * loss_factor * loss_value
                    samples.append((i, self._scale * loss_factor, self._scale * loss_factor * loss_value))
                    losses.append(self._weights[i] * new_loss_value)

            top[0].data[...] = np.sum(losses) if len(losses) > 0 else 0.0
            if len(top) == 1 + num_variables:
                # Pass the raw bottom losses through for monitoring.
                for i in range(num_variables):
                    top[i + 1].data[...] = np.copy(bottom[i].data)

            self._samples = samples
        except Exception:
            LOG('AdaptiveWeightingLossLayer forward pass exception: {}'.format(traceback.format_exc()))
            exit()

    def backward(self, top, propagate_down, bottom):
        """Carry out backward pass.

        :param top: List of top blobs
        :param propagate_down: List of indicators to carry out back-propagation for
                               the specified bottom blob
        :param bottom: List of bottom blobs
        """
        try:
            num_variables = len(bottom)
            for i in range(num_variables):
                bottom[i].diff[...] = 0.0

            top_diff_value = top[0].diff[0]
            for i, loss_scale, var_scale in self._samples:
                if propagate_down[i]:
                    # d(total)/d(L_i) = w_i * scale * exp(-s_i)
                    bottom[i].diff[...] = self._weights[i] * loss_scale * top_diff_value
                    # d(total)/d(s_i) = w_i * (1 - scale * exp(-s_i) * L_i)
                    self.blobs[0].diff[i] += self._weights[i] * (1.0 - var_scale) * top_diff_value
        except Exception:
            LOG('AdaptiveWeightingLossLayer backward pass exception: {}'.format(traceback.format_exc()))
            exit()

    def reshape(self, bottom, top):
        """Carry out blob reshaping.

        :param bottom: List of bottom blobs
        :param top: List of top blobs
        """
        top[0].reshape(1)

        num_variables = len(bottom)
        if len(top) == 1 + num_variables:
            for i in range(num_variables):
                top[i + 1].reshape(1)
| 4,901 | 1,441 |
#!/usr/bin/env python3
import logging
import re
import shlex
import subprocess as sp
import sys
import time
from typing import Optional
# Module-level logger. Fix: pass the module's real __name__, not the literal
# string "__name__", so log records are attributed to this module.
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)  # was the magic number 40

# How many times to poll Slurm before giving up and reporting "failed".
MAX_STATUS_ATTEMPTS = 20

# Map Slurm job states to the three statuses the caller understands.
STATUS_CODES = {
    "BOOT_FAIL": "failed",
    "CANCELLED": "failed",
    "COMPLETED": "success",
    "DEADLINE": "failed",
    "FAILED": "failed",
    "NODE_FAIL": "failed",
    "OUT_OF_MEMORY": "failed",
    "PENDING": "running",
    "PREEMPTED": "failed",
    "RUNNING": "running",
    "REQUEUED": "running",
    "RESIZING": "running",
    "REVOKED": "running",
    "SUSPENDED": "failed",
    "TIMEOUT": "failed",
}
def main():
    """Poll Slurm for the status of the job id given as argv[1].

    Tries ``sacct`` first and falls back to ``scontrol``; retries every five
    seconds, up to MAX_STATUS_ATTEMPTS times, then prints the resolved status
    (or "failed" when none could be determined).
    """
    job_id = int(sys.argv[1])
    job_status = None
    for _ in range(MAX_STATUS_ATTEMPTS):
        job_status = check_sacct(job_id) or check_scontrol(job_id)
        if job_status:
            break
        time.sleep(5)
    print(job_status or "failed")
def check_sacct(job_id: int) -> Optional[str]:
    """Query ``sacct`` for the job's state.

    Returns one of "success"/"failed"/"running", or None when the sacct call
    fails, its output cannot be parsed, or it does not mention the job.
    """
    try:
        job_info = sp.check_output(shlex.split(f"sacct -P -b -j {job_id} -n"))
    except sp.CalledProcessError as err:
        logger.error("sacct process error")
        logger.error(err)
        return None
    try:
        # Each output line is "JobID|State|ExitCode"; build JobID -> State.
        status = {x.split("|")[0]: x.split("|")[1]
                  for x in job_info.decode().strip().split("\n")}
        return STATUS_CODES.get(status[f"{job_id}"], None)
    except (IndexError, KeyError):
        # Fix: a job id missing from the output raised an uncaught KeyError;
        # treat it the same as unparseable output.
        return None
def check_scontrol(job_id: int) -> Optional[str]:
    """Fallback status check via ``scontrol -o show job``.

    Returns a mapped status string, or None when the call fails or the output
    contains no ``JobState=`` field.
    """
    try:
        job_info = sp.check_output(shlex.split(f"scontrol -o show job {job_id}"))
    except sp.CalledProcessError as err:
        logger.error("scontrol process error")
        logger.error(err)
        return None
    m = re.search(r"JobState=(\w+)", job_info.decode())
    if not m:
        # Fix: previously indexed an empty dict here, raising KeyError
        # whenever the regex did not match.
        return None
    return STATUS_CODES.get(m.group(1), None)
# Entry point: the Slurm job id is expected as the first CLI argument.
if __name__ == "__main__":
    main()
| 1,843 | 688 |
from minecraft.networking.packets import Packet
from minecraft.networking.types import (
Short, BitFieldEnum
)
class HeldItemChangePacket(Packet, BitFieldEnum):
    """"Held item change" packet carrying the selected hotbar ``slot`` (Short).

    NOTE(review): inheriting from ``BitFieldEnum`` alongside ``Packet`` is
    unusual for a packet with a single plain Short field — confirm it is
    intentional.
    """
    @staticmethod
    def get_id(context):
        # The numeric packet id changed across protocol versions; the chained
        # conditional is ordered newest-first so the highest matching version
        # threshold wins.
        return 0x25 if context.protocol_version >= 738 else \
               0x24 if context.protocol_version >= 712 else \
               0x23 if context.protocol_version >= 464 else \
               0x21 if context.protocol_version >= 389 else \
               0x1F if context.protocol_version >= 386 else \
               0x1A if context.protocol_version >= 345 else \
               0x19 if context.protocol_version >= 343 else \
               0x1A if context.protocol_version >= 332 else \
               0x19 if context.protocol_version >= 318 else \
               0x17 if context.protocol_version >= 94 else \
               0x14 if context.protocol_version >= 70 else \
               0x0A if context.protocol_version >= 69 else \
               0x09
    packet_name = "held item change"
    # Single field: the newly selected hotbar slot index.
    get_definition = staticmethod(lambda context: [
        {'slot': Short}
    ])
| 1,058 | 338 |
# -*- coding: utf-8 -*-
from Qt.QtGui import *
from Qt.QtCore import *
from Qt.QtWidgets import *
import re
def clamp(mn, mx, val):
    """Constrain ``val`` to the closed interval [mn, mx]."""
    return max(mn, min(mx, val))
def highlightLine(widget, line=-1, clear=False):
    """Highlight (or clear the highlight of) one line in a text-edit widget.

    widget: object exposing textCursor()/document()/setTextCursor() (QTextEdit-like).
    line:   0-based block number; -1 means the line under the current cursor.
    clear:  when True, apply a default (empty) format instead of the highlight.

    Side effect: moves the widget's cursor to the start of the affected line.
    """
    if line == -1:
        block = widget.textCursor().block()
    else:
        block = widget.document().findBlockByLineNumber(line)
    if not block.isValid():
        return
    fmt = QTextCharFormat()
    if not clear:
        fmt.setBackground(QColor(50, 80, 100))  # muted blue highlight
    blockPos = block.position()
    cursor = widget.textCursor()
    cursor.setPosition(blockPos)
    cursor.select(QTextCursor.LineUnderCursor)
    cursor.setCharFormat(fmt)
    cursor.clearSelection()
    cursor.movePosition(QTextCursor.StartOfLine)
    widget.setTextCursor(cursor)
class PythonHighlighter(QSyntaxHighlighter):
    """Regex-based syntax highlighter for Python source in Qt text widgets.

    ``highlightingRules`` holds (QRegExp, QTextCharFormat) pairs applied to
    every block; triple-quoted strings are tracked across blocks via
    ``setCurrentBlockState``/``previousBlockState``.
    """
    def __init__(self, parent=None):
        super(PythonHighlighter, self).__init__(parent)
        self.highlightingRules = []
        # Assignment targets: identifier followed by '=', '+=', '-=', etc.
        assignFormat = QTextCharFormat()
        assignFormat.setForeground(QColor(200, 150, 100))
        assignRegexp = QRegExp("\\b(\\w+)\\s*(?=[-+*/]*=)")
        assignRegexp.setMinimal(True)
        self.highlightingRules.append((assignRegexp, assignFormat))
        # Numeric literals: hex or decimal/float (optionally 'f'-suffixed).
        numFormat = QTextCharFormat()
        numFormat.setForeground(QColor(150, 200, 150))
        self.highlightingRules.append((QRegExp("\\b(0x[0-9]+)\\b|\\b[0-9\\.]+f*\\b"), numFormat))
        # Function/method names: word directly before an opening parenthesis.
        functionFormat = QTextCharFormat()
        functionFormat.setForeground(QColor(100, 150, 200))
        self.highlightingRules.append((QRegExp("\\b\\w+(?=\\s*\\()"), functionFormat))
        # Keywords (plus a few extras such as print/exec/eval/self).
        keywordFormat = QTextCharFormat()
        keywordFormat.setForeground(QColor(150, 130, 200))
        keywords = ["\\b%s\\b"%k for k in ["False", "await", "else", "import", "pass",
                                           "None", "break", "except", "in", "raise",
                                           "True", "class", "finally", "is", "return",
                                           "and", "continue", "for", "lambda", "try",
                                           "as", "def", "from", "nonlocal", "while","exec", "eval",
                                           "assert", "del", "global", "not", "with",
                                           "async", "elif", "if", "or", "yield", "print", "self"]]
        self.highlightingRules += [(QRegExp(pattern), keywordFormat) for pattern in keywords]
        # True/False/None re-colored over the generic keyword rule above.
        boolFormat = QTextCharFormat()
        boolFormat.setForeground(QColor(200, 100, 50))
        self.highlightingRules.append((QRegExp("\\bTrue\\b|\\bFalse\\b|\\bNone\\b"), boolFormat))
        # Decorators: '@' followed by a name.
        attrFormat = QTextCharFormat()
        attrFormat.setForeground(QColor(100, 180, 180))
        self.highlightingRules.append((QRegExp("@\\b\\w+\\b"), attrFormat))
        # Single- and double-quoted string literals (with escaped quotes).
        self.quotationFormat = QTextCharFormat()
        self.quotationFormat.setForeground(QColor(130, 200, 130))
        self.highlightingRules.append((QRegExp("(\"(\\\\\"|[^\"])*\")|(\'(\\\\\'|[^\'])*\')"), self.quotationFormat))
        # '#' comments to end of line.
        singleLineCommentFormat = QTextCharFormat()
        singleLineCommentFormat.setForeground(QColor(90, 90, 90))
        self.highlightingRules.append((QRegExp("#[^\\n]*"), singleLineCommentFormat))
        # Format for triple-quoted strings (applied in highlightBlock).
        self.multiLineCommentFormat = QTextCharFormat()
        self.multiLineCommentFormat.setForeground(QColor(170, 170, 100))
        # Format + pattern for an externally requested word highlight.
        self.highlightedWordFormat = QTextCharFormat()
        self.highlightedWordFormat.setForeground(QColor(200, 200, 200))
        self.highlightedWordFormat.setBackground(QBrush(QColor(100, 55, 170)))
        self.highlightedWordRegexp = None
    def highlightBlock(self, text):
        """Apply all rules to one text block; invoked by Qt per line."""
        for pattern, format in self.highlightingRules:
            if not pattern:
                continue
            expression = QRegExp(pattern)
            index = expression.indexIn(text)
            while index >= 0:
                length = expression.matchedLength()
                self.setFormat(index, length, format)
                index = expression.indexIn(text, index + length)
        self.setCurrentBlockState(0)
        # Do multi-line strings (state 1: ''' ... ''', state 2: """ ... """).
        in_multiline = self.match_multiline(text, QRegExp("'''"), 1, self.multiLineCommentFormat)
        if not in_multiline:
            in_multiline = self.match_multiline(text, QRegExp('"""'), 2, self.multiLineCommentFormat)
        # Finally overlay the externally requested word highlight, if any.
        if self.highlightedWordRegexp:
            expression = QRegExp(self.highlightedWordRegexp)
            index = expression.indexIn(text)
            while index >= 0:
                length = expression.matchedLength()
                self.setFormat(index, length, self.highlightedWordFormat)
                index = expression.indexIn(text, index + length)
    def match_multiline(self, text, delimiter, in_state, style):
        """Do highlighting of multi-line strings. ``delimiter`` should be a
        ``QRegExp`` for triple-single-quotes or triple-double-quotes, and
        ``in_state`` should be a unique integer to represent the corresponding
        state changes when inside those strings. Returns True if we're still
        inside a multi-line string when this function is finished.
        """
        # If inside triple-single quotes, start at 0
        if self.previousBlockState() == in_state:
            start = 0
            add = 0
        # Otherwise, look for the delimiter on this line
        else:
            start = delimiter.indexIn(text)
            # Move past this match
            add = delimiter.matchedLength()
        # As long as there's a delimiter match on this line...
        while start >= 0:
            # Look for the ending delimiter
            end = delimiter.indexIn(text, start + add)
            # Ending delimiter on this line?
            if end >= add:
                length = end - start + add + delimiter.matchedLength()
                self.setCurrentBlockState(0)
            # No; multi-line string
            else:
                self.setCurrentBlockState(in_state)
                length = len(text) - start + add
            # Apply formatting
            self.setFormat(start, length, style)
            # Look for the next match
            start = delimiter.indexIn(text, start + length)
        # Return True if still inside a multi-line string, False otherwise
        if self.currentBlockState() == in_state:
            return True
        else:
            return False
class SwoopHighligher(QSyntaxHighlighter):
    """Highlighter for the Swoop search-results pane.

    Colors leading line numbers, section headers, bracketed sub-headers,
    '//' comments, and (via a late-filled rule) the matched search words.

    NOTE(review): the class name is missing an 'h' ("Highligher"); renaming
    would break external references, so it is left as-is.
    """
    def __init__(self, parent=None):
        super(SwoopHighligher, self).__init__(parent)
        self.highlightingRules = []
        # Leading line numbers produced by the swoop listing.
        linumFormat = QTextCharFormat()
        linumFormat.setForeground(QColor(180, 100, 120))
        self.highlightingRules.append((QRegExp("^\\s*\\d+\\s+"), linumFormat))
        # Section headers starting at column 0.
        headerFormat = QTextCharFormat()
        headerFormat.setForeground(QColor(120, 100, 180))
        headerFormat.setFontWeight(QFont.Bold)
        self.highlightingRules.append((QRegExp("^[a-zA-Z][\\w -]*"), headerFormat))
        # Bracketed sub-headers at end of line, e.g. "[settings]".
        subHeaderFormat = QTextCharFormat()
        subHeaderFormat.setForeground(QColor(120, 180, 120))
        self.highlightingRules.append((QRegExp("\\[[\\w ]+\\]$"), subHeaderFormat))
        # '//' comments to end of line.
        commentFormat = QTextCharFormat()
        commentFormat.setForeground(QColor(90, 90, 90))
        self.highlightingRules.append((QRegExp("//.*$"), commentFormat))
        # Placeholder rule for matched search words; rules with a None pattern
        # are skipped in highlightBlock until a pattern is supplied.
        highlightedWordsFormat = QTextCharFormat()
        highlightedWordsFormat.setForeground(QColor(200, 200, 200))
        highlightedWordsFormat.setBackground(QBrush(QColor(100, 55, 170)))
        self.highlightingRules.append((None, highlightedWordsFormat))
    def highlightBlock(self, text):
        """Apply every rule with a non-empty pattern to one block of text."""
        for pattern, format in self.highlightingRules:
            if not pattern:
                continue
            expression = QRegExp(pattern)
            index = expression.indexIn(text)
            while index >= 0:
                length = expression.matchedLength()
                self.setFormat(index, length, format)
                index = expression.indexIn(text, index + length)
        self.setCurrentBlockState(0)
class SwoopSearchDialog(QDialog):
def __init__(self, edit, **kwargs):
super(SwoopSearchDialog, self).__init__(**kwargs)
self.edit = edit
self.setWindowFlags(Qt.FramelessWindowHint)
self.setWindowTitle("Swoop")
layout = QVBoxLayout()
self.setLayout(layout)
self.filterWidget = QLineEdit()
self.filterWidget.setToolTip("Ctrl-C - case sensitive<br>Ctrl-W - word boundary<br>Ctrl-B - find inside brackets<br>Ctrl-D - down only<br>Ctrl-R - replace mode")
self.filterWidget.textChanged.connect(lambda:self.filterTextChanged())
self.filterWidget.keyPressEvent = self.filterKeyPressEvent
self.resultsWidget = QTextEdit()
self.resultsWidget.setReadOnly(True)
self.resultsWidget.setWordWrapMode(QTextOption.NoWrap)
self.resultsWidget.syntax = SwoopHighligher(self.resultsWidget.document())
self.resultsWidget.mousePressEvent = self.resultsMousePressEvent
self.resultsWidget.keyPressEvent = self.filterWidget.keyPressEvent
self.statusWidget = QLabel()
self.statusWidget.hide()
layout.addWidget(self.filterWidget)
layout.addWidget(self.resultsWidget)
layout.addWidget(self.statusWidget)
self.rejected.connect(self.whenRejected)
self.initialize()
def initialize(self):
self.useWordBoundary = False
self.findInsideBrackets = False
self.caseSensitive = True
self.downOnly = False
self.replaceMode = False
self.numberSeparator = " "
self.previousPattern = None
self.previousLines = []
self.savedSettings = {}
self.text = unicode(self.edit.toPlainText())
lines = self.text.split("\n")
cursor = self.edit.textCursor()
self.updateSavedCursor()
self.savedSettings["lines"] = lines
findText = unicode(cursor.selectedText())
if not findText:
findText = wordAtCursor(cursor)[0]
self.filterWidget.setText(findText)
self.filterWidget.setStyleSheet("")
self.hide()
def updateSavedCursor(self):
cursor = self.edit.textCursor()
brackets = findBracketSpans(self.text, cursor.position(), brackets="{")
self.savedSettings["cursor"] = cursor
self.savedSettings["scroll"] = self.edit.verticalScrollBar().value()
self.savedSettings["brackets"] = brackets
self.findInsideBrackets = brackets[0] and self.findInsideBrackets
def showEvent(self, event):
self.updateSavedCursor()
self.reposition()
self.filterWidget.setFocus()
def update(self):
self.initialize()
self.updateStatus()
self.filterTextChanged()
def resultsMousePressEvent(self, event):
    """Highlight the clicked result line and jump the editor to it."""
    cursor = self.resultsWidget.cursorForPosition(event.pos())
    highlightLine(self.resultsWidget, clear=True)
    highlightLine(self.resultsWidget, cursor.block().blockNumber())
    self.resultsLineChanged()
def reposition(self):
    """Place the dialog just below the editor cursor, sized to the results."""
    c = self.edit.cursorRect().topLeft()
    # 30 / 110 are padding fudge factors; size is clamped to 500x400.
    w = self.resultsWidget.document().idealWidth() + 30
    h = self.resultsWidget.document().blockCount()*self.resultsWidget.cursorRect().height() + 110
    self.setGeometry(c.x(), c.y() + 22, clamp(0, 500, w), clamp(0, 400, h))
def resultsLineChanged(self):
    """Sync the editor with the currently selected results line.

    Parses the "<number> <text>" line under the results cursor, moves the
    editor to that line, places the editor cursor at the first filter match,
    scrolls the match to the vertical middle, then repositions the dialog.
    No-op in replace mode or when the results line is empty.
    """
    if self.replaceMode:
        return
    cursor = self.resultsWidget.textCursor()
    cursor.select(QTextCursor.LineUnderCursor)
    line = unicode(cursor.selectedText())
    if not line:
        return
    lineNumber, text = re.search("^([0-9]+)\\s-*(.*)$", line).groups("")
    self.edit.gotoLine(int(lineNumber))
    currentFilter = self.getFilterPattern()
    r = re.search(currentFilter, text, re.IGNORECASE if not self.caseSensitive else 0)
    if r:
        cursor = self.edit.textCursor()
        # NOTE(review): the -1 offset presumably compensates for the line-number
        # prefix captured by the regex above — confirm against gotoLine behavior.
        pos = cursor.block().position() + r.start() - 1
        # BUGFIX: was 'pos >- 0', which parses as 'pos > -0' (i.e. pos > 0) and
        # wrongly skipped document position 0; '>=' is clearly what was meant.
        if pos >= 0:
            cursor.setPosition(pos)
            self.edit.setTextCursor(cursor)
    # Scroll so the cursor line sits in the vertical middle of the editor.
    cursorY = self.edit.cursorRect().top()
    scrollBar = self.edit.verticalScrollBar()
    scrollBar.setValue(scrollBar.value() + cursorY - self.edit.geometry().height()/2)
    self.reposition()
def updateStatus(self):
    """Show the enabled search-option tags in the status label, or hide it."""
    optionTags = [
        (self.useWordBoundary, "[word]"),
        (self.caseSensitive, "[case]"),
        (self.findInsideBrackets, "[brackets]"),
        (self.downOnly, "[down]"),
    ]
    parts = [tag for enabled, tag in optionTags if enabled]
    if self.replaceMode:
        parts.append("[REPLACE '%s']"%self.previousPattern)
    if parts:
        self.statusWidget.setText(" ".join(parts))
        self.statusWidget.show()
    else:
        self.statusWidget.hide()
def filterKeyPressEvent(self, event):
    """Key handler shared by the filter field and the results view.

    Arrow/Page keys move the highlighted result row; Ctrl-W/B/D/C toggle
    search options (Ctrl-C copies instead when text is selected); Ctrl-R
    toggles replace mode; Return applies the replacement (in replace mode)
    and accepts the dialog; everything else goes to the QLineEdit.
    """
    shift = event.modifiers() & Qt.ShiftModifier
    ctrl = event.modifiers() & Qt.ControlModifier
    alt = event.modifiers() & Qt.AltModifier
    rw = self.resultsWidget
    line = rw.textCursor().block().blockNumber()
    lineCount = rw.document().blockCount()-1
    if event.key() in [Qt.Key_Down, Qt.Key_Up, Qt.Key_PageDown, Qt.Key_PageUp]:
        # Move the highlight: clear the current row, highlight the target row.
        if event.key() == Qt.Key_Down:
            highlightLine(rw, clamp(0, lineCount, line), clear=True)
            highlightLine(rw, clamp(0, lineCount, line+1))
        elif event.key() == Qt.Key_Up:
            highlightLine(rw, clamp(0, lineCount, line), clear=True)
            highlightLine(rw, clamp(0, lineCount, line-1))
        elif event.key() == Qt.Key_PageDown:
            highlightLine(rw, clamp(0, lineCount, line), clear=True)
            highlightLine(rw, clamp(0, lineCount, line+5))
        elif event.key() == Qt.Key_PageUp:
            highlightLine(rw, clamp(0, lineCount, line), clear=True)
            highlightLine(rw, clamp(0, lineCount, line-5))
        self.resultsLineChanged()
    elif ctrl and event.key() == Qt.Key_W: # use word boundary
        if not self.replaceMode:
            self.useWordBoundary = not self.useWordBoundary
            self.updateStatus()
            self.filterTextChanged()
    elif ctrl and event.key() == Qt.Key_B: # find inside brackets
        if not self.replaceMode:
            self.findInsideBrackets = not self.findInsideBrackets
            self.updateSavedCursor()
            self.updateStatus()
            self.filterTextChanged()
    elif ctrl and event.key() == Qt.Key_D: # down only
        if not self.replaceMode:
            self.downOnly = not self.downOnly
            self.updateSavedCursor()
            self.updateStatus()
            self.filterTextChanged()
    elif ctrl and event.key() == Qt.Key_C: # case sensitive
        # Ctrl-C copies when there is a selection, otherwise toggles case sensitivity.
        if self.filterWidget.selectedText():
            self.filterWidget.copy()
        else:
            if not self.replaceMode:
                self.caseSensitive = not self.caseSensitive
                self.updateStatus()
                self.filterTextChanged()
    elif ctrl and event.key() == Qt.Key_R: # replace mode
        self.replaceMode = not self.replaceMode
        if self.replaceMode:
            self.filterWidget.setStyleSheet("background-color: #433567")
            self.previousPattern = self.getFilterPattern()  # the pattern to be replaced
        else:
            self.filterWidget.setStyleSheet("")
            self.filterTextChanged()
        self.updateStatus()
    elif event.key() == Qt.Key_F3:
        self.accept()
    elif event.key() == Qt.Key_Return: # accept
        if self.replaceMode:
            # Write every results line back to its source line in the editor.
            cursor = self.edit.textCursor()
            savedBlock = self.savedSettings["cursor"].block()
            savedColumn = self.savedSettings["cursor"].positionInBlock()
            doc = self.edit.document()
            cursor.beginEditBlock()  # make the whole replacement one undo step
            lines = unicode(self.resultsWidget.toPlainText()).split("\n")
            for line in lines:
                if not line.strip():
                    continue
                lineNumber, text = re.search("^([0-9]+)%s(.*)$"%self.numberSeparator, line).groups("")
                lineNumber = int(lineNumber)
                blockPos = doc.findBlockByLineNumber(lineNumber-1).position()
                cursor.setPosition(blockPos)
                cursor.select(QTextCursor.LineUnderCursor)
                cursor.removeSelectedText()
                cursor.insertText(text)
            cursor.endEditBlock()
            # Restore the cursor/scroll captured before the search started.
            cursor.setPosition(savedBlock.position() + savedColumn)
            self.edit.setTextCursor(cursor)
            self.edit.verticalScrollBar().setValue(self.savedSettings["scroll"])
            self.edit.setFocus()
        self.accept()
    else:
        QLineEdit.keyPressEvent(self.filterWidget, event)
def whenRejected(self):
    """Dialog cancelled: restore the editor cursor and scroll position."""
    self.edit.setTextCursor(self.savedSettings["cursor"])
    self.edit.verticalScrollBar().setValue(self.savedSettings["scroll"])
    self.edit.setFocus()
def getFilterPattern(self):
    """Return the current filter text as an escaped regex pattern.

    Empty filter text yields "".  When word-boundary mode is on, the
    pattern is wrapped in \\b anchors.
    """
    pattern = re.escape(unicode(self.filterWidget.text()))
    if not pattern:
        return ""
    return "\\b" + pattern + "\\b" if self.useWordBoundary else pattern
def filterTextChanged(self):
    """Rebuild the results view from the current filter text.

    In replace mode, previews the substitution applied to the previously
    matched lines.  In search mode, lists every matching line (optionally
    restricted to the saved '{...}' span or to lines at/below the cursor),
    updates the highlighter's pattern rule, and re-selects the result row
    nearest to the editor's current line.
    """
    self.resultsWidget.clear()
    self.resultsWidget.setCurrentCharFormat(QTextCharFormat())
    if self.replaceMode: # replace mode
        # The filter text becomes the replacement; escape backslashes for re.sub.
        subStr = unicode(self.filterWidget.text()).replace("\\", "\\\\")
        pattern = self.getFilterPattern()
        lines = []
        for line in self.previousLines:
            n, text = re.search("^([0-9]+)%s(.*)$"%self.numberSeparator, line).groups("")
            text = re.sub(self.previousPattern, subStr, text, 0, re.IGNORECASE if not self.caseSensitive else 0)
            newLine = "%s%s%s"%(n, self.numberSeparator, text)
            lines.append(newLine)
        self.resultsWidget.setText("\n".join(lines))
        # The last highlighting rule is reserved for the live search pattern.
        self.resultsWidget.syntax.highlightingRules[-1] = (pattern, self.resultsWidget.syntax.highlightingRules[-1][1])
        self.resultsWidget.syntax.rehighlight()
    else: # search mode
        startBlock, endBlock = 0, 0
        if self.findInsideBrackets:
            # Restrict the search to the block range of the saved bracket span.
            cursor = QTextCursor(self.savedSettings["cursor"])
            cursor.setPosition(self.savedSettings["brackets"][1])
            startBlock = cursor.block().blockNumber()
            cursor.setPosition(self.savedSettings["brackets"][2])
            endBlock = cursor.block().blockNumber()
        if self.downOnly:
            cursor = QTextCursor(self.savedSettings["cursor"])
            startBlock = cursor.block().blockNumber()
        currentFilter = self.getFilterPattern()
        currentBlock = self.edit.textCursor().block().blockNumber()
        self.previousLines = []
        currentFilterText = unicode(self.filterWidget.text()).replace("\\", "\\\\")
        counter = 0
        currentIndex = 0  # result row nearest the editor's current line
        for i, line in enumerate(self.savedSettings["lines"]):
            if not line.strip():
                continue
            if self.findInsideBrackets and (i < startBlock or i > endBlock):
                continue
            if self.downOnly and i < startBlock:
                continue
            if i == currentBlock:
                currentIndex = counter
            r = re.search(currentFilter, line, re.IGNORECASE if not self.caseSensitive else 0)
            if r:
                item = "%s%s%s"%(i+1, self.numberSeparator, line)  # 1-based line number prefix
                self.previousLines.append(item)
                counter += 1
        self.resultsWidget.setText("\n".join(self.previousLines))
        self.resultsWidget.syntax.highlightingRules[-1] = (currentFilter, self.resultsWidget.syntax.highlightingRules[-1][1])
        self.resultsWidget.syntax.rehighlight()
        highlightLine(self.resultsWidget, currentIndex)
        self.resultsLineChanged()
class CodeEditorWidget(QTextEdit):
    """Plain-text code editor with bookmarks, swoop search, auto-completion,
    and line operations (move/duplicate/kill/comment/indent).

    Cursor, scroll and bookmark state is kept per ``preset`` name in the
    class-level ``editorState`` dict so it survives widget recreation.
    """
    editorState = {}  # preset name -> {"cursor": int, "scroll": int, "bookmarks": [int]}
    TabSpaces = 4  # number of spaces inserted per Tab press

    def __init__(self, **kwargs):
        super(CodeEditorWidget, self).__init__(**kwargs)
        self.formatFunction = None  # optional callable(text) -> formatted text
        self.preset = "default"  # key into editorState
        self.lastSearch = ""
        self.lastReplace = ""
        self.thread = None
        self.canShowCompletions = True
        self.currentFontPointSize = 16
        self.words = []  # extra words always offered by the completer
        self.currentWord = ("", 0, 0)  # (word, startPos, endPos) under the cursor
        self.searchStartWord = ("", 0, 0)
        self.prevCursorPosition = 0
        self.swoopSearchDialog = SwoopSearchDialog(self, parent=self)
        self.setContextMenuPolicy(Qt.DefaultContextMenu)
        self.completionWidget = CompletionWidget([], parent=self)
        self.completionWidget.hide()
        self.setTabStopWidth(32)
        self.setAcceptRichText(False)
        self.setWordWrapMode(QTextOption.NoWrap)
        self.cursorPositionChanged.connect(self.editorCursorPositionChanged)
        self.verticalScrollBar().valueChanged.connect(lambda _: self.saveState(cursor=False, scroll=True, bookmarks=False))
        self.textChanged.connect(self.editorTextChanged)

    def event(self, event):
        """Intercept Tab to insert spaces / indent the selected lines."""
        if event.type() == QEvent.KeyPress:
            if event.key() == Qt.Key_Tab:
                cursor = self.textCursor()
                tabSpaces = " "*CodeEditorWidget.TabSpaces
                start = cursor.selectionStart()
                end = cursor.selectionEnd()
                cursor.beginEditBlock()  # one undo step for the whole indent
                if end == start:
                    cursor.insertText(tabSpaces)
                else:
                    # Indent every line touched by the selection.
                    cursor.clearSelection()
                    cursor.setPosition(start)
                    while cursor.position() < end:
                        cursor.movePosition(QTextCursor.StartOfLine)
                        cursor.insertText(tabSpaces)
                        if not cursor.movePosition(QTextCursor.Down):
                            break
                        end += len(tabSpaces)  # selection end shifts as text is inserted
                cursor.endEditBlock()
                event.accept()
                return True
        return super(CodeEditorWidget, self).event(event)

    def setBookmark(self, line=-1):
        """Toggle the bookmark flag on *line* (current line when -1)."""
        if line == -1:
            block = self.textCursor().block()
        else:
            block = self.document().findBlockByNumber(line)
        blockData = block.userData()
        if not blockData:
            blockData = TextBlockData()
            blockData.hasBookmark = True
        else:
            blockData.hasBookmark = not blockData.hasBookmark
        if isinstance(self.parent(), CodeEditorWithNumbersWidget):
            self.parent().numberBarWidget.update()  # repaint the bookmark markers
        block.setUserData(blockData)
        self.saveState(cursor=False, scroll=False, bookmarks=True)

    def gotoNextBookmark(self, start=-1):
        """Jump to the first bookmarked line after *start* (current line when -1)."""
        doc = self.document()
        if start == -1:
            start = self.textCursor().block().blockNumber()+1
        for i in range(start, doc.blockCount()):
            b = doc.findBlockByNumber(i)
            blockData = b.userData()
            if blockData and blockData.hasBookmark:
                self.setTextCursor(QTextCursor(b))
                self.centerLine()
                break

    def loadState(self, cursor=True, scroll=True, bookmarks=True):
        """Restore cursor/scroll/bookmarks for the current preset (defaults when unknown)."""
        scrollBar = self.verticalScrollBar()
        # Block signals so restoring state does not immediately re-save it.
        self.blockSignals(True)
        scrollBar.blockSignals(True)
        if not self.preset or not self.editorState.get(self.preset):
            c = self.textCursor()
            c.setPosition(0)
            self.setTextCursor(c)
            scrollBar.setValue(0)
        else:
            state = self.editorState[self.preset]
            if cursor:
                c = self.textCursor()
                c.setPosition(state["cursor"])
                self.setTextCursor(c)
            if scroll:
                scrollBar = self.verticalScrollBar()
                scrollBar.setValue(state["scroll"])
            if bookmarks:
                doc = self.document()
                for i in state.get("bookmarks", []):
                    b = doc.findBlockByNumber(i)  # NOTE(review): b is unused; setBookmark re-resolves the line
                    self.setBookmark(i)
        self.blockSignals(False)
        scrollBar.blockSignals(False)

    def saveState(self, cursor=True, scroll=True, bookmarks=False):
        """Store cursor/scroll/bookmarks for the current preset in editorState."""
        if not self.preset:
            return
        if not self.editorState.get(self.preset):
            self.editorState[self.preset] = {"cursor": 0, "scroll": 0, "bookmarks": []}
        state = self.editorState[self.preset]
        if cursor:
            state["cursor"] = self.textCursor().position()
        if scroll:
            state["scroll"] = self.verticalScrollBar().value()
        if bookmarks:
            doc = self.document()
            state["bookmarks"] = []
            for i in range(doc.blockCount()):
                b = doc.findBlockByNumber(i)
                data = b.userData()
                if data and data.hasBookmark:
                    state["bookmarks"].append(i)

    def contextMenuEvent(self, event):
        """Custom context menu: format, swoop search, goto line, select all."""
        menu = QMenu(self)
        if callable(self.formatFunction):
            formatAction = QAction("Format\tALT-SHIFT-F", self)
            formatAction.triggered.connect(lambda: self.setTextSafe((self.formatFunction(unicode(self.toPlainText())))))
            menu.addAction(formatAction)
        swoopAction = QAction("Swoop search\tF3", self)
        swoopAction.triggered.connect(lambda: self.swoopSearch(True))
        menu.addAction(swoopAction)
        gotoLineAction = QAction("Goto line\tCtrl-G", self)
        gotoLineAction.triggered.connect(self.gotoLine)
        menu.addAction(gotoLineAction)
        selectAllAction = QAction("Select All", self)
        selectAllAction.triggered.connect(self.selectAll)
        menu.addAction(selectAllAction)
        menu.popup(event.globalPos())

    def wheelEvent(self, event):
        """Ctrl+wheel changes the font size (8..20 px); otherwise scroll as usual."""
        shift = event.modifiers() & Qt.ShiftModifier
        ctrl = event.modifiers() & Qt.ControlModifier
        alt = event.modifiers() & Qt.AltModifier
        if ctrl:
            d = event.delta() / abs(event.delta())  # +1 or -1 per notch
            self.currentFontPointSize = clamp(8, 20, self.currentFontPointSize + d)
            self.setStyleSheet("font-size: %dpx;"%self.currentFontPointSize)
        else:
            QTextEdit.wheelEvent(self, event)

    def setTextSafe(self, text, withUndo=True):
        """Replace the whole document while preserving cursor and scroll.

        With *withUndo* the replacement is a single undoable edit; otherwise
        setText() is used and the undo stack is reset.
        """
        scrollBar = self.verticalScrollBar()
        self.blockSignals(True)
        scrollBar.blockSignals(True)
        scroll = scrollBar.value()
        cursor = self.textCursor()
        pos = cursor.position()
        if withUndo:
            cursor.select(QTextCursor.Document)
            cursor.beginEditBlock()
            cursor.removeSelectedText()
            cursor.insertText(text)
            cursor.endEditBlock()
        else:
            self.setText(text)
        if pos < len(text):
            cursor.setPosition(pos)
            self.setTextCursor(cursor)
        scrollBar.setValue(scroll)
        self.blockSignals(False)
        scrollBar.blockSignals(False)

    def keyPressEvent(self, event):
        """Editor shortcuts; unhandled keys fall through to QTextEdit."""
        shift = event.modifiers() & Qt.ShiftModifier
        ctrl = event.modifiers() & Qt.ControlModifier
        alt = event.modifiers() & Qt.AltModifier
        key = event.key()
        if alt and shift and key == Qt.Key_F:
            # Re-format the whole document via the pluggable formatter.
            if callable(self.formatFunction):
                self.setTextSafe((self.formatFunction(unicode(self.toPlainText()))))
        elif alt and key == Qt.Key_M: # back to indentation
            cursor = self.textCursor()
            linePos = cursor.block().position()
            cursor.select(QTextCursor.LineUnderCursor)
            text = cursor.selectedText()
            cursor.clearSelection()
            found = re.findall("^\\s*", unicode(text))
            offset = len(found[0]) if found else 0
            cursor.setPosition(linePos + offset)
            self.setTextCursor(cursor)
        elif ctrl and key == Qt.Key_H: # highlight selected
            self.highlightSelected()
        elif ctrl and alt and key == Qt.Key_Space:
            # Select the contents of the enclosing bracket pair.
            cursor = self.textCursor()
            pos = cursor.position()
            _, start, end = findBracketSpans(unicode(self.toPlainText()), pos)
            if start != end:
                cursor.setPosition(start+1)
                cursor.setPosition(end, QTextCursor.KeepAnchor)
                self.setTextCursor(cursor)
        elif key in [Qt.Key_Left, Qt.Key_Right]:
            QTextEdit.keyPressEvent(self, event)
            self.completionWidget.hide()
        elif key == Qt.Key_F12: # full screen editor mode
            pass
        elif alt and key == Qt.Key_F2: # set bookmark
            self.setBookmark()
        elif key == Qt.Key_F2: # next bookmark
            # Wrap around to the top when no bookmark was found below.
            n = self.textCursor().block().blockNumber()
            self.gotoNextBookmark()
            if self.textCursor().block().blockNumber() == n:
                self.gotoNextBookmark(0)
        elif key == Qt.Key_F3: # emacs swoop
            self.swoopSearch(not ctrl)
        elif ctrl and key == Qt.Key_G: # goto line
            self.gotoLine()
        elif key == Qt.Key_Escape:
            self.completionWidget.hide()
        elif key == Qt.Key_Return:
            if self.completionWidget.isVisible():
                self.replaceWithAutoCompletion()
                self.completionWidget.hide()
            else:
                # Auto-indent: copy the current line's leading whitespace.
                cursor = self.textCursor()
                block = unicode(cursor.block().text())
                spc = re.search("^(\\s*)", block).groups("")[0]
                QTextEdit.keyPressEvent(self, event)
                if spc:
                    cursor.insertText(spc)
                    self.setTextCursor(cursor)
        elif key == Qt.Key_Backtab:
            # Shift-Tab: unindent the selected lines by one tab-stop.
            cursor = self.textCursor()
            tabSpaces = " "*CodeEditorWidget.TabSpaces
            start, end = cursor.selectionStart(), cursor.selectionEnd()
            cursor.clearSelection()
            cursor.setPosition(start)
            cursor.beginEditBlock()
            while cursor.position() < end:
                cursor.movePosition(QTextCursor.StartOfLine)
                cursor.movePosition(QTextCursor.NextWord, QTextCursor.KeepAnchor)
                selText = cursor.selectedText()
                # if the text starts with the tab_char, replace it
                if selText.startswith(tabSpaces):
                    text = selText.replace(tabSpaces, "", 1)
                    end -= len(tabSpaces)
                    cursor.insertText(text)
                if not cursor.movePosition(QTextCursor.Down):
                    break
            cursor.endEditBlock()
        elif alt and key == Qt.Key_Up: # move line up
            self.moveLineUp()
        elif alt and key == Qt.Key_Down: # move line down
            self.moveLineDown()
        elif key in [Qt.Key_Up, Qt.Key_Down, Qt.Key_PageDown, Qt.Key_PageUp]:
            if self.completionWidget.isVisible():
                # Navigate the completion popup instead of the document.
                if key == Qt.Key_Down:
                    d = 1
                elif key == Qt.Key_Up:
                    d = -1
                elif key == Qt.Key_PageDown:
                    d = 10
                elif key == Qt.Key_PageUp:
                    d = -10
                line = self.completionWidget.currentLine()
                highlightLine(self.completionWidget, line, clear=True)
                highlightLine(self.completionWidget, clamp(0, self.completionWidget.lineCount()-1, line+d))
            else:
                QTextEdit.keyPressEvent(self, event)
        elif ctrl and key == Qt.Key_L: # center line
            self.centerLine()
        elif ctrl and key == Qt.Key_K: # kill line
            self.killLine()
        elif ctrl and key == Qt.Key_O: # remove redundant lines
            cursor = self.textCursor()
            cursor.beginEditBlock()
            if not unicode(cursor.block().text()).strip():
                cursor.movePosition(QTextCursor.StartOfBlock)
                cursor.movePosition(QTextCursor.NextBlock, QTextCursor.KeepAnchor)
                cursor.removeSelectedText()
                cursor.movePosition(QTextCursor.Up)
            while not unicode(cursor.block().text()).strip() and not cursor.atStart(): # remove empty lines but last one
                if unicode(cursor.block().previous().text()):
                    break
                cursor.movePosition(QTextCursor.StartOfBlock)
                cursor.movePosition(QTextCursor.NextBlock, QTextCursor.KeepAnchor)
                cursor.removeSelectedText()
                cursor.movePosition(QTextCursor.Up)
            cursor.endEditBlock()
            self.setTextCursor(cursor)
        elif ctrl and key in [Qt.Key_BracketLeft, Qt.Key_BracketRight]:
            # Jump to the opening/closing bracket of the enclosing pair.
            cursor = self.textCursor()
            pos = cursor.position()
            _, start, end = findBracketSpans(unicode(self.toPlainText()), pos)
            if start != end:
                cursor.setPosition(start if key == Qt.Key_BracketLeft else end)
                self.setTextCursor(cursor)
        elif ctrl and key == Qt.Key_D: # duplicate line
            cursor = self.textCursor()
            line = cursor.block().text()
            cursor.movePosition(QTextCursor.EndOfBlock)
            cursor.beginEditBlock()
            cursor.insertBlock()
            cursor.insertText(line)
            cursor.endEditBlock()
            self.setTextCursor(cursor)
        elif ctrl and key == Qt.Key_Semicolon: # comment
            cursor = self.textCursor()
            if cursor.selectedText():
                self.toggleCommentBlock()
            else:
                self.toggleCommentLine()
        else:
            QTextEdit.keyPressEvent(self, event)

    def swoopSearch(self, update=True):
        """Open the modal swoop search dialog (optionally re-initializing it)."""
        if update:
            self.swoopSearchDialog.update()
        self.swoopSearchDialog.exec_()

    def moveLineUp(self):
        """Swap the current line with the one above, keeping the column."""
        cursor = self.textCursor()
        if not cursor.block().previous().isValid() or cursor.selectedText():
            return
        text = cursor.block().text()
        pos = cursor.positionInBlock()
        cursor.beginEditBlock()
        cursor.movePosition(QTextCursor.StartOfBlock)
        cursor.movePosition(QTextCursor.EndOfBlock, QTextCursor.KeepAnchor)
        cursor.removeSelectedText()
        cursor.deletePreviousChar()  # remove the now-empty line's newline
        cursor.movePosition(QTextCursor.StartOfBlock)
        cursor.insertText(text)
        cursor.insertBlock()
        cursor.endEditBlock()
        cursor.movePosition(QTextCursor.Up)
        cursor.movePosition(QTextCursor.StartOfBlock)
        cursor.movePosition(QTextCursor.Right, n=pos)  # restore the column
        self.setTextCursor(cursor)

    def moveLineDown(self):
        """Swap the current line with the one below, keeping the column."""
        cursor = self.textCursor()
        if not cursor.block().next().isValid() or cursor.selectedText():
            return
        text = cursor.block().text()
        pos = cursor.positionInBlock()
        cursor.beginEditBlock()
        cursor.movePosition(QTextCursor.StartOfBlock)
        cursor.movePosition(QTextCursor.EndOfBlock, QTextCursor.KeepAnchor)
        cursor.removeSelectedText()
        cursor.deleteChar()  # remove the now-empty line's newline
        cursor.movePosition(QTextCursor.EndOfBlock)
        cursor.insertBlock()
        cursor.insertText(text)
        cursor.endEditBlock()
        cursor.movePosition(QTextCursor.StartOfBlock)
        cursor.movePosition(QTextCursor.Right, n=pos)  # restore the column
        self.setTextCursor(cursor)

    def centerLine(self):
        """Scroll so the cursor line sits in the vertical middle of the view."""
        cursorY = self.cursorRect().top()
        scrollBar = self.verticalScrollBar()
        scrollBar.setValue(scrollBar.value() + cursorY - self.geometry().height()/2)

    def killLine(self):
        """Emacs-style kill: delete to end of line, or the newline when empty."""
        cursor = self.textCursor()
        if not cursor.block().text():
            cursor.movePosition(QTextCursor.StartOfBlock)
            cursor.movePosition(QTextCursor.NextBlock, QTextCursor.KeepAnchor)
        else:
            cursor.movePosition(QTextCursor.EndOfBlock, QTextCursor.KeepAnchor)
        cursor.removeSelectedText()
        self.setTextCursor(cursor)

    def toggleCommentLine(self):
        """Toggle a '# ' comment at the current line's indentation level."""
        comment = "# "
        commentSize = len(comment)
        cursor = self.textCursor()
        pos = cursor.position()
        linePos = cursor.block().position()
        cursor.select(QTextCursor.LineUnderCursor)
        lineText = cursor.selectedText()
        cursor.clearSelection()
        found = re.findall("^\\s*", unicode(lineText))
        offset = len(found[0]) if found else 0
        cursor.setPosition(linePos + offset)  # first non-whitespace column
        newPos = pos + commentSize
        cursor.beginEditBlock()
        if not re.match("^\\s*%s"%comment, lineText):
            cursor.insertText(comment)
        else:
            for i in range(len(comment)):
                cursor.deleteChar()
            newPos = pos - commentSize
        cursor.endEditBlock()
        cursor.setPosition(newPos)  # keep the cursor on the same character
        self.setTextCursor(cursor)

    def gotoLine(self, line=-1):
        """Move the cursor to 1-based *line*; prompt the user when -1."""
        if line == -1:
            cursor = self.textCursor()
            currentLine = cursor.blockNumber()+1
            maxLine = self.document().lineCount()
            line, ok = QInputDialog.getInt(self, "Editor", "Goto line number", currentLine, 1, maxLine)
            if not ok:
                return
        # NOTE(review): when connected to QAction.triggered, 'line' may arrive as
        # False (the 'checked' argument), which does not equal -1 — confirm.
        self.setTextCursor(QTextCursor(self.document().findBlockByLineNumber(line-1)))

    def replaceWithAutoCompletion(self):
        """Replace the word under the cursor with the highlighted completion.

        With Ctrl held, the whole completion line is used; otherwise only its
        first whitespace-separated token.
        """
        if self.completionWidget.lineCount() == 0:
            return
        modifiers = QApplication.queryKeyboardModifiers()
        shift = modifiers & Qt.ShiftModifier
        ctrl = modifiers & Qt.ControlModifier
        alt = modifiers & Qt.AltModifier
        block = self.completionWidget.textCursor().block()
        row = block.blockNumber() if block.isValid() else 0
        if ctrl:
            word = unicode(block.text())
        else:
            word = re.split("\\s*", unicode(block.text()))[0]
        cursor = self.textCursor()
        cursor.setPosition(self.currentWord[1])
        cursor.setPosition(self.currentWord[2], QTextCursor.KeepAnchor)
        cursor.removeSelectedText()
        cursor.insertText(word)
        self.setTextCursor(cursor)
        self.canShowCompletions = False  # suppress the popup for this edit

    def highlightSelected(self):
        """Highlight all occurrences of the selection (or the word under the cursor)."""
        cursor = self.textCursor()
        sel = cursor.selectedText()
        reg = None
        if sel:
            reg = QRegExp("%s"%QRegExp.escape(sel))
        else:
            word, _,_ = wordAtCursor(cursor)
            if word:
                if word.startswith("@"):
                    reg = QRegExp("@\\b%s\\b"%QRegExp.escape(word[1:]))
                else:
                    reg = QRegExp("\\b%s\\b"%QRegExp.escape(word))
        self.syntax.highlightedWordRegexp = reg
        self.blockSignals(True)
        self.syntax.rehighlight()
        self.blockSignals(False)

    def editorCursorPositionChanged(self):
        """Track the cursor: hide the popup on jumps, highlight matching brackets."""
        cursor = self.textCursor()
        pos = cursor.position()
        if abs(pos - self.prevCursorPosition) > 1:
            # Cursor jumped (not plain typing) - the popup is stale.
            self.completionWidget.hide()
        if cursor.selectedText():
            self.setExtraSelections([])
            return
        self.saveState(cursor=True, scroll=False, bookmarks=False)
        self.prevCursorPosition = pos
        text, start, end = findBracketSpans(unicode(self.toPlainText()), pos)
        extra = []
        if start != end:
            # Highlight both the opening and the closing bracket.
            for pos in [start, end]:
                cursor = self.textCursor()
                cursor.setPosition(pos)
                cursor.setPosition(pos+1, QTextCursor.KeepAnchor)
                es = QTextEdit.ExtraSelection()
                es.cursor = cursor
                es.format.setForeground(QColor(0, 0, 0))
                es.format.setBackground(QBrush(QColor(70, 130, 140)))
                extra.append(es)
        self.setExtraSelections(extra)

    def editorTextChanged(self):
        """Collect candidate words from the document and offer completions."""
        text = unicode(self.toPlainText())
        cursor = self.textCursor()
        pos = cursor.position()
        self.currentWord = wordAtCursor(cursor)
        currentWord, start, end = self.currentWord
        if start == 0 and end - start <= 1:
            return  # too little context to complete
        words = set(self.words)
        words |= set(re.split("[^\\w@]+", text))
        words -= set([currentWord])  # don't offer the word being typed
        if currentWord:
            self.searchStartWord = self.currentWord
            items = [w for w in words if re.match(currentWord, w, re.IGNORECASE)]
            if items and cursor.position() == end:
                self.showCompletions(items)
            else:
                self.completionWidget.hide()
        else:
            self.completionWidget.hide()

    def showCompletions(self, items):
        """Show the completion popup next to the cursor with *items*."""
        rect = self.cursorRect()
        c = rect.center()
        self.completionWidget.setGeometry(c.x(), c.y()+10, 200, 200)
        if items:
            self.completionWidget.update(items)
            self.completionWidget.show()
def findBracketSpans(text, pos, brackets="([{"):
    """Locate the innermost unmatched bracket pair enclosing *pos*.

    Scans backwards from *pos* for an unmatched opening bracket whose kind is
    listed in *brackets*, then forwards for its matching closing bracket.
    Returns ``(span, start, end)`` where ``text[start]`` is the opening
    bracket, ``text[end]`` the closing one, and ``span`` is
    ``text[start:end]`` (closing bracket excluded).  Returns ``("", 0, 0)``
    when no enclosing pair is found, or when the current line up to *pos*
    neither starts with whitespace nor contains an opening bracket.
    """
    if not text:
        return ("", 0, 0)
    size = len(text)
    # Heuristic guard: only search when the current line, up to the cursor,
    # starts with whitespace or contains an opening bracket somewhere.
    lineStart = pos - 1
    while lineStart > 0 and text[lineStart] != "\n":
        lineStart -= 1
    if re.search("^\\s+|[{\\(\\[]+", text[lineStart + 1:pos]) is None:
        return ("", 0, 0)

    openers = "([{"
    closers = ")]}"
    counts = dict.fromkeys(openers, 0)
    lastBracket = ""  # most recently scanned bracket character
    start = pos - 1
    foundOpen = False
    # Backward scan: an opener decrements its counter, a closer increments it.
    # The first counter to go negative (for an allowed kind) marks the
    # unmatched opening bracket that encloses pos.
    while True:
        if any(counts[o] < 0 and o in brackets for o in openers):
            foundOpen = True
            break
        if start < 0:
            break
        ch = text[start]
        if ch in openers:
            lastBracket = ch
            counts[ch] -= 1
        elif ch in closers:
            lastBracket = ch
            counts[openers[closers.index(ch)]] += 1
        start -= 1
    start += 1  # step back onto the opening bracket (or to index 0)

    end = pos
    if foundOpen:
        # Forward scan from pos for the matching closer of lastBracket
        # (which is necessarily the opener that went negative above).
        counts = dict.fromkeys(openers, 0)
        foundClose = False
        while True:
            if counts[lastBracket] < 0:
                foundClose = True
                break
            if end >= size:
                break
            ch = text[end]
            if ch in openers:
                counts[ch] += 1
            elif ch in closers:
                counts[openers[closers.index(ch)]] -= 1
            end += 1
        end -= 1  # step back onto the closing bracket
        if foundClose:
            return (text[start:end], start, end)
    return ("", 0, 0)
def wordAtCursor(cursor):
    """Return ``(word, start, end)`` for the word under *cursor*.

    A word is a run of ``\\w`` characters; a leading ``@`` is included only
    as the first character.  *start*/*end* are document positions bounding
    the word; *word* may be empty.  The passed cursor is not modified.
    """
    cursor = QTextCursor(cursor)
    pos = cursor.position()
    # Collect word characters to the left of the cursor (built reversed).
    lpart = ""
    start = pos-1
    ch = unicode(cursor.document().characterAt(start))
    while ch and re.match("[@\\w]", ch):
        lpart += ch
        start -= 1
        if ch == "@": # @ can be the first character only
            break
        ch = unicode(cursor.document().characterAt(start))
    # Collect word characters to the right of the cursor.
    rpart = ""
    end = pos
    ch = unicode(cursor.document().characterAt(end))
    while ch and re.match("[\\w]", ch):
        rpart += ch
        end += 1
        ch = unicode(cursor.document().characterAt(end))
    return (lpart[::-1]+rpart, start+1, end)
class CompletionWidget(QTextEdit):
    """Frameless popup listing completion candidates, one per line.

    Navigation keys move a highlighted row; the owning editor reads the
    selected row via currentLine()/textCursor().
    """
    def __init__(self, items, **kwargs):
        super(CompletionWidget, self).__init__(**kwargs)
        self.setWindowFlags(Qt.FramelessWindowHint)
        self.setAttribute(Qt.WA_ShowWithoutActivating)  # don't steal focus from the editor
        self.setReadOnly(True)
        self.setWordWrapMode(QTextOption.NoWrap)
        self.update([])

    def lineCount(self):
        """Number of candidate rows currently shown."""
        return self.document().blockCount()

    def currentLine(self):
        """Index of the row the cursor is on."""
        return self.textCursor().block().blockNumber()

    def mousePressEvent(self, event):
        """Clicks give focus back to the owning editor instead of the popup."""
        self.parent().setFocus()
        event.accept()

    def keyPressEvent(self, event):
        """Arrow/Page keys move the highlighted row; Return is ignored here."""
        shift = event.modifiers() & Qt.ShiftModifier
        ctrl = event.modifiers() & Qt.ControlModifier
        alt = event.modifiers() & Qt.AltModifier
        line = self.textCursor().block().blockNumber()
        lineCount = self.document().blockCount()-1
        if event.key() == Qt.Key_Down:
            highlightLine(self, clamp(0, lineCount, line), clear=True)
            highlightLine(self, clamp(0, lineCount, line+1))
        elif event.key() == Qt.Key_Up:
            highlightLine(self, clamp(0, lineCount, line), clear=True)
            highlightLine(self, clamp(0, lineCount, line-1))
        elif event.key() == Qt.Key_PageDown:
            highlightLine(self, clamp(0, lineCount, line), clear=True)
            highlightLine(self, clamp(0, lineCount, line+5))
        elif event.key() == Qt.Key_PageUp:
            highlightLine(self, clamp(0, lineCount, line), clear=True)
            highlightLine(self, clamp(0, lineCount, line-5))
        elif event.key() == Qt.Key_Return: # accept
            pass
        else:
            QTextEdit.keyPressEvent(self, event)

    def update(self, items):
        """Fill the popup with *items* (no-op for an empty list) and resize."""
        if not items:
            return
        self.clear()
        self.setCurrentCharFormat(QTextCharFormat())
        lines = []
        for line in items:
            lines.append(line)
        self.setText("\n".join(lines))
        highlightLine(self, 0)  # pre-select the first candidate
        self.autoResize()

    def autoResize(self):
        """Fit the popup to its content, clamped to 500px wide / 400px tall
        and to the space left below the editor cursor."""
        w = self.document().idealWidth() + 10
        h = self.document().blockCount()*self.cursorRect().height() + 30
        maxHeight = clamp(0, 400, self.parent().height() - self.parent().cursorRect().top() - 30)
        self.setFixedSize(clamp(0, 500, w), clamp(0, maxHeight, h))

    def showEvent(self, event):
        """Qt hook: re-fit to content whenever the popup is shown."""
        self.autoResize()
class NumberBarWidget(QWidget):
    """Gutter widget painting line numbers and bookmark markers for an editor."""
    def __init__(self, edit, *kwargs):
        # NOTE(review): '*kwargs' is positional varargs despite the name.
        super(NumberBarWidget, self).__init__(*kwargs)
        self.edit = edit  # the CodeEditorWidget this bar decorates
        self.highest_line = 0  # largest line number seen, used to size the bar

    def update(self, *args):
        """Adopt the editor's stylesheet and widen the bar to fit the numbers."""
        self.setStyleSheet(self.edit.styleSheet())
        width = self.fontMetrics().width(str(self.highest_line)) + 19
        self.setFixedWidth(width)
        QWidget.update(self, *args)

    def paintEvent(self, event):
        """Paint a line number (and bookmark marker) for each visible block."""
        contents_y = self.edit.verticalScrollBar().value()
        page_bottom = contents_y + self.edit.viewport().height()
        font_metrics = self.fontMetrics()
        current_block = self.edit.document().findBlock(self.edit.textCursor().position())
        painter = QPainter(self)
        line_count = 0
        # Iterate over all text blocks in the document.
        block = self.edit.document().begin()
        while block.isValid():
            line_count += 1
            # The top left position of the block in the document
            position = self.edit.document().documentLayout().blockBoundingRect(block).topLeft()
            # Check if the position of the block is out side of the visible
            # area.
            if position.y() > page_bottom:
                break
            # Draw the line number right justified at the y position of the
            # line. 3 is a magic padding number. drawText(x, y, text).
            painter.drawText(self.width() - font_metrics.width(str(line_count)) - 3, round(position.y()) - contents_y + font_metrics.ascent(), str(line_count))
            data = block.userData()
            if data and data.hasBookmark:
                painter.drawText(3, round(position.y()) - contents_y + font_metrics.ascent(), u"►")
            block = block.next()
        self.highest_line = self.edit.document().blockCount()
        painter.end()
        QWidget.paintEvent(self, event)
class TextBlockData(QTextBlockUserData):
    """Per-line user data attached to text blocks; carries the bookmark flag."""
    def __init__(self):
        super(TextBlockData, self).__init__()
        self.hasBookmark = False  # True when this line is bookmarked
class CodeEditorWithNumbersWidget(QWidget):
    """Composite widget: a CodeEditorWidget with a line-number/bookmark bar."""
    def __init__(self, **kwargs):
        super(CodeEditorWithNumbersWidget, self).__init__(**kwargs)
        self.editorWidget = CodeEditorWidget()
        self.numberBarWidget = NumberBarWidget(self.editorWidget)
        # Keep the gutter in sync with document size, layout and scrolling.
        self.editorWidget.document().blockCountChanged.connect(lambda _: self.numberBarWidget.update())
        self.editorWidget.document().documentLayoutChanged.connect(self.numberBarWidget.update)
        self.editorWidget.verticalScrollBar().valueChanged.connect(lambda _: self.numberBarWidget.update())
        hlayout = QHBoxLayout()
        hlayout.setContentsMargins(0, 0, 0, 0)
        hlayout.addWidget(self.numberBarWidget)
        hlayout.addWidget(self.editorWidget)
        self.setLayout(hlayout)
'''
app = QApplication([])
e = CodeEditorWithNumbersWidget()
e.show()
app.exec_()
''' | 49,769 | 14,188 |
#!/usr/bin/python3
import sys
import importlib
import os
import inspect
from importlib import util as importlib_util
from .logb import getLogger
# from .pdf2text import simple_fact as pdf_sfact
from .html2text import simple_fact as html_sfact
from .plain2text import simple_fact as pln_sfact
################################
# Constants
################################
# Directory containing this module; falls back to './' when __file__ has no
# directory component (module loaded from the current working directory).
MODU_PATH = os.path.dirname(__file__) if os.path.dirname(__file__) else './'
''' Path of current module '''
################################
# Class Definition
################################
class TEAgent:
    r'''
    Text-extraction agent.

    Dispatches (mtype, url, content) to a MIME-type-specific handler and can
    load URL-pattern policy modules from a policy folder to customize the
    extraction of particular sites.
    '''
    # Error-message template used when no handler exists for a MIME type.
    ERR_MSG_MTYPE_NOT_SUPPORT = 'Content type={mtype} is not supported yet!'
    # Default (empty) extraction result returned on failure.
    # NOTE(review): this dict is shared across calls; callers must not mutate it.
    DEFAULT_RST = {'title': '', 'text': '', 'te_suc': False}

    def __init__(self, ext_title=False, disable_policy=False, policy_path=None):
        r'''
        Constructor

        :param ext_title: True to extract title; False otherwise
        :param disable_policy: True to disable loading policy
        :param policy_path: Folder to load policy modules from; defaults to the
                            package-local ``policy`` directory when None.
        '''
        self.logger = getLogger(os.path.basename(__file__))
        self.handlers = {
            'text/html': html_sfact(ext_title=ext_title),
            # 'application/pdf': pdf_sfact(ext_title=ext_title),
            'text/plain': pln_sfact(ext_title=ext_title)
        }  # key as Media type; value as corresponding handler
        if not disable_policy:
            if policy_path is None:
                policy_path = os.path.join(os.path.abspath(MODU_PATH), 'policy')
            self.load_policy(policy_path)

    def load_policy(self, policy_path, namespace=None, target_policy_names=None):
        r'''
        Load the policy modules stored in a given folder.

        Every ``policy*.py`` file is imported; each module-level callable
        (classes excluded) carrying a ``url_ptn`` attribute is registered on
        the handler matching its ``mime`` attribute.

        :param policy_path: Path of folder that stores the policy files
        :param namespace: Namespace used to control the import path
        :param target_policy_names: If given, only the policy module names listed here are loaded.
        :return:
            Number of policy callables registered, or -1 when the folder does not exist.
        '''
        if not os.path.isdir(policy_path):
            # Fixed: logger.warn() is a deprecated alias of logger.warning().
            self.logger.warning('Policy folder={} does not exist!'.format(policy_path))
            return -1
        pc = 0
        for pf in [f for f in os.listdir(policy_path) if f.startswith('policy') and f.endswith('.py')]:
            module_base = pf.split('.')[0]
            if target_policy_names and module_base not in target_policy_names:
                self.logger.warning('Ignore {}!'.format(pf))
                continue
            self.logger.debug('Loading {}...'.format(pf))
            try:
                module_name = 'purifier.policy{}.{}'.format('' if namespace is None else ".{}".format(namespace), module_base)
                spec = importlib_util.spec_from_file_location(module_name, os.path.join(policy_path, pf))
                module = importlib_util.module_from_spec(spec)
                spec.loader.exec_module(module)
                # Register every module-level non-class callable tagged with `url_ptn`.
                for pn in dir(module):
                    po = getattr(module, pn)
                    if callable(po) and not inspect.isclass(po) and hasattr(po, 'url_ptn'):
                        self.logger.debug('\tRegister {}'.format(po.url_ptn))
                        po.module_name = module_name
                        po.policy_name = pn
                        self.handlers[po.mime].regr(po.url_ptn, po)
                        pc += 1
            except Exception:
                # Was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.
                self.logger.exception('Fail to load policy from {}!'.format(pf))
        return pc

    def parse(self, mtype, url, content, do_ext_link=False):
        r'''
        Parse the given content to do text extraction.

        :param mtype: Content type in string, e.g. 'text/html'; parameters
                      after ';' (such as charset) are ignored.
        :param url: The source URL
        :param content: The corresponding content.
        :param do_ext_link: True to extract URL link from content (default: False)
        :return:
            tuple(is_success, extraction result, reason dict).  The reason dict
            always has key 'reason' and, on failure, an 'err' message.
        '''
        try:
            mtype = mtype.split(';')[0].strip()
            handler = self.handlers.get(mtype, None)
            if handler:
                try:
                    extract_rst = handler(url, content, do_ext_link)
                except Exception:
                    # Was a bare `except:`; keep the error-reporting behavior.
                    exc_type, exc_value, exc_traceback = sys.exc_info()
                    return (False, TEAgent.DEFAULT_RST, {'reason': handler.reason(), 'err': "{}: {}".format(exc_type, exc_value)})
                if isinstance(extract_rst, dict) and 'title' not in extract_rst:
                    extract_rst['title'] = ''
                # Success when the handler reports te_suc (dict) or returned non-empty text (str).
                if (isinstance(extract_rst, dict) and extract_rst.get('te_suc', True)) or (isinstance(extract_rst, str) and extract_rst):
                    return (True, extract_rst, {'reason': handler.reason()})
                else:
                    return (False, extract_rst, {'reason': handler.reason(), 'err': 'Empty TE' if not handler.err_msg else handler.err_msg})
            else:
                self.logger.info("Use default agent...")
                return (False, TEAgent.DEFAULT_RST, {'reason': '?', 'err': TEAgent.ERR_MSG_MTYPE_NOT_SUPPORT.format(mtype=mtype, url=url)})
        except Exception:
            self.logger.exception('Fail to parse content from URL={}!'.format(url))
            exc_type, exc_value, exc_traceback = sys.exc_info()
            return (False, TEAgent.DEFAULT_RST, {'reason': '?', 'err': "{}: {}".format(exc_type, exc_value)})
| 5,523 | 1,561 |
import os
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
from collections import defaultdict
# import fire
timestamp = lambda dt: (dt - datetime(2019, 1, 1)).total_seconds()
## ridgecrest
class Config:
degree2km = np.pi * 6371 / 180
center = (35.705, -117.504)
horizontal = 0.5
vertical = 0.5
def load_eqnet_catalog(fname, config=Config()):
catalog = pd.read_csv(fname, sep="\t", parse_dates=['time'])
catalog["date"] = catalog["time"]
catalog["X"] = catalog["x(km)"]
catalog["Y"] = catalog["y(km)"]
catalog["Z"] = catalog["z(km)"]
catalog["time"] = catalog["date"]
catalog["magnitude"] = 0.0
catalog["longitude"] = catalog["X"] / config.degree2km + (config.center[1] - config.horizontal)
catalog["latitude"] = catalog["Y"] / config.degree2km + (config.center[0] - config.vertical)
catalog["depth(m)"] = catalog["Z"] * 1e3
return catalog
def load_scsn(config=Config()):
if not os.path.exists("2019.catalog"):
os.system("wget https://raw.githubusercontent.com/SCEDC/SCEDC-catalogs/master/SCSN/2019.catalog")
catalog = defaultdict(list)
with open("2019.catalog", 'r') as fp:
for line in fp:
if line[0] in ['#', '\n', '\r\n']:
continue
catalog["YYY"].append(line[0:4].strip())
catalog["MM"].append(line[4:7].strip())
catalog["DD"].append(line[7:10].strip())
catalog["HH"].append(line[10:14].strip())
catalog["mm"].append(line[14:17].strip())
catalog["SS.ss"].append(line[17:23].strip())
catalog["LAT-deg"].append(line[23:27].strip())
catalog["LAT-sec"].append(line[27:33].strip())
catalog["LON-deg"].append(line[33:37].strip())
catalog["LON-sec"].append(line[37:43].strip())
catalog["Q"].append(line[43:45].strip())
catalog["MAG"].append(line[45:49].strip())
catalog["DEPTH"].append(line[49:59].strip())
catalog["NPH"].append(line[59:62].strip())
catalog["RMS"].append(line[62:71].strip())
catalog["EVID"].append(line[71:80].strip())
catalog = pd.DataFrame.from_dict(catalog)
catalog["LON"] = -(-catalog["LON-deg"].astype('float') + catalog["LON-sec"].astype('float') / 60)
catalog["LAT"] = catalog["LAT-deg"].astype('float').abs() + catalog["LAT-sec"].astype('float') / 60
catalog['DEPTH'] = catalog['DEPTH'].astype('float')
catalog["date"] = (
catalog["YYY"]
+ "-"
+ catalog["MM"]
+ "-"
+ catalog["DD"]
+ "T"
+ catalog["HH"]
+ ":"
+ catalog["mm"]
+ ":"
+ catalog["SS.ss"]
+ "0"
)
catalog["date"] = catalog["date"].map(datetime.fromisoformat)
catalog["X"] = (catalog["LON"].map(float) - (config.center[1] - config.horizontal)) * config.degree2km
catalog["Y"] = (catalog["LAT"].map(float) - (config.center[0] - config.vertical)) * config.degree2km
catalog["Z"] = catalog['DEPTH'].map(float)
catalog["mag"] = catalog["MAG"].map(float)
catalog["time"] = catalog["date"]
catalog["magnitude"] = catalog["mag"]
catalog["latitude"] = catalog["LAT"]
catalog["longitude"] = catalog["LON"]
catalog["depth(m)"] = catalog["Z"]*1e3
return catalog
def load_Ross2019(config=Config()):
if not os.path.exists("Ross2019.txt"):
os.system("wget https://service.scedc.caltech.edu/ftp/QTMcatalog-ridgecrest/ridgecrest_qtm.tar.gz")
os.system("tar -xzf ridgecrest_qtm.tar.gz")
os.system("rm ridgecrest_qtm.tar.gz")
os.system("mv ridgecrest_qtm.cat Ross2019.txt")
catalog = pd.read_csv(
"Ross2019.txt",
sep='\s+',
header=0,
names=[
"yr",
"mon",
"day",
"hr",
"min",
"sec",
"eID",
"latR",
"lonR",
"depR",
"mag",
"qID",
"cID",
"nbranch",
"qnpair",
"qndiffP",
"qndiffS",
"rmsP",
"rmsS",
"eh",
"ez",
"et",
"latC",
"lonC",
"depC",
],
dtype={
"yr": int,
"mon": int,
"day": int,
"hr": int,
"min": int,
"sec": float,
"eID": int,
"latR": float,
"lonR": float,
"depR": float,
"mag": float,
},
)
catalog["date"] = (
catalog["yr"].map("{:04d}".format)
+ "-"
+ catalog["mon"].map("{:02d}".format)
+ "-"
+ catalog["day"].map("{:02d}".format)
+ "T"
+ catalog["hr"].map("{:02d}".format)
+ ":"
+ catalog["min"].map("{:02d}".format)
+ ":"
+ catalog["sec"].map("{:06.3f}".format)
)
catalog["date"] = catalog["date"].map(datetime.fromisoformat)
catalog["X"] = (catalog["lonR"] - (config.center[1] - config.horizontal)) * config.degree2km
catalog["Y"] = (catalog["latR"] - (config.center[0] - config.vertical)) * config.degree2km
catalog["Z"] = catalog['depR']
catalog["time"] = catalog["date"]
catalog["magnitude"] = catalog["mag"]
catalog["latitude"] = catalog["latR"]
catalog["longitude"] = catalog["lonR"]
return catalog
def load_Shelly2020(config=Config()):
if not os.path.exists("Shelly2020.txt"):
os.system(
"wget -O Shelly2020.txt https://www.sciencebase.gov/catalog/file/get/5dd715f3e4b0695797650d18?f=__disk__db%2F88%2Fa1%2Fdb88a1f6754843800f25bd63712ed438dfa7699f"
)
catalog = pd.read_csv(
"Shelly2020.txt",
sep='\s+',
header=25,
names=["yr", "mon", "day", "hr", "min", "sec", "lat", "lon", "dep", "mag", "ID"],
dtype=str,
)
catalog["date"] = (
catalog["yr"]
+ "-"
+ catalog["mon"]
+ "-"
+ catalog["day"]
+ "T"
+ catalog["hr"]
+ ":"
+ catalog["min"]
+ ":"
+ catalog["sec"]
)
catalog["date"] = catalog["date"].map(datetime.fromisoformat)
catalog["X"] = (catalog["lon"].map(float) - (config.center[1] - config.horizontal)) * config.degree2km
catalog["Y"] = (catalog["lat"].map(float) - (config.center[0] - config.vertical)) * config.degree2km
catalog["Z"] = catalog['dep'].map(float)
catalog["mag"] = catalog["mag"].map(float)
catalog["time"] = catalog["date"]
catalog["magnitude"] = catalog["mag"]
catalog["latitude"] = catalog["lat"]
catalog["longitude"] = catalog["lon"]
return catalog
def load_Liu2020(config=Config()):
if not os.path.exists("Liu2020.txt"):
os.system(
"wget -O Liu2020.txt https://agupubs.onlinelibrary.wiley.com/action/downloadSupplement\?doi\=10.1029%2F2019GL086189\&file\=grl60250-sup-0002-2019GL086189-ts01.txt"
)
catalog = pd.read_csv(
"Liu2020.txt",
sep='\s+',
header=1,
names=["yr", "mon", "day", "hr", "min", "sec", "lat", "lon", "dep", "mag"],
dtype={
"yr": int,
"mon": int,
"day": int,
"hr": int,
"min": int,
"sec": float,
"lat": float,
"lon": float,
"dep": float,
"mag": float,
},
)
catalog["date"] = (
catalog["yr"].map("{:04d}".format)
+ "-"
+ catalog["mon"].map("{:02d}".format)
+ "-"
+ catalog["day"].map("{:02d}".format)
+ "T"
+ catalog["hr"].map("{:02d}".format)
+ ":"
+ catalog["min"].map("{:02d}".format)
+ ":"
+ catalog["sec"].map("{:06.3f}".format)
)
catalog["date"] = catalog["date"].map(datetime.fromisoformat)
catalog["X"] = (catalog["lon"] - (config.center[1] - config.horizontal)) * config.degree2km
catalog["Y"] = (catalog["lat"] - (config.center[0] - config.vertical)) * config.degree2km
catalog["Z"] = catalog['dep']
catalog["time"] = catalog["date"]
catalog["magnitude"] = catalog["mag"]
catalog["latitude"] = catalog["lat"]
catalog["longitude"] = catalog["lon"]
return catalog
def load_GaMMA_catalog(fname, config=Config()):
catalog = pd.read_csv(fname, sep='\t',)
catalog["date"] = catalog["time"].map(datetime.fromisoformat)
catalog["X"] = (catalog["longitude"].map(float) - (config.center[1] - config.horizontal)) * config.degree2km
catalog["Y"] = (catalog["latitude"].map(float) - (config.center[0] - config.vertical)) * config.degree2km
catalog["Z"] = catalog['depth(m)'].map(float)/1e3
catalog["mag"] = catalog["magnitude"]
return catalog
def filter_catalog(catalog, start_datetime, end_datetime, xmin, xmax, ymin, ymax, config=Config()):
selected_catalog = catalog[
(catalog["date"] >= start_datetime)
& (catalog["date"] <= end_datetime)
& (catalog['X'] >= xmin)
& (catalog['X'] <= xmax)
& (catalog['Y'] >= ymin)
& (catalog['Y'] <= ymax)
]
print(f"Filtered catalog {start_datetime}-{end_datetime}: {len(selected_catalog)} events")
t_event = []
xyz_event = []
mag_event = []
for _, row in selected_catalog.iterrows():
t_event.append(timestamp(row["date"]))
xyz_event.append([row['X'], row['Y'], row['Z']])
if "mag" in row:
mag_event.append(row["mag"])
t_event = np.array(t_event)
xyz_event = np.array(xyz_event)
mag_event = np.array(mag_event)
return t_event, xyz_event, mag_event, selected_catalog
def calc_detection_performance(t_pred, t_true, time_accuracy_threshold=3):
# time_accuracy_threshold = 3 #s
evaluation_matrix = np.abs(t_pred[np.newaxis, :] - t_true[:, np.newaxis]) < time_accuracy_threshold # s
recalls = np.sum(evaluation_matrix, axis=1) > 0
num_recall = np.sum(recalls)
num_precision = np.sum(np.sum(evaluation_matrix, axis=0) > 0)
if (len(t_true) > 0) and (len(t_pred) > 0):
recall = num_recall / len(t_true)
precision = num_precision / len(t_pred)
f1 = 2 * recall * precision / (recall + precision)
return recall, precision, f1
def calc_time_loc_error(t_pred, xyz_pred, t_true, xyz_true, time_accuracy_threshold):
evaluation_matrix = np.abs(t_pred[np.newaxis, :] - t_true[:, np.newaxis]) < time_accuracy_threshold # s
diff_time = t_pred[np.newaxis, :] - t_true[:, np.newaxis]
matched_idx = np.argmin(np.abs(diff_time), axis=1)[np.sum(evaluation_matrix, axis=1) > 0]
recalled_idx = np.arange(xyz_true.shape[0])[np.sum(evaluation_matrix, axis=1) > 0]
err_time = diff_time[np.arange(diff_time.shape[0]), np.argmin(np.abs(diff_time), axis=1)][
np.sum(evaluation_matrix, axis=1) > 0
]
err_z = []
err_xy = []
err_xyz = []
err_loc = []
t = []
for i in range(len(recalled_idx)):
# tmp_z = np.abs(xyz_pred[matched_idx[i], 2] - xyz_true[recalled_idx[i], 2])
tmp_z = xyz_pred[matched_idx[i], 2] - xyz_true[recalled_idx[i], 2]
tmp_xy = np.linalg.norm(xyz_pred[matched_idx[i], 0:2] - xyz_true[recalled_idx[i], 0:2])
tmp_xyz = xyz_pred[matched_idx[i], :] - xyz_true[recalled_idx[i], :]
tmp_loc = np.linalg.norm(xyz_pred[matched_idx[i], 0:3] - xyz_true[recalled_idx[i], 0:3])
err_z.append(tmp_z)
err_xy.append(tmp_xy)
err_xyz.append(tmp_xyz)
err_loc.append(tmp_loc)
t.append(t_true[recalled_idx[i]])
return np.array(err_time), np.array(err_xyz), np.array(err_xy), np.array(err_z), np.array(err_loc), np.array(t)
def calc_time_mag_error(t_pred, mag_pred, t_true, mag_true, time_accuracy_threshold):
evaluation_matrix = np.abs(t_pred[np.newaxis, :] - t_true[:, np.newaxis]) < time_accuracy_threshold # s
diff_time = t_pred[np.newaxis, :] - t_true[:, np.newaxis]
matched_idx = np.argmin(np.abs(diff_time), axis=1)[np.sum(evaluation_matrix, axis=1) > 0]
recalled_idx = np.arange(mag_true.shape[0])[np.sum(evaluation_matrix, axis=1) > 0]
err_time = diff_time[np.arange(diff_time.shape[0]), np.argmin(np.abs(diff_time), axis=1)][
np.sum(evaluation_matrix, axis=1) > 0
]
err_mag = []
t = []
mag = []
for i in range(len(recalled_idx)):
tmp_mag = mag_pred[matched_idx[i]] - mag_true[recalled_idx[i]]
err_mag.append(tmp_mag)
t.append(t_pred[matched_idx[i]])
mag.append(mag_true[recalled_idx[i]])
return np.array(err_time), np.array(err_mag), np.array(t), np.array(mag)
def plot_loc_error(
t_pred, xyz_pred, t_true, xyz_true, time_accuracy_threshold, fig_name, xlim=None, ylim=None, station_locs=None
):
evaluation_matrix = np.abs(t_pred[np.newaxis, :] - t_true[:, np.newaxis]) < time_accuracy_threshold # s
diff_time = t_pred[np.newaxis, :] - t_true[:, np.newaxis]
matched_idx = np.argmin(np.abs(diff_time), axis=1)[np.sum(evaluation_matrix, axis=1) > 0]
recalled_idx = np.arange(xyz_true.shape[0])[np.sum(evaluation_matrix, axis=1) > 0]
# err_time = diff_time[np.arange(diff_time.shape[0]), np.argmin(np.abs(diff_time), axis=1)][
# np.sum(evaluation_matrix, axis=1) > 0
# ]
plt.figure()
# plt.scatter(xyz_true[recalled_idx,0], xyz_true[recalled_idx,1], s=2, c="C3", alpha=0.8, label="SCSN")
# plt.scatter(xyz_pred[matched_idx, 0], xyz_pred[matched_idx, 1], s=2, c="C0", marker="x", alpha=0.8, label="End2End")
plt.plot(xyz_true[recalled_idx, 0], xyz_true[recalled_idx, 1], ".", color="C3", markersize=2, alpha=0.8)
plt.plot(xyz_pred[matched_idx, 0], xyz_pred[matched_idx, 1], ".", color="C0", markersize=2, alpha=0.8)
plt.plot(-100, -100, ".", color="C3", markersize=10, alpha=0.5, label="SCSN")
plt.plot(-100, -100, ".", color="C0", markersize=10, alpha=0.5, label="End2End")
if station_locs is not None:
plt.scatter(station_locs[:, 0], station_locs[:, 1], color="k", marker="^", label="Station")
plt.axis("scaled")
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
plt.xlabel("X (km)")
plt.ylabel("Y (km)")
plt.legend()
# plt.title("Earthquake locati")
# for i in range(len(recalled_idx)):
# plt.plot([xyz_true[recalled_idx[i],0], xyz_pred[matched_idx[i], 0]], [xyz_true[recalled_idx[i],1], xyz_pred[matched_idx[i], 1]], '--')
# plt.plot([10,40], [10, 40], 'r-')
plt.savefig(fig_name + ".png", bbox_inches="tight")
# plt.savefig(fig_name + ".pdf", bbox_inches="tight")
def plot_waveform(
t_plot, xyz_plot, t_pred, t_true, station_locs, waveform, time, fig_dir, num_plot=50, type="pred", vp=6.0
):
dt = 0.01
for i in tqdm(range(min(len(t_plot), num_plot))):
t = [int(t_plot[i]) - 10, int(t_plot[i]) + 35]
dist = np.linalg.norm(xyz_plot[i] - station_locs, axis=1)
plt.figure(figsize=(15, 6))
for j in range(waveform.shape[0]):
plt.plot(
time[max([int(t[0] / dt), 0]) : int(t[1] / dt)],
waveform[j, -1, max([int(t[0] / dt), 0]) : int(t[1] / dt)] * 3 + dist[j],
linewidth=0.5,
color="k",
)
plt.xlim(t)
ylim = plt.gca().get_ylim()
t_selected = t_true[(t[0] - 30 < t_true) & (t_true < t[1] + 30)]
for j in range(len(t_selected)):
if j == 0:
label = "Catalog"
else:
label = ""
(tmp,) = plt.plot([t_selected[j], t_selected[j]], ylim, "--", color="C1", linewidth=2, label=label)
if type == "true":
plt.plot(
time[max([int(t[0] / dt), 0]) : int(t[1] / dt)],
(time[max([int(t[0] / dt), 0]) : int(t[1] / dt)] - t_true[i]) * vp,
":",
color="C1",
)
t_selected = t_pred[(t[0] - 30 < t_pred) & (t_pred < t[1] + 30)]
for j in range(len(t_selected)):
if j == 0:
label = "End2End"
else:
label = ""
(tmp,) = plt.plot([t_selected[j], t_selected[j]], ylim, "-", color="C0", linewidth=2, label=label)
if type == "pred":
plt.plot(
time[max([int(t[0] / dt), 0]) : int(t[1] / dt)],
(time[max([int(t[0] / dt), 0]) : int(t[1] / dt)] - t_pred[i]) * vp,
":",
color="C0",
)
plt.ylim(ylim)
plt.legend(loc="lower right")
plt.ylabel("Distance (km)")
plt.xlabel("Time (s)")
plt.savefig(os.path.join(fig_dir, f"{i:04d}.png"))
plt.close()
def plot_true_positive(
t_pred,
t_true,
threshold,
xyz_pred,
date,
fig_dir,
data_dir=None,
waveform=None,
station_locs=None,
num_plot=50,
vp=6.0,
):
"""
delta_time = [[pred1-true1, pred2-true1, pred3-true1, ...]
[pred1-true2, pred2-true2, pred3-true2, ...]
[pred1-true3, pred2-true3, pred3-true3, ...]
...]
"""
dt = 0.01
## load staion and waveforms
if (waveform is None) and (data_dir is not None):
station_locs = torch.load(os.path.join(data_dir, 'stations.pt'))[1]
waveform = []
for hour in tqdm(range(24), desc="Hour"):
tmp = torch.load(os.path.join(data_dir, f"{date}/{hour:02d}.pt"))
tmp = log_transform(tmp.type(torch.DoubleTensor))
waveform.append(tmp)
waveform = np.concatenate(waveform, axis=2)
np.nan_to_num(waveform, copy=False)
time = np.arange(waveform.shape[-1]) * dt
## find true positive
diff_time = t_pred[np.newaxis, :] - t_true[:, np.newaxis]
evaluation_matrix = np.abs(diff_time) < threshold # s
tp_idx = np.sum(evaluation_matrix, axis=0) > 0
t_tp = t_pred[tp_idx]
xyz_tp = xyz_pred[tp_idx]
if not os.path.exists(fig_dir):
os.makedirs(fig_dir, exist_ok=True)
np.seterr("ignore")
## plot true positive
plot_waveform(t_tp, xyz_tp, t_tp, t_true, station_locs, waveform, time, fig_dir, type="pred")
def plot_false_positive(
t_pred,
t_true,
threshold,
xyz_pred,
date,
fig_dir,
data_dir=None,
waveform=None,
station_locs=None,
num_plot=50,
vp=6.0,
):
"""
delta_time = [[pred1-true1, pred2-true1, pred3-true1, ...]
[pred1-true2, pred2-true2, pred3-true2, ...]
[pred1-true3, pred2-true3, pred3-true3, ...]
...]
"""
dt = 0.01
## load staion and waveforms
if (waveform is None) and (data_dir is not None):
station_locs = torch.load(os.path.join(data_dir, 'stations.pt'))[1]
waveform = []
for hour in tqdm(range(24), desc="Hour"):
tmp = torch.load(os.path.join(data_dir, f"{date}/{hour:02d}.pt"))
tmp = log_transform(tmp.type(torch.DoubleTensor))
waveform.append(tmp)
waveform = np.concatenate(waveform, axis=2)
np.nan_to_num(waveform, copy=False)
time = np.arange(waveform.shape[-1]) * dt
## find false positive
diff_time = t_pred[np.newaxis, :] - t_true[:, np.newaxis]
evaluation_matrix = np.abs(diff_time) < threshold # s
fp_idx = np.sum(evaluation_matrix, axis=0) == 0
t_fp = t_pred[fp_idx]
xyz_fp = xyz_pred[fp_idx]
if not os.path.exists(fig_dir):
os.makedirs(fig_dir, exist_ok=True)
np.seterr("ignore")
## plot false positive
plot_waveform(t_fp, xyz_fp, t_fp, t_true, station_locs, waveform, time, fig_dir, type="pred")
def plot_false_negative(
t_pred,
t_true,
threshold,
xyz_true,
date,
fig_dir,
data_dir=None,
waveform=None,
station_locs=None,
num_plot=50,
vp=6.0,
):
"""
delta_time = [[pred1-true1, pred2-true1, pred3-true1, ...]
[pred1-true2, pred2-true2, pred3-true2, ...]
[pred1-true3, pred2-true3, pred3-true3, ...]
...]
"""
dt = 0.01
## load staion and waveforms
if (waveform is None) and (data_dir is not None):
station_locs = torch.load(os.path.join(data_dir, 'stations.pt'))[1]
waveform = []
for hour in tqdm(range(24), desc="Hour"):
tmp = torch.load(os.path.join(data_dir, f"{date}/{hour:02d}.pt"))
tmp = log_transform(tmp.type(torch.DoubleTensor))
waveform.append(tmp)
waveform = np.concatenate(waveform, axis=2)
np.nan_to_num(waveform, copy=False)
time = np.arange(waveform.shape[-1]) * dt
## find false negative
diff_time = t_pred[np.newaxis, :] - t_true[:, np.newaxis]
evaluation_matrix = np.abs(diff_time) < threshold # s
fn_idx = np.sum(evaluation_matrix, axis=1) == 0
t_fn = t_true[fn_idx]
xyz_fn = xyz_true[fn_idx]
if not os.path.exists(fig_dir):
os.makedirs(fig_dir, exist_ok=True)
np.seterr("ignore")
## plot false negative
plot_waveform(t_fn, xyz_fn, t_pred, t_fn, station_locs, waveform, time, fig_dir, type="true")
if __name__ == "__main__":
# catalog = load_scsn()
# print(catalog.iloc[0])
# xmax = 101
# ymax = 101
# start_datetime = datetime.fromisoformat("2019-07-05T00:00:00.000")
# end_datetime = datetime.fromisoformat("2019-07-07T00:00:00.000")
# t_scsn, xyz_scsn = filter_scsn(load_scsn(), start_datetime, end_datetime, 0, xmax, 0, ymax)
# pass
fire.Fire(load_GaMMA_catalog)
| 21,664 | 8,352 |
#!python -u
import sys
import pyglet
import pyglet.gl as gl
import pyglet.window.mouse as mouse
import pyglet.window.key as key
import pyglet.input
#import pyglet.media
import random
import math
#print pyglet.version
# What readStick() prints: 'buttons' or 'axes'; chosen in main() from argv.
show = ""
# Set up window
full = False  # flip to True for a fullscreen window
if full:
    window = pyglet.window.Window(fullscreen=True)
else:
    # Tiny window: the actual readout goes to the terminal, the window
    # only exists to receive keyboard events (Q to quit).
    windowW, windowH = 180, 70
    window = pyglet.window.Window(width=windowW, height=windowH)
    window.set_location(20,35)
# The opened joystick; assigned in main() and polled by update().
gJoyStick = None
def update(dt):
    """Clock callback scheduled in main(): poll the joystick once per tick.

    :param dt: seconds elapsed since the previous tick (unused).
    """
    # NOTE: reading a module global needs no ``global`` statement; the
    # original declaration (and the trailing ``pass``) were dead code.
    readStick(gJoyStick)
@window.event
def on_key_press(symbol, modifiers):
    """Window event handler: quit the application when Q is pressed.

    :param symbol: the pressed key (a ``pyglet.window.key`` constant).
    :param modifiers: bitmask of active modifier keys (unused).
    """
    # Removed dead ``global shot``: the name ``shot`` is never defined
    # or used anywhere in this module.
    if symbol == key.Q:
        pyglet.app.exit()
def readStick(js):
    """Poll every axis, hat and button of *js* and print one status line.

    The Python 2 ``print ..., "\\r",`` form ends the line with a carriage
    return and suppresses the newline, so successive calls overwrite the
    same terminal line -- a live readout instead of a scrolling log.
    :param js: an opened ``pyglet.input.Joystick``.
    """
    #print js
    #print js.device.name
    #print vars(js)
    #for k,v in vars(js).iteritems():
    #    print k, v
    # Main analog axes, rotation axes and the POV hat.
    xyz = " x: %f y: %f z: %f" % (js.x, js.y, js.z)
    rxyz = "rx: %f ry: %f rz: %f" % (js.rx, js.ry, js.rz)
    hxy = "hx: %f hy: %f" % (js.hat_x, js.hat_y)
    bs = ""
    for i, b in enumerate(js.buttons):
        bs += "b%d: %d " % (i, b)
    # ``show`` is selected in main() via the -b command line flag.
    if show == 'buttons':
        print bs, "\r",
    else:
        print xyz, rxyz, hxy, "\r",
    #print rxyz
    #print hxy
    for x in js.device.get_controls():
        #print x
        pass
def main():
    """Configure GL, open the first joystick and run the pyglet loop.

    Pass ``-b`` on the command line to show button states instead of the
    axis/hat readout.
    """
    global gJoyStick
    global show
    gl.glClearColor(0.0, 0.0, 0.0, 0.0)
    gl.glEnable( gl.GL_BLEND)
    gl.glBlendFunc( gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
    #pyglet.clock.set_fps_limit(60)
    # Poll the joystick ~30 times per second via update().
    pyglet.clock.schedule_interval(update, 1/30.)
    window.set_vsync(True)
    for x in pyglet.input.get_joysticks():
        #readStick(x)
        pass
    # NOTE(review): raises IndexError when no joystick is plugged in.
    gJoyStick = pyglet.input.get_joysticks()[0]
    gJoyStick.open()
    for x in gJoyStick.device.get_controls():
        #print x
        pass
    if len(sys.argv) > 1 and sys.argv[1] == '-b':
        show = 'buttons'
    else:
        show = 'axes'
    pyglet.app.run()
    # Move past the \r status line before the shell prompt returns.
    print ""
# Run the joystick readout when executed as a script.
if __name__ == '__main__':
    main()
| 2,013 | 839 |
from django.conf.urls import url
from .views import coupon_apply
# URL routes for the coupons app (namespaced by the including URLconf).
urlpatterns = [
    # ``url`` interprets the pattern as a regex; use a raw string so
    # backslash escape sequences are never consumed by Python first.
    url(r'^apply/$', coupon_apply, name='apply')
]
| 133 | 46 |
import os
from datetime import datetime
# Timestamp suffix frozen once at import time, e.g. "20210401-120000".
TIMESTAMP_SUFFIX = datetime.now().strftime("%Y%m%d-%H%M%S")
# Repository root: two directories above this file.
PROJECT_ROOT = os.path.dirname(os.path.dirname(__file__))
# URI scheme prefixes used to classify storage locations.
GCS_BASE_STR = "gs://"
HTTP_URL_BASE_STR = "http://"
HTTPS_URL_BASE_STR = "https://"
LOCAL_FILE_BASE_STR = "file://"
# String form of a null value in serialized settings.
NULL_STRING = "None"
DEFAULT_DATA_ROOT = "/data"
SYNTHETIC_SUBFOLDER = "synthetic"
# Default Unity Project ID where USim jobs were executed
DEFAULT_PROJECT_ID = "474ba200-4dcc-4976-818e-0efd28efed30"
USIM_API_ENDPOINT = "https://api.simulation.unity3d.com"
# Default Timing text for codetiming.Timer decorator
# NOTE(review): mixes the named field {name} with a positional {:0.4f};
# confirm the installed codetiming version formats with both.
TIMING_TEXT = "[{name}] elapsed time: {:0.4f} seconds."
# Click CLI context settings
CONTEXT_SETTINGS = {
    "help_option_names": ["-h", "--help"],
    "show_default": True,
    "ignore_unknown_options": True,
    "allow_extra_args": True,
}
DEFAULT_DATASET_VERSION = "latest"
| 869 | 348 |
# Copyright (c) 2018-2019, Eduardo Rodrigues and Henry Schreiner.
#
# Distributed under the 3-clause BSD license, see accompanying file LICENSE
# or https://github.com/scikit-hep/decaylanguage for details.
# Convenient access to the version number
from ._version import __version__
# Direct access to decay file parsing tools
from .dec import DecFileParser
# Direct access to decay chain visualization tools
from .decay import DecayChainViewer
# Direct access to decay chain representation classes
from .decay import DaughtersDict, DecayMode, DecayChain
| 559 | 166 |
# IMPORTANT: Change these file paths to be in the same repository as the web server running Peeps.
# You can find the Peeps web server repository here: https://github.com/danielemoro/peeps/tree/peeps_finder
# Lines appended by the web server (user input) and by us (output), respectively.
input_file = "D:/Google Drive/BSU/BSU 2018 Fall/CS401/website/peeps_finder_in.txt"
output_file = "D:/Google Drive/BSU/BSU 2018 Fall/CS401/website/peeps_finder_out.txt"
import collections
from collections import Counter
from peeps_finder import *
import re
import json
import time
from textblob import TextBlob
# important attributes: shown (and validated) first, in this order
import_attr = ['email', 'phone', 'occupation', 'position held', 'organization',
               'educated at', 'known for', 'knows', 'country', 'keyword']
# remove these attributes: never shown to the user
blacklist_attr = ['number', 'important date', 'important time', 'family name']
# These words specify that the user is done validating information.
# Type these instead of a number to skip the validation step
end_words = ['end', 'stop', 'done', 'exit']
def print_attr(name, values, attr_max_len=50):
    """Print one attribute group as a numbered pick list.

    :param name: attribute label; title-cased for display.
    :param values: list of (value, confidence-note) tuples.
    :param attr_max_len: column width / truncation length for values.
    """
    user_print(str(name).title())
    for i, v in enumerate(values):
        # Fixed-width columns: [index] value confidence.
        # NOTE(review): the .replace() arguments below may contain
        # non-breaking spaces lost in transit -- verify against the
        # original file before editing this line.
        user_print((('[{0:2}] {1:' + str(attr_max_len) + '} {2:10}').format(i + 1, v[0].strip()[:attr_max_len],
                                                                            v[1].strip())).replace("  ", "  "))
def clean_info(info):
    """Condense raw extraction results into (value, confidence-note) pairs.

    :param info: dict with keys 'email', 'phone', 'rel_extr',
        'named_entities', 'noun_phrases' and 'tfidf'.
    :return: ``defaultdict(list)`` mapping attribute name to a list of
        (value, confidence description) tuples.
    """
    cleaned = collections.defaultdict(list)

    def most_frequent(values, limit=5):
        # Highest count first; sorted() is stable, so ties keep order.
        return sorted(Counter(values).items(), key=lambda kv: kv[1], reverse=True)[:limit]

    # Emails and phone numbers: keep the five most frequently seen.
    for field in ('email', 'phone'):
        cleaned[field] = [
            (value, 'Medium confidence (seen {} times)'.format(count))
            for value, count in most_frequent(info[field])
        ]
    # Relation-extraction results are trusted outright.
    for rel in info['rel_extr']:
        cleaned[rel[0]].append((rel[1], 'High confidence'))
    # Named entities: confidence scales with how often they were seen.
    for ent in info['named_entities']:
        if ent[2] > 3:
            note = 'High confidence (seen {} times)'.format(ent[2])
        else:
            note = 'Medium confidence (seen {} times)'.format(ent[2])
        cleaned[ent[0]].append((ent[1], note))
    # Keywords: noun phrases and tf-idf terms merged and ranked by count.
    ranked = sorted(Counter(info['noun_phrases'] + info['tfidf']).items(),
                    key=lambda kv: kv[1], reverse=True)
    cleaned['keyword'] = [
        (term, 'Medium confidence (seen {} times)'.format(count) if count > 1
         else 'Low confidence (seen 1 times)')
        for term, count in ranked[:20]
    ]
    return cleaned
def print_all_info(info):
    """Print every collected attribute group, important ones first.

    Attributes listed in ``import_attr`` are shown at the top; any other
    non-blacklisted attribute follows in dict order.
    """
    for important in import_attr:
        if important in info:
            print_attr(important, info[important])
    hidden = import_attr + blacklist_attr
    for attr in info:
        if attr is not None and attr not in hidden:
            print_attr(attr, info[attr])
def user_print(string=''):
    """Echo *string* to the console and append it to the web output file.

    An empty string prints only a blank line; nothing is written to the
    output channel, so the web server is not woken up needlessly.
    """
    print(string + "\n")
    if not string:
        return
    with open(output_file, 'a') as f:
        f.write(string + "\n</br>")
def user_received_output():
    """Block until the web server has drained (emptied) the output file."""
    while True:
        with open(output_file, 'r') as f:
            drained = not f.readlines()
        if drained:
            return
        time.sleep(0.01)
def user_input(string=''):
    """Show *string* as a prompt, then poll the input file for a reply.

    The web server appends one line per user message; a reply is
    detected when the file grows beyond ``last_len`` lines.
    :return: the newest line of the input file, stripped.
    """
    global last_len
    user_print(string)
    while True:
        with open(input_file, 'r') as f:
            lines = f.readlines()
        latest = lines[-1].strip() if lines else ''
        if len(lines) > last_len:
            last_len = len(lines)
            return latest
        time.sleep(0.1)
def extract_nums(string_input, max_num):
    """Parse 1-based selection numbers out of free text.

    Numbers may be separated by spaces or commas. Values above
    ``max_num`` are discarded; the rest are converted to 0-based
    indices.
    :return: list of 0-based indices in the order they appear.
    """
    picks = []
    for token, _sep in re.findall(r"([\d]+(\s|\,|$)){1}", string_input):
        value = int(token.replace(',', ''))
        if value <= max_num:
            picks.append(value - 1)
    return picks
def user_search(peeps_finder, name=None, search_term=None, topn=20):
    """Ask for (or reuse) a person's name and run the Peeps search.

    Re-prompts recursively until the name is exactly two alphabetic
    words. ``search_term``, when given, refines the query display and
    the underlying search.
    :return: (cleaned info dict, name).
    """
    if name is None:
        name = user_input("Who would you like to search for? ").strip()
        two_words = re.match(r"([a-zA-Z]+(\s|$)){2}", name)
        if two_words is None or two_words.group() != name:
            user_print("I'm sorry, I didn't get that. Please enter a name consisting of two words separated by a space")
            return user_search(peeps_finder)
    query = name if search_term is None else search_term
    user_print("\nSearching for {} ... please wait ...".format(query))
    cleaned = clean_info(peeps_finder.retrieve_person_data(name, search=search_term, topn=topn))
    user_print("Found some information</br>")
    return cleaned, name
def user_validation(info):
    """Walk the user through each attribute so they can keep or discard values.

    Important attributes (``import_attr``) are asked about first, then
    everything else that is not blacklisted. Typing any ``end_words``
    entry skips the remaining questions.
    :param info: cleaned info dict from clean_info().
    :return: list of (attribute, comma-joined kept values) pairs.
    """
    user_print("Please validate the following information. Type 'done' when done.<hr>")
    attrs_to_ask = []
    for attr in import_attr:
        if attr in info:
            attrs_to_ask.append(attr)
    for attr in info.keys():
        if attr not in import_attr + blacklist_attr and attr is not None:
            attrs_to_ask.append(attr)
    keep = []
    for attr in attrs_to_ask:
        print_attr("<div class=\".h3c\">" + attr + "</div>", info[attr])
        num_input = user_input('\n</br>What number(s) would you like to keep? ')
        # An end word aborts the whole validation loop early.
        if num_input.lower().strip() in end_words: break
        nums = extract_nums(num_input, len(info[attr]))
        if len(nums) > 0:
            # Values truncated to 50 chars to keep the summary readable.
            combined_values = ", ".join([str(info[attr][n][0])[:50] for n in nums])
            user_print("\t{}: {}".format(attr, combined_values))
            keep.append((attr, combined_values))
        else:
            user_print('\tNot keeping any {} values'.format(attr))
    # Wait until the web server has displayed everything so far.
    user_received_output()
    user_print("Validation of collected information is complete!\n")
    user_print("I am recording the following data:<hr>")
    for i in keep:
        user_print("    {:25}: {:100}".format(i[0], i[1]))
    user_print()
    return keep
def user_get_feedback(name, keep):
    """Ask the user to rate the collected data and offer a refined search.

    When sentiment is negative (< 0.5 polarity) and the user agrees, a
    new search term is built from a kept value or free text.
    :return: (search_term or False, sentiment polarity, raw feedback).
    """
    feedback = user_input("</br>How do you rate the collected data (great, ok, bad, etc)? ")
    sentiment = TextBlob(feedback).sentiment.polarity
    if sentiment >= 0.5:
        return (False, sentiment, feedback)
    answer = user_input("Would you like to make a better search?").lower().strip()
    if answer not in ['yes', 'sure', 'ok', 'yep', 'y']:
        return (False, sentiment, feedback)
    user_print("Please select a new search term or provide your own")
    for idx, item in enumerate(keep):
        user_print("    [{:2}]: {} {}".format(idx + 1, name, item[1]))
    redo = user_input()
    nums = extract_nums(redo, len(keep))
    if len(nums) > 0:
        search_term = str(name) + ' ' + keep[nums[0]][1]
    else:
        search_term = str(redo)
    user_print("Redoing search with the phrase {}\n".format(search_term))
    return (search_term, sentiment, feedback)
def run_session(peeps_finder):
    """Drive one complete search -> validate -> feedback session.

    Loops while the user keeps refining the search. When satisfied, the
    kept data (prefixed with the person's name) is appended to the
    output file as JSON, and all feedback is logged to ``logfile.json``.
    """
    keep = None
    feedbacks = []
    keep_going = True
    search_term = None
    name = None
    while keep_going:
        info, name = user_search(peeps_finder, name=name, search_term=search_term)
        keep = user_validation(info)
        search_term, sentiment, feedback = user_get_feedback(name, keep)
        feedbacks.append((feedback, sentiment, str(keep), name))
        # user_get_feedback returns False when no refinement is wanted.
        if not search_term:
            keep_going = False
    keep.insert(0, ('name', name))
    with open(output_file, 'a') as f:
        json.dump(keep, f)
    print(keep)
    with open('logfile.json', 'a') as f:
        json.dump(feedbacks, f)
    # Wait for the web server to signal the end of the session.
    if user_input() == 'END':
        print("SESSION ENDED")
        return
if __name__ == "__main__":
    # Number of input-file lines already consumed (see user_input()).
    last_len = 0
    peeps_finder = PeepsFinder()
    # Run indefinitely, as long as the partner web server is running
    while True:
        # Clear communications channels
        last_len = 0
        with open(output_file, 'w') as f:
            f.write("")
        with open(input_file, 'w') as f:
            f.write("")
        print("WAITING FOR NEW SESSION")
        # The web server writes 'start' to the input file to begin.
        if user_input().strip().lower() == 'start':
            print("STARTING SESSION")
            run_session(peeps_finder)
        else:
            print("Error: unexpected input")
"""
Record data for later playback
Requirements:
ffmpeg must be installed.
On Linux you can install it with package manager
of your choice. For example with
apt-get: sudo apt-get install ffmpeg
yum: sudo yum install ffmpeg
On Windows, you must download and install it from https://www.ffmpeg.org and
then update your environment Path variable to contain the binary path. To do
this, press Windows Key, type Path and press Enter. Open Environment Settings,
edit the row named Path and add location of the ffmpeg bin folder to the list,
for example: "C:\Program Files\ffmpeg\bin". To check that it works, open
command prompt and type ffmpeg, you should see version information.
To view the depth video file, you must use ffplay, because normal video players
cannot play 16bit grayscale video.
Plug in the OAK-D and run:
python examples/vio_record.py
"""
import depthai
import spectacularAI
import signal
import sys
import argparse
import subprocess
import os
import json
import threading
# SDK configuration; defaults are adjusted below from the CLI arguments.
config = spectacularAI.depthai.Configuration()
p = argparse.ArgumentParser(__doc__)
p.add_argument("--output", help="Recording output folder", default="data")
p.add_argument("--no_rgb", help="Disable recording RGB video feed", action="store_true")
p.add_argument("--no_inputs", help="Disable recording JSONL and depth", action="store_true")
p.add_argument("--gray", help="Record (rectified) gray video data", action="store_true")
p.add_argument("--no_convert", help="Skip converting h265 video file", action="store_true")
p.add_argument('--no_preview', help='Do not show a live preview', action="store_true")
p.add_argument('--slam', help='Record SLAM map', action="store_true")
p.add_argument('--no_feature_tracker', help='Disable on-device feature tracking', action="store_true")
p.add_argument("--resolution", help="Gray input resolution (gray)",
               default=config.inputResolution,
               choices=['400p', '800p'])
args = p.parse_args()
pipeline = depthai.Pipeline()
config.inputResolution = args.resolution
if not args.no_inputs:
    config.recordingFolder = args.output
if args.slam:
    config.useSlam = True
    # Best-effort: the output folder may already exist.
    try: os.makedirs(args.output) # SLAM only
    except: pass
    config.mapSavePath = os.path.join(args.output, 'slam_map._')
if args.no_feature_tracker:
    config.useFeatureTracker = False
# Enable recording by setting recordingFolder option
vio_pipeline = spectacularAI.depthai.Pipeline(pipeline, config)
# Optionally also record other video streams not used by the Spectacular AI SDK, these
# can be used for example to render AR content or for debugging.
if not args.no_rgb:
    # RGB camera -> H.265 encoder -> host via XLink stream "h265-rgb".
    camRgb = pipeline.create(depthai.node.ColorCamera)
    videoEnc = pipeline.create(depthai.node.VideoEncoder)
    xout = pipeline.create(depthai.node.XLinkOut)
    xout.setStreamName("h265-rgb")
    camRgb.setBoardSocket(depthai.CameraBoardSocket.RGB)
    camRgb.setResolution(depthai.ColorCameraProperties.SensorResolution.THE_1080_P)
    # no need to set input resolution anymore (update your depthai package if this does not work)
    videoEnc.setDefaultProfilePreset(30, depthai.VideoEncoderProperties.Profile.H265_MAIN)
    camRgb.video.link(videoEnc.input)
    videoEnc.bitstream.link(xout.input)
if args.gray:
    def create_gray_encoder(node, name):
        # Wrap a rectified gray stream in an H.264 encoder and expose it
        # to the host as XLink stream "h264-<name>".
        videoEnc = pipeline.create(depthai.node.VideoEncoder)
        xout = pipeline.create(depthai.node.XLinkOut)
        xout.setStreamName("h264-" + name)
        videoEnc.setDefaultProfilePreset(30, depthai.VideoEncoderProperties.Profile.H264_MAIN)
        node.link(videoEnc.input)
        videoEnc.bitstream.link(xout.input)
    create_gray_encoder(vio_pipeline.stereo.rectifiedLeft, 'left')
    create_gray_encoder(vio_pipeline.stereo.rectifiedRight, 'right')
# Set by a signal/quit handler to stop the main loop.
should_quit = False
def main_loop(plotter=None):
    """Run the recording session until `should_quit` is set or the plotter closes.

    plotter: optional callable fed the latest VIO output (as a dict); when it
        returns a falsy value, recording stops. When None, the loop runs until
        the module-level `should_quit` flag becomes True.
    After the device session ends, closes the video files and (unless
    --no_convert) wraps the raw h265/h264 bitstreams into .mp4 via ffmpeg.
    """
    frame_number = 1
    with depthai.Device(pipeline) as device, \
            vio_pipeline.startSession(device) as vio_session:
        def open_gray_video(name):
            # Open the raw output file and its matching device queue for one gray stream.
            grayVideoFile = open(args.output + '/rectified_' + name + '.h264', 'wb')
            queue = device.getOutputQueue(name='h264-' + name, maxSize=10, blocking=False)
            return (queue, grayVideoFile)
        grayVideos = []
        if args.gray:
            grayVideos = [
                open_gray_video('left'),
                open_gray_video('right')
            ]
        if not args.no_rgb:
            videoFile = open(args.output + "/rgb_video.h265", "wb")
            rgbQueue = device.getOutputQueue(name="h265-rgb", maxSize=30, blocking=False)
        print("Recording!")
        print("")
        if plotter is not None:
            print("Close the visualization window to stop recording")
        while not should_quit:
            if not args.no_rgb:
                # Drain all pending RGB frames; tag each with a trigger so the
                # VIO session can correlate frames with poses.
                while rgbQueue.has():
                    frame = rgbQueue.get()
                    vio_session.addTrigger(frame.getTimestamp().total_seconds(), frame_number)
                    frame.getData().tofile(videoFile)
                    frame_number += 1
            for (grayQueue, grayVideoFile) in grayVideos:
                if grayQueue.has():
                    grayQueue.get().getData().tofile(grayVideoFile)
            # Blocks until the next VIO output is available.
            out = vio_session.waitForOutput()
            if plotter is not None:
                if not plotter(json.loads(out.asJson())): break
    # Session closed: finalize output files.
    videoFileNames = []
    if not args.no_rgb:
        videoFileNames.append(videoFile.name)
        videoFile.close()
    for (_, grayVideoFile) in grayVideos:
        videoFileNames.append(grayVideoFile.name)
        grayVideoFile.close()
    for fn in videoFileNames:
        if not args.no_convert:
            withoutExt = fn.rpartition('.')[0]
            ffmpegCommand = "ffmpeg -framerate 30 -y -i {} -avoid_negative_ts make_zero -c copy {}.mp4".format(fn, withoutExt)
            result = subprocess.run(ffmpegCommand, shell=True)
            if result.returncode == 0:
                # Conversion succeeded; drop the raw bitstream.
                os.remove(fn)
            else:
                # Conversion failed; tell the user how to do it manually.
                print('')
                print("Use ffmpeg to convert video into a viewable format:")
                print(" " + ffmpegCommand)
# Choose between headless mode (ENTER to stop) and a live matplotlib preview.
if args.no_preview:
    plotter = None
else:
    from vio_visu import make_plotter
    import matplotlib.pyplot as plt
    plotter, anim = make_plotter()
# Recording runs on a worker thread so the main thread can own the UI/stdin.
reader_thread = threading.Thread(target = lambda: main_loop(plotter))
reader_thread.start()
if plotter is None:
    input("---- Press ENTER to stop recording ----")
    # Signals main_loop's `while not should_quit` loop to exit.
    should_quit = True
else:
    # Blocks until the preview window is closed; the plotter callback then
    # returns falsy inside main_loop, ending the recording loop.
    plt.show()
reader_thread.join()
| 6,497 | 1,988 |
# Read three numbers and report which is the largest and which is the smallest.
n1 = int(input('Digite o primeiro valor: '))
n2 = int(input('Digite o segundo valor: '))
n3 = int(input('Digite o terceiro valor: '))
values = [n1, n2, n3]
print(f'O maior número digitado foi {max(values)}')
print(f'O menor número digitado foi {min(values)}')
import os
def fileTest():
    """Print this script's directory and echo ../FileTest/data.txt line by line.

    Raises OSError (e.g. FileNotFoundError) if the data file is missing.
    """
    dir_path = os.path.dirname(os.path.realpath(__file__))
    print(dir_path)
    data_path = os.path.join(dir_path, '../FileTest/data.txt')
    print(data_path)
    # `with` closes the file even if printing raises; the original opened the
    # file without ever closing it.
    with open(data_path, 'r') as file:
        for line in file:
            print(line)
# Run the demo only when executed directly, not on import.
if __name__ == '__main__':
    fileTest()
import json
import os.path
import time
# Third-party libraries
import zmq
# ZeroMQ REQ client: REQ sockets must strictly alternate send() and recv().
context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect("tcp://localhost:5555")
# Initial handshake round-trip.
socket.send(b"hello")
message = socket.recv()
print(message)
while True:
    # Build the next request payload (hard-coded RGB measurement sample).
    jsonStr = '{"measures":{"R":255.0, "G":125.0, "B":64}}'
    socket.send(jsonStr.encode('ascii'))
    # Do some 'work' between send and recv.
    time.sleep(1)
    message = socket.recv()
    print("Received request: %s" % message)
from source.couple import couple
import numpy as np
import requests
import json
import source.config as c
import googlemaps
from geopy.distance import geodesic
import itertools
# Course-code <-> index maps (H/V/N presumably starter/main/dessert — confirm).
gd = {"H": 1, "V": 0, "N": 2}
gd_inv = {1: "H", 0: "V", 2: "N"}

# CONVERT TIMES TO POSIX TIME
from datetime import timezone, datetime, timedelta

# Map each course key ("V"/"H"/"N") to the POSIX timestamp of its dinner time
# on the configured date, in the configured fixed-offset timezone.
dinner_time = {}
for t in range(len(c.TIMES)):
    h = int(c.TIMES[t][:2])   # "HH:MM" -> hour
    m = int(c.TIMES[t][3:5])  # "HH:MM" -> minute
    dt = datetime(int(c.YEAR), int(c.MONTH), int(c.DAY), h, m,
                  tzinfo=timezone(timedelta(hours=c.TIMEZONE)))
    # .timestamp() honors the tzinfo and is portable; the original
    # strftime("%s") is a platform-specific extension (absent on Windows)
    # that interprets the datetime in the machine's local timezone.
    dinner_time[gd_inv[t]] = int(dt.timestamp())
class group:
    """A dinner group: the hosting couple plus guests traveling to the host.

    Computes travel-time based losses used to optimize group assignments.
    """
    def __init__(self, couples, host):
        self.couples = couples  # couple objects in this group
        self.dist = np.zeros((3, 3))
        self.group_loss = 0
        self.gmaps_client = googlemaps.Client(key=c.API_KEY)
        self.host = host  # index into self.couples of the hosting couple

    def get_dist(self, A, gmaps=False):
        """Travel estimate from couple ``A`` to the host.

        With gmaps=True, queries the Google Maps Directions API and returns
        the reported duration; otherwise estimates minutes from the geodesic
        distance and a per-mode average speed.
        NOTE(review): the API branch returns the raw "duration" value
        (seconds) while the fallback returns minutes — confirm units.

        Raises ValueError for an unknown ``A.transp`` mode. (The original
        printed an error and then crashed with NameError on undefined `v`.)
        """
        if gmaps:
            if A.transp == "transit":
                # Transit routing needs the target arrival time.
                x = self.gmaps_client.directions(A.address, self.couples[self.host].address, mode=A.transp, arrival_time=dinner_time[self.couples[self.host].food])
            else:
                x = self.gmaps_client.directions(A.address, self.couples[self.host].address, mode=A.transp)
            d = x[0]["legs"][0]["distance"]["value"]
            d_min = x[0]["legs"][0]["duration"]["value"]
        else:
            d = geodesic(A.location.point, self.couples[self.host].location.point).km
            if A.transp == "bicycling":
                v = 15 / 60  # km/min
            elif A.transp == "driving":
                v = 50 / 60  # km/min
            elif A.transp == "transit":
                v = 5 / 60  # km/min
            else:
                raise ValueError("false transportation was chosen: %r" % (A.transp,))
            d_min = d / v
        return d_min

    def calc_group_loss(self, gmaps=False):
        """Root-sum-square of travel estimates from each couple's `pre` to the host."""
        dist = 0
        for A in self.couples:
            dist += np.square(self.get_dist(A.pre, gmaps))
        return np.sqrt(dist)

    def get_loss(self, gmaps=False):
        """Group loss; defined as zero when the host serves the 'V' course."""
        if self.couples[self.host].food == "V":
            self.group_loss = 0
        else:
            self.group_loss = self.calc_group_loss(gmaps)
        return self.group_loss
| 2,172 | 779 |
"""Functions to parse Olive XML data."""
import codecs
import copy
import re
from typing import List, Optional
from bs4 import BeautifulSoup
from impresso_commons.path.path_fs import canonical_path, IssueDir
from text_importer.importers.olive.helpers import (normalize_language,
normalize_line)
def parse_styles(text: str) -> List[dict]:
    """Turn Olive style file into a list of style dictionaries.

    Style IDs may be referred to within the ``s`` property of token elements
    as defined in the impresso JSON schema for newspaper pages (see
    `documentation <https://github.com/impresso/impresso-schemas/blob/master/docs/page.schema.md>`__).

    :param str text: textual content of file `styleGallery.txt`
    :return: A list of styles; each style has ID, font, font size, color (rgb).
    :rtype: List[dict]
    """
    pattern = re.compile(r'(\d{3})=(".*?"),(\d+\.?\d+),(\(.*?\))')
    parsed = []
    for raw_line in text.split("\r\n"):
        if not raw_line:
            continue
        style_id, font, size, color = pattern.match(raw_line).groups()
        rgb_values = [
            int(channel)
            for channel in color.replace("(", "").replace(")", "").split(",")
        ]
        parsed.append({
            "id": int(style_id),
            "f": font.replace('"', ""),
            "fs": float(size),
            "rgb": rgb_values,
        })
    return parsed
def olive_image_parser(text: bytes) -> Optional[dict]:
    """Parse the Olive XML file containing image metadata.

    :param bytes text: Content of the XML file to parse.
    :return: A dictionary of image metadata, or ``None`` when the
        ``xmd-entity`` root element is missing.
    :rtype: Optional[dict]
    """
    soup = BeautifulSoup(text, "lxml")
    root = soup.find("xmd-entity")
    # Explicit check instead of the original assert/except-AssertionError:
    # asserts are stripped under `python -O`, which would have turned a
    # missing root element into an AttributeError instead of returning None.
    if root is None:
        return None
    return {
        'id': root.get('id'),
        'coords': root.img.get('box').split(),
        'name': root.meta.get('name'),
        'resolution': root.meta.get('images_resolution'),
        'filepath': root.img.get('href')
    }
def olive_toc_parser(
    toc_path: str,
    issue_dir: IssueDir,
    encoding: str = "windows-1252"
) -> dict:
    """Parse the TOC.xml file (Olive format).

    :param str toc_path: Path to the ToC XML file.
    :param IssueDir issue_dir: Corresponding ``IssueDir`` object.
    :param str encoding: XML file encoding.
    :return: A dictionary keyed by page number; each value maps legacy content
        item IDs to their metadata (legacy_id, canonical id, type, seq).
    :rtype: dict
    """
    with codecs.open(toc_path, 'r', encoding) as f:
        text = f.read()
    toc_data = {}
    # Content items are numbered sequentially across the whole issue,
    # not per page; the counter feeds the canonical item name (i0001, ...).
    global_counter = 0
    for page in BeautifulSoup(text, 'lxml').find_all('page'):
        page_data = {}
        for n, entity in enumerate(page.find_all("entity")):
            global_counter += 1
            item_legacy_id = entity.get("id")
            item = {
                "legacy_id": item_legacy_id,
                "id": canonical_path(
                    issue_dir,
                    name=f"i{str(global_counter).zfill(4)}",
                    extension=""
                ),
                "type": entity.get("entity_type"),
                # 1-based position of the item within its page
                "seq": n + 1
            }
            # if it's a picture we want to get also the article into which
            # the image is embedded
            if item['type'].lower() == "picture":
                if entity.get("embedded_into") is not None:
                    item['embedded_into'] = entity.get("embedded_into")
            page_data[item_legacy_id] = item
        toc_data[int(page.get('page_no'))] = page_data
    # gather the IDs of all content items in the issue
    ids = [
        toc_data[page][item]["id"]
        for page in toc_data
        for item in toc_data[page]
    ]
    # check that these IDs are unique within the issue
    # (NOTE: assert is skipped under `python -O`)
    assert len(ids) == len(list(set(ids)))
    return toc_data
def olive_parser(text: str) -> dict:
    """Parse an Olive XML file (e.g. from Le Temps corpus).

    The main logic implemented here was derived from
    <https://github.com/dhlab-epfl/LeTemps-preprocessing/>. Each XML file
    corresponds to one article, as detected by Olive.

    :param text: content of the xml file to parse
    :type text: string
    :return: A dictionary with keys: ``meta``, ``r``, ``stats``, ``legacy``.
    :rtype: dict
    """
    soup = BeautifulSoup(text, "lxml")
    root = soup.find("xmd-entity")
    page_no = root['page_no']
    identifier = root['id']
    language = root['language']
    title = soup.meta['name']
    entity_type = root['entity_type']
    issue_date = soup.meta['issue_date']
    out = {
        "meta": {
            "language": None,
            "type": {}
        },
        "r": [],  # regions
        "stats": {},
        "legacy": {"continuation_from": None, "continuation_to": None},
    }
    out["meta"]["title"] = title
    out["meta"]["page_no"] = [int(page_no)]
    out["meta"]["language"] = normalize_language(language)
    out["meta"]["type"]["raw"] = entity_type
    out["meta"]["issue_date"] = issue_date
    # Templates deep-copied for each new region/paragraph/line/token:
    # c = coordinates, p = paragraphs, l = lines, t = tokens, tx = token text.
    new_region = {
        "c": [],
        "p": []
    }
    new_paragraph = {
        "l": []
    }
    new_line = {
        "c": [],
        "t": []
    }
    new_token = {
        "c": [],
        "tx": ""
    }
    for primitive in soup.find_all("primitive"):
        # store coordinate of text areas (boxes) by page
        # 1) page number, 2) coordinate list
        region = copy.deepcopy(new_region)
        region["c"] = [int(i) for i in primitive.get('box').split(" ")]
        para = None
        line = None
        line_counter = 0
        for tag in primitive.find_all(recursive=False):
            if tag.name == "l":
                # First line of the primitive: open a fresh paragraph/line.
                if para is None and line is None:
                    para = copy.deepcopy(new_paragraph)
                    line = copy.deepcopy(new_line)
                # Close the previous line before starting this one.
                if line_counter > 0 and line is not None:
                    line = normalize_line(line, out["meta"]["language"])
                    para["l"].append(line)
                # "S"/"SA" mark a paragraph split; flush the current paragraph.
                if tag.get("p") in ["S", "SA"] and line_counter > 0:
                    region["p"].append(para)
                    para = copy.deepcopy(new_paragraph)
                line = copy.deepcopy(new_line)
                line["c"] = [
                    int(i)
                    for i in tag.get('box').split(" ")
                ]
                line_counter += 1
            if tag.name in ["w", "q"]:
                # store coordinates of each token
                # 1) token, 2) page number, 3) coordinate list
                t = copy.deepcopy(new_token)
                t["c"] = [int(i) for i in tag.get('box').split(" ")]
                t["tx"] = tag.string
                t["s"] = int(tag.get('style_ref'))
                if tag.name == "q" and tag.get('qid') is not None:
                    # "q" tokens carry a normalized form referenced by qid.
                    qid = tag.get('qid')
                    normalized_form = soup.find('qw', qid=qid).text
                    t["nf"] = normalized_form
                    t["qid"] = qid
                # append the token to the line
                line["t"].append(t)
        # append orphan lines
        if line is not None:
            line = normalize_line(line, out["meta"]["language"])
            para["l"].append(line)
            region["p"].append(para)
        if para is not None:
            out["r"].append(region)
    out["legacy"]["id"] = identifier
    out["legacy"]["source"] = soup.link['source']
    """
    # I suspect this could be deleted
    out["legacy"]["word_count"] = int(soup.meta['wordcnt'])
    out["legacy"]["chars_count"] = int(soup.meta['total_chars_count'])
    suspicious_chars_count = int(soup.meta['suspicious_chars_count'])
    out["legacy"]["suspicious_chars_count"] = int(suspicious_chars_count)
    """
    out["legacy"]["first_id"] = soup.link['first_id']
    out["legacy"]["last_id"] = soup.link['last_id']
    out["legacy"]["next_id"] = soup.link['next_id']
    out["legacy"]["prev_id"] = soup.link['prev_id']
    if root.has_attr('continuation_from'):
        out["legacy"]["continuation_from"] = root['continuation_from']
    if root.has_attr('continuation_to'):
        out["legacy"]["continuation_to"] = root['continuation_to']
    return out
| 8,616 | 2,595 |
class Solution:
    def addBinary(self, a, b):
        """Add two binary-digit strings and return their sum as a binary string.

        :param a: non-empty string of '0'/'1' characters
        :param b: non-empty string of '0'/'1' characters
        :return: binary representation of a + b, without a '0b' prefix

        Replaces the original ~50-line manual ripple-carry implementation
        with the standard-library idiom: Python ints are arbitrary
        precision, so int(x, 2) / bin() handle any input length.
        """
        return bin(int(a, 2) + int(b, 2))[2:]
if __name__ == '__main__':
    # Quick manual check: 0b100 (4) + 0b110010 (50) = 54 -> '110110'
    solution = Solution()
    print(solution.addBinary("100", "110010"))
| 1,720 | 503 |
import pickle

# Source pickle: {file_id: {sentence_id: sentence}}; sentence ids containing
# "Q" are questions, those containing "A" are answers.
databox = "/nfs/projects/paqs/qadatasetAstudy"

# `with` ensures the pickle handle is closed (the original leaked it).
with open(databox + "/val.pkl", "rb") as pkl_file:
    source = pickle.load(pkl_file)

questions = databox + "/output/ques.val"
answers = databox + "/output/ans.val"

# `with` guarantees both output files are flushed and closed even on error;
# the original never closed the input and only closed outputs on success.
with open(questions, 'w') as fqes, open(answers, 'w') as fans:
    for fid, value in source.items():
        for sid, sentence in value.items():
            if "Q" in sid:
                fqes.write('{},{}, <s> {} </s>\n'.format(fid, sid, sentence))
            elif "A" in sid:
                fans.write('{},{}, <s> {} </s>\n'.format(fid, sid, sentence))
| 550 | 213 |
#!/usr/bin/python
# NOTE: Python 2 script (print statements, urllib2); run with a python2 interpreter.
import urllib2
import sys
import os
import os.path
import tarfile
#-------------------------------------------------------------------------------
# OS specific setup: pick the platform-matching externals archive name.
if sys.platform == 'win32':
    print "Windows detected..."
    dest = "dataprocessing_external_win32.tar.gz"
elif sys.platform == 'darwin':
    print "MacOS X detected..."
    dest = "dataprocessing_external_osx.tar.gz"
elif 'linux' in sys.platform:
    print "Linux detected..."
    dest = "dataprocessing_external_linux.tar.gz"
else:
    print "unsupported system: " + sys.platform
    dest = "dataprocessing_external.tar.gz"
#-------------------------------------------------------------------------------
# FUNCTION: DOWNLOAD FILE
#-------------------------------------------------------------------------------
def download(url, filename):
    # Download url to filename in one shot (whole payload is buffered in
    # memory -- fine here, but note the archive is 500+ MB).
    print "Fetching " + url
    webfile = urllib2.urlopen(url)
    diskfile = open(filename,"wb")
    diskfile.write(webfile.read())
    diskfile.close()
    webfile.close()
#-------------------------------------------------------------------------------
# MAIN
#-------------------------------------------------------------------------------
# Download external integration
url = "http://www.openwebglobe.org/downloads/" + dest
if (os.path.isfile(dest)):
    print "externals are already downloaded... delete manually if you want to reinstall"
else:
    print "Downloading externals, please wait... (500+ MB)"
    download(url, dest)
    # Extract into the parent directory of the current working directory.
    print "Extracting externals..."
    tar = tarfile.open(dest)
    tar.extractall("../")
    tar.close()
    print "Ok."
print "Done."
| 1,598 | 454 |
from django.urls import path
from . import views

# URL namespace for reversing, e.g. reverse('verifications:image_codes').
app_name = 'verifications'

urlpatterns = [
    # GET /image_codes/<uuid>/ -> handled by views.ImageCode
    # (presumably returns a captcha image keyed by the client-generated
    # uuid -- confirm in the view).
    path('image_codes/<uuid:image_code_id>/', views.ImageCode.as_view(), name='image_codes'),
]
import keras
from PIL import Image
from flask import Flask, request, jsonify
from flask_cors import CORS
from itsdangerous import Serializer
from concurrent.futures import ThreadPoolExecutor
from flask_apscheduler import APScheduler
import token_authorization
import AesCipher
import mysql
import functools
from yolo import YOLO
# Scheduled-task configuration class (consumed by flask_apscheduler via
# app.config.from_object).
class SchedulerConfig(object):
    # JOBS is the config key flask_apscheduler reads.
    JOBS = [
        {
            'id': 'automatic_seat',
            # task entry point (module-level automatic_seat in __main__)
            'func': '__main__:automatic_seat',
            # positional arguments for the task
            'args': None,
            # trigger type: cron schedule, fires daily at 01:00
            'trigger': 'cron',
            'hour': 1,
            'minute': 0
        }
    ]
# Scheduled task entry point: process automatic seat reservations.
def automatic_seat():
    """Daily cron job: apply pending seat appointments via the database.

    mysql.appointment_automatic() returns the strings 'True'/'False' or
    something else when there is nothing to do.
    """
    print("座位预约自动实现!")
    result = mysql.appointment_automatic()
    if result == 'True':
        app.logger.info("座位预约自动实现成功!")
    elif result == 'False':
        app.logger.error("数据库操作错误!")
    else:
        # Logger.warn is deprecated; warning() is the supported spelling.
        app.logger.warning("无需操作的数据!")
# Thread pool for fire-and-forget background work (e.g. real_seat).
executor = ThreadPoolExecutor(10)
app = Flask(__name__)
app.config.from_object(SchedulerConfig())
scheduler = APScheduler()  # instantiate APScheduler
scheduler.init_app(app)  # load the job list into the flask app
scheduler.start()  # start the scheduled jobs
CORS(app, supports_credentials=True)
# Seat detection (long-running task): run YOLO on the classroom snapshot.
def real_seat(classroom_id):
    """Detect occupied seats in the image for `classroom_id` using YOLO.

    Opens D:/SourceTree/yolov3/img/<classroom_id>.jpg; logs an error if the
    image cannot be opened, otherwise runs detection.
    """
    keras.backend.clear_session()
    yolo = YOLO()
    try:
        image = Image.open("D:/SourceTree/yolov3/img/" + str(classroom_id) + ".jpg")
    except Exception:
        # Was a bare `except:`, which would also swallow KeyboardInterrupt
        # and SystemExit; Exception keeps the intended best-effort behavior.
        app.logger.error("图片打开失败!")
    else:
        yolo.detect_image(image, classroom_id)
        app.logger.info("座位实时获取成功!")
# Decorator: require a valid signed token in the Authorization header.
def login_required(view_func):
    """Reject requests that lack a valid token with a 401-style JSON body."""
    @functools.wraps(view_func)
    def wrapper(*args, **kwargs):
        try:
            # Pull the token from the request headers.
            token = request.headers["Authorization"]
        except Exception:
            return jsonify(code=401, msg='缺少参数token')
        serializer = Serializer("classroom")
        try:
            serializer.loads(token)
        except Exception:
            return jsonify(code=401, msg="登录已过期")
        return view_func(*args, **kwargs)
    return wrapper
# Login
@app.route('/login', methods=['POST'])
def login():
    """Check credentials, then return user info plus a signed token."""
    # The frontend sends the literal string 'null' for missing fields --
    # TODO confirm this convention with the client code.
    if request.get_json().get('username') != 'null' and request.get_json().get('password') != 'null':
        username = request.get_json().get('username')
        pwd = request.get_json().get('password')
        # NOTE(review): result row looks like (id, username, encrypted_pwd,
        # role); also, an unknown username presumably makes result None and
        # result[2] would raise -- verify mysql.user_select.
        result = mysql.user_select(username)
        # AES-encrypt the submitted password and compare with the stored value.
        password = str(AesCipher.encryption(pwd), 'utf-8')
        if password != result[2]:
            error = '密码错误!'
            app.logger.error(error)
            return jsonify({"code": 403, "error": error}), 403
        else:
            info = "登陆成功!"
            app.logger.info(info)
            # Issue a signed token for subsequent authenticated requests.
            tk = token_authorization.create_token(username)
            data = {}
            user = {
                'id': result[0],
                'userName': username,
                'userRole': result[3]
            }
            data['userInfo'] = user
            data['token'] = tk
            return jsonify({"code": 200, "data": data, "info": info}), 200
    else:
        error = '请填写完整信息!'
        app.logger.error(error)
        return jsonify({"code": 403, "error": error}), 403
# Registration
@app.route('/register', methods=['POST'])
def register():
    """Create a new user account.

    mysql.user_insert returns 0 when the user already exists, an error
    message string on failure, and (presumably) the new row id on success.
    NOTE(review): the password is forwarded as-is -- confirm user_insert
    hashes it before storage.
    """
    if request.get_json().get('username') != 'null' and request.get_json().get('password') != 'null':
        username = request.get_json().get('username')
        password = request.get_json().get('password')
        return_id = mysql.user_insert(username, password)
        if return_id == 0:
            error = '已存在此用户'
            app.logger.error(error)
            return jsonify({"code": 403, "error": error}), 403
        elif isinstance(return_id, str):
            # BUG FIX: the original tested `return_id is str`, which is
            # always False, so error strings fell through to the success path.
            error = return_id
            app.logger.error(error)
            return jsonify({"code": 403, "error": error}), 403
        else:
            info = '注册成功!'
            app.logger.info(info)
            return jsonify({"code": 200, "info": info}), 200
    else:
        # BUG FIX: the original had no else branch here, so missing fields
        # made the view return None (a 500); mirror login()'s handling.
        error = '请填写完整信息!'
        app.logger.error(error)
        return jsonify({"code": 403, "error": error}), 403
# Add a classroom
@app.route('/classroom_insert', methods=['POST'])
def insert_classroom():
    """Insert a new classroom record (name, seat count, description)."""
    if request.get_json().get('classroomName') is not None and \
            request.get_json().get('seatNums') is not None and \
            request.get_json().get('classroomInfo') is not None:
        classroom_name = request.get_json().get('classroomName')
        seat_nums = request.get_json().get('seatNums')
        classroom_info = request.get_json().get('classroomInfo')
        result = mysql.classroom_insert(classroom_name, seat_nums, classroom_info)
        if result is None:
            error = '数据库操作错误!'
            # FIX: error paths were logged with logger.info; use error level.
            app.logger.error(error)
            return jsonify({"code": 403, "error": error})
        elif result == 0:
            error = '该教室已存在!'
            app.logger.error(error)
            return jsonify({"code": 403, "error": error})
        else:
            info = classroom_name + '教室添加成功!'
            app.logger.info(info)
            return jsonify({"code": 200, "info": info})
    else:
        error = '教室信息不得为空!'
        app.logger.error(error)
        return jsonify({"code": 403, "error": error})
# Delete a classroom
@app.route('/classroom_delete', methods=['POST'])
def delete_classroom():
    """Delete the classroom with the given id."""
    if request.get_json().get('id') != 'null':
        classroom_id = request.get_json().get('id')
        result = mysql.classroom_delete(classroom_id)
        if result == 'False':
            error = '数据库操作错误!'
            # FIX: error paths were logged with logger.info; use error level.
            app.logger.error(error)
            return jsonify({"code": 403, "error": error})
        else:
            info = '教室删除成功!'
            app.logger.info(info)
            return jsonify({"code": 200, "info": info})
    else:
        error = '教室id返回为空!'
        app.logger.error(error)
        return jsonify({"code": 403, "error": error})
# Update classroom information
@app.route('/classroom_update', methods=['POST'])
def update_classroom():
    """Update seat count and/or description of a classroom."""
    if request.get_json().get('seatNums') is not None or request.get_json().get('classroomInfo') is not None:
        seat_num = request.get_json().get('seatNums')
        classroom_info = request.get_json().get('classroomInfo')
        classroom_id = request.get_json().get('id')
        result = mysql.classroom_update(seat_num, classroom_info, classroom_id)
        if result == 'False':
            error = '数据库操作错误!'
            # FIX: error paths were logged with logger.info; use error level.
            app.logger.error(error)
            return jsonify({"code": 403, "error": error})
        else:
            info = '教室信息修改成功!'
            app.logger.info(info)
            return jsonify({"code": 200, "info": info})
    else:
        error = '返回参数不得全为空!'
        app.logger.error(error)
        return jsonify({"code": 403, "error": error})
# Get the classroom list
@app.route('/classroom_show', methods=['GET'])
def get_classroom_info():
    """Return all classrooms with seat counts.

    DB row layout (from usage below): (id, name, seat_num, free_seat_num,
    info). `placeFreeSeat` is always 0 here; the per-place count is filled
    by /classroom_special.
    """
    result = mysql.classroom_select()
    if result is None:
        app.logger.error("数据库操作异常!")
        return jsonify({"code": 403, "error": "数据库操作异常!"})
    elif len(result) == 0:
        # was `result.__len__() == 0`; len() is the idiomatic spelling
        app.logger.error("搜索数据为空!")
        return jsonify({"code": 403, "error": "搜索数据为空!"})
    else:
        data = {}
        classrooms = []
        for r in result:
            classroom = {
                'id': r[0],
                'classroomName': r[1],
                'seatNum': r[2],
                'freeSeatNum': r[3],
                'placeFreeSeat': 0,
                'classroomInfo': r[4]
            }
            classrooms.append(classroom)
        data['classrooms'] = classrooms
        app.logger.info("教室信息返回成功!")
        return jsonify({"code": 200, "data": data, "info": "教室信息返回成功!"})
# Get seat counts grouped by seat placement
@app.route('/seat_num_get', methods=['get'])
def seat_num_get():
    """Return the seat count for each placement type plus the overall total."""
    result1, result2, result3, result4 = mysql.count_seat_select()
    if result1 is None or result2 is None or result3 is None or result4 is None:
        app.logger.error("数据库操作异常!")
        return jsonify({"code": 403, "error": "数据库操作异常!"})
    # Placement labels are indexed 0/1/2 in the same order the DB
    # helper returns their counts.
    seat_nums = [
        {'seatPlaceNo': place_no, 'seatPlace': place_name, 'counts': row[0]}
        for place_no, (place_name, row) in enumerate(
            zip(('普通', '靠窗', '靠门'), (result1, result2, result3)))
    ]
    data = {'allSeatNum': result4[0], 'seatNums': seat_nums}
    app.logger.info("座位位置及数量返回成功!")
    return jsonify({"code": 200, "data": data, "info": "座位位置及数量返回成功!"})
# Get real-time classroom seat information
@app.route('/seat_real', methods=['POST'])
def get_real_seat_info():
    """Return the seat grid for one classroom.

    The grid is (rows x cols) from seat_max_select, initialized to 2
    (presumably "unknown/absent" -- confirm) and overwritten per seat with
    the recorded state from seat_real_select rows (.., row, col, state).
    """
    if request.get_json().get('classroomId') != 'null':
        classroom_id = request.get_json().get('classroomId')
        # Asynchronous detection (disabled). NOTE(review): if re-enabled, pass
        # the callable and args separately: executor.submit(real_seat, classroom_id)
        # executor.submit(real_seat(classroom_id))
        result_max = mysql.seat_max_select(classroom_id)
        result = mysql.seat_real_select(classroom_id)
        if result is None:
            app.logger.error("数据库操作异常!")
            return jsonify({"code": 403, "error": "数据库操作异常!"})
        elif len(result) == 0:
            # was `result.__len__() == 0`; len() is the idiomatic spelling
            app.logger.error("搜索数据为空!")
            return jsonify({"code": 403, "error": "搜索数据为空!"})
        else:
            data = {}
            # `_` instead of unused i/j loop variables
            seats = [[2 for _ in range(result_max[1])] for _ in range(result_max[0])]
            for r in result:
                # DB coordinates are 1-based; the grid is 0-based.
                seats[r[1]-1][r[2]-1] = r[3]
            data['seats'] = seats
            data['row'] = result_max[0]
            data['col'] = result_max[1]
            app.logger.info("座位信息返回成功!")
            return jsonify({"code": 200, "data": data, "info": "座位信息返回成功!"})
    else:
        error = "返回教室id为空!"
        app.logger.error(error)
        return jsonify({"code": 403, "error": error})
# Classroom page: search by special seat placement
@app.route('/classroom_special', methods=['POST'])
def get_special_classroom_info():
    """List classrooms that still have free seats of the requested placement.

    DB row layout (from usage below): (id, name, seat_num, free_seat_num,
    place_free_seat, info); rows with place_free_seat == 0 are filtered out.
    """
    if request.get_json().get('seatPlace') != 'null':
        seat_place = request.get_json().get('seatPlace')
        result = mysql.classroom_special_select(seat_place)
        if result is None:
            app.logger.error("数据库操作异常!")
            return jsonify({"code": 403, "error": "数据库操作异常!"})
        else:
            data = {}
            classrooms = []
            for r in result:
                if r[4] != 0:
                    classroom = {
                        'id': r[0],
                        'classroomName': r[1],
                        'seatNum': r[2],
                        'freeSeatNum': r[3],
                        'placeFreeSeat': r[4],
                        'classroomInfo': r[5]
                    }
                    classrooms.append(classroom)
            if len(classrooms) == 0:
                app.logger.info("所有教室已无此类型座位!")
                return jsonify({"code": 400, "info": "所有教室已无此类型座位!"})
            data['classrooms'] = classrooms
            app.logger.info("位置推荐返回成功!")
            return jsonify({"code": 200, "data": data, "info": "位置推荐返回成功!"})
    else:
        error = "特殊位置类型返回为空!"
        app.logger.error(error)
        return jsonify({"code": 403, "error": error})
# Get information for a single classroom
@app.route('/get_classInfo_by_id', methods=['POST'])
def get_class_info_by_id():
    """Return one classroom's record, looked up by its id."""
    if request.get_json().get('classroomId') == 'null':
        error = "返回教室id为空!"
        app.logger.error(error)
        return jsonify({"code": 403, "error": error})
    classroom_id = request.get_json().get('classroomId')
    row = mysql.get_class_info_by_id(classroom_id)
    if row is None:
        app.logger.error("数据库操作异常!")
        return jsonify({"code": 403, "error": "数据库操作异常!"})
    # Row layout: (id, name, seat_num, free_seat_num, info).
    data = {
        'classroom': {
            'id': row[0],
            'classroomName': row[1],
            'seatNum': row[2],
            'freeSeatNum': row[3],
            'classroomInfo': row[4]
        }
    }
    app.logger.info("教室信息返回成功!")
    return jsonify({"code": 200, "data": data, "info": "教室信息返回成功!"})
# Book a seat
@app.route('/seat_appointment', methods=['POST'])
def appointment_seat():
    """Create a seat appointment for a user.

    mysql.appointment returns None on DB error, "OUT" when the user already
    has 5 appointments, 'False' when the seat is taken for that date, and
    'Obsolete' for past dates.
    """
    # All fields required; the frontend sends the literal string 'null'
    # for missing values -- TODO confirm this convention.
    if request.get_json().get('classroomId') != 'null' and request.get_json().get('seatX') != 'null' and \
            request.get_json().get('seatY') != 'null' and request.get_json().get('startTime') != 'null' and \
            request.get_json().get('userNo') != 'null':
        classroom_id = request.get_json().get('classroomId')
        seat_x = request.get_json().get('seatX')
        seat_y = request.get_json().get('seatY')
        start_time = request.get_json().get('startTime')
        user_no = request.get_json().get('userNo')
        result = mysql.appointment(start_time, classroom_id, seat_x, seat_y, user_no)
        if result is None:
            app.logger.error("数据库操作异常!")
            return jsonify({"code": 403, "error": "数据库操作异常!"})
        elif result == "OUT":
            app.logger.error("预约已满5次!")
            return jsonify({"code": 403, "error": "预约已满5次!"})
        elif result == 'False':
            app.logger.error("该座位该日期已被预约,请更换日期!")
            return jsonify({"code": 403, "error": "该座位该日期已被预约,请更换日期!"})
        elif result == 'Obsolete':
            app.logger.error("预约日期不得小于当前日期!")
            return jsonify({"code": 403, "error": "预约日期不得小于当前日期!"})
        else:
            app.logger.info("预约成功!")
            return jsonify({"code": 200, "info": "预约成功!"})
    else:
        error = "返回数据为空!"
        app.logger.error(error)
        return jsonify({"code": 403, "error": error})
# Get the user's current seat appointments
@app.route('/currently_appointment', methods=['POST'])
def get_currently_appointment():
    """List the active appointments for one user.

    Row layout (from usage below): (seat_no, row_no, classroom_id,
    classroom_name, start_time) -- NOTE(review): r[1] is used as the row
    and r[0] as the seat; confirm against mysql.currently_appointment.
    """
    if request.get_json().get('userNo') != 'null':
        user_no = request.get_json().get('userNo')
        result = mysql.currently_appointment(user_no)
        if result is None:
            app.logger.error("数据库操作异常!")
            return jsonify({"code": 403, "error": "数据库操作异常!"})
        elif result == 'False':
            # Logger.warn is deprecated; warning() is the supported spelling.
            app.logger.warning("当前无预约记录!")
            return jsonify({"code": 300, "warn": "当前无预约记录!"})
        else:
            data = {}
            appointments = []
            for r in result:
                seat = "第 " + str(r[1]) + " 排 第 " + str(r[0]) + " 座"
                appointment = {
                    'seat': seat,
                    'classroomId': r[2],
                    'classroomName': r[3],
                    'startTime': r[4]
                }
                appointments.append(appointment)
            data['appointments'] = appointments
            app.logger.info("当前预约记录返回成功!")
            return jsonify({"code": 200, "data": data, "info": "当前预约记录返回成功!"})
    else:
        error = "返回数据为空!"
        app.logger.error(error)
        return jsonify({"code": 403, "error": error})
# Seat modification (bulk insert)
@app.route('/seat_insert', methods=['POST'])
def seat_insert():
    """Bulk-insert the seat layout for one classroom.

    Expects seatData: a list of seat dicts that all share the same
    classroomId (the first entry's id is used for the batch call).
    """
    if request.get_json().get('seatData') != 'null':
        seat_data = request.get_json().get('seatData')
        all_data = []
        classroom_id = seat_data[0].get('classroomId')
        for seat in seat_data:
            fk_classroom_id = seat.get('classroomId')
            seat_x = seat.get('seatX')
            seat_y = seat.get('seatY')
            seat_state = seat.get('seatState')
            seat_place = seat.get('seatPlace')
            data = (fk_classroom_id, seat_x, seat_y, seat_state, seat_place)
            all_data.append(data)
        # Batch-insert the seat rows.
        result = mysql.seat_insert_many(classroom_id, all_data)
        if result == 'True':
            # FIX: success was logged via the deprecated logger.warn;
            # it is an info-level event.
            app.logger.info("座位添加成功!")
            return jsonify({"code": 200, "info": "座位添加成功!"})
        elif result is None:
            app.logger.error("数据库操作异常!")
            return jsonify({"code": 403, "error": "数据库操作异常!"})
    else:
        app.logger.error("返回数据为空!")
        return jsonify({"code": 403, "error": "返回数据为空!"})
if __name__ == '__main__':
    # NOTE(review): debug=True is unsafe outside development -- confirm
    # before deploying.
    app.run(threaded=True, debug=True)
| 15,849 | 5,879 |
"""
golden_section_algorithm.py
Returns the reduced uncertainty interval containing the minimizer of the function
func - anonymous function
interval0 - initial uncertainty interval
N_iter - number of iterations
"""
import math
import numpy as np
def golden_section_algorithm_calc_N_iter(interval0, uncertainty_range_desired):
    """Number of golden-section iterations needed to shrink ``interval0``
    to at most ``uncertainty_range_desired`` (each step keeps ~0.618 of
    the interval).
    """
    shrink_factor = uncertainty_range_desired / (interval0[1] - interval0[0])
    return math.ceil(math.log(shrink_factor, 0.618))
def golden_section_algorithm(func, interval0, N_iter):
    """Shrink an uncertainty interval around the minimizer of ``func``.

    Performs ``N_iter`` golden-section steps, reusing one function
    evaluation per iteration.

    func      -- unimodal objective (called with a scalar)
    interval0 -- (left, right) initial uncertainty interval
    N_iter    -- number of iterations to perform
    Returns the reduced (left, right) interval as a tuple.
    """
    rho = (3 - np.sqrt(5)) / 2
    lo = interval0[0]
    hi = interval0[1]
    # 'reuse' records which interior point survives into the next step:
    # 'a' means the old a becomes the new c, 'c' means the reverse.
    reuse = 'a'
    a = lo + (1 - rho) * (hi - lo)
    f_a = func(a)
    for _ in range(N_iter):
        if reuse == 'a':
            c, f_c = a, f_a
            a = lo + rho * (hi - lo)
            f_a = func(a)
        else:
            a, f_a = c, f_c
            c = lo + (1 - rho) * (hi - lo)
            f_c = func(c)
        if f_a < f_c:
            hi = c
            reuse = 'a'
        else:
            lo = a
            reuse = 'c'
    return (lo, hi)
| 1,350 | 458 |
import ipaddress

# =============================================== DEFAULT CONFIGURATION ================================================
# Default port to bind the translator's unicast server socket to.
DEFAULT_UNICAST_SRV_PORT = 9001
# Default address space to pick multicast destination addresses (groups) from for the translated unicast streams.
# 232.0.0.0/8 is the source-specific multicast (SSM) IPv4 block.
DEFAULT_MULTICAST_ADDR_SPACE = ipaddress.IPv4Network('232.0.0.0/8')
# Default port to use when forwarding payload received on the translator's unicast server socket as multicast.
DEFAULT_MULTICAST_PORT = 9002
# URL to use when submitting stream information to the Multicast Menu
MULTICASTMENU_ADD_URL = 'https://multicastmenu.herokuapp.com/add/'
# Email address to use when submitting stream information to the Multicast Menu. Lenny has OK'ed using his email address
# until we have a group email.
MULTICASTMENU_EMAIL = 'lenny@juniper.net'
# Number of worker threads dedicated to submitting stream information to the Multicast Menu.
MULTICASTMENU_THREADS = 10
# ======================================================================================================================
| 1,132 | 310 |
import typing as t
import logging
from telegram import Update
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext
# Enable logging
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO
)
# One logger per module, named after the module (PEP 282 convention).
logger = logging.getLogger(__name__)
def start(update: Update, context: CallbackContext) -> None:
    """Handle /start: greet the user.

    NOTE(review): appears unused -- Bot registers its own handle_start
    method instead; confirm before removing.
    """
    update.message.reply_text('Hi!')
def help_command(update: Update, context: CallbackContext) -> None:
    """Handle /help: show usage hint.

    NOTE(review): appears unused -- Bot registers its own
    handle_help_command method instead; confirm before removing.
    """
    update.message.reply_text('Just write me something :)')
def echo(update: Update, context: CallbackContext) -> None:
    """Reply with the incoming text verbatim.

    NOTE(review): appears unused -- Bot registers handle_text instead;
    confirm before removing.
    """
    update.message.reply_text(update.message.text)
class Bot:
    """Thin wrapper around python-telegram-bot's Updater that wires up
    /start, /help, and a configurable plain-text handler."""
    updater: Updater

    def __init__(self, token: str, text_handler: t.Callable[[str], str] = lambda s: s):
        self.updater = Updater(token, use_context=True)
        self._text_handler = text_handler
        dp = self.updater.dispatcher
        dp.add_handler(CommandHandler("start", self.handle_start))
        dp.add_handler(CommandHandler("help", self.handle_help_command))
        # Any non-command text goes through the configured handler.
        dp.add_handler(MessageHandler(Filters.text & ~Filters.command, self.handle_text))

    def run(self):
        """Start long polling and block until the process is interrupted."""
        self.updater.start_polling()
        self.updater.idle()

    def handle_text(self, update: Update, context: CallbackContext):
        """Pass incoming text through the configured handler and reply."""
        update.message.reply_text(self._text_handler(update.message.text))

    def handle_start(self, update: Update, context: CallbackContext):
        """Handle /start with a greeting."""
        update.message.reply_text('Hi!')

    def handle_help_command(self, update: Update, context: CallbackContext):
        """Handle /help with a usage hint."""
        update.message.reply_text('Just write me something :)')
| 1,710 | 530 |
# ----------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License
# ----------------------------------------------------------------------
"""Contains the VectorTypeInfo object"""
import os
import re
import textwrap
import six
import CommonEnvironment
from CommonEnvironment import Interface
from CommonEnvironment import StringHelpers
from Plugins.SharedLibraryPluginImpl.TypeInfo import TypeInfo
# ----------------------------------------------------------------------
# Full path / directory / name of this script, per CommonEnvironment convention.
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
@Interface.staticderived
class VectorTypeInfo(TypeInfo):
    """Type information for ``vector<type>`` members.

    A vector crosses the shared-library boundary as a range of pointers,
    ``std::tuple<T const *, T const *>`` (begin, end). This class generates
    the C++ glue for passing vectors in, returning them through output
    pointers, and destroying returned buffers.
    """

    # ----------------------------------------------------------------------
    # |
    # |  Public Types
    # |
    # ----------------------------------------------------------------------
    TypeName = Interface.DerivedProperty(re.compile(r"vector\<(?P<type>.+)\>"))
    # Placeholder; the real value is computed per-instance in __init__.
    CppType = Interface.DerivedProperty(None)

    # ----------------------------------------------------------------------
    # |
    # |  Public Methods
    # |
    # ----------------------------------------------------------------------
    def __init__(
        self,
        *args,
        member_type=None,
        create_type_info_func=None,
        **kwargs
    ):
        """Resolve the element type and build the tuple-of-pointers CppType.

        Args:
            member_type: string such as ``vector<float>``; when None the
                instance is left uninitialized (``super().__init__`` is NOT
                called) — presumably the plugin framework constructs such
                placeholder instances for introspection; TODO confirm.
            create_type_info_func: factory mapping a type name string to a
                TypeInfo for the vector's element type.
        """
        if member_type is None:
            return

        assert create_type_info_func is not None

        super(VectorTypeInfo, self).__init__(*args, **kwargs)

        if self.IsOptional:
            raise NotImplementedError("Optional vectors are not supported at this time")

        match = self.TypeName.match(member_type)
        assert match, member_type

        the_type = match.group("type")

        type_info = create_type_info_func(the_type)
        assert type_info, the_type

        # The content is expressed by a range of pointers.
        self._type_info = type_info
        self.CppType = "std::tuple<{type} const *, {type} const *>".format(
            type=self._type_info.CppType,
        )

    # ----------------------------------------------------------------------
    @Interface.override
    def GetInputInfo(self, arg_name, invocation_template):
        """Generate parameter/validation/invocation code for a vector input.

        Delegates to the element type's buffer handling and packs the
        resulting (pointer, size) pair into a ``std::make_tuple`` expression
        fed to *invocation_template*.
        """
        # NOTE(review): relies on self._InvocationTemplate, presumably
        # provided by the TypeInfo base class — confirm.
        result = self._type_info.GetInputBufferInfo(arg_name, self._InvocationTemplate)
        assert result.InputBufferType is not None, self._type_info

        invocation_statements, invocation_tuple = self._ExtractDecoratedInvocationStatements(result.InvocationStatements)
        # The decorated invocation must yield exactly (pointer, size).
        assert len(invocation_tuple) == 2, invocation_tuple

        return self.Result(
            result.Parameters,
            result.ValidationStatements,
            "{}{}".format(
                "{}\n\n".format(invocation_statements.rstrip()) if invocation_statements else "",
                invocation_template.format(
                    "std::make_tuple({ptr}, {ptr} + {size})".format(
                        ptr=invocation_tuple[0],
                        size=invocation_tuple[1],
                    ),
                ),
            ),
        )

    # ----------------------------------------------------------------------
    @Interface.override
    def GetInputBufferInfo(
        self,
        arg_name,
        invocation_template,
        items_var_name=None,
    ):
        """Generate code for passing many vectors (a buffer of them) as input.

        Produces pointer parameters plus an item count, validation code that
        walks the incoming arrays to build a local buffer of (begin, end)
        tuples, and the invocation expression handing (data, size) to
        *invocation_template*.
        """
        # Don't reuse the items var (if it exists)
        items_var_name = "{}_items".format(arg_name)

        result = self._type_info.GetInputBufferInfo(
            "{}_item".format(arg_name),
            self._InvocationTemplate,
            items_var_name=items_var_name,
        )
        assert result.InputBufferType is not None, self._type_info

        # Each element-type parameter becomes a pointer parameter here.
        input_parameters = [self.Type("{} const *".format(p.Type), "{}_ptr".format(p.Name)) for p in result.Parameters]

        invocation_statements, invocation_tuple = self._ExtractDecoratedInvocationStatements(result.InvocationStatements)
        assert not invocation_statements, invocation_statements

        # If the input buffer type is a pointer, it means that we don't
        # have to transform the input prior to passing it on. If it is not
        # a pointer, transformation is required.
        if self._IsPointer(result.InputBufferType.Type):
            # No transformation is required
            buffer_type = self.Type(
                "std::vector<std::tuple<{type}, {type}>>".format(
                    type=result.InputBufferType.Type,
                ),
                "{}_buffer".format(arg_name),
            )

            buffer_assignment = "{name}_buffer.emplace_back({invocation_ptr}, {invocation_ptr} + {invocation_size});".format(
                name=arg_name,
                invocation_ptr=invocation_tuple[0],
                invocation_size=invocation_tuple[1],
            )

            validation_suffix = ""
        else:
            # Transformation is required
            buffer_type = self.Type(
                "std::vector<{}>".format(result.InputBufferType.Type),
                "{}_temp_buffer".format(arg_name),
            )

            buffer_assignment = "{buffer_name}.emplace_back(std::move({item}));".format(
                buffer_name=buffer_type.Name,
                item=result.InputBufferType.Name,
            )

            # We have a vector of the concrete types, but need to pass a vector of tuples
            # to the featurizer itself. Create a new vector that has that info.
            validation_suffix = textwrap.dedent(
                """\
                std::vector<std::tuple<{type}, {type}>> {name}_buffer;
                {name}_buffer.reserve({temp_buffer}.size());
                for(auto const & {temp_buffer}_item : {temp_buffer})
                    {name}_buffer.emplace_back({temp_buffer}_item.data(), {temp_buffer}_item.data() + {temp_buffer}_item.size());
                """,
            ).format(
                name=arg_name,
                type="typename {}::const_pointer".format(result.InputBufferType.Type),
                temp_buffer=buffer_type.Name,
            )

        # Validate incoming pointers, then loop item-by-item converting the
        # caller's arrays into the local buffer declared above.
        validation_statements = textwrap.dedent(
            """\
            {parameter_validation}
            if({items_var_name} == 0) throw std::invalid_argument("'{items_var_name}' is 0");
            {buffer_type} {buffer_name};
            {buffer_name}.reserve({items_var_name});
            while({buffer_name}.size() < {items_var_name}) {{
                {references}
                {validation_statements}
                {invocation_statements}
                {buffer_assignment}
                {increment_pointers}
            }}{validation_suffix}
            """,
        ).format(
            parameter_validation="\n".join(
                [
                    """if({name} == nullptr) throw std::invalid_argument("'{name}' is null");""".format(
                        name=p.Name,
                    )
                    for p in input_parameters
                ]
            ),
            name=arg_name,
            items_var_name=items_var_name,
            buffer_type=buffer_type.Type,
            buffer_name=buffer_type.Name,
            references=StringHelpers.LeftJustify(
                "\n".join(
                    [
                        "{type}{const_and_ref}{name}(*{name}_ptr);".format(
                            type=p.Type,
                            name=p.Name,
                            const_and_ref=" const &" if not self._IsPointer(p.Type) else "",
                        )
                        for p in result.Parameters
                    ]
                ),
                4,
            ),
            validation_statements=StringHelpers.LeftJustify(
                result.ValidationStatements.rstrip(),
                4,
            ),
            invocation_statements=StringHelpers.LeftJustify(
                invocation_statements.rstrip(),
                4,
            ),
            buffer_assignment=buffer_assignment,
            increment_pointers=StringHelpers.LeftJustify(
                "\n".join(["++{};".format(p.Name) for p in input_parameters]),
                4,
            ),
            validation_suffix="" if not validation_suffix else "\n\n{}".format(validation_suffix),
        )

        return self.Result(
            input_parameters + [self.Type("size_t", items_var_name)],
            validation_statements,
            invocation_template.format(
                "{name}_buffer.data(), {name}_buffer.size()".format(
                    name=arg_name,
                ),
            ),
            # NOTE(review): buffer_type here is a Type object rather than a
            # type string as passed elsewhere — looks intentional; confirm.
            input_buffer_type=self.Type(buffer_type, "{}_buffer".format(arg_name)),
        )

    # ----------------------------------------------------------------------
    @Interface.override
    def GetOutputInfo(
        self,
        arg_name,
        result_name="result",
        suppress_pointer=False,
    ):
        """Generate code for returning a vector through output pointers.

        Allocates one output array per element-type parameter plus an item
        count, then copies each vector element out in a loop.
        """
        result = self._type_info.GetOutputInfo(
            "{}_item".format(arg_name),
            result_name="{}_item".format(result_name),
        )

        input_parameters = [self.Type("{}*".format(p.Type), "{}_ptr".format(p.Name)) for p in result.Parameters]

        if len(result.Parameters) == 1 and result.Parameters[0].Type == "bool *":
            # We can't take a reference to bools within a vector, as the values are stored as bits rather than
            # bool types.
            for_loop = "for(bool {result_name}_item : {result_name})".format(
                result_name=result_name,
            )
        else:
            for_loop = "for(auto const & {result_name}_item : {result_name})".format(
                result_name=result_name,
            )

        return self.Result(
            input_parameters + [self.Type("size_t *", "{}_items".format(arg_name))],
            textwrap.dedent(
                """\
                {statements}
                if({name}_items == nullptr) throw std::invalid_argument("'{name}_items' is null");
                """,
            ).format(
                statements="\n".join(
                    [
                        """if({name} == nullptr) throw std::invalid_argument("'{name}' is null");""".format(
                            name=p.Name,
                        )
                        for p in input_parameters
                    ]
                ),
                name=arg_name,
            ),
            textwrap.dedent(
                """\
                if({result_name}.empty()) {{
                    {empty_allocations}
                }}
                else {{
                    // TODO: There are potential memory leaks if allocation fails
                    {allocations}
                    {initial_assignments}
                    {for_loop} {{
                        {validations}
                        {statements}
                        {ptr_increments}
                    }}
                }}
                *{name}_items = {result_name}.size();
                """,
            ).format(
                name=arg_name,
                result_name=result_name,
                empty_allocations=StringHelpers.LeftJustify(
                    "\n".join(
                        [
                            "*{}_ptr = nullptr;".format(p.Name)
                            for p in result.Parameters
                        ]
                    ),
                    4,
                ),
                allocations=StringHelpers.LeftJustify(
                    "\n".join(
                        [
                            "*{name}_ptr = new {type}[{result_name}.size()];".format(
                                name=p.Name,
                                type=self._StripPointer(p.Type),
                                result_name=result_name,
                            )
                            for p in result.Parameters
                        ]
                    ),
                    4,
                ),
                initial_assignments=StringHelpers.LeftJustify(
                    "\n".join(
                        [
                            "{type} {name}(*{name}_ptr);".format(
                                name=p.Name,
                                type=p.Type,
                            )
                            for p in result.Parameters
                        ]
                    ),
                    4,
                ),
                for_loop=for_loop,
                validations=StringHelpers.LeftJustify(result.ValidationStatements, 8).rstrip(),
                statements=StringHelpers.LeftJustify(result.InvocationStatements, 8).rstrip(),
                ptr_increments=StringHelpers.LeftJustify(
                    "\n".join(["++{};".format(p.Name) for p in result.Parameters]),
                    8,
                ),
            ),
        )

    # ----------------------------------------------------------------------
    @Interface.override
    def GetDestroyOutputInfo(
        self,
        arg_name="result",
    ):
        """Generate code that releases the arrays allocated by GetOutputInfo."""
        result = self.GetOutputInfo(
            arg_name,
        )

        # GetOutputInfo emits pointer-to-pointer params; strip one level so
        # the destroy function receives the pointers by value.
        input_parameters = [self.Type(self._StripPointer(p.Type), p.Name) for p in result.Parameters]
        assert input_parameters[-1].Type == "size_t", input_parameters[-1].Type
        assert input_parameters[-1].Name.endswith("_items"), input_parameters[-1].Name

        pointer_parameters = input_parameters[:-1]

        # Create the destroy statements
        destroy_result = self._type_info.GetDestroyOutputInfo("{}_destroy_item".format(arg_name))
        if destroy_result is not None:
            assert len(destroy_result.Parameters) == len(result.Parameters) - 1

            # Walk the arrays, invoking the element type's destroy logic on
            # every item before the arrays themselves are deleted below.
            destroy_statements = textwrap.dedent(
                """\
                {variable_statements}
                while({name}_items--) {{
                    {assignment_statements}
                    {delete_statements}
                    {increment_statements}
                }}
                """,
            ).format(
                name=arg_name,
                variable_statements="\n".join(
                    [
                        "{type} this_{name}({name});".format(
                            type=p.Type,
                            name=p.Name,
                        )
                        for p in pointer_parameters
                    ],
                ),
                assignment_statements=StringHelpers.LeftJustify(
                    "\n".join(
                        [
                            """{destroy_type} const & {destroy_name}(*this_{parameter_name});""".format(
                                destroy_type=destroy_p.Type,
                                destroy_name=destroy_p.Name,
                                parameter_name=standard_p.Name,
                            )
                            for destroy_p, standard_p in zip(destroy_result.Parameters, pointer_parameters)
                        ]
                    ),
                    4,
                ),
                delete_statements=StringHelpers.LeftJustify(
                    textwrap.dedent(
                        """\
                        {}
                        {}
                        """,
                    ).format(
                        destroy_result.ValidationStatements.rstrip() if destroy_result.ValidationStatements else "// No validation statements",
                        destroy_result.InvocationStatements.rstrip(),
                    ),
                    4,
                ),
                increment_statements=StringHelpers.LeftJustify(
                    "\n".join([ "++this_{};".format(p.Name) for p in pointer_parameters]),
                    4,
                ),
            )
        else:
            destroy_statements = "// No destroy statements"

        return self.Result(
            input_parameters,
            textwrap.dedent(
                """\
                if({initial_ptr_name} != nullptr && {name}_items == 0) throw std::invalid_argument("'{name}_items' is 0");
                if({initial_ptr_name} == nullptr && {name}_items != 0) throw std::invalid_argument("'{name}_items' is not 0");
                {ptr_validations}
                """,
            ).format(
                initial_ptr_name=input_parameters[0].Name,
                name=arg_name,
                ptr_validations="\n".join(
                    [
                        """if(bool({name}) != bool({initial_ptr_name})) throw std::invalid_argument("'{name}' is not internally consistent");""".format(
                            initial_ptr_name=input_parameters[0].Name,
                            name=p.Name,
                        )
                        for p in input_parameters[1:]
                    ]
                ),
            ),
            textwrap.dedent(
                """\
                if({initial_ptr_name} != nullptr) {{
                    {statements}
                    {delete_ptrs}
                }}
                """,
            ).format(
                initial_ptr_name=input_parameters[0].Name,
                statements=StringHelpers.LeftJustify(destroy_statements, 4).rstrip(),
                delete_ptrs=StringHelpers.LeftJustify(
                    "\n".join(
                        [
                            "delete [] {};".format(p.Name)
                            for p in pointer_parameters
                        ]
                    ),
                    4,
                ),
            ),
        )

    # ----------------------------------------------------------------------
    # ----------------------------------------------------------------------
    # ----------------------------------------------------------------------
    @staticmethod
    def _StripPointer(value):
        """Return *value* with one trailing ``*`` (and optional ``const``) removed."""
        value = value.strip()
        if value.endswith("const"):
            value = value[:-len("const")].rstrip()
        assert value.endswith("*"), value
        return value[:-1].rstrip()

    # ----------------------------------------------------------------------
    @staticmethod
    def _IsPointer(value):
        """True if *value* names a pointer type (ignoring trailing ``const``)."""
        value = value.strip()
        if value.endswith("const"):
            value = value[:-len("const")].rstrip()
        return value.endswith("*")
| 19,059 | 4,886 |
from .monte_carlo_prediction import *
from .monte_carlo_control import *
| 73 | 26 |
import os
import inspect
from copy import deepcopy
from tempfile import TemporaryDirectory
from functools import wraps
from ..server import run_threaded
class RedisServer:
    """Run a throwaway redis server in a background thread for tests.

    Instances work both as context managers (``with RedisServer() as proxy:``)
    and as decorators (``@RedisServer()``). ``__enter__`` returns a read-only
    proxy over the server's key space.
    """

    def __init__(self):
        # All three attributes are populated by __enter__ and cleared by
        # __exit__; a non-None thread means a server is currently running.
        self.stop_loop = None
        self.tempdir = None
        self.thread = None

    def __call__(self, func):
        """Decorator support: run *func* with a server active for the call."""
        @wraps(func)
        def wrapper(*args, **kwargs):
            with self:
                return func(*args, **kwargs)
        return wrapper

    def __enter__(self):
        """Start a server on a temp unix socket; return a key-space proxy."""
        assert self.thread is None  # guard against double entry
        self.tempdir = TemporaryDirectory()
        socket_file = os.path.join(self.tempdir.name, 'redis.sock')
        # run_threaded presumably returns (server, thread, stop callback);
        # the server object exposes its key space via ``.keys``.
        redis, self.thread, self.stop_loop = run_threaded(unix_domain_socket=socket_file)

        class RedisProxy:
            """Read-only view of the running server, plus an extension slot."""

            def __init__(self):
                class Ext:
                    pass
                # Namespace for arbitrary attributes attached via extend().
                self.ext = Ext()

            def extend(self, attr, value):
                # Attach extra data without clashing with proxy attributes.
                setattr(self.ext, attr, value)

            @property
            def dict(self):
                # Deep copy so callers cannot mutate live server state.
                return deepcopy(redis.keys)

            def __len__(self):
                return len(redis.keys)

            def __getitem__(self, key):
                # Keys are stored as bytes; accept str for convenience.
                if isinstance(key, str):
                    key = key.encode()
                return deepcopy(redis.keys[key])

            @property
            def sock(self):
                # Path of the unix socket the server listens on.
                return socket_file

        return RedisProxy()

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Stop the server, join its thread, and remove the temp directory."""
        self.stop_loop()
        self.thread.join()
        self.tempdir.cleanup()
        # Reset so the instance can be entered again later.
        self.stop_loop = None
        self.tempdir = None
        self.thread = None
def local_redis(func=None):
    """Run code against a throwaway local redis server.

    Usage:
      - ``@local_redis`` directly on a function: wraps it so a fresh server
        runs for the duration of each call.
      - ``local_redis()`` with no argument: returns a ``RedisServer`` that
        can itself be used as a context manager or decorator.

    Args:
        func: the function to wrap, or None.

    Raises:
        ValueError: if *func* is given but is not a plain function.
    """
    if func is None:
        return RedisServer()
    if inspect.isfunction(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            with RedisServer():
                return func(*args, **kwargs)
        return wrapper
    # Bug fix: the original raised a bare ValueError() with no message,
    # which gave callers no hint about what went wrong.
    raise ValueError('local_redis expects a function or no argument, got %r' % (func,))
| 1,935 | 543 |
import time
import requests
import urllib3
from lxml import etree
from requests.models import Response
from requests.packages.urllib3.exceptions import InsecureRequestWarning
# Silence TLS-verification warnings (requests are made with verify=False).
# NOTE(review): the three calls below are redundant — the first
# urllib3.disable_warnings() already covers the others; kept as-is.
urllib3.disable_warnings()
requests.packages.urllib3.disable_warnings()
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from fake_useragent import UserAgent
# Shared factory; ua.random yields a random browser User-Agent string.
ua = UserAgent()
class WebRequest(object):
    """Thin wrapper around ``requests`` with a random User-Agent, retries,
    and safe convenience accessors (tree/text/json) for the last response."""

    name = "web_request"

    def __init__(self, *args, **kwargs):
        # Holds the most recent response; starts as an empty Response so the
        # properties below are always safe to read before any request.
        self.response = Response()

    def req_header(self):
        """Build default request headers with a random User-Agent."""
        _header = {'User-Agent': ua.random,
                   'Accept': '*/*',
                   'Connection': 'keep-alive',
                   'Accept-Language': 'zh-CN,zh;q=0.8'}
        return _header

    def get(self, url, header=None, retry_time=3, retry_interval=5, timeout=10, *args, **kwargs):
        """
        get method
        :param url: target url
        :param header: headers
        :param retry_time: retry time
        :param retry_interval: retry interval
        :param timeout: network timeout
        :return: self (inspect ``.response`` / ``.text`` / ``.json``)
        """
        headers = self.req_header()
        if header and isinstance(header, dict):
            headers.update(header)
        while True:
            try:
                self.response = requests.get(
                    url
                    , headers=headers
                    , timeout=timeout
                    , verify=False
                    , *args
                    , **kwargs
                )
                return self
            except Exception as e:
                # self.log.error("requests: %s error: %s" % (url, str(e)))
                retry_time -= 1
                if retry_time <= 0:
                    # Bug fix: the original built a local ``resp`` (with a
                    # misleading status_code of 200) and never assigned it,
                    # leaving a stale response from a previous call exposed.
                    # Reset to an empty Response so tree/text/json return
                    # their failure defaults.
                    self.response = Response()
                    return self
                # self.log.info("retry %s second after" % retry_interval)
                time.sleep(retry_interval)

    @property
    def tree(self):
        """Parsed lxml tree of the last response, or '' on failure."""
        # Guard on content as well: etree.HTML rejects empty/None input.
        if self.response.status_code == 200 and self.response.content:
            return etree.HTML(self.response.content)
        else:
            return ""

    @property
    def text(self):
        """Body text of the last response, or '' on failure."""
        if self.response.status_code == 200:
            return self.response.text
        else:
            return ""

    @property
    def json(self):
        """Decoded JSON body, '' on non-200, {} when decoding fails."""
        try:
            if self.response.status_code == 200:
                return self.response.json()
            else:
                return ""
        except Exception as e:
            return {}
| 2,492 | 679 |
import os
import shutil
import click
from flask import current_app
from flask.cli import AppGroup
### CLI for management of additional resources
res_cli = AppGroup('resources', short_help='Manage additional resources used by SpellvarDetection.')
@res_cli.command('list')
def list_resources():
    "List existing resources."
    # One resource file name per line.
    names = os.listdir(current_app.config['RESOURCES_PATH'])
    click.echo('\n'.join(names))
@res_cli.command('add')
@click.argument('filename')
def add_resource(filename):
    "Add a resource."
    # Bug fix: this command (and the 'remove' one) were both defined with the
    # function name ``list_resources``, shadowing each other at module level.
    # The click command names were unaffected, so renaming is safe.
    if os.path.exists(os.path.join(current_app.config['RESOURCES_PATH'], os.path.basename(filename))):
        click.echo('File does already exist in resource folder.')
    else:
        try:
            # Copy the file into the resources folder.
            newname = shutil.copy(filename, current_app.config['RESOURCES_PATH'])
        except IOError as e:
            print(e)
        else:
            click.echo('Added ' + os.path.basename(newname) + ' to the resources.')
@res_cli.command('remove')
@click.argument('filename')
def remove_resource(filename):
    "Remove a resource."
    # Bug fix: renamed from ``list_resources`` (which collided with the
    # 'list' and 'add' commands' function names); also fixed the help-text
    # typo "a resources".
    try:
        os.remove(os.path.join(current_app.config['RESOURCES_PATH'], filename))
    except IOError as e:
        print(e)
    else:
        click.echo('Removed ' + filename + ' from the resources.')
def init_app(app):
    """Register the ``resources`` CLI command group on the Flask *app*."""
    app.cli.add_command(res_cli)
| 1,306 | 407 |
#!/usr/bin/env python
"""
summarizeRankings.py
dent earl, dearl (a) soe ucsc edu
27 April 2011
Python script to take a set of rankings files and aggregate their results.
"""
##############################
# Copyright (C) 2009-2011 by
# Dent Earl (dearl@soe.ucsc.edu, dent.earl@gmail.com)
# Benedict Paten (benedict@soe.ucsc.edu, benedict.paten@gmail.com)
# Mark Diekhans (markd@soe.ucsc.edu)
# ... and other members of the Reconstruction Team of David Haussler's
# lab (BME Dept. UCSC).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
##############################
from optparse import OptionParser
import os
import sys
import signal # deal with broken pipes
signal.signal( signal.SIGPIPE, signal.SIG_DFL ) # broken pipes
class Assembly:
    """Aggregated ranking record for a single assembly across all files."""
    def __init__(self):
        # Team identifier (first character of the assembly name).
        self.teamName = ''
        # Full assembly name.
        self.name = ''
        # Rank obtained in each input file, in file order.
        self.ranks = []
        # Raw value from each input file, in file order.
        self.values = []
class Tab:
    """One line of a ranking file: assembly name, its rank, and raw value."""
    def __init__(self):
        # Assembly name (first column of the file).
        self.name = ''
        # Competition rank within the file; 0 until assigned.
        self.rank = 0
        # Raw value (second column); -1.0 until assigned.
        self.value = -1.0
def initOptions(parser):
    """Attach this script's command-line options to *parser*."""
    parser.add_option('--overall', dest='overall', action='store_true', default=False,
                      help=('Instead of ranking based on within Team, '
                            'ranks instead are global. default=%default'))
    parser.add_option('--retainValues', dest='retainValues', action='store_true',
                      default=False,
                      help=('Stores the second column of the tab and outputs the value in '
                            'parenthesis following the ranking.'))
    parser.add_option('--mode', dest='mode', type='string', default='s',
                      help=('Mode can be "s" for a sum or "h" for harmonic mean.'))
def checkOptions(args, options, parser):
    """Validate positional args and normalize options; parser.error on failure."""
    if not args:
        parser.error('no files in positional arguments. Input a filename.')
    for fname in args:
        # Every positional argument must be an existing .tab file.
        if not os.path.exists(fname):
            parser.error('file "%s" does not exist.\n' % fname)
        if not fname.endswith('.tab'):
            parser.error('file "%s" does not end in ".tab".\n' % fname)
    options.mode = options.mode.lower()
    if options.mode not in ('s', 'h'):
        parser.error('Unrecognized --mode %s. Choose either "s" for '
                     'sum or "h" for harmonic.' % options.mode)
def readFiles( args, options ):
    """Read every ranking file and collect Tab entries per assembly.

    Each file holds whitespace-separated ``name value`` lines (lines starting
    with ``#`` are skipped). Ranks are assigned in file order using
    competition ranking: tied values share a rank and the next distinct
    value jumps past the tie group.

    Returns a dict mapping assembly name -> list of Tab (one per file).
    Side effect: records file basenames on ``options.fileNames``.
    """
    assemblies = {}
    options.fileNames = []
    for aFile in args:
        options.fileNames.append( os.path.basename( aFile ) )
        # Bug fix: the original opened the file without ever closing it;
        # ``with`` guarantees the handle is released.
        with open( aFile, 'r' ) as f:
            rank = 0
            count = 0
            # Sentinel that no value string ever equals (the original used
            # the Python-2-only ``-sys.maxint``, which behaves identically
            # here because values are strings).
            prevValue = None
            for line in f:
                line = line.strip()
                if line.startswith('#'):
                    continue
                a = Tab()
                d = line.split()
                a.name = d[0]
                a.value = d[1]
                if a.value == prevValue:
                    # Tie: share the current rank.
                    count += 1
                else:
                    count += 1
                    rank = count
                a.rank = rank
                prevValue = a.value
                if a.name not in assemblies:
                    assemblies[ a.name ] = []
                assemblies[ a.name ].append( a )
    return assemblies
def printHeader(options):
    """Write the tab-separated header row to stdout."""
    sys.stdout.write('#Assembly\tOverall')
    for fileName in options.fileNames:
        base = os.path.basename(fileName)
        # Column label is the file name minus a trailing '.tab'.
        name = base[:-4] if base.endswith('.tab') else base
        if options.retainValues:
            name += ' (value)'
        sys.stdout.write('\t%s' % name)
    sys.stdout.write('\n')
def reportRank( assemblies, options ):
printHeader( options )
if options.overall:
# ranking is between all assemblies
overallDict = {}
for t in assemblies:
overallDict[t] = 0
for a in t:
overallDict[t] += a.rank
ranked = sorted( overallDict, key=lambda x: overallDict[x] )
for r in ranked:
print '%s\t%d' % ( r, overallDict[r] )
for z in assemblies[r]:
if options.retainValue:
sys.stdout.write('\t%d (%s)' % (z.rank, z.value))
else:
sys.stdout.write('\t%d' % z.rank )
sys.stdout.write('\n')
else:
# ranking is within teams
teams = {}
for a in assemblies:
if a[0] not in teams:
teams[ a[0] ] = []
newAssemb = Assembly()
newAssemb.teamName = a[0]
newAssemb.name = a
for z in assemblies[ a ]:
newAssemb.ranks.append( z.rank )
newAssemb.values.append( z.value )
teams[ a[0] ].append( newAssemb )
# sort alphabetically
aemst = teams.keys()
aemst.sort()
for t in aemst:
print ''
if options.mode == 's':
ranked = sorted( teams[ t ], key=lambda x: sum( x.ranks ))
else:
ranked = sorted( teams[ t ], key=lambda x: harmonic( x.ranks ))
for r in ranked:
if options.mode == 's':
sys.stdout.write( '%s\t%d' % ( r.name, sum( r.ranks ) ))
else:
sys.stdout.write( '%s\t%.2f' % ( r.name, harmonic( r.ranks ) ))
for j in xrange( 0, len(r.ranks)):
if options.retainValues:
sys.stdout.write('\t%d (%s)' % (r.ranks[j], r.values[j] ))
else:
sys.stdout.write('\t%d' % r.ranks[j] )
sys.stdout.write('\n')
def harmonic(ranks):
    """Return the harmonic mean of the assembly's ranks."""
    reciprocal_sum = sum(1.0 / r for r in ranks)
    # Same operation order as a running-accumulator version:
    # invert the reciprocal sum, then scale by the count.
    h = 1.0 / reciprocal_sum
    return h * len(ranks)
def main():
    """Parse arguments, read the ranking files, and report the results."""
    usage = ( 'usage: %prog [options] rankingFile1.tab rankingFile2.tab rankingFile3.tab\n\n'
              '%prog takes in ranking files and reports the best overall or best within a Team.')
    parser = OptionParser(usage=usage)
    initOptions(parser)
    options, args = parser.parse_args()
    checkOptions(args, options, parser)
    reportRank(readFiles(args, options), options)
if __name__ == '__main__':
main()
| 6,919 | 2,239 |
# Generated by Django 2.2.13 on 2020-10-21 09:41
from django.db import migrations
import logging
trade_data = [
{
"Country code": "CO",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/uk-andean-countries-trade-agreement",
"Mendel agreement label": "ANDEAN-COUNTRIES",
"TWUK content template label": "ANDEAN-COUNTRIES",
},
{
"Country code": "EC",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/uk-andean-countries-trade-agreement",
"Mendel agreement label": "ANDEAN-COUNTRIES",
"TWUK content template label": "ANDEAN-COUNTRIES",
},
{
"Country code": "PE",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/uk-andean-countries-trade-agreement",
"Mendel agreement label": "ANDEAN-COUNTRIES",
"TWUK content template label": "ANDEAN-COUNTRIES",
},
{
"Country code": "AG",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/cariforum-uk-economic-partnership-agreement",
"Mendel agreement label": "CARIFORUM",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "BB",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/cariforum-uk-economic-partnership-agreement",
"Mendel agreement label": "CARIFORUM",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "BZ",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/cariforum-uk-economic-partnership-agreement",
"Mendel agreement label": "CARIFORUM",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "DM",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/cariforum-uk-economic-partnership-agreement",
"Mendel agreement label": "CARIFORUM",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "DO",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/cariforum-uk-economic-partnership-agreement",
"Mendel agreement label": "CARIFORUM",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "GD",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/cariforum-uk-economic-partnership-agreement",
"Mendel agreement label": "CARIFORUM",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "GY",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/cariforum-uk-economic-partnership-agreement",
"Mendel agreement label": "CARIFORUM",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "JM",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/cariforum-uk-economic-partnership-agreement",
"Mendel agreement label": "CARIFORUM",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "KN",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/cariforum-uk-economic-partnership-agreement",
"Mendel agreement label": "CARIFORUM",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "LC",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/cariforum-uk-economic-partnership-agreement",
"Mendel agreement label": "CARIFORUM",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "VC",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/cariforum-uk-economic-partnership-agreement",
"Mendel agreement label": "CARIFORUM",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "SR",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/cariforum-uk-economic-partnership-agreement",
"Mendel agreement label": "CARIFORUM",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "BS",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/cariforum-uk-economic-partnership-agreement",
"Mendel agreement label": "CARIFORUM",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "TT",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/cariforum-uk-economic-partnership-agreement",
"Mendel agreement label": "CARIFORUM",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "CR",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/uk-central-america-association-agreement",
"Mendel agreement label": "CENTRAL-AMERICA",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "SV",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/uk-central-america-association-agreement",
"Mendel agreement label": "CENTRAL-AMERICA",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "GT",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/uk-central-america-association-agreement",
"Mendel agreement label": "CENTRAL-AMERICA",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "HN",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/uk-central-america-association-agreement",
"Mendel agreement label": "CENTRAL-AMERICA",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "NI",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/uk-central-america-association-agreement",
"Mendel agreement label": "CENTRAL-AMERICA",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "PA",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/uk-central-america-association-agreement",
"Mendel agreement label": "CENTRAL-AMERICA",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "CL",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/uk-chile-association-agreement",
"Mendel agreement label": "CHILE",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "MG",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/esa-uk-economic-partnership-agreement-epa--2",
"Mendel agreement label": "ESA",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "MU",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/esa-uk-economic-partnership-agreement-epa--2",
"Mendel agreement label": "ESA",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "SC",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/esa-uk-economic-partnership-agreement-epa--2",
"Mendel agreement label": "ESA",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "ZW",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/esa-uk-economic-partnership-agreement-epa--2",
"Mendel agreement label": "ESA",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "CI",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-AGR-SIGNED-NO-LINK",
"TWUK content template label": "EU-AGR-SIGNED-NO-LINK",
},
{
"Country code": "UA",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-AGR-SIGNED-NO-LINK",
"TWUK content template label": "EU-AGR-SIGNED-NO-LINK",
},
{
"Country code": "AT",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-MEMBER",
"TWUK content template label": "EU-MEMBER",
},
{
"Country code": "BE",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-MEMBER",
"TWUK content template label": "EU-MEMBER",
},
{
"Country code": "BG",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-MEMBER",
"TWUK content template label": "EU-MEMBER",
},
{
"Country code": "HR",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-MEMBER",
"TWUK content template label": "EU-MEMBER",
},
{
"Country code": "CY",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-MEMBER",
"TWUK content template label": "EU-MEMBER",
},
{
"Country code": "CZ",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-MEMBER",
"TWUK content template label": "EU-MEMBER",
},
{
"Country code": "DK",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-MEMBER",
"TWUK content template label": "EU-MEMBER",
},
{
"Country code": "EE",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-MEMBER",
"TWUK content template label": "EU-MEMBER",
},
{
"Country code": "FI",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-MEMBER",
"TWUK content template label": "EU-MEMBER",
},
{
"Country code": "FR",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-MEMBER",
"TWUK content template label": "EU-MEMBER",
},
{
"Country code": "DE",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-MEMBER",
"TWUK content template label": "EU-MEMBER",
},
{
"Country code": "GR",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-MEMBER",
"TWUK content template label": "EU-MEMBER",
},
{
"Country code": "HU",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-MEMBER",
"TWUK content template label": "EU-MEMBER",
},
{
"Country code": "IE",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-MEMBER",
"TWUK content template label": "EU-MEMBER",
},
{
"Country code": "IT",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-MEMBER",
"TWUK content template label": "EU-MEMBER",
},
{
"Country code": "LV",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-MEMBER",
"TWUK content template label": "EU-MEMBER",
},
{
"Country code": "LT",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-MEMBER",
"TWUK content template label": "EU-MEMBER",
},
{
"Country code": "LU",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-MEMBER",
"TWUK content template label": "EU-MEMBER",
},
{
"Country code": "MT",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-MEMBER",
"TWUK content template label": "EU-MEMBER",
},
{
"Country code": "NL",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-MEMBER",
"TWUK content template label": "EU-MEMBER",
},
{
"Country code": "PL",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-MEMBER",
"TWUK content template label": "EU-MEMBER",
},
{
"Country code": "PT",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-MEMBER",
"TWUK content template label": "EU-MEMBER",
},
{
"Country code": "RO",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-MEMBER",
"TWUK content template label": "EU-MEMBER",
},
{
"Country code": "SK",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-MEMBER",
"TWUK content template label": "EU-MEMBER",
},
{
"Country code": "SI",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-MEMBER",
"TWUK content template label": "EU-MEMBER",
},
{
"Country code": "ES",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-MEMBER",
"TWUK content template label": "EU-MEMBER",
},
{
"Country code": "SE",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-MEMBER",
"TWUK content template label": "EU-MEMBER",
},
{
"Country code": "DZ",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-NOAGR-FOR-EXIT-NON-WTO",
"TWUK content template label": "EU-NOAGR-FOR-EXIT-NON-WTO",
},
{
"Country code": "AD",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-NOAGR-FOR-EXIT-NON-WTO",
"TWUK content template label": "EU-NOAGR-FOR-EXIT-NON-WTO",
},
{
"Country code": "BA",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-NOAGR-FOR-EXIT-NON-WTO",
"TWUK content template label": "EU-NOAGR-FOR-EXIT-NON-WTO",
},
{
"Country code": "SZ",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-NOAGR-FOR-EXIT-NON-WTO",
"TWUK content template label": "EU-NOAGR-FOR-EXIT-NON-WTO",
},
{
"Country code": "MK",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-NOAGR-FOR-EXIT-NON-WTO",
"TWUK content template label": "EU-NOAGR-FOR-EXIT-NON-WTO",
},
{
"Country code": "SM",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-NOAGR-FOR-EXIT-NON-WTO",
"TWUK content template label": "EU-NOAGR-FOR-EXIT-NON-WTO",
},
{
"Country code": "XS",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-NOAGR-FOR-EXIT-NON-WTO",
"TWUK content template label": "EU-NOAGR-FOR-EXIT-NON-WTO",
},
{
"Country code": "AL",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-NOAGR-FOR-EXIT-WTO",
"TWUK content template label": "EU-NOAGR-FOR-EXIT-WTO",
},
{
"Country code": "CM",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-NOAGR-FOR-EXIT-WTO",
"TWUK content template label": "EU-NOAGR-FOR-EXIT-WTO",
},
{
"Country code": "CA",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-NOAGR-FOR-EXIT-WTO",
"TWUK content template label": "EU-NOAGR-FOR-EXIT-WTO",
},
{
"Country code": "EG",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-NOAGR-FOR-EXIT-WTO",
"TWUK content template label": "EU-NOAGR-FOR-EXIT-WTO",
},
{
"Country code": "GH",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-NOAGR-FOR-EXIT-WTO",
"TWUK content template label": "EU-NOAGR-FOR-EXIT-WTO",
},
{
"Country code": "KE",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-NOAGR-FOR-EXIT-WTO",
"TWUK content template label": "EU-NOAGR-FOR-EXIT-WTO",
},
{
"Country code": "MX",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-NOAGR-FOR-EXIT-WTO",
"TWUK content template label": "EU-NOAGR-FOR-EXIT-WTO",
},
{
"Country code": "MD",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-NOAGR-FOR-EXIT-WTO",
"TWUK content template label": "EU-NOAGR-FOR-EXIT-WTO",
},
{
"Country code": "ME",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-NOAGR-FOR-EXIT-WTO",
"TWUK content template label": "EU-NOAGR-FOR-EXIT-WTO",
},
{
"Country code": "SG",
"GOVUK FTA URL": "",
"Mendel agreement label": "EU-NOAGR-FOR-EXIT-WTO",
"TWUK content template label": "EU-NOAGR-FOR-EXIT-WTO",
},
{
"Country code": "FO",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/uk-faroe-islands-free-trade-agreement-fta",
"Mendel agreement label": "FAROE-ISLANDS",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "GE",
"GOVUK FTA URL": "https://www.gov.uk/government/publications/ukgeorgia-strategic-partnership-and-cooperation-agreement-cs-georgia-no12019",
"Mendel agreement label": "GEORGIA",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "IS",
"GOVUK FTA URL": "",
"Mendel agreement label": "ICELAND-NORWAY",
"TWUK content template label": "ICELAND-NORWAY",
},
{
"Country code": "NO",
"GOVUK FTA URL": "",
"Mendel agreement label": "ICELAND-NORWAY",
"TWUK content template label": "ICELAND-NORWAY",
},
{
"Country code": "IL",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/uk-israel-trade-and-partnership-agreement",
"Mendel agreement label": "ISRAEL",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "JP",
"GOVUK FTA URL": "",
"Mendel agreement label": "JAPAN",
"TWUK content template label": "JAPAN",
},
{
"Country code": "JO",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/uk-jordan-association-agreement",
"Mendel agreement label": "JORDAN",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "XK",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/uk-kosovopartnership-trade-and-cooperationagreement",
"Mendel agreement label": "KOSOVO",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "LB",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/uk-lebanon-association-agreement",
"Mendel agreement label": "LEBANON",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "LI",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/uk-switzerland-liechtenstein-trade-agreement",
"Mendel agreement label": "LIECHTENSTEIN",
"TWUK content template label": "CH-LI",
},
{
"Country code": "MA",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/uk-morocco-association-agreement",
"Mendel agreement label": "MOROCCO",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "FJ",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/uk-pacific-economic-partnership-agreement",
"Mendel agreement label": "PACIFIC-STATES",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "PG",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/uk-pacific-economic-partnership-agreement",
"Mendel agreement label": "PACIFIC-STATES",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "PS",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/uk-palestinian-authority-political-trade-and-partnership-agreement",
"Mendel agreement label": "PALESTINIAN-AUTHORITY",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "BW",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/sacum-uk-economic-partnership-agreement-epa",
"Mendel agreement label": "SACUM",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "LS",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/sacum-uk-economic-partnership-agreement-epa",
"Mendel agreement label": "SACUM",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "MZ",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/sacum-uk-economic-partnership-agreement-epa",
"Mendel agreement label": "SACUM",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "NA",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/sacum-uk-economic-partnership-agreement-epa",
"Mendel agreement label": "SACUM",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "ZA",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/sacum-uk-economic-partnership-agreement-epa",
"Mendel agreement label": "SACUM",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "KR",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/uk-south-korea-trade-agreement",
"Mendel agreement label": "SOUTH-KOREA",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "CH",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/uk-switzerland-trade-agreement",
"Mendel agreement label": "SWITZERLAND",
"TWUK content template label": "CH-LI",
},
{
"Country code": "TN",
"GOVUK FTA URL": "https://www.gov.uk/government/collections/uk-tunisia-association-agreement",
"Mendel agreement label": "TUNISIA",
"TWUK content template label": "EU-AGR-SIGNED-LINK",
},
{
"Country code": "TR",
"GOVUK FTA URL": "",
"Mendel agreement label": "TURKEY",
"TWUK content template label": "TURKEY",
},
{
"Country code": "VN",
"GOVUK FTA URL": "",
"Mendel agreement label": "VN",
"TWUK content template label": "VN",
},
{
"Country code": "AF",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "AO",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "AR",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "AM",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "AU",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "BH",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "BD",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "BJ",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "BO",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "BR",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "BN",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "BF",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "BI",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "KH",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "CV",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "CF",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "TD",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "CN",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "CG",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "CD",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "CU",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "DJ",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "GA",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "GN",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "GW",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "HT",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "IN",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "KZ",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "KW",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "KG",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "LA",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "LR",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "MW",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "MY",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "MV",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "ML",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "MR",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "MN",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "MM",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "NP",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "NZ",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "NE",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "NG",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "OM",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "PK",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "PY",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "PH",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "QA",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "RU",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "RW",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "WS",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "SA",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "SN",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "SL",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "SB",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "LK",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "TJ",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "TZ",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "TH",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "GM",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "TG",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "UG",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "AE",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "US",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "UY",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "VU",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "VE",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "YE",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "ZM",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "HK",
"GOVUK FTA URL": "",
"Mendel agreement label": "WTO",
"TWUK content template label": "WTO",
},
{
"Country code": "EU",
"GOVUK FTA URL": "",
"Mendel agreement label": "",
"TWUK content template label": "EU-MEMBER",
},
]
def populate_trade_scenarios(apps, schema_editor):
    """Copy each country's trade scenario and content URL from ``trade_data``
    onto the matching Country row.

    Countries present in ``trade_data`` but missing from the database are
    logged and skipped rather than aborting the migration.
    """
    Country = apps.get_model("countries", "Country")
    for row in trade_data:
        code = row["Country code"]
        try:
            country = Country.objects.get(country_code=code)
        except Country.DoesNotExist:
            logging.error("Could not find country with country_code=%s", code)
            continue
        country.scenario = row["TWUK content template label"]
        country.content_url = row["GOVUK FTA URL"]
        country.save()
def unpopulate_trade_scenarios(apps, schema_editor):
    """Reverse migration: reset every country's scenario to the WTO default."""
    Country = apps.get_model("countries", "Country")
    every_country = Country.objects.all()
    every_country.update(scenario="WTO")
class Migration(migrations.Migration):
    """Data migration that assigns per-country trade scenarios."""

    dependencies = [
        ("countries", "0003_auto_20201021_1041"),
    ]

    operations = [
        migrations.RunPython(populate_trade_scenarios, unpopulate_trade_scenarios),
    ]
| 34,014 | 12,043 |
from __future__ import absolute_import
import unittest
import nose
import time
from standup_and_prosper_sdk import ApiClient, StandupsApi
class GetStandupThreadsTest(unittest.TestCase):
    """Manual integration check for fetching standup threads."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    # Disabled: rename this method to test_get_standup_threads (and fill
    # in real credentials below) to have the runner pick it up.
    def get_standup_threads(self):
        """Test case for getting standup threads

        Get the standup threads
        """
        access_key = "eyARB5k-..."
        team_id = ""
        standup_id = ""
        client = ApiClient(access_key)
        standups_api = StandupsApi(client)
        response = standups_api.get_standup_threads(team_id, standup_id)
        print(response.threads)
if __name__ == '__main__':
    # Run the test suite when this module is executed directly.
    unittest.main()
from apistar import App
from apistar.commands import create_tables
from pharmap.commands import sample_data
from pharmap.storage.models import * # noqa
from pharmap.routes import routes
from pharmap.settings import settings
# Management commands exposed by the application.
commands = [create_tables, sample_data]

# Wire routes, settings and commands into the apistar application object.
app = App(routes=routes, settings=settings, commands=commands)
| 343 | 102 |
import pytest
@pytest.fixture
def star_quality_metric(pipeline, analysis_step_run, bam_file):
    """Minimal schema-version-2 STAR quality metric payload."""
    metric = dict(
        status="finished",
        pipeline=pipeline['uuid'],
        step_run=analysis_step_run['uuid'],
        schema_version='2',
        quality_metric_of=[bam_file['uuid']],
    )
    return metric
def test_star_quality_metric_upgrade(registry, star_quality_metric,
                                     bam_file, lab, award):
    """Upgrading v2 -> v3 must attach the submitting lab and award."""
    from snovault import UPGRADER
    upgrader = registry[UPGRADER]
    upgraded = upgrader.upgrade(
        'star_quality_metric',
        star_quality_metric,
        registry=registry,
        current_version='2',
        target_version='3',
    )
    assert upgraded['lab'] == lab['@id']
    assert upgraded['award'] == award['@id']
| 771 | 237 |
from typing import List
import numpy as np
import cv2
import scipy.signal
def find_line(image: np.ndarray, window_size: int = 30) -> np.ndarray:
    """Extract the horizontal strip of ``image`` with the most ink.

    Scores every ``window_size``-row window by its total inverted pixel
    mass and returns the crop of the highest-scoring one.
    """
    ink = cv2.bitwise_not(image)
    row_mass = np.sum(ink, axis=1)
    n_rows = row_mass.shape[0]
    scores = [np.sum(row_mass[row:row + window_size]) for row in range(n_rows)]
    best = np.argmax(scores)
    begin = max(best, 0)
    finish = min(best + window_size, n_rows)
    return image[begin:finish]
def find_lines(image: np.ndarray) -> List[np.ndarray]:
    """Split a cell image into multiple text lines."""
    window_size = 30
    # Blur, invert and profile the image so text rows have high values.
    blurred = cv2.medianBlur(image, 5)
    ink = cv2.bitwise_not(blurred)
    profile = np.sum(ink, axis=1)
    # Add horizontal edge energy to emphasise rows containing strokes.
    edge_energy = np.sum(np.square(np.diff(ink, axis=1)), axis=1)
    profile = profile + edge_energy
    # Smooth the profile with a gaussian window, then find local maxima
    # via the sign change of the first difference.
    smoothing = scipy.signal.windows.gaussian(window_size, 5)
    smoothed = np.convolve(profile, smoothing, 'same')
    slope_signs = np.sign(np.diff(smoothed))
    curvature = np.diff(slope_signs)
    peaks = [idx for idx, change in enumerate(curvature) if change == -2]
    # Drop maxima too close to the top or bottom border.
    peaks = [peak for peak in peaks if peak > 8 and peak + 8 < image.shape[0]]
    # Crop a 30-row band (clamped to the image) around each peak.
    return [image[max(peak - 15, 0):min(peak + 15, image.shape[0])]
            for peak in peaks]
| 1,759 | 646 |
# Java package the generated classes will live in.
package = 'org.bonadza.openrtb'

# OpenRTB 2.x object model described as nested type-name maps.
# Leaf values are type tags: 'str', 'int', 'float', their array forms
# ('str[]', 'int[]'), or 'ext' for an opaque extension object.
# Fix: the 'device' object previously listed the 'osv' key twice; the
# duplicate (which silently overwrote the first with the same value)
# has been removed.
model = {
    'bidResponse': {
        'id': 'str',
        'seatbid': [{
            'bid': [{
                'id': 'str',
                'impid': 'str',
                'price': 'float',
                'adid': 'str',
                'nurl': 'str',
                'adm': 'str',
                'adomain': 'str[]',
                'bundle': 'str',
                'iurl': 'str',
                'cid': 'str',
                'crid': 'str',
                'cat': 'str[]',
                'attr': 'int[]',
                'dealid': 'str',
                'w': 'int',
                'h': 'int',
                'ext': 'ext'
            }],
            'seat': 'str',
            'group': 'int',
            'ext': 'ext'
        }],
        'bidid': 'str',
        'cur': 'str',
        'customdata': 'str',
        'nbr': 'int',
        'ext': 'ext'
    },
    'bidRequest': {
        'id': 'str',
        'imp': [{
            'id': 'str',
            'banner': {
                'w': 'int',
                'h': 'int',
                'wmax': 'int',
                'hmax': 'int',
                'wmin': 'int',
                'hmin': 'int',
                'id': 'str',
                'btype': 'int[]',
                'battr': 'int[]',
                'pos': 'int',
                'mimes': 'str[]',
                'topframe': 'int',
                'expdir': 'int[]',
                'api': 'int[]',
                'ext': 'ext',
            },
            'video': {
                'mimes': 'str[]',
                'minduration': 'int',
                'maxduration': 'int',
                'protocol': 'int',
                'protocols': 'int[]',
                'w': 'int',
                'h': 'int',
                'startdelay': 'int',
                'linearity': 'int',
                'sequence': 'int',
                'battr': 'int[]',
                'maxextended': 'int',
                'minbitrate': 'int',
                'maxbitrate': 'int',
                'boxingallowed': 'int',
                'playbackmethod': 'int[]',
                'delivery': 'int[]',
                'pos': 'int',
                # 'companionad' is not modelled yet
                'api': 'int[]',
                'ext': 'ext',
                'companiontype': 'int[]'
            },
            'native': {
                'request': 'str[]',
                'ver': 'str',
                'api': 'int[]',
                'battr': 'int[]',
                'ext': 'ext'
            },
            'displaymanager': 'str',
            'displaymanagerver': 'str',
            'instl': 'int',
            'tagid': 'str',
            'bidfloor': 'float',
            'bidfloorcur': 'str',
            'secure': 'int',
            'iframebuster': 'str[]',
            'pmp': {
                'private_auction': 'int',
                'deals': [{
                    'id': 'str',
                    'bidfloor': 'float',
                    'bidfloorcur': 'str',
                    'at': 'int',
                    'wseat': 'str[]',
                    'wadomain': 'str[]',
                    'ext': 'ext'
                }],
                'ext': 'ext'
            },
            'ext': 'ext'
        }],
        'site': {
            'id': 'str',
            'name': 'str',
            'domain': 'str',
            'cat': 'str[]',
            'sectioncat': 'str[]',
            'pagecat': 'str[]',
            'page': 'str',
            'ref': 'str',
            'search': 'str',
            'mobile': 'int',
            'privacypolicy': 'int',
            'publisher': {
                'id': 'str',
                'name': 'str',
                'cat': 'str[]',
                'domain': 'str',
                'ext': 'ext'
            },
            'content': {},
            'keywords': 'str',
            'ext': 'ext'
        },
        'app': {
            'id': 'str',
            'name': 'str',
            'bundle': 'str',
            'domain': 'str',
            'storeurl': 'str',
            'cat': 'str[]',
            'sectioncat': 'str[]',
            'pagecat': 'str[]',
            'ver': 'str',
            'privacypolicy': 'int',
            'paid': 'int',
            'publisher': {},
            'content': {},
            'keywords': 'str',
            'ext': 'ext',
        },
        'device': {
            'ua': 'str',
            'geo': {
                'lat': 'float',
                'lon': 'float',
                'type': 'int',
                'country': 'str',
                'region': 'str',
                'regionfips104': 'str',
                'metro': 'str',
                'city': 'str',
                'zip': 'str',
                'utcoffset': 'int',
                'ext': 'ext',
            },
            'dnt': 'int',
            'lmt': 'int',
            'ip': 'str',
            'ipv6': 'str',
            'devicetype': 'int',
            'make': 'str',
            'model': 'str',
            'os': 'str',
            'osv': 'str',
            'hwv': 'str',
            'w': 'int',
            'h': 'int',
            'js': 'int',
            'ppi': 'int',
            'pxratio': 'float',
            'flashver': 'str',
            'language': 'str',
            'carrier': 'str',
            'connectiontype': 'int',
            'ifa': 'str',
            'didsha1': 'str',
            'didmd5': 'str',
            'dpidsha1': 'str',
            'dpidmd5': 'str',
            'macsha1': 'str',
            'macmd5': 'str',
            'ext': 'ext',
        },
        'user': {
            'id': 'str',
            'buyeruid': 'str',
            'yob': 'int',
            'gender': 'str',
            'keywords': 'str',
            'customdata': 'str',
            'geo': {},
            'data': [{
                'id': 'str',
                'name': 'str',
                'segment': [{
                    'id': 'str',
                    'name': 'str',
                    'value': 'str',
                    'ext': 'ext'
                }],
                'ext': 'ext'
            }],
            'ext': 'ext',
        },
        'test': 'int',
        'at': 'int',
        'tmax': 'int',
        'wseat': 'str[]',
        'allimps': 'int',
        'cur': 'str[]',
        'bcat': 'str[]',
        'badv': 'str[]',
        'regs': {
            'coppa': 'int',
            'ext': 'ext'
        },
        'ext': 'ext'
    }
}
if __name__ == '__main__':
    # First stdin line: declared element count (consumed but otherwise unused).
    element_count = int(input())
    # Second stdin line: space-separated integers, collected into a tuple.
    elements = tuple(int(token) for token in input().split())
    print(hash(elements))
| 109 | 44 |
from StringIO import StringIO
# Sample tabular input: rows are newline-separated, fields are '$'-separated.
textinfile = '''Given$a$text$file$of$many$lines,$where$fields$within$a$line$
are$delineated$by$a$single$'dollar'$character,$write$a$program
that$aligns$each$column$of$fields$by$ensuring$that$words$in$each$
column$are$separated$by$at$least$one$space.
Further,$allow$for$each$word$in$a$column$to$be$either$left$
justified,$right$justified,$or$center$justified$within$its$column.'''
# Map each justification code to the str method implementing it.
j2justifier = dict(L=str.ljust, R=str.rjust, C=str.center)

def aligner(infile, justification='L'):
    """Justify columns of '$'-delimited tabular text.

    Args:
        infile: iterable of lines (file object, StringIO, list of str).
        justification: 'L', 'R', or 'C' (Left, Right, or Centered).

    Returns:
        The justified table as a single newline-joined string.
    """
    assert justification in j2justifier, "justification can be L, R, or C; (Left, Right, or Centered)."
    justifier = j2justifier[justification]
    fieldsbyrow = [line.strip().split('$') for line in infile]
    # pad every row to the same number of fields
    maxfields = max(len(row) for row in fieldsbyrow)
    fieldsbyrow = [fields + [''] * (maxfields - len(fields))
                   for fields in fieldsbyrow]
    # Transpose rows -> columns.  Materialize with list() because on
    # Python 3 zip() is a one-shot iterator and this sequence is consumed
    # twice below; the original code silently produced an empty result
    # under Python 3 (harmless no-op on Python 2, where zip is a list).
    fieldsbycolumn = list(zip(*fieldsbyrow))
    # column width = widest field in that column
    colwidths = [max(len(field) for field in column)
                 for column in fieldsbycolumn]
    # justify every field to its column width
    fieldsbycolumn = [[justifier(field, width) for field in column]
                      for width, column in zip(colwidths, fieldsbycolumn)]
    # transpose back to rows and render
    fieldsbyrow = zip(*fieldsbycolumn)
    return "\n".join(" ".join(row) for row in fieldsbyrow)
# Demo: print the sample table three times, once per justification mode.
# NOTE: Python 2 print statements -- this driver does not run under Python 3.
for align in 'Left Right Center'.split():
    infile = StringIO(textinfile)
    print "\n# %s Column-aligned output:" % align
    print aligner(infile, align[0])
#!/usr/bin/env python3
import argparse
import os
import re
import sys
def readSource(source):
    """Read a GLSL file, recursively inlining ``$name`` include references.

    A token of the form ``$name`` is replaced by the contents of
    ``name.glsl`` resolved relative to the including file's directory.

    Args:
        source: path of the .glsl file to expand.

    Returns:
        The file contents with every include expanded, as one string.
    """
    # 'with' closes the handle promptly (the original leaked it until GC)
    with open(source) as handle:
        data = handle.read()
    # splitting on a grouped pattern keeps the '$name' tokens in the list
    parts = re.split(r'(\$[-.\w]+)', data)
    for idx, chunk in enumerate(parts):
        if chunk.startswith('$') and len(chunk) > 1:
            parts[idx] = readSource(
                os.path.join(os.path.dirname(source), chunk[1:] + '.glsl'))
    return ''.join(parts)
if __name__ == '__main__':
    # CLI entry point: expand includes in one file and write to stdout.
    cli = argparse.ArgumentParser(
        description='Preprocess glsl files to handle includes in the same way '
        'as shader-loader. The output of this can sent to glslangValidator.')
    cli.add_argument('source', help='Source file')
    options = cli.parse_args()
    sys.stdout.write(readSource(options.source))
import os
import json
# Directory containing this module (and its JSON fixture files).
_location = os.path.realpath(
    os.path.join(os.getcwd(), os.path.dirname(__file__)))

# Base fixtures come from small.json; the larger 1000-record set is
# attached under the '1000' key.
with open(os.path.join(_location, 'small.json')) as handle:
    fixtures = json.load(handle)
with open(os.path.join(_location, '1000.json')) as handle:
    fixtures['1000'] = json.load(handle)
| 286 | 117 |
"""
Views.py
Author: Will Larson
Contact: lethain@gmail.com
Contains one custom view for displaying articles.
Mostly necessary to presort the articles in order
of descending size.
"""
import datetime, time, random, cgi, md5
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.http import Http404, HttpResponseRedirect
from django.conf import settings
from django.core.paginator import QuerySetPaginator
from lifeflow.models import Series, Flow, Entry, Comment
from lifeflow.forms import CommentForm
def server_error(request):
    """Render the site-wide 500 error page."""
    context = RequestContext(request, {})
    return render_to_response('500.html', {}, context)
def articles(request):
    """List every Series (article collection)."""
    series_list = Series.objects.all()
    context = RequestContext(request, {})
    return render_to_response('lifeflow/articles.html',
                              {'object_list': series_list},
                              context)
def comments(request, entry_id=None, parent_id=None):
    """Create or preview a comment on an Entry.

    Handles both the initial POST from entry_detail.html and the preview /
    confirm round-trip through comment.html.  Anti-spam measures: hidden
    honeypot fields must stay empty, the form must be submitted within an
    hour, and the time-stamped md5 identifier must match.
    """
    # NOTE: the ``time`` parameter shadows the module-level ``time`` import;
    # callers pass the integer part of time.time() as a string.
    def make_identifier(id, time):
        secret = getattr(settings, 'SECRET_KEY')
        # presumably trims trailing digits so one identifier stays valid
        # over a window of timestamps -- TODO confirm intent
        time = time[:-4]
        data = "%s%s%s%s" % ("lifeflow", id, time, secret)
        return md5.md5(data).hexdigest()
    # if an entry ID has been posted, use that
    if request.POST.has_key('entry_id'):
        id = int(request.POST['entry_id'])
    # otherwise use the parameter
    else:
        id = int(entry_id)
    # TODO: validate ID, throw 500 otherwise
    entry = Entry.objects.get(pk=id)
    # resolve the parent comment for threading: POST value wins over the
    # URL parameter; empty string means no parent
    if request.POST.has_key('parent_id') and request.POST['parent_id'] != u"":
        parent_id = int(request.POST['parent_id'])
        parent = Comment.objects.get(pk=parent_id)
    elif parent_id is None:
        parent = None
    else:
        parent_id = int(parent_id)
        parent = Comment.objects.get(pk=parent_id)
    # add an identifier to the post, part of the
    # anti-spam implementation
    if request.POST.has_key('identifier') is False:
        now = unicode(time.time()).split('.')[0]
        identifier = make_identifier(id, now)
    # or make a new identifier
    else:
        identifier = request.POST['identifier']
        now = request.POST['time']
    form = CommentForm(request.POST)
    # called for its side effect of populating form.cleaned_data; the
    # boolean result is deliberately ignored here
    form.is_valid()
    # Initial submission from entry_detail.html
    if request.POST.has_key('submit'):
        # honeypot fields honey5..honey7 must be empty, else treat as a bot
        for i in xrange(5,8):
            name = u"honey%s" % i
            value = request.POST[name]
            if value != u"":
                raise Http404
        # reject submissions older than one hour
        if time.time() - int(now) > 3600:
            raise Http404
        # reject submissions whose identifier was tampered with
        if identifier != make_identifier(id, now):
            raise Http404
        name = form.cleaned_data['name']
        email = form.cleaned_data['email']
        webpage = form.cleaned_data['webpage']
        rendered = form.cleaned_data['rendered']
        body = form.cleaned_data['body']
        c = Comment(entry=entry,parent=parent,name=name,email=email,
                    webpage=webpage,body=body,html=rendered)
        c.save()
        # redirect straight to the new comment's anchor on the entry page
        url = u"%s#comment_%s" % (entry.get_absolute_url(), c.pk)
        return HttpResponseRedirect(url)
    return render_to_response(
        'lifeflow/comment.html',
        {'object':entry,'parent':parent,'identifier':identifier,'time':now,'form':form},
        RequestContext(request, {}))
def flow(request, slug):
    """Display one Flow with a 5-entry page of its entries.

    The page number comes from the ``page`` GET parameter and falls back
    to 1 when missing or malformed.  Raises Http404 for an unknown slug.
    """
    try:
        flow = Flow.objects.get(slug=slug)
    except Flow.DoesNotExist:
        raise Http404
    try:
        page_number = int(request.GET["page"])
    # was a bare except, which also swallowed unrelated errors; only a
    # missing or non-numeric parameter should fall back to page 1
    except (KeyError, ValueError, TypeError):
        page_number = 1
    # reuse the flow fetched above instead of issuing a second query
    page = QuerySetPaginator(flow.entry_set.all(), 5).page(page_number)
    return render_to_response('lifeflow/flow_detail.html',
                              {'object' : flow, 'page' : page,},
                              RequestContext(request, {}))
def front(request):
    """Front page: a 3-entry page of the current entries.

    The page number comes from the ``page`` GET parameter and falls back
    to 1 when missing or malformed.
    """
    try:
        page_number = int(request.GET["page"])
    # was a bare except, which also swallowed unrelated errors; only a
    # missing or non-numeric parameter should fall back to page 1
    except (KeyError, ValueError, TypeError):
        page_number = 1
    page = QuerySetPaginator(Entry.current.all(), 3).page(page_number)
    return render_to_response('lifeflow/front.html', {'page':page}, RequestContext(request, {}))
def rss(request):
    """Render the RSS meta page listing all flows."""
    context = RequestContext(request, {})
    return render_to_response('lifeflow/meta_rss.html',
                              {'flows' : Flow.objects.all()},
                              context)
| 4,102 | 1,276 |
from PIL import ImageTk, Image
from tkinter import filedialog, messagebox
import tkinter as tk
import pandas as pd
import numpy as np
import platform
import math
import os
import traceback
class Menu:
    """Tkinter GUI for manually assigning phenotype labels to single cells.

    The user selects a cell-coordinate table (csv/xls/xlsx) and a phenotype
    list (txt). Cells are cropped out of their source images, displayed in
    pages with an option menu each, and the chosen labels are exported to a
    csv file.
    """

    def __init__(self, main):
        """Build the initial file-selection screen on the Tk root *main*."""
        self.main = main
        self.main.title("Single Cell Labelling Tool")
        self.main.geometry("1050x600")
        # Declare global variables
        self.os = platform.system()
        self.homepath = os.path.expanduser('~')
        self.global_coordfilename = tk.StringVar()
        self.global_ptypefilename = tk.StringVar()
        self.global_stats = tk.StringVar()
        self.global_labeledcellcnt = tk.IntVar()
        self.global_currentpage = tk.IntVar()
        self.global_displaycellcnt = tk.IntVar()
        self.global_cropsize = tk.IntVar()
        self.global_limitcell = tk.StringVar()
        self.global_limitmax = tk.StringVar()
        self.global_colcount = tk.IntVar()
        self.global_cid_input = tk.IntVar()
        self.global_coordext = ['csv', 'xls', 'xlsx']
        self.global_ptypeext = ['txt']
        # Initialization
        self.initialize()
        # Initial Frame - Widgets
        self.frame_initial = tk.Frame(self.main)
        self.label_coordfile = tk.Label(self.frame_initial, text="Cell data file", width=13, anchor="w")
        self.label_ptypefile = tk.Label(self.frame_initial, text="Phenotype list", width=13, anchor="w")
        self.label_uploadedcoord = tk.Label(self.frame_initial, textvariable=self.global_coordfilename,
                                            anchor="w", wraplength=600)
        self.label_uploadedptype = tk.Label(self.frame_initial, textvariable=self.global_ptypefilename,
                                            anchor="w", wraplength=600)
        self.label_limitcell = tk.Label(self.frame_initial, text="Index minimum", width=13, anchor="w")
        self.entry_limitcell = tk.Entry(self.frame_initial, textvariable=self.global_limitcell, width=12)
        self.label_defaultlimitcell = tk.Label(self.frame_initial, text="Index of the first cell to be processed."
                                                                        "This is optional. "
                                                                        "By default, minimum is set to 1.")
        self.label_limitmax = tk.Label(self.frame_initial, text="Index maximum", width=13, anchor="w")
        self.entry_limitmax = tk.Entry(self.frame_initial, textvariable=self.global_limitmax, width=12)
        self.label_defaultlimitmax = tk.Label(self.frame_initial, text="Index of the last cell to be processed. This "
                                                                       "is optional. By default, maximum is set to "
                                                                       "total number of cells on the file.")
        self.label_displaycell = tk.Label(self.frame_initial, text="Display limit", width=13, anchor="w")
        self.entry_displaycell = tk.Entry(self.frame_initial, textvariable=self.global_displaycellcnt, width=12)
        self.label_defaultdisplaycell = tk.Label(self.frame_initial, text="Number of cells to be displayed on a "
                                                                          "single page. The default is 20.")
        self.label_cropsize = tk.Label(self.frame_initial, text="Crop size", width=13, anchor="w")
        self.entry_cropsize = tk.Entry(self.frame_initial, textvariable=self.global_cropsize, width=12)
        self.label_defaultcropsize = tk.Label(self.frame_initial, text="Pixel size to be used in cropping cells "
                                                                       "from the image. The default is 50.")
        self.checkbox_cid_input =tk.Checkbutton(self.frame_initial, text="Cell ID", variable=self.global_cid_input,
                                                onvalue=1, offvalue=0, width=13, anchor="w")
        self.label_cid_input = tk.Label(self.frame_initial, text="Check this box if 'Cell ID' information is included "
                                                                 "in the input file")
        self.button_coordfile = tk.Button(self.frame_initial, text="Choose file", anchor="w", command=self.coordfile)
        self.button_ptypefile = tk.Button(self.frame_initial, text="Choose file", anchor="w", command=self.ptypefile)
        self.button_start = tk.Button(self.frame_initial, text="START", state="disabled", command=self.start)
        # Initial Frame - Layout
        self.frame_initial.pack(fill='both', expand=True)
        self.label_coordfile.grid(row=0, column=0, padx=5, pady=5)
        self.button_coordfile.grid(row=0, column=1, padx=5, pady=5, sticky="w")
        self.label_uploadedcoord.grid(row=0, column=2, padx=5, pady=5, sticky="w")
        self.label_ptypefile.grid(row=1, column=0, padx=5, pady=5)
        self.button_ptypefile.grid(row=1, column=1, padx=5, pady=5, sticky="w")
        self.label_uploadedptype.grid(row=1, column=2, padx=5, pady=5, sticky="w")
        self.label_limitcell.grid(row=2, column=0, padx=5, pady=5)
        self.entry_limitcell.grid(row=2, column=1, padx=5, pady=5)
        self.label_defaultlimitcell.grid(row=2, column=2, padx=5, pady=5, sticky="w")
        self.label_limitmax.grid(row=3, column=0, padx=5, pady=5)
        self.entry_limitmax.grid(row=3, column=1, padx=5, pady=5)
        self.label_defaultlimitmax.grid(row=3, column=2, padx=5, pady=5, sticky="w")
        self.label_displaycell.grid(row=4, column=0, padx=5, pady=5)
        self.entry_displaycell.grid(row=4, column=1, padx=5, pady=5)
        self.label_defaultdisplaycell.grid(row=4, column=2, padx=5, pady=5, sticky="w")
        self.label_cropsize.grid(row=5, column=0, padx=5, pady=5)
        self.entry_cropsize.grid(row=5, column=1, padx=5, pady=5)
        self.label_defaultcropsize.grid(row=5, column=2, padx=5, pady=5, sticky="w")
        self.checkbox_cid_input.grid(row=6, column=0, padx=5, pady=5)
        self.label_cid_input.grid(row=6, column=2, padx=5, pady=5, sticky="w")
        self.button_start.grid(row=8, column=0, padx=5, pady=15, sticky="w")

    def check_uploads(self):
        """Enable START only when both chosen files have supported extensions."""
        if (self.global_coordfilename.get() != "No file chosen") \
                and (self.global_ptypefilename.get() != "No file chosen"):
            # Use the last dot-separated segment so directory names that
            # contain '.' do not break extension detection (the previous
            # split('.')[1] picked the segment after the FIRST dot).
            self.coord_ext = self.global_coordfilename.get().split('.')[-1]
            ptype_ext = self.global_ptypefilename.get().split('.')[-1]
            if (self.coord_ext in self.global_coordext) and (ptype_ext in self.global_ptypeext):
                self.button_start.config(state="normal")
            else:
                self.button_start.config(state="disabled")
        else:
            self.button_start.config(state="disabled")

    def coordfile(self):
        """Ask the user for the cell-coordinate file and record its path."""
        coord_filename = filedialog.askopenfilename(initialdir="/home/myra/phenomics/apps/singlecell",  # self.homepath
                                                    title="Select coordinates file",
                                                    filetypes=(("CSV files", "*.csv"),
                                                               ("Excel files", "*.xls*"),
                                                               ("All files", "*.*")))
        self.global_coordfilename.set(coord_filename)
        self.check_uploads()

    def ptypefile(self):
        """Ask the user for the phenotype list file and record its path."""
        ptype_filename = filedialog.askopenfilename(initialdir="/home/myra/phenomics/apps/singlecell",
                                                    title="Select phenotype list file",
                                                    filetypes=(("Text files", "*.txt"),
                                                               ("All files", "*.*")))
        self.global_ptypefilename.set(ptype_filename)
        self.check_uploads()

    def start(self):
        """Load both input files and switch to the labelling canvas."""
        self.frame_initial.pack_forget()
        # Process phenotype list; use a context manager so the file handle
        # is closed (the previous code leaked it).
        with open(self.global_ptypefilename.get(), 'r') as ptypefile:
            self.phenotypes = [p.strip() for p in ptypefile.readlines()]
        # Main canvas display
        self.canvas_display = tk.Canvas(self.main)
        self.scroll_vertical = tk.Scrollbar(self.main, orient='vertical', command=self.canvas_display.yview)
        self.canvas_display.pack(expand='yes', fill='both', side='left')
        self.scroll_vertical.pack(fill='y', side='right')
        self.canvas_display.configure(yscrollcommand=self.scroll_vertical.set)
        # X11 reports wheel motion as Button-4/5 events; Windows and macOS
        # use the <MouseWheel> virtual event.
        if self.os == 'Linux':
            self.canvas_display.bind_all("<4>", self.on_mousewheel)
            self.canvas_display.bind_all("<5>", self.on_mousewheel)
        else:
            self.canvas_display.bind_all("<MouseWheel>", self.on_mousewheel)
        # Initialize frame display map
        self.frame_alldisplay = {}
        self.canvas_allframes = {}
        # Inside the canvas
        self.button_restart = tk.Button(self.canvas_display, text="HOME", command=self.restart)
        self.button_export = tk.Button(self.canvas_display, text="Export labeled data", command=self.exportdata)
        self.label_stats = tk.Label(self.canvas_display, textvariable=self.global_stats)
        self.canvas_display.create_window(10, 10, window=self.button_restart, anchor='nw')
        self.canvas_display.create_window(80, 10, window=self.button_export, anchor='nw')
        self.canvas_display.create_window(700, 10, window=self.label_stats, anchor='nw')
        # Process coordinates file
        if self.coord_ext == 'csv':
            self.coord_df = pd.read_csv(self.global_coordfilename.get())
        else:
            self.coord_df = pd.read_excel(self.global_coordfilename.get())
        self.is_cid = self.global_cid_input.get()
        # Empty/invalid entries fall back to the full index range.
        try:
            self.cellcnt_min = int(self.global_limitcell.get()) - 1
        except ValueError:
            self.cellcnt_min = 0
        try:
            self.cellcnt_max = int(self.global_limitmax.get())
        except ValueError:
            self.cellcnt_max = self.coord_df.shape[0]
        self.total_cellcnt = self.cellcnt_max - self.cellcnt_min
        self.coord_df = self.coord_df[self.cellcnt_min:self.cellcnt_max]
        self.global_colcount.set(self.coord_df.shape[1])
        self.total_batchpage = int(math.ceil(self.total_cellcnt / self.global_displaycellcnt.get()))
        self.global_stats.set("Label count: %d out of %d" %(self.global_labeledcellcnt.get(), self.total_cellcnt))
        self.coord_df['Saved Label'] = [None for _i in range(self.total_cellcnt)]
        self.selected_options = [tk.StringVar(value=self.phenotypes[0]) for _i in range(self.total_cellcnt)]
        self.create_cellframes(self.coord_df, self.global_currentpage.get())  # create frame for each cell

    def create_cellframes(self, dataframe, currentpage):
        """Build the widget grid of cropped cell images for *currentpage*."""
        # Create new frame display
        self.frame_display = tk.Frame(self.canvas_display)
        self.frame_alldisplay[currentpage] = self.frame_display
        self.canvas_allframes[currentpage] = self.canvas_display.create_window(0, 50, window=self.frame_display,
                                                                               anchor='nw')
        start = (currentpage-1)*self.global_displaycellcnt.get()
        end = currentpage*self.global_displaycellcnt.get()
        currentbatch_df = dataframe[start:end]
        pos = 1
        for data in currentbatch_df.iterrows():
            idx = data[0]
            alldata = data[1]
            # When a Cell ID column is present it occupies column 0, so the
            # path/x/y columns shift right by one.
            if self.global_cid_input.get() == 0:
                info_startid = 0
            else:
                info_startid = 1
            path = alldata[info_startid]
            center_x = alldata[info_startid+1]
            center_y = alldata[info_startid+2]
            # Two cells per row: even positions go to column 1 of the
            # previous row, odd positions start a new row in column 0.
            modpos = pos % 2
            if modpos == 0:
                row = int(pos/2) - 1
                col = 1
            else:
                row = int(pos/2)
                col = 0
            pos += 1
            cell = self.imagecrop(path, int(center_x), int(center_y))
            cellimage = ImageTk.PhotoImage(cell)
            self.labelframe_cell = tk.LabelFrame(self.frame_display, text="%d" %(idx+1), bd=3)
            self.labelframe_cell.grid(row=row, column=col, padx=10, pady=20)
            self.label_cellimage = tk.Label(self.labelframe_cell, image=cellimage)
            # Keep a reference so the PhotoImage is not garbage collected.
            self.label_cellimage.image = cellimage
            self.label_cellimage.grid(row=0, column=0, sticky="nw", rowspan=5)
            self.label_cellpath = tk.Label(self.labelframe_cell, text="%s" % os.path.basename(path).split('.')[0])
            self.label_cellcoord = tk.Label(self.labelframe_cell, text="x=%s, y=%s" % (center_x, center_y))
            try:
                self.curidx = idx + (self.total_cellcnt - int(self.global_limitmax.get()))
            except ValueError:
                self.curidx = idx
            # Pre-existing label, if the input table carries a label column.
            initlabel = None
            if info_startid == 0:
                if (self.global_colcount.get() == 4):
                    # .iloc replaces the .ix indexer removed in pandas 1.0.
                    initlabel = self.coord_df.iloc[:, 3].values[self.curidx]
                    if isinstance(initlabel, float):
                        # NaN (a float) marks an unlabeled cell.
                        initlabel = None
            else:
                if (self.global_colcount.get() == 5):
                    initlabel = self.coord_df.iloc[:, 4].values[self.curidx]
                    if isinstance(initlabel, float):
                        initlabel = None
            self.optionmenu = tk.OptionMenu(self.labelframe_cell, self.selected_options[self.curidx], *self.phenotypes)
            self.optionmenu.config(width=20)
            self.button_saveptype = tk.Button(self.labelframe_cell, text="Save", name="%s" % str(idx+1))
            # Bind the current index/widgets as lambda defaults so each
            # button saves its own cell (avoids late-binding closures).
            self.button_saveptype.configure(command=lambda bid=self.curidx, bsave=self.button_saveptype,
                                            opts=self.optionmenu: self.save_phenotype(bid, bsave, opts))
            self.label_cellpath.grid(row=0, column=1, sticky="w", padx=5, pady=(20,0))
            self.label_cellcoord.grid(row=1, column=1, sticky="w", padx=5, pady=0)
            if initlabel:
                self.label_initiallabel = tk.Label(self.labelframe_cell, wraplength=200,
                                                   text="Initial label: %s" % initlabel)
                self.label_initiallabel.grid(row=2, column=1, sticky="w", padx=5, pady=0)
            self.optionmenu.grid(row=3, column=1, padx=5, pady=(20, 0))
            self.button_saveptype.grid(row=4, column=1, padx=5, pady=0)
        # LabelFrame for next button/batch
        self.labelframe_cell = tk.LabelFrame(self.frame_display, text="", bd=0)
        self.labelframe_cell.grid(row=row+1, column=0, columnspan=2, pady=15)
        if self.total_batchpage > 1:
            self.button_prevbatch = tk.Button(self.labelframe_cell, text="Prev",
                                              command=lambda type='prev': self.prevnextbatch(type))
            self.button_nextbatch = tk.Button(self.labelframe_cell, text="Next",
                                              command=lambda type='next': self.prevnextbatch(type))
            self.label_batchpage = tk.Label(self.labelframe_cell, text="Batch %d of %d" % (currentpage,
                                                                                           self.total_batchpage))
            self.button_nextbatch.pack(side='right')
            self.button_prevbatch.pack(side='right')
            self.label_batchpage.pack(side='left')
        # Setup canvas scroll region
        self.frame_display.update_idletasks()
        self.canvas_display.yview_moveto(0)
        self.canvas_display.configure(scrollregion=(0, 0, self.frame_display.winfo_width(),
                                                    self.labelframe_cell.winfo_y() + 90))

    def prevnextbatch(self, type):
        """Show the previous/next page, wrapping around at the ends."""
        if type == 'next':
            if self.global_currentpage.get() != self.total_batchpage:
                page = self.global_currentpage.get() + 1
            else:
                page = 1
        else:
            if self.global_currentpage.get() != 1:
                page = self.global_currentpage.get() - 1
            else:
                page = self.total_batchpage
        self.global_currentpage.set(page)
        # Reuse an already-built page frame when possible; otherwise build it.
        if page in self.frame_alldisplay.keys():
            self.canvas_display.yview_moveto(0)
            self.canvas_display.configure(scrollregion=(0, 0, self.frame_alldisplay[page].winfo_width(),
                                                        self.frame_alldisplay[page].winfo_height() + 45))
            self.frame_alldisplay[page].tkraise()
            self.canvas_display.itemconfigure(self.canvas_allframes[page], state='normal')
            for p in self.frame_alldisplay.keys():
                if p != page:
                    self.canvas_display.itemconfigure(self.canvas_allframes[p], state='hidden')
        else:
            for p in self.frame_alldisplay.keys():
                if p != page:
                    self.canvas_display.itemconfigure(self.canvas_allframes[p], state='hidden')
            self.create_cellframes(self.coord_df, page)

    def initialize(self):
        """Reset all Tk variables to their defaults."""
        self.global_coordfilename.set("No file chosen")
        self.global_ptypefilename.set("No file chosen")
        self.global_labeledcellcnt.set(0)
        self.global_currentpage.set(1)
        self.global_displaycellcnt.set(20)
        self.global_cropsize.set(64)
        self.global_limitcell.set("")
        self.global_limitmax.set("")
        self.global_colcount.set(0)
        self.global_cid_input.set(0)

    def restart(self):
        """Tear down the labelling canvas and return to the start screen."""
        self.canvas_display.delete('all')
        self.canvas_display.pack_forget()
        self.scroll_vertical.pack_forget()
        self.frame_initial.pack(fill=tk.BOTH, expand=True)
        self.initialize()
        self.check_uploads()

    def exportdata(self):
        """Write the labeled rows to a user-chosen csv file."""
        outpath = filedialog.asksaveasfilename(initialdir="/home/myra/phenomics/apps/singlecell",
                                               title="Select output folder and filename")
        # Normalize to exactly one trailing '.csv' (the previous code
        # appended the extension twice for names like "out.txt").
        if not outpath.endswith('.csv'):
            outpath = outpath.split('.')[0] + '.csv'
        save_df = self.coord_df.dropna(subset=['Saved Label'])
        save_df.to_csv(outpath, index=False)

    def save_phenotype(self, bid, bsave, opts):
        """Store the selected phenotype for cell *bid* and lock its widgets."""
        # 'Saved Label' is the column appended after colcount was recorded,
        # so colcount is exactly its positional index.
        self.coord_df.iloc[bid, self.global_colcount.get()] = self.selected_options[bid].get()
        self.global_labeledcellcnt.set(self.global_labeledcellcnt.get() + 1)
        self.global_stats.set("Label count: %d out of %d" % (self.global_labeledcellcnt.get(), self.total_cellcnt))
        bsave.config(state="disabled", text="Saved")
        opts.config(state="disabled")

    def imagecrop(self, imagepath, center_x, center_y):
        """Return a 200x200 crop around (center_x, center_y), contrast-scaled.

        The source image is rescaled so its maximum value maps to 255
        before cropping a global_cropsize square centred on the cell.
        """
        loc_left = center_x - self.global_cropsize.get()/2
        loc_upper = center_y - self.global_cropsize.get()/2
        loc_right = center_x + self.global_cropsize.get()/2
        loc_lower = center_y + self.global_cropsize.get()/2
        image = Image.open(imagepath)
        im_arr = np.array(image).astype(float)
        im_scale = 1 / im_arr.max()
        im_new = ((im_arr * im_scale) * 255).round().astype(np.uint8)
        image = Image.fromarray(im_new)
        return image.crop((loc_left, loc_upper, loc_right, loc_lower)).resize((200, 200), Image.LANCZOS)

    def on_mousewheel(self, event):
        """Translate platform-specific wheel events into canvas scrolling."""
        if self.os == 'Linux':
            scroll = -1 if event.delta > 0 else 1
            if event.num == 4:
                scroll = scroll * -1
        elif self.os == 'Windows':
            # Windows reports deltas in multiples of 120 per notch.
            scroll = (-1) * int((event.delta / 120) * 1)
        elif self.os == 'Darwin':
            scroll = event.delta
        else:
            scroll = 1
        self.canvas_display.yview_scroll(scroll, "units")

    def show_error(self, *args):
        """Show any uncaught Tk callback exception in a message box."""
        err = traceback.format_exception(*args)
        messagebox.showerror('Exception', err)

    # Monkey-patch Tk globally: callback exceptions are routed to
    # show_error (Tk passes its own instance as the first argument).
    tk.Tk.report_callback_exception = show_error
if __name__ == "__main__":
    # Launch the labelling tool on a fresh Tk root window and enter the
    # Tk event loop (blocks until the window is closed).
    root = tk.Tk()
    menu = Menu(root)
    root.mainloop()
| 20,566 | 6,406 |
from flask import Flask, jsonify
app = Flask(__name__)
@app.route('/ping')
def pong():
    """Health-check endpoint: GET /ping responds with the JSON string 'pong!'."""
    return jsonify('pong!')
if __name__ == '__main__':
    # 0.0.0.0 binds all interfaces (e.g. reachable from outside a container).
    app.run(host='0.0.0.0')
| 176 | 75 |
from django.db import migrations
def _forward_func_deduplication_feed_entries(apps, schema_editor):
    """Delete FeedEntry rows duplicating (feed_id, url, updated_at).

    The first row seen for each key is kept; later rows with the same
    key are collected and deleted afterwards.
    """
    FeedEntry = apps.get_model('api', 'FeedEntry')
    seen_keys = set()
    duplicates = []
    for entry in FeedEntry.objects.all():
        key = (entry.feed_id, entry.url, entry.updated_at)
        if key in seen_keys:
            duplicates.append(entry)
        else:
            seen_keys.add(key)
    for entry in duplicates:
        entry.delete()
class Migration(migrations.Migration):
    """Data migration: delete duplicated FeedEntry rows (see the dedup
    function above); runs after api migration 0005."""

    dependencies = [
        ('api', '0005_auto_20210331_1716'),
    ]
    operations = [
        # Forward-only: no reverse function is provided, so this migration
        # cannot be unapplied cleanly.
        migrations.RunPython(_forward_func_deduplication_feed_entries),
    ]
| 761 | 251 |
# Generated by Django 2.2.26 on 2022-02-26 08:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add optional profile fields to the user model and make email optional.

    All new fields allow blank/null so existing rows need no backfill.
    """

    dependencies = [
        ("users", "0002_auto_20220225_1606"),
    ]
    operations = [
        migrations.AddField(
            model_name="user",
            name="adresse",
            field=models.CharField(blank=True, max_length=256, null=True),
        ),
        migrations.AddField(
            model_name="user",
            name="datenaissance",
            field=models.DateField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name="user",
            name="licence",
            field=models.PositiveIntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name="user",
            name="nbanneeclub",
            field=models.SmallIntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name="user",
            name="nbanneepratique",
            field=models.SmallIntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name="user",
            name="photo",
            field=models.URLField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name="user",
            name="prenomnom",
            field=models.CharField(blank=True, max_length=256, null=True),
        ),
        migrations.AddField(
            model_name="user",
            name="role",
            field=models.CharField(blank=True, max_length=256, null=True),
        ),
        # NOTE(review): storing phone numbers as integers drops leading
        # zeros and '+' prefixes — consider a CharField in a follow-up.
        migrations.AddField(
            model_name="user",
            name="telephone",
            field=models.PositiveIntegerField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name="user",
            name="email",
            field=models.EmailField(blank=True, max_length=254, null=True),
        ),
    ]
| 1,931 | 576 |
import datetime
from typing import Any
import dateutil.tz
def json_defaults(obj: Any) -> str:
    """JSON serializer for objects not serializable by default json code.

    Datetimes are converted to UTC and rendered as ISO-8601 timestamps;
    dates are rendered as ISO-8601 dates. Any other type raises TypeError,
    matching the contract ``json.dumps(default=...)`` expects.
    """
    if isinstance(obj, datetime.datetime):
        # datetime is a subclass of date, so this check must come first.
        # The stdlib timezone.utc renders identically to dateutil.tz.UTC,
        # so no third-party dependency is needed here.
        return obj.astimezone(datetime.timezone.utc).isoformat()
    elif isinstance(obj, datetime.date):
        return obj.isoformat()
    raise TypeError("Type %s not serializable" % type(obj))
| 409 | 119 |
from setuptools import setup, find_packages, Extension
import os
from setuptools import setup, find_packages, Extension
import os,sys
import os
import shlex
import shutil
import setuptools.command.build_ext
import setuptools.command.build_py
import setuptools.command.install
import setuptools.command.sdist
import setuptools.dist
from setuptools import dist
from setuptools.command.install import install
import sysconfig
import tempfile
import pkg_resources
from distutils.command.build import build
# Read the project README so PyPI can render it as the long description.
with open("Readme.md", "r") as fh:
    long_description = fh.read()
def _configure_macos_deployment_target():
    """Raise MACOSX_DEPLOYMENT_TARGET to at least 10.14 and return it.

    TensorStore requires MACOSX_DEPLOYMENT_TARGET >= 10.14 in order to
    support sized/aligned operator new/delete, so the Python build's own
    target is bumped when it is older.
    """
    min_macos_target = '10.14'
    key = 'MACOSX_DEPLOYMENT_TARGET'
    python_macos_target = str(sysconfig.get_config_var(key))
    macos_target = python_macos_target
    too_old = macos_target and (pkg_resources.parse_version(macos_target) <
                                pkg_resources.parse_version(min_macos_target))
    if too_old:
        macos_target = min_macos_target
    # Publish the value through the environment because the `wheel`
    # package reads it from there. Bazel does not look at this variable;
    # it receives the version via a command-line option instead.
    os.environ[key] = macos_target
    return macos_target
# Only relevant when building on macOS; the value is later passed to
# bazel via --macos_minimum_os in BuildExtCommand.
if 'darwin' in sys.platform:
    _macos_deployment_target = _configure_macos_deployment_target()
class CustomBuild(build):
    """Build command that redirects build output into '_build'.

    NOTE(review): run() only overrides build_lib and never calls
    super().run(), so distutils' own build steps are skipped here —
    confirm this is intentional (the bazel-based build_ext appears to do
    the real work).
    """
    def run(self):
        self.build_lib = '_build'
# Tag wheels as platform-specific (they bundle a compiled extension).
try:
    from wheel.bdist_wheel import bdist_wheel as _bdist_wheel
    class bdist_wheel(_bdist_wheel):
        def finalize_options(self):
            _bdist_wheel.finalize_options(self)
            # Not a pure-Python wheel: force a platform-specific tag.
            self.root_is_pure = False
except ImportError:
    # The 'wheel' package is unavailable; setup() gets no bdist_wheel
    # override in that case.
    bdist_wheel = None
class BinaryDistribution(dist.Distribution):
    """Distribution that always reports a bundled compiled extension."""
    def is_pure(self):
        # Never pure Python: the package ships a native core module.
        return False
    def has_ext_modules(self):
        # Forces platlib installation and platform wheel tags.
        return True
class InstallPlatlib(install):
    """Install command that places packages with extensions in platlib."""
    def finalize_options(self):
        install.finalize_options(self)
        if self.distribution.has_ext_modules():
            # Extensions are platform-specific, so install into the
            # platform-specific library directory.
            self.install_lib = self.install_platlib
class BuildExtCommand(setuptools.command.build_ext.build_ext):
    """Overrides default build_ext command to invoke bazel."""
    def run(self):
        # Delegates the whole native build to bazel unless a prebuilt
        # directory is supplied via BARK_ML_PREBUILT_DIR.
        if not self.dry_run:
            prebuilt_path = os.getenv('BARK_ML_PREBUILT_DIR')
            if not prebuilt_path:
                # Ensure python_configure.bzl finds the correct Python verison.
                os.environ['PYTHON_BIN_PATH'] = sys.executable
                bazelisk = os.getenv('BARK_ML_BAZELISK', 'bazelisk.py')
                # Controlled via `setup.py build_ext --debug` flag.
                default_compilation_mode = 'dbg' if self.debug else 'opt'
                compilation_mode = os.getenv('BARK_ML_BAZEL_COMPILATION_MODE',
                                             default_compilation_mode)
                build_command = [sys.executable, '-u', bazelisk] + [
                    'build',
                    '-c',
                    compilation_mode,
                    '//bark_ml:pip_package',
                    '--verbose_failures'
                ]
                if 'darwin' in sys.platform:
                    # Note: Bazel does not use the MACOSX_DEPLOYMENT_TARGET environment
                    # variable.
                    build_command += ['--macos_minimum_os=%s' % _macos_deployment_target]
                    build_command += ['--define=build_platform=macos']
                if sys.platform == 'win32':
                    # Disable newer exception handling from Visual Studio 2019, since it
                    # requires a newer C++ runtime than shipped with Python.
                    #
                    # https://cibuildwheel.readthedocs.io/en/stable/faq/#importerror-dll-load-failed-the-specific-module-could-not-be-found-error-on-windows
                    build_command += ['--copt=/d2FH4-']
                # spawn raises DistutilsExecError if bazel fails.
                self.spawn(build_command)
            suffix = '.pyd' if os.name == 'nt' else '.so'
            built_ext_path = os.path.join(
                'bazel-bin/bark_ml/pip_package.runfiles/bark_ml/bark_ml/core' + suffix)
            # NOTE(review): the destination is hard-coded to core.so even
            # though `suffix` may be '.pyd' on Windows — confirm intended.
            copy_to = os.path.dirname(os.path.abspath(__file__)) + "/bark_ml/core.so"
            copy_to_manifest = os.path.dirname(os.path.abspath(__file__)) + "/MANIFEST.in"
            print('Copying extension %s -> %s' % (
              built_ext_path,
              copy_to
            ))
            shutil.copyfile(built_ext_path, copy_to)
# Package metadata and build wiring. The compiled core module is produced
# by the bazel-driven BuildExtCommand above, not by setuptools itself.
setup(
  name = "bark-ml",
  version = "0.4.29",
  description = "Gym Environments and Agents for Autonomous Driving",
  long_description=long_description,
  long_description_content_type="text/markdown",
  classifiers = ["Development Status :: 4 - Beta",
                 "Intended Audience :: Science/Research",
                 "License :: OSI Approved :: MIT License",
                 "Operating System :: OS Independent",
                 "Programming Language :: Python :: 3.7",
                 "Programming Language :: Python :: 3.8"],
  keywords = "gym environments, reinforcement learning, autonomous driving, machine learning",
  url = "https://github.com/bark-simulator/bark-ml",
  author = "Patrick Hart, Julian Bernhard, Klemens Esterle, Tobias Kessler",
  author_email = "patrickhart.1990@gmail.com",
  license = "MIT",
  packages=find_packages(),
  install_requires=[
    'pygame>=1.9.6',
    'gym>=0.17.2',
    'tensorflow>=2.2.0',
    'tensorboard>=2.2.2',
    'tf-agents>=0.5.0',
    'tensorflow-probability>=0.10.0',
    'bark-simulator>=1.4.8',
    'graph-nets>=1.1.0'
  ],
  # Custom commands: bazel build, platform wheel tagging, platlib install.
  cmdclass={
      'bdist_wheel': bdist_wheel,
      'build_ext': BuildExtCommand,
      'install': InstallPlatlib,
      'build': CustomBuild
  },
  # NOTE(review): nose is unmaintained on Python 3 — consider pytest.
  test_suite='nose.collector',
  tests_require=['nose'],
  include_package_data=True,
  zip_safe=False,
  distclass=BinaryDistribution,
  python_requires='>=3.7',
)
| 6,200 | 2,083 |
"""Pytorch neural network to predict zernike coefficients from donut images.
My implementation of the network presented in David Thomas's PhD Thesis
at Stanford.
"""
import numpy as np
import torch
from torch import nn
class DavidNet(nn.Module):
    """Predict wavefront Zernike coefficients from a donut image.

    A DonutNet encodes the donut image into a feature vector; that vector
    is concatenated with meta parameters (field angles and the intra/extra
    focal flag) and passed through a MetaNet, which regresses the Zernike
    coefficients.
    """

    def __init__(self, n_meta_layers: int) -> None:
        """Assemble the image encoder and the fully connected head.

        Parameters
        ----------
        n_meta_layers: int
            Number of fully connected layers in the MetaNet.
        """
        super().__init__()
        self.donut_net = DonutNet()
        self.meta_net = MetaNet(n_meta_layers)

    def forward(
        self,
        image: torch.Tensor,
        fx: torch.Tensor,
        fy: torch.Tensor,
        intra: torch.Tensor,
    ) -> torch.Tensor:
        """Return the Zernike coefficients predicted for the donut image.

        Parameters
        ----------
        image: torch.Tensor
            The donut image
        fx: torch.Tensor
            The x angle of the source with respect to the optic axis
        fy: torch.Tensor
            The y angle of the source with respect to the optic axis
        intra: torch.Tensor
            Boolean indicating whether the donut is intra or extra focal

        Returns
        -------
        torch.Tensor
            Array of Zernike coefficients
        """
        encoded = self.donut_net(image)
        combined = torch.cat([encoded, fx, fy, intra], dim=1)
        return self.meta_net(combined)
class DonutNet(nn.Module):
    """Encode a 1x256x256 donut image as a 1024-dimensional vector.

    Batches of 1x256x256 images go in; a flat (batch x 1024) feature
    tensor comes out.
    """

    def __init__(self) -> None:
        """Build the convolutional encoder stack."""
        super().__init__()
        # Full-resolution channel expansion: 1 -> 8 channels.
        stack = [
            nn.Conv2d(1, 8, 3, stride=1, padding=1),
            nn.BatchNorm2d(8),
            nn.ReLU(inplace=True),
        ]
        # Seven halving blocks: channels double 8 -> 1024 while height
        # and width are each cut in half per block.
        for exp in range(3, 10):
            stack.append(DownBlock(2 ** exp, 2 ** (exp + 1)))
        # One final halving block at a constant channel count.
        stack.append(DownBlock(2 ** 10, 2 ** 10))
        # Collapse the remaining spatial dims into a flat feature vector.
        stack.append(nn.Flatten())
        self.layers = nn.ModuleList(stack)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return the latent encoding of the donut image.

        Parameters
        ----------
        x: torch.Tensor
            Input images of shape (batch x 256 x 256)

        Returns
        -------
        torch.Tensor
            Latent space encoding of shape (batch x 1024)
        """
        out = x
        for module in self.layers:
            out = module(out)
        return out
class DownBlock(nn.Module):
    """Halve an image's spatial dims via a residual block + strided conv.

    A SkipBlock keeps the dimensions, then a stride-2 convolution halves
    height and width while switching to the requested channel count.
    """

    def __init__(self, in_channels: int, out_channels: int) -> None:
        """Create the down-sampling block.

        Parameters
        ----------
        in_channels: int
            The number of input channels
        out_channels: int
            The number of output channels
        """
        super().__init__()
        stages = [
            # Residual convolution; preserves shape.
            SkipBlock(in_channels),
            # Stride-2 convolution; halves height and width.
            nn.Conv2d(in_channels, out_channels, 3, stride=2, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        self.layers = nn.ModuleList(stages)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return the convolved image at half the height and width.

        Parameters
        ----------
        x: torch.Tensor
            Input image of shape (batch x in_channels x height x width)

        Returns
        -------
        torch.Tensor
            Output image of shape (batch x out_channels x height/2 x width/2)
        """
        out = x
        for stage in self.layers:
            out = stage(out)
        return out
class SkipBlock(nn.Module):
    """Conv + BatchNorm + ReLU with an additive residual connection."""

    def __init__(self, channels: int) -> None:
        """Create the residual convolution.

        Parameters
        ----------
        channels: int
            The number of input and output channels for the convolution.
        """
        super().__init__()
        # Shape-preserving transform whose output is added back onto x.
        self.layers = nn.Sequential(
            nn.Conv2d(channels, channels, 3, stride=1, padding="same"),
            nn.BatchNorm2d(channels),
            nn.ReLU(inplace=True),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return x plus its convolved transform (shape unchanged).

        Parameters
        ----------
        x: torch.Tensor
            Input image of shape (batch x channels x height x width)

        Returns
        -------
        torch.Tensor
            Output image of shape (batch x channels x height x width)
        """
        return x + self.layers(x)
class MetaNet(nn.Module):
    """Map image features plus meta parameters onto Zernike coefficients.

    A stack of fully connected layers whose widths shrink geometrically
    from the concatenated input size down to N_ZERNIKES.
    """

    # number of Zernike coefficients to predict
    N_ZERNIKES = 18
    # number of meta parameters to use in prediction
    N_METAS = 3
    # dimension of the image features, as produced by DonutNet
    IMAGE_DIM = 1024

    def __init__(self, n_layers: int) -> None:
        """Create the fully connected head.

        Parameters
        ----------
        n_layers: int
            The number of layers in the MetaNet.
        """
        super().__init__()
        # Layer widths follow a geometric progression between the input
        # size (image features + metas) and the Zernike output size.
        widths = np.geomspace(
            self.IMAGE_DIM + self.N_METAS,
            self.N_ZERNIKES,
            n_layers + 1,
            dtype=int,
        )
        self.layers = nn.ModuleList()
        for i in range(n_layers - 1):
            # Hidden layers: Linear -> BatchNorm -> ReLU.
            self.layers.append(nn.Linear(widths[i], widths[i + 1]))
            self.layers.append(nn.BatchNorm1d(widths[i + 1]))
            self.layers.append(nn.ReLU(inplace=True))
            # Dropout only after the first hidden layer, for regularization.
            if i == 0:
                self.layers.append(nn.Dropout(0.1))
        # Output layer: plain Linear, no normalization or activation.
        self.layers.append(nn.Linear(widths[-2], widths[-1]))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Map image features and meta parameters onto Zernikes.

        Parameters
        ----------
        x: torch.Tensor
            Input vector of image features and meta parameters.

        Returns
        -------
        torch.Tensor
            Array of Zernike coefficients. Size = cls.N_ZERNIKES
        """
        out = x
        for layer in self.layers:
            out = layer(out)
        return out
| 8,198 | 2,379 |
class NoScheduleError(Exception):
    """Error raised when no schedule is available."""
class SchedulingError(Exception):
    """Error raised when scheduling fails."""
| 88 | 27 |
# Description: A library of common utilities
# Author: Matt Ferraro
import numpy as np
import cv2
from scipy.signal import convolve2d
import matplotlib.pyplot as plt
def imagify(fft):
    '''
    Compress a 2D FFT's dynamic range so it can be shown as an image.

    FFTs are typically complex-valued with far too much dynamic range to
    view directly, so this returns log10(1 + |fft|): a real-valued array
    of the same shape that is suitable for display.
    '''
    magnitude = np.absolute(fft)
    return np.log10(magnitude + np.ones(magnitude.shape))
def pupil_function(width=11):
    '''
    Create a pupil: a square matrix with a raised cylinder of the specified
    width. The square is width * 2 + 1 pixels wide.
    '''
    background = np.zeros((width * 2 + 1, width * 2 + 1))
    # cv2.circle requires an integer radius; on Python 3 "width / 2" is a
    # float, which cv2 rejects, so use floor division instead.
    cv2.circle(background, (width, width), width // 2, 1, thickness=-1)
    return background
def pupil_to_transfer_function_fft(pupil_func):
    '''
    Return the FFT of the transfer function for the given pupil function.

    This is obtained by convolving the pupil function with itself (same
    size output, symmetric boundary handling).
    '''
    self_convolution = convolve2d(pupil_func, pupil_func, boundary='symm', mode='same')
    return self_convolution
def plot_image_and_slice(image, title=None):
    '''
    Plot the image itself and, sharing its x axis, a single slice taken
    through the middle row of the image.
    '''
    plt.figure(1)
    # "210" is not a valid subplot spec (the index digit must be >= 1);
    # the intended layout is two stacked panels: 211 on top, 212 below.
    image_plot = plt.subplot(211)
    plt.imshow(image, cmap='gray', interpolation='nearest')
    plt.subplot(212, sharex=image_plot)
    # Floor division: on Python 3 "/" yields a float, which is not a
    # valid array index.
    plt.plot(image[image.shape[0] // 2], 'ro', )
    if title:
        plt.title(title)
    plt.show()
def pad_to_match(reference_array, candidate_array):
    '''
    Given a reference array and a candidate array, pad the candidate array
    with zeros equally on all sides so that it is equal in size to the
    reference.
    '''
    # This code is fragile and makes a lot of assumptions:
    # - the reference and the candidate are both square
    # - the reference image is larger than the candidate
    # - the difference is a multiple of 2
    # The code should be improved to not rely on those assumptions
    #
    # Floor division: np.pad requires an integer pad width, and on
    # Python 3 "/" yields a float (TypeError).
    padding = (reference_array.shape[0] - candidate_array.shape[0]) // 2
    return np.pad(candidate_array, padding, mode='constant')
| 2,229 | 686 |
import torch
import open3d
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import itertools
import imageio
from torch.autograd import Variable
from src.utils import point_cloud_utils as pcu
from sklearn.metrics import confusion_matrix
def weights_init(m):
    """Initializer hook for `Module.apply`: Kaiming-normal weights and zero
    bias on Conv1d/Linear layers; all other layer types are left untouched."""
    layer_kind = type(m).__name__
    if layer_kind not in ('Conv1d', 'Linear'):
        return
    torch.nn.init.kaiming_normal_(m.weight, nonlinearity='relu')
    if m.bias is not None:
        torch.nn.init.constant_(m.bias, 0)
def pc_to_grid(point_cloud, grid_rate):
    """Reorder a point cloud into a coarse grid layout by successively sorting
    along each coordinate channel and regrouping the point axis.

    Args:
        point_cloud: (B, C, N) tensor. The index stack below hard-codes 3
            channels, so this assumes C == 3 — TODO confirm with callers.
        grid_rate: branching factor; after processing channel c the point axis
            is reshaped into grid_rate**(c+1) groups.

    Returns:
        The permuted/reshaped tensor (final shape (B, C, grid_rate**C, -1),
        assuming N is divisible by grid_rate**C — verify with callers).
    """
    B, C, N = point_cloud.shape
    device = point_cloud.device
    # .to(device) is a no-op here (same device); it just gives us a rebindable handle.
    grid_pc = point_cloud.to(device)
    for c in range(C):
        point_matrix = grid_pc[:, c, :]  # (B, N) values of channel c
        sorted_matrix = torch.sort(point_matrix, dim=-1)  # (B, N)
        # Broadcast the sort permutation over the 3 coordinate channels
        # so every channel is reordered consistently.
        indices_matrix = torch.stack([sorted_matrix[1]] * 3, dim=1)
        # Apply the permutation, then split the point axis into finer groups.
        grid_pc = torch.gather(grid_pc, -1, indices_matrix).view(B, C, pow(grid_rate, c + 1), -1)
    return grid_pc
def pc_to_regular_grid(point_cloud, grid_rate):
    """Partition [-1, 1]^3 into grid_rate^3 axis-aligned cells and build
    pairwise-difference matrices of the per-cell mean and std statistics.

    Args:
        point_cloud: (B, 3, N) tensor, coordinates presumably in [-1, 1]
            — TODO confirm; points exactly at -1 fall outside every cell
            because the lower bound uses a strict `<`.
        grid_rate: number of cells per axis.

    Returns:
        torch.stack([seg_mean, seg_std], dim=1) — the mean- and std-based
        difference matrices stacked along a new dim 1.
    """
    B, C, N = point_cloud.shape
    device = point_cloud.device
    # Cell boundaries replicated for the 3 axes: (3, grid_rate + 1).
    grid_scale = torch.stack([torch.linspace(-1, 1, grid_rate + 1)] * 3, dim=0).to(device)
    # NOTE(review): seg_masks is accumulated nowhere (the cat below is
    # commented out), so this empty tensor is effectively dead state.
    seg_masks = torch.zeros([0], dtype=torch.bool).to(device)
    # Empty accumulators; torch.cat treats 1-D empty tensors as neutral.
    seg_mean = torch.zeros([0], dtype=torch.float).to(device)
    seg_std = torch.zeros([0], dtype=torch.float).to(device)
    # One iteration per cell: idx is the (ix, iy, iz) cell coordinate.
    for idx in list(itertools.product(list(range(grid_rate)), repeat=3)):
        idx = torch.tensor(idx).unsqueeze(1).to(device)
        # Boolean membership mask: lower bound exclusive, upper bound inclusive.
        seg_mask = (torch.gather(grid_scale, 1, idx) < point_cloud) * \
                   (torch.gather(grid_scale, 1, idx + 1) >= point_cloud)
        # seg_masks = torch.cat([seg_masks, seg_mask], dim=1)
        # Masked mean/std over the point axis. NOTE(review): the mask zeroes
        # points rather than excluding them, so stats include the zeros, and
        # std of a near-empty cell can be NaN — confirm this is intended.
        seg_mean = torch.cat([seg_mean, (point_cloud * seg_mask).mean(-1).unsqueeze(-1)], dim=2)
        seg_std = torch.cat([seg_std, (point_cloud * seg_mask).std(-1).unsqueeze(-1)], dim=2)
    # Turn the per-cell statistics into antisymmetric pairwise-difference matrices.
    seg_mean = torch.stack([seg_mean.mean(dim=1)] * seg_mean.size(2), dim=1) \
               - torch.stack([seg_mean.mean(dim=1)] * seg_mean.size(2), dim=2)
    seg_std = torch.stack([seg_std.mean(dim=1)] * seg_std.size(2), dim=1) \
              - torch.stack([seg_std.mean(dim=1)] * seg_std.size(2), dim=2)
    return torch.stack([seg_mean, seg_std], dim=1)
def plot_3d_point_cloud(x, y, z, show=True, show_axis=True, in_u_sphere=False,
                        marker='.', s=8, alpha=.8, figsize=(5, 5), elev=10,
                        azim=240, axis=None, title=None, *args, **kwargs):
    """Scatter-plot a single point cloud in 3D (agg backend) and return the figure.

    Args:
        x, y, z: per-point coordinate sequences.
        show: call plt.show() before returning (no-op under agg).
        show_axis: hide axes when False.
        in_u_sphere: clamp all three axes to [-0.5, 0.5] instead of data-driven limits.
        axis: existing 3D axes to draw into; when given it is also used as the
            return value in place of a Figure — callers only need .savefig-like use.
        Remaining args/kwargs are forwarded to ax.scatter (e.g. c= for colors).

    Returns:
        The matplotlib figure (or the supplied `axis` object).
    """
    plt.switch_backend('agg')  # headless rendering; figures are saved, not shown
    if axis is None:
        fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot(111, projection='3d')
    else:
        ax = axis
        fig = axis
    if title is not None:
        plt.title(title)
    sc = ax.scatter(x, y, z, marker=marker, s=s, alpha=alpha, *args, **kwargs)
    ax.view_init(elev=elev, azim=azim)
    if in_u_sphere:
        ax.set_xlim3d(-0.5, 0.5)
        ax.set_ylim3d(-0.5, 0.5)
        ax.set_zlim3d(-0.5, 0.5)
    else:
        # Multiply with 0.7 to squeeze free-space.
        miv = 0.7 * np.min([np.min(x), np.min(y), np.min(z)])
        mav = 0.7 * np.max([np.max(x), np.max(y), np.max(z)])
        ax.set_xlim(miv, mav)
        ax.set_ylim(miv, mav)
        ax.set_zlim(miv, mav)
    plt.tight_layout()
    if not show_axis:
        plt.axis('off')
    if 'c' in kwargs:
        # A color array was supplied; attach a colorbar for it.
        plt.colorbar(sc)
    if show:
        plt.show()
    return fig
def plot_3d_colormap(point_clouds, max_points, max_count, show_axis=True, in_u_sphere=False,
                     marker='.', s=10, alpha=.8, figsize=(10, 10), elev=10,
                     azim=240, axis=None, title=None, *args, **kwargs):
    """Plot a point cloud in light gray with a rainbow-colored overlay of
    'max' points, colorbar labelled 'max_count'. Returns the figure.

    Args:
        point_clouds: unpackable into (x, y, z) — e.g. a (3, N) tensor.
        max_points: unpackable into (m_x, m_y, m_z) for the highlighted subset.
        max_count: per-highlight scalar used as the colormap value.
        axis: existing 3D axes to draw into (also returned in place of a figure).
    """
    plt.switch_backend('agg')  # headless rendering
    x, y, z = point_clouds
    m_x, m_y, m_z = max_points
    if axis is None:
        fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot(111, projection='3d')
        # ax2 = fig.add_subplot(122, projection='3d')
    else:
        ax = axis
        fig = axis
    if title is not None:
        plt.title(title)
    # Background cloud in gray, highlighted subset colored by max_count.
    sc_pc = ax.scatter(x, y, z, marker=marker, c='lightgray', s=s, alpha=alpha)
    sc_max_pc = ax.scatter(m_x, m_y, m_z, marker=marker, c=max_count, cmap='rainbow', s=s, alpha=alpha)
    plt.colorbar(sc_max_pc, label='max_count')
    ax.view_init(elev=elev, azim=azim)
    if in_u_sphere:
        ax.set_xlim3d(-0.5, 0.5)
        ax.set_ylim3d(-0.5, 0.5)
        ax.set_zlim3d(-0.5, 0.5)
    else:
        # Multiply with 0.7 to squeeze free-space.
        miv = 0.7 * np.min([np.min(x), np.min(y), np.min(z)])
        mav = 0.7 * np.max([np.max(x), np.max(y), np.max(z)])
        ax.set_xlim(miv, mav)
        ax.set_ylim(miv, mav)
        ax.set_zlim(miv, mav)
    plt.tight_layout()
    if not show_axis:
        plt.axis('off')
    return fig
def colormap_save(dataloader, model, device, domain, save_dir, num_class, max_num_sample, target_domain=None):
    """Run the model over a dataloader and save, per class, up to
    max_num_sample colormap renderings highlighting the model's max-pooled
    points (model is assumed to return (logits, max_idx) — TODO confirm).

    For 'source' domain only correctly classified samples are rendered; for
    'target' domain samples are filed under the predicted class and the title
    shows both true and predicted labels.
    """
    # Fixed 10-class ModelNet-style label mapping.
    idx_to_label = {0: "bathtub", 1: "bed", 2: "bookshelf", 3: "cabinet",
                    4: "chair", 5: "lamp", 6: "monitor",
                    7: "plant", 8: "sofa", 9: "table"}
    # Per-class counter of how many images have been written so far.
    sample_num = torch.zeros([num_class], dtype=torch.int).to(device)
    with torch.no_grad():
        model.eval()
        for data in dataloader:
            point_clouds = data['point_cloud'].to(device)
            labels = data['label'].to(device)
            pred, max_idx = model(point_clouds)
            if domain == 'source':
                save_path = os.path.join(save_dir, 'src')
                # Keep only samples the model classified correctly.
                mask = (labels == pred.max(dim=1)[1])
                point_clouds = point_clouds[mask, :]
                labels = labels[mask]
                max_idx = max_idx[mask, :]
            elif domain == 'target':
                save_path = os.path.join(save_dir, 'trg_{}'.format(target_domain))
                pred_labels = pred.max(dim=1)[1]
            else:
                raise NotImplementedError
            point_clouds = point_clouds.cpu()
            for k in range(point_clouds.size(0)):
                class_idx = int(labels[k])
                # Target-domain samples are filed by predicted class.
                if domain == 'target': class_idx = int(pred_labels[k])
                if sample_num[class_idx] == max_num_sample: continue
                sample_num[class_idx] += 1
                class_label = idx_to_label[class_idx]
                image_path = os.path.join(save_path, '{}'.format(class_label))
                os.makedirs(image_path, exist_ok=True)
                # Unique max-pooled point indices and how often each was selected.
                max_list, max_count = np.unique(max_idx[k].cpu(), return_counts=True)
                max_list = torch.tensor(max_list)
                # Min-max normalize counts to [0, 1] for the colormap.
                # NOTE(review): divides by zero if all counts are equal — confirm
                # this cannot happen with real model outputs.
                max_count = (max_count - max_count.min()) / (max_count.max() - max_count.min())
                max_pc = torch.gather(point_clouds[k, :, :], 1,
                                      torch.stack([max_list] * 3, dim=0))
                # Colormap
                if domain == 'source':
                    img_title = '{}'.format(class_label)
                else:
                    true_label = idx_to_label[int(labels[k])]
                    img_title = 'true label : {}\npred label : {}'.format(true_label, class_label)
                fig = plot_3d_colormap(point_clouds[k, :, :], max_pc, max_count,
                                       in_u_sphere=True, show=False, title=img_title)
                fig.savefig(os.path.join(image_path, '{}.png'.format(sample_num[class_idx])))
                plt.close(fig)
            # Stop once every class has its quota of saved images.
            if sample_num.sum() == max_num_sample * num_class: break
def image_save(point_cloud, save_dir, save_folder, save_name, img_title, batch_idx=0, folder_numbering=True):
    """Render each cloud in a (B, 3, N) batch with plot_3d_point_cloud and save
    it as a PNG.

    With folder_numbering=True each sample gets its own numbered folder
    (save_folder_<global index>) containing <save_name>.png; otherwise all
    images go into one folder with the global index appended to the filename.
    """
    for k in range(point_cloud.size(0)):
        fig = plot_3d_point_cloud(point_cloud[k][0], point_cloud[k][1], point_cloud[k][2],
                                  in_u_sphere=True, show=False,
                                  title='{}'.format(img_title))
        if folder_numbering:
            # One directory per sample, numbered globally across batches.
            save_path = os.path.join(save_dir, '{}_{}'.format(save_folder, batch_idx * point_cloud.size(0) + k))
            os.makedirs(save_path, exist_ok=True)
            fig.savefig(os.path.join(save_path, '{}.png'.format(save_name)))
        else:
            # Shared directory; the global sample index goes into the filename.
            save_path = os.path.join(save_dir, '{}'.format(save_folder))
            os.makedirs(save_path, exist_ok=True)
            fig.savefig(
                os.path.join(save_path, '{}_{}.png'.format(save_name, batch_idx * point_cloud.size(0) + k)))
        plt.close(fig)
def make_training_sample(point_cloud):
    """Build a noisy training sample for the implicit-distance model.

    Concatenates, along the point axis: N/4 pure Gaussian points plus three
    jittered random subsets (N/4 points each, increasing sigma/clip) of the
    input cloud, then labels every sample point with its tangent-plane
    distance to the original cloud via point_cloud_distance_cp.

    Returns:
        (sample, sample_dist): the ~N-point sample (B, C, N) and its
        per-point distances. Non-deterministic (randn/randperm, unseeded).
    """
    B, C, N = point_cloud.shape
    device = point_cloud.device
    # Start with pure noise so some sample points lie far from the surface.
    sample = torch.randn(B, C, int(N / 4)).to(device)
    sigma = [0.1, 0.15, 0.2]
    clip = [0.2, 0.3, 0.4]
    for i in range(3):
        # Jitter the whole cloud, then keep a random quarter of its points.
        jittering_sample = pcu.jitter(point_cloud, sigma=sigma[i], clip=clip[i])[:, :, torch.randperm(N)[:int(N / 4)]]
        sample = torch.cat([sample, jittering_sample], dim=2)
    # sampling=True returns raw per-point distances instead of a scalar loss.
    sample_dist = point_cloud_distance_cp(point_cloud, sample, sampling=True).squeeze(dim=-1)
    return sample, sample_dist
def knn_point_sampling(point_cloud, target_points, sample_num):
    """For every target point, gather its `sample_num` nearest neighbours
    (Euclidean) from `point_cloud`.

    Args:
        point_cloud: (B, C, N) source cloud; C is assumed to be 3.
        target_points: (B, C, M) query points.
        sample_num: neighbours to keep per query point.

    Returns:
        (B, C, M, sample_num) tensor of neighbour coordinates.
    """
    device = point_cloud.device
    B, C, N = point_cloud.shape
    M = target_points.size(2)
    # Broadcast both clouds to (B, C, M, N) and take pairwise distances.
    cloud_rep = point_cloud.unsqueeze(2).expand(B, C, M, N)
    target_rep = target_points.unsqueeze(3).expand(B, C, M, N)
    pairwise_dist = (cloud_rep - target_rep).pow(2).sum(dim=1).sqrt().to(device)
    nearest = torch.topk(pairwise_dist, sample_num, largest=False)
    # Replicate the neighbour indices over the 3 coordinate channels.
    gather_idx = nearest[1].unsqueeze(1).repeat(1, 3, 1, 1)
    return torch.gather(cloud_rep, 3, gather_idx)
def point_cloud_distance_svd(point_cloud, target_points, k=5, p=0.01, sampling=False):
    """Tangent-plane distance from target points to the cloud via local SVD
    plane fitting.

    For each target point, fits a plane to its k nearest cloud points (the
    least-significant singular vector of the local covariance is the normal)
    and measures the point's absolute distance to that plane. Stochastic:
    the in-plane reference point is drawn with torch.randint.

    Args:
        point_cloud / target_points: (B, C, N) tensors of identical shape
            (enforced below).
        k: neighbourhood size for plane fitting.
        p: regularization weight applied via the mean k-NN self-distance.
        sampling: when True, return the raw per-point distance tensor
            instead of the regularized scalar loss.
    """
    if point_cloud.shape != target_points.shape:
        raise NotImplementedError
    device = point_cloud.device
    B, C, N = point_cloud.shape
    knn_points_matrix = knn_point_sampling(point_cloud, target_points, k)  # (B, C, N, k)
    # Local centroid and centered neighbourhood.
    p_hat_matrix = torch.mean(knn_points_matrix, dim=3)
    p_matrix = (knn_points_matrix - p_hat_matrix.unsqueeze(dim=3))
    # Per-point 3x3 covariance of the neighbourhood.
    M_matrix = torch.matmul(p_matrix.permute(0, 2, 1, 3), p_matrix.permute(0, 2, 3, 1)) / k
    U_matrix, S_matrix, V_matrix = torch.svd(M_matrix)
    # Normal = singular vector of the smallest singular value.
    norm_matrix = U_matrix[:, :, :, 2]
    # Pick one random neighbour per point as the in-plane reference.
    # NOTE(review): this draws a full-size random tensor and keeps one
    # column — wasteful but functional.
    random_point_matrix = torch.gather(knn_points_matrix, 3,
                                       torch.randint(k, knn_points_matrix.shape)[:, :, :, 0:1].to(device)).squeeze()
    # |normal . (target - reference)| = distance to the fitted plane.
    tangent_dist_matrix = torch.abs(torch.matmul(norm_matrix.unsqueeze(dim=2),
                                                 (target_points - random_point_matrix).permute(0, 2, 1).unsqueeze(3)))
    # regularize
    if sampling:
        return tangent_dist_matrix
    else:
        # Mean k-NN spacing of the cloud itself, clamped away from zero,
        # is used as a density regularizer.
        point_cloud_matrix = torch.stack([point_cloud] * N, dim=2)
        points_matrix = torch.stack([point_cloud] * N, dim=3)
        self_dist_matrix = (point_cloud_matrix - points_matrix).pow(2).sum(dim=1).sqrt()
        knn_matrix = torch.topk(self_dist_matrix, k, largest=False, sorted=True)
        reg = torch.clamp(torch.mean(knn_matrix[0]), min=0.1)
        loss = tangent_dist_matrix.mean() + (1 / reg) * p
        return loss
def point_cloud_distance_cp(point_cloud, target, k=3, sampling=False):
    """Tangent-plane distance using a cross-product plane fit (cheaper than
    the SVD variant).

    For each target point, takes its 3 nearest cloud points, forms the plane
    normal from the cross product of two edge vectors, and measures the
    absolute distance of the target to that plane.

    Args:
        point_cloud / target: (B, C, N) tensors of identical shape (enforced).
        k: neighbourhood size; must be >= 3 since neighbours 0..2 are used.
        sampling: when True, return the per-point distance tensor rather
            than its mean.
    """
    if point_cloud.shape != target.shape:
        raise NotImplementedError
    knn_points_matrix = knn_point_sampling(point_cloud, target, k)
    # Cross product
    ref_point = knn_points_matrix[:, :, :, 0]  # nearest neighbour anchors the plane
    cross_norm_matrix = torch.cross((ref_point - knn_points_matrix[:, :, :, 1]).transpose(2, 1),
                                    (ref_point - knn_points_matrix[:, :, :, 2]).transpose(2, 1))
    # Unit-normalize the normals (axis= is torch's numpy-style alias for dim=).
    # NOTE(review): degenerate (collinear) neighbourhoods give a zero cross
    # product and hence NaNs here — confirm inputs avoid that.
    normalize_norm = torch.mul(cross_norm_matrix,
                               1 / torch.stack([cross_norm_matrix.pow(2).sum(axis=2).sqrt()] * 3, dim=2))
    # |unit normal . (target - reference)| = point-to-plane distance.
    cross_tangent_dist_matrix = torch.abs(torch.matmul(normalize_norm.unsqueeze(dim=2),
                                                       (target - ref_point).transpose(2, 1).unsqueeze(dim=3)))
    if sampling:
        return cross_tangent_dist_matrix
    else:
        loss2 = cross_tangent_dist_matrix.mean()
        return loss2
def point_cloud_segmentation_tangent_loss(point_clouds, pred, knn_num, device):
    """Soft-segmentation tangent-plane loss.

    For every segmentation class, takes the knn_num points with the highest
    softmax weight, fits a plane via SVD of the weighted covariance, and
    penalizes the mean absolute point-to-plane distance.

    Args:
        point_clouds: (B, 3, N) input clouds.
        pred: (B, num_seg_class, N) raw segmentation logits.
        knn_num: number of top-weighted points kept per class.
        device: device for the accumulated mean-point tensors.

    Returns:
        (tangent_loss, part_mean_points, weighted_part_mean_points) where the
        mean tensors are (B, num_seg_class, 3).

    Raises:
        RuntimeError: if the SVD of a class covariance fails to converge.
    """
    tangent_loss_sum = 0.0
    num_seg_class = pred.size(1)
    # Empty accumulators; torch.cat treats 1-D empty tensors as neutral.
    part_mean_points = torch.zeros([0], dtype=torch.float).to(device)
    weighted_part_mean_points = torch.zeros([0], dtype=torch.float).to(device)
    for seg_class in range(num_seg_class):
        weight = torch.softmax(pred, dim=1)[:, seg_class, :]
        weight, weight_index = torch.topk(weight, k=knn_num)
        # Gather the top-weighted points for this class, (B, 3, knn_num).
        part_pc = torch.gather(point_clouds, 2, torch.stack([weight_index] * 3, dim=1))
        weight_part_pc = part_pc * torch.stack([weight] * 3, dim=1)
        # Weighted, centered coordinates for the covariance fit.
        p_matrix = (part_pc - part_pc.mean(dim=2).unsqueeze(-1)) * torch.stack([weight] * 3, dim=1)
        # NOTE(review): the hard-coded :17 slice caps the covariance estimate
        # at 17 points while still dividing by knn_num — presumably a leftover
        # experiment; confirm before changing.
        cov_matrix = torch.matmul(p_matrix[:, :, :17], p_matrix[:, :, :17].transpose(2, 1)) / knn_num
        # torch.svd can fail on degenerate covariances. The original code
        # dropped into an ipdb debugger here (bare except + set_trace), which
        # hangs any non-interactive run; fail loudly with context instead.
        try:
            U, S, V = torch.svd(cov_matrix.cpu())
        except RuntimeError as exc:
            raise RuntimeError(
                'SVD failed on covariance matrix for segmentation class {}'.format(seg_class)
            ) from exc
        # Plane normal = least-significant singular vector, broadcast per point.
        U_matrix = torch.stack([U[:, :, 2].to(device)] * knn_num, dim=1).unsqueeze(2)
        tangent_dist = torch.abs(torch.matmul(U_matrix, p_matrix.transpose(2, 1).unsqueeze(-1)))
        tangent_loss_sum += tangent_dist.mean()
        part_mean_points = torch.cat([part_mean_points, part_pc.mean(dim=2).unsqueeze(1)], dim=1)
        weighted_part_mean_points = torch.cat([weighted_part_mean_points, weight_part_pc.mean(dim=2).unsqueeze(1)],
                                              dim=1)
    tangent_loss = tangent_loss_sum / num_seg_class
    return tangent_loss, part_mean_points, weighted_part_mean_points
def point_cloud_segmentation_std_loss(point_clouds, part_mean_points, pred):
    """Weighted spread of each soft segment around its class mean point.

    For each class, computes the softmax-weighted mean distance of every
    point to that class's mean point, normalized by the class's mean weight,
    and returns the average over classes.
    """
    num_classes = pred.size(1)
    weights = torch.softmax(pred, dim=1)
    # Per-class normalized weighted spread, summed then averaged over classes.
    total = sum(
        ((weights[:, cls, :]
          * (point_clouds - part_mean_points[:, cls, :].unsqueeze(-1)).pow(2).sum(dim=1).sqrt()).mean(dim=1)
         / weights[:, cls, :].mean(dim=1)).mean()
        for cls in range(num_classes)
    )
    return total / num_classes
def point_cloud_segmentation_contrastive_loss(point_clouds, pred, theta_regressor, emd_loss, device):
    """Contrastive loss over soft segments matched across clouds via EMD.

    Aligns every cloud to the first cloud in the batch with a regressed
    z-rotation, matches aligned vs. batch-shuffled clouds with the supplied
    EMD module, then encourages matched points of the *same* segment class to
    be close (pos_loss) and of different classes to be far (neg_loss).
    Non-deterministic (unseeded torch.randperm).

    Args:
        point_clouds: (B, 3, N) clouds.
        pred: (B, num_seg_class, N) segmentation logits.
        theta_regressor: module mapping concatenated cloud pairs to a rotation angle.
        emd_loss: EMD module returning (per-point loss, matching indices).
        device: device for the shuffled tensors.

    Returns:
        (pos_loss, neg_loss) averaged over class (pairs).
    """
    num_seg_class = pred.size(1)
    random_idx = torch.randperm(point_clouds.size(0)).to(device)
    # Every cloud is aligned against batch element 0.
    target_point_cloud = torch.stack([point_clouds[0, :, :]] * point_clouds.size(0), dim=0)
    theta = theta_regressor(torch.cat([point_clouds, target_point_cloud], dim=1))
    aligned_point_clouds = pcu.rotate_shape(point_clouds, 'z', theta)
    shuffled_point_clouds = aligned_point_clouds[random_idx, :, :].to(device)
    # NOTE(review): the emd_loss *module* parameter is rebound here to its own
    # per-point output tensor; intentional but easy to misread.
    emd_loss, emd_matching_idx = emd_loss(aligned_point_clouds.permute(0, 2, 1),
                                          shuffled_point_clouds.permute(0, 2, 1), 0.05, 3000)
    emd_matching_idx = emd_matching_idx.type(torch.LongTensor).to(device)
    pos_loss = 0.0
    neg_loss = 0.0
    for seg_class in range(num_seg_class):
        softmax_weight = torch.softmax(pred, dim=1)[:, seg_class, :]
        for sc in range(num_seg_class):
            # Class-sc weights of the shuffled partner, re-indexed through the
            # EMD matching so they correspond point-to-point.
            shuffled_weight = torch.softmax(pred, dim=1)[:, sc, :][random_idx, :]
            shuffled_weight = torch.gather(shuffled_weight, 1, emd_matching_idx)
            max_weight = torch.cat([softmax_weight.unsqueeze(0), shuffled_weight.unsqueeze(0)],
                                   dim=0).max(dim=0)[0]
            # Only the 50 most confidently assigned points contribute.
            max_weight, seg_class_idx = torch.topk(max_weight, 50, dim=1)
            if sc == seg_class:
                pos_loss += (max_weight * torch.gather(emd_loss, 1, seg_class_idx)).mean()
            else:
                neg_loss += (max_weight * torch.gather(emd_loss, 1, seg_class_idx)).mean()
    # Normalize by the number of same-class / cross-class pairs respectively.
    pos_loss = pos_loss / num_seg_class
    neg_loss = neg_loss / (num_seg_class * num_seg_class - num_seg_class)
    return pos_loss, neg_loss
def segmentation_cosine_similarity_contrastive_loss(point_clouds, pred, sim_feature_extractor, device, tau=1.0):
    """Contrastive cosine-similarity loss over per-segment features.

    Extracts one feature per predicted segment, then compares each segment's
    feature against the same segment of a shuffled batch (positive) and a
    derangement-permuted segment of the shuffled batch (negative), scaled by
    temperature tau. Non-deterministic (unseeded torch.randperm).

    Args:
        point_clouds: (B, 3, N) clouds.
        pred: (B, num_seg_class, N) segmentation logits.
        sim_feature_extractor: module mapping (clouds, segment mask) to a feature.
        device: device for the accumulators.
        tau: similarity temperature.

    Returns:
        (mean positive similarity, mean negative similarity), both / tau.
    """
    cosine_similarity_loss = torch.nn.CosineSimilarity(dim=-1)
    seg_mask = torch.zeros([0], dtype=torch.bool).to(device)
    sim_feature = torch.zeros([0], dtype=torch.float).to(device)
    segmentation_label = torch.max(pred, dim=1)[1]  # (B, N) hard class per point
    num_seg_class = pred.size(1)
    softmax_layer = torch.nn.Softmax(dim=1)
    pred = softmax_layer(pred)
    for seg_class in range(num_seg_class):
        # Boolean mask of points assigned to this class, and its feature.
        seg_class_mask = (segmentation_label == seg_class).unsqueeze(1)
        seg_class_sim_feature = sim_feature_extractor(point_clouds, seg_class_mask).unsqueeze(1)
        seg_mask = torch.cat([seg_mask, seg_class_mask], dim=1)
        sim_feature = torch.cat([sim_feature, seg_class_sim_feature], dim=1)
    # Rejection-sample a derangement (no class maps to itself).
    # NOTE(review): this loops forever when num_seg_class == 1, since the only
    # permutation is the identity — confirm callers always pass >= 2 classes.
    while 1:
        rand_seg_idx = torch.randperm(num_seg_class)
        if torch.sum(rand_seg_idx == torch.tensor(list(range(num_seg_class)))) == 0: break
    rand_batch_idx = torch.randperm(pred.size(0))
    rand_sim_feature = sim_feature[rand_batch_idx]
    # Positive: same class across shuffled batch; negative: deranged classes.
    pos_loss = cosine_similarity_loss(sim_feature, rand_sim_feature) / tau
    neg_loss = cosine_similarity_loss(sim_feature, rand_sim_feature[:, rand_seg_idx, :]) / tau
    return pos_loss.mean(), neg_loss.mean()
def cosine_sim_loss(pred, labels, criterion, tau):
    """InfoNCE-style loss over per-sample embeddings.

    For each batch element, samples one positive (same label) and up to
    B/4 - 1 negatives (different labels) from a random batch permutation,
    builds a cosine-similarity matrix against them, and applies `criterion`
    (expected: cross-entropy with the positive at index 0).
    Non-deterministic (unseeded torch.randperm).

    Returns:
        (mean positive similarity, mean negative similarity, loss).
    """
    B = pred.size(0)
    device = pred.device
    pos_pred = torch.zeros([0], dtype=torch.float).to(device)
    neg_pred = torch.zeros([0], dtype=torch.float).to(device)
    for b in range(B):
        rand_idx = torch.randperm(B).to(device)
        # First same-label match in the permutation (may be b itself).
        pos_idx = (labels[rand_idx] == labels[b]).nonzero()[0]
        neg_idx = (labels[rand_idx] != labels[b]).nonzero()[:int(B / 4) - 1].squeeze(-1)
        pos_pred = torch.cat([pos_pred, pred[rand_idx, :][pos_idx, :]], dim=0)
        # NOTE(review): the bare continue silently drops elements whose
        # negative count doesn't line up for the cat, which can desync
        # pos_pred/neg_pred lengths — confirm this is acceptable upstream.
        try:
            neg_pred = torch.cat([neg_pred, pred[rand_idx, :][neg_idx, :].unsqueeze(0)], dim=0)
        except:
            continue
    # Column 0 is the positive; remaining columns are negatives.
    sample_pred = torch.cat([pos_pred.unsqueeze(dim=1), neg_pred], dim=1)
    similarity_matrix = torch.nn.CosineSimilarity(dim=-1)(torch.stack([pred] * sample_pred.size(1), dim=1),
                                                          sample_pred) / tau
    # Target class 0 == "the positive is the most similar".
    sim_labels = torch.zeros(similarity_matrix.size(0), dtype=torch.long).to(device)
    loss = criterion(similarity_matrix, sim_labels)
    positives = similarity_matrix[:, 0].mean()
    negatives = similarity_matrix[:, 1:].mean()
    return positives, negatives, loss
def grid_colormap(point_grid, color, save_dir):
    """Scatter a (B, 3, M) point grid colored by `color` and save it to save_dir.

    NOTE(review): switches matplotlib to the interactive TkAgg backend, unlike
    the other helpers in this file which use 'agg' — confirm a display is
    available where this runs.
    """
    matplotlib.use('TkAgg')
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    point_grid = point_grid.cpu()
    x = point_grid[:, 0, :]
    y = point_grid[:, 1, :]
    z = point_grid[:, 2, :]
    c = color.cpu()
    # plt.hot() both sets and returns the 'hot' colormap.
    img = ax.scatter(x, y, z, s=1.5, c=c, cmap=plt.hot())
    fig.colorbar(img)
    plt.savefig(save_dir)
def optimize_visualize(point_cloud, encoder, decoder, learning_rate, num_epoch, knn_num, save_dir, batch_idx=0):
    """Gradient-descend a random point set z so the decoder's implicit distance
    field drives it onto the target cloud's surface, saving snapshots every
    100 epochs.

    Bug fix: the learning-rate annealing previously tested the *widest*
    threshold first (`loss < 0.05` before 0.01/0.001), so the tighter branches
    were unreachable; the checks now run tightest-first.

    Args:
        point_cloud: (B, C, N) target cloud (expected on a CUDA device).
        encoder/decoder: implicit-field modules.
        learning_rate: initial step size (annealed as the loss shrinks).
        num_epoch: optimization steps.
        knn_num: neighbourhood size for knn_point_sampling.
        save_dir: root directory for snapshot images.
    """
    B, C, N = point_cloud.shape
    device = point_cloud.device
    # NOTE(review): .to(device) after requires_grad=True only keeps z a leaf
    # (with a populated .grad) when the tensor is already on `device` — confirm
    # callers always pass CUDA tensors here.
    z = Variable(torch.randn(B, C, N).cuda(), requires_grad=True).to(device)
    save_dir = os.path.join(save_dir, 'optimize_visualize')
    for epoch in range(num_epoch):
        knn_sampling = knn_point_sampling(point_cloud, z, knn_num)
        source_latent_vector = encoder(knn_sampling)
        loss = torch.abs(decoder(z, source_latent_vector)).mean()
        loss.backward()
        # Anneal the step size; tightest bound must be tested first.
        if loss < 0.001:
            learning_rate = 0.01
        elif loss < 0.01:
            learning_rate = 0.1
        elif loss < 0.05:
            learning_rate = 1
        with torch.no_grad():
            # Project the gradient onto each point's radial direction and
            # step only along that component.
            my_vector_size = torch.stack([z.pow(2).sum(axis=1).sqrt()] * 3, dim=1)
            my_norm = z / my_vector_size
            my_grad = (z.grad * my_norm).sum(axis=1)
            my_grad = my_norm * torch.stack([my_grad] * 3, dim=1)
            z -= my_grad * learning_rate
            z.grad.zero_()
        if epoch % 100 == 0:
            image_save(z.detach().cpu(), save_dir, 'test', 'epoch_{}'.format(epoch), 'epoch : {}'.format(epoch),
                       batch_idx=batch_idx)
def grid_visualize(point_clouds, encoder, decoder, grid_scale, threshold, knn_num, save_dir, batch_idx=0):
    """Extract and save the decoder's implicit surface on a regular grid.

    Evaluates the implicit field on a grid_scale^3 lattice over [-1, 1]^3
    (in chunks of 100 points to bound memory), keeps points below
    `threshold`, resamples the result to N points, and saves a rendering
    per batch element.

    NOTE(review): the chunk loop uses int(grid_scale**3 / 100), so up to 99
    trailing grid points are skipped when grid_scale^3 is not a multiple of
    100 — confirm this truncation is acceptable.
    """
    B, C, N = point_clouds.shape
    device = point_clouds.device
    with torch.no_grad():
        scale = torch.linspace(-1.0, 1.0, grid_scale)
        # Full lattice of grid_scale^3 query points, replicated per batch element.
        point_grid = torch.stack([torch.cartesian_prod(scale, scale, scale).transpose(1, 0)] * B, dim=0).to(device)
        partial_size = 100
        test_pred = torch.Tensor([]).to(device)
        for i in range(int((grid_scale ** 3) / partial_size)):
            partial_point_grid = point_grid[:, :, i * partial_size:(i + 1) * partial_size]
            temp_latent_vector = encoder(knn_point_sampling(point_clouds, partial_point_grid, knn_num))
            test_pred = torch.cat([test_pred, decoder(partial_point_grid, temp_latent_vector).squeeze(dim=-1)
                                   ], dim=2)
        for b in range(B):
            test_pred_sample = test_pred[b, :, :]
            # Keep grid points whose predicted distance is inside the surface band.
            masked_index = (test_pred_sample.squeeze() < threshold).nonzero()
            pred_pc = torch.gather(point_grid[b, :, :], 1, torch.stack([masked_index.squeeze()] * 3, dim=0)) \
                .unsqueeze(dim=0)
            # Resample to exactly N points: subsample if too many, jitter-pad if too few.
            if pred_pc.size(2) > N:
                pred_pc, _ = pcu.random_point_sample(pred_pc, N)
            elif pred_pc.size(2) < N:
                new_pred_pc = pred_pc
                while new_pred_pc.size(2) < N:
                    new_pred_pc = torch.cat([new_pred_pc, pcu.jitter(pred_pc)], dim=2)
                pred_pc, _ = pcu.random_point_sample(new_pred_pc, N)
            # pcu.visualize(point_clouds)
            # pcu.visualize(pred_pc)
            image_save(pred_pc.detach().cpu(), save_dir, 'grid_visualize', 'prediction', 'predict_pc',
                       batch_idx=batch_idx * B + b, folder_numbering=False)
def visualize_animation(point_cloud):
    """Open an interactive Open3D window showing a single (1, 3, N) cloud
    slowly rotating. Blocks until the window is closed; requires a display.

    Raises:
        NotImplementedError: if the batch dimension is not exactly 1.
    """
    if point_cloud.size(0) != 1:
        raise NotImplementedError
    pcd = open3d.geometry.PointCloud()
    # Swap the Y and Z axes so the cloud is upright in the Open3D viewer.
    permute = [0, 2, 1]
    point_cloud = point_cloud[:, permute, :]
    pcd.points = open3d.utility.Vector3dVector(np.array(point_cloud.squeeze(axis=0).permute(1, 0).cpu()))
    # def capture_image(vis):
    #     image = vis.capture_screen_float_buffer()
    #     plt.imsave(os.path.join(save_dir, '{}_{}.png'.format(save_name, len(os.listdir(save_dir)))),
    #                np.asarray(image))
    #     return False
    def rotate_view(vis):
        # Per-frame callback: nudge the view 10 units horizontally.
        ctr = vis.get_view_control()
        ctr.rotate(10.0, 0.0)
        # capture_image(vis)
        return False
    open3d.visualization.draw_geometries_with_animation_callback([pcd], rotate_view)
def save_gif(point_cloud, save_name, save_path, save_num=1):
    """Save a rotating-view GIF (20 frames, 18 degrees apart, 7 fps) for each
    cloud in the batch, cleaning up the intermediate PNG frames afterwards.

    NOTE(review): save_path must already exist (no makedirs here) — confirm
    callers create it.

    Raises:
        NotImplementedError: if the batch holds more than save_num clouds.
    """
    if point_cloud.size(0) > save_num:
        raise NotImplementedError
    for k in range(point_cloud.size(0)):
        img_list = []
        img_path_list = []
        point_cloud_sample = point_cloud[k, :, :].unsqueeze(0)
        for i in range(20):
            point_cloud_sample = point_cloud_sample.cpu()
            fig = plot_3d_point_cloud(point_cloud_sample[0][0], point_cloud_sample[0][1], point_cloud_sample[0][2],
                                      in_u_sphere=True, show=False, show_axis=False)
            # Rotate 18 degrees about z between frames (20 * 18 = full turn).
            point_cloud_sample = pcu.rotate_shape(point_cloud_sample, 'z', rotation_angle=18 * np.pi / 180)
            img_path = os.path.join(save_path, '{}.png'.format(i))
            fig.savefig(img_path)
            img_path_list.append(img_path)
            img_list.append(imageio.imread(img_path))
            plt.close(fig)
        imageio.mimsave(os.path.join(save_path, '{}_{}.gif'.format(save_name, str(k))), img_list, fps=7)
        # Remove the temporary per-frame PNGs.
        for img_file in img_path_list:
            if os.path.exists(img_file):
                os.remove(img_file)
def save_confusion_matrix(pred_list, labels_list, num_class, save_path, save_name, cmap=None, title=None,
                          normalize=True):
    """Compute a confusion matrix from prediction/label tensors and save it as
    an annotated heatmap PNG under save_path/save_name.png.

    Args:
        pred_list / labels_list: 1-D tensors of predicted / true class ids.
        num_class: number of classes (used only for tick placement).
        normalize: when True, rows are normalized to fractions before display.
    """
    plt.switch_backend('agg')  # headless rendering
    cm = confusion_matrix(labels_list.cpu(), pred_list.cpu())
    # Overall accuracy = trace / total; computed before any normalization.
    accuracy = np.trace(cm) / float(np.sum(cm))
    mis_class = 1 - accuracy
    if cmap is None:
        cmap = plt.get_cmap('Blues')
    if title is None:
        title = 'Confusion matrix'
    plt.figure(figsize=(8, 6))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    plt.xticks(np.arange(num_class))
    plt.yticks(np.arange(num_class))
    if normalize:
        # Row-normalize so each true class sums to 1.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    # Cells brighter than thresh get black text, darker cells white.
    thresh = cm.max() / 1.5 if normalize else cm.max() / 2
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        if normalize:
            plt.text(j, i, "{:0.4f}".format(cm[i, j]),
                     horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")
        else:
            plt.text(j, i, "{:,}".format(cm[i, j]),
                     horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")
    plt.ylabel('True label')
    plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, mis_class))
    os.makedirs(save_path, exist_ok=True)
    plt.savefig(os.path.join(save_path, '{}.png'.format(save_name)))
    plt.close()
def save_cos_sim_confusion_matrix(sim_confusion_matrix, num_class, save_path, save_name, cmap=None, title=None,
                                  normalize=False):
    """Save a precomputed class-vs-class cosine-similarity matrix as an
    annotated heatmap PNG under save_path/save_name.png.

    Args:
        sim_confusion_matrix: (num_class, num_class)-indexable tensor of
            similarity values.
        normalize: when True, rows are normalized before display.
    """
    plt.switch_backend('agg')  # headless rendering
    if cmap is None:
        cmap = plt.get_cmap('Blues')
    if title is None:
        title = 'Confusion matrix'
    plt.figure(figsize=(8, 6))
    plt.imshow(sim_confusion_matrix, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    plt.xticks(np.arange(num_class))
    plt.yticks(np.arange(num_class))
    if normalize:
        # Row-normalize the tensor (cast to float to avoid integer division).
        sim_confusion_matrix = sim_confusion_matrix.type(torch.float) / sim_confusion_matrix.sum(axis=1)[:, np.newaxis]
    # Cells brighter than thresh get black text, darker cells white.
    thresh = sim_confusion_matrix.max() / 1.5 if normalize else sim_confusion_matrix.max() / 2
    for i, j in itertools.product(range(sim_confusion_matrix.shape[0]), range(sim_confusion_matrix.shape[1])):
        if normalize:
            plt.text(j, i, "{:0.4f}".format(sim_confusion_matrix[i, j]),
                     horizontalalignment="center",
                     color="white" if sim_confusion_matrix[i, j] > thresh else "black")
        else:
            plt.text(j, i, "{:0.4f}".format(sim_confusion_matrix[i, j]),
                     horizontalalignment="center",
                     color="white" if sim_confusion_matrix[i, j] > thresh else "black")
    plt.ylabel('True label')
    plt.xlabel('Predicted label\nsimilarity value')
    os.makedirs(save_path, exist_ok=True)
    plt.savefig(os.path.join(save_path, '{}.png'.format(save_name)))
    plt.close()
| 26,845 | 9,925 |
import os
import json
from conf.settings import CONFIG_PATH, HOME_DIR
class Config(object):
    """JSON-file-backed settings store.

    Reads and writes the file at conf.settings.CONFIG_PATH; when the file does
    not exist, `load` returns the built-in defaults.

    Idiom cleanup: `os.path.exists(...) is False` -> `not ...`,
    `json.loads(f.read())` -> `json.load(f)`, manual merge loop ->
    `dict.update`, and `f.write(json.dumps(...))` -> `json.dump`.
    """

    def __init__(self) -> None:
        super().__init__()
        # Defaults used when no config file has been written yet.
        self.default_config = {
            'save_path': os.path.join(HOME_DIR, 'media'),
            'insecure': 0,
            'merge': 0,
            'caption': 0
        }

    def load(self):
        """Return the persisted config dict, or the defaults if absent.

        NOTE(review): the defaults dict itself is returned (not a copy), so a
        subsequent `save` mutates `self.default_config` — same aliasing as the
        original code; confirm before changing.
        """
        if not os.path.exists(CONFIG_PATH):
            return self.default_config
        with open(CONFIG_PATH, 'r', encoding='utf-8') as f:
            return json.load(f)

    def save(self, **kwargs):
        """Merge the keyword arguments into the stored config and persist it."""
        config = self.load()
        config.update(kwargs)
        with open(CONFIG_PATH, 'w', encoding='utf-8') as f:
            json.dump(config, f, ensure_ascii=False, indent=4)
config = Config() | 811 | 261 |
import tornado.ioloop
import tornado.web
from twittercomments.handlers.powhandler import PowHandler
from twittercomments.application import app
from twittercomments.as_dash import dispatcher
from twittercomments.models.tinydb.tweet import Tweet
#
# you can use regex in the routes as well:
# (r"/([^/]+)/(.+)", ObjectHandler),
# any regex goes. any group () will be handed to the handler
#
# or werkzeug like routes..
#
@app.add_route("/", pos=1, dispatch={ "get" : "_get"})
class IndexdHandler(PowHandler):
    """Landing-page handler registered at "/"; GET requests are dispatched to
    _get via the route's dispatch mapping and redirect to the /dash app."""
    def _get(self, index=None):
        # `index` would be filled from a regex capture group, if any.
        print(" Calling IndexHandler from handlers/shorties.py: parameter index: " + str(index))
        ##external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
        #retval = dispatcher(self.request, username="fake", session_id=1234)
        #self.render("index.tmpl", dash_block=retval)
        self.redirect("/dash")
# this will be the last route since it has the lowest pos.
@app.add_route(".*", pos=0)
class ErrorHandler(PowHandler):
    """Catch-all fallback route (matches anything, lowest pos so it is tried
    last) that renders the 404 template."""
    def get(self):
        return self.error( template="404.tmpl", http_code=404 )
| 1,090 | 353 |
'''
execution module for Nexus 3 security privileges
:version: v0.2.1
:configuration: In order to connect to Nexus 3, certain configuration is required
in /etc/salt/minion on the relevant minions.
Example:
nexus3:
hostname: '127.0.0.1:8081'
username: 'admin'
password: 'admin123'
'''
import json
import logging
import nexus3
log = logging.getLogger(__name__)
# Salt outputter hints: render these functions' returns as highstate output.
__outputter__ = {
    'sls': 'highstate',
    'apply_': 'highstate',
    'highstate': 'highstate',
}
# Base path of the Nexus 3 REST endpoint for security privileges.
privileges_path = 'v1/security/privileges'
def create(name,
           type,
           actions=None,
           contentSelector=None,
           description='New Nexus privilege',
           domain=None,
           format=None,
           pattern=None,
           repository=None,
           scriptName=None):
    '''
    Create a Nexus 3 security privilege.

    name (str):
        privilege name

    type (str):
        privilege type [application|repository-admin|respository-content-selector|repository-view|script|wildcard]

    actions (list):
        list of actions [ADD|ALL|BROWSE|CREATE|DELETE|EDIT|READ|UPDATE] (Default: [])

    contentSelector (str):
        name of content selector; required for the repository-content-selector
        type, and the content selector must exist before assigning privileges
        (Default: None)

    description (str):
        description of privilege (Default: 'New Nexus privilege')

    domain (str):
        domain of privilege [roles|scripts|search|selectors|settings|ssl-truststore|tasks|users|userschangepw];
        required for the application type (Default: None)

    format (str):
        repository format [bower|cocoapads|conan|docker|etc.]; required for the
        repository-admin, repository-content-selector, and repository-view types
        (Default: None)

    pattern (regex):
        regex pattern to group other privileges; required for the wildcard type
        (Default: None)

    repository (str):
        repository name; required for the repository-admin,
        repository-content-selector, and repository-view types (Default: None)

    scriptName (str):
        script name; required for the script type (Default: None)

    Bug fixes vs. the original implementation:
      - the `repository` parameter was rebound to a local dict before its
        None-check, so missing-repository validation never fired;
      - the script branch matched type 'scripts' (the API type is 'script')
        and checked a dict (always truthy) instead of scriptName;
      - the mutable default `actions=[]` is replaced with None.

    CLI Example::

    .. code-block:: bash

        salt myminion nexus3_privileges.create name=nx-userschangepw actions="['ADD','READ']" description='Change password permission' domain=userschangepw type=application
        salt myminion nexus3_privileges.create name=nx-repository-view-nuget-nuget-hosted-browse actions=['BROWSE'] description='Browse privilege for nuget-hosted repository views' format=nuget repository=nuget-hosted type=repository-view
    '''
    ret = {
        'privilege': {}
    }
    if actions is None:
        actions = []
    payload = {
        'name': name,
        'description': description,
        'actions': actions,
    }
    if type == 'application':
        if domain is None:
            ret['comment'] = 'domain cannot be None for type {}'.format(type)
            return ret
        payload['domain'] = domain
    elif type in ['repository-admin', 'repository-view']:
        if format is None or repository is None:
            ret['comment'] = 'format and repository cannot be None for type {}'.format(type)
            return ret
        payload['format'] = format
        payload['repository'] = repository
    elif type == 'repository-content-selector':
        if format is None or repository is None or contentSelector is None:
            ret['comment'] = 'format, contentSelector, and repository cannot be None for type {}'.format(type)
            return ret
        payload['format'] = format
        payload['repository'] = repository
        payload['contentSelector'] = contentSelector
    elif type == 'script':
        if scriptName is None:
            ret['comment'] = 'scriptName cannot be None for type {}'.format(type)
            return ret
        payload['scriptName'] = scriptName
    elif type == 'wildcard':
        if pattern is None:
            ret['comment'] = 'pattern cannot be None for type {}'.format(type)
            return ret
        # Wildcard privileges carry no actions field.
        payload = {
            'name': name,
            'description': description,
            'pattern': pattern
        }
    # The REST endpoint is segmented by privilege type.
    path = privileges_path + '/' + type
    nc = nexus3.NexusClient()
    resp = nc.post(path, payload)
    if resp['status'] == 201:
        ret['comment'] = 'privilege {} created.'.format(name)
        ret['privilege'] = describe(name)['privilege']
    else:
        ret['comment'] = 'could not create privilege {}.'.format(name)
        ret['error'] = {
            'code': resp['status'],
            'msg': resp['body']
        }
    return ret
def delete(name):
    '''
    Delete a security privilege by name.

    name (str):
        privilege name

    CLI Example::

    .. code-block:: bash

        salt myminion nexus3_privileges.delete nx-analytics-all
    '''
    ret = {}
    client = nexus3.NexusClient()
    response = client.delete(privileges_path + '/' + name)
    # The API answers 204 No Content on a successful delete.
    if response['status'] != 204:
        ret['comment'] = 'could not delete privilege {}.'.format(name)
        ret['error'] = {
            'code': response['status'],
            'msg': response['body']
        }
        return ret
    ret['comment'] = 'privilege {} delete.'.format(name)
    return ret
def describe(name):
    '''
    Retrieve the definition of a single security privilege.

    name (str):
        privilege name

    CLI Example::

    .. code-block:: bash

        salt myminion nexus3_privileges.describe nx-analytics-all
    '''
    ret = {
        'privilege': {},
    }
    client = nexus3.NexusClient()
    response = client.get(privileges_path + '/' + name)
    if response['status'] == 200:
        ret['privilege'] = json.loads(response['body'])
        return ret
    ret['comment'] = 'could not retrieve privilege {}.'.format(name)
    ret['error'] = {
        'code': response['status'],
        'msg': response['body']
    }
    return ret
def list_all():
    '''
    List every security privilege known to the server.

    CLI Example::

    .. code-block:: bash

        salt myminion nexus3_privileges.list_all
    '''
    ret = {
        'privileges': {},
    }
    client = nexus3.NexusClient()
    response = client.get(privileges_path)
    if response['status'] == 200:
        ret['privileges'] = json.loads(response['body'])
        return ret
    ret['comment'] = 'could not retrieve available privileges.'
    ret['error'] = {
        'code': response['status'],
        'msg': response['body']
    }
    return ret
def update(name,
           actions=None,
           contentSelector=None,
           description=None,
           domain=None,
           format=None,
           pattern=None,
           repository=None,
           scriptName=None):
    '''
    Update an existing security privilege. The current definition is fetched
    with `describe` and only the fields passed as non-None keyword arguments
    (and present in the existing definition) are changed.

    name (str):
        privilege name

    actions (list):
        list of actions [ADD|ALL|CREATE|DELETE|EDIT|READ|UPDATE] (Default: None)

    contentSelector (str):
        name of content selector; must exist before assigning privileges
        (Default: None)

    description (str):
        description of privilege (Default: None)

    domain (str):
        domain of privilege [roles|scripts|search|selectors|settings|ssl-truststore|tasks|users|userschangepw];
        only meaningful for application privileges (Default: None)

    format (str):
        repository format [bower|cocoapads|conan|docker|etc.] (Default: None)

    pattern (regex):
        regex pattern to group other privileges; wildcard type only (Default: None)

    repository (str):
        repository name (Default: None)

    scriptName (str):
        script name (Default: None)

    Bug fix: the contentSelector update previously wrote the misspelled key
    'contentSelctor', so content selectors could never actually be changed.

    CLI Example::

    .. code-block:: bash

        salt myminion nexus3_privileges.update name=testing actions="['ADD','READ']" description='Change password permission' domain=userschangepw type=application
    '''
    ret = {
        'privilege': {}
    }
    priv_description = describe(name)
    if 'error' in priv_description.keys():
        ret['comment'] = 'failed to update privilege.'
        ret['error'] = priv_description['error']
        return ret
    meta = priv_description['privilege']
    # The PUT endpoint is segmented by the privilege's existing type.
    path = privileges_path + '/' + meta['type'] + '/' + name
    # Only overwrite fields the caller supplied, and only when the field is
    # valid for this privilege type (i.e. already present in the definition).
    if actions is not None:
        meta['actions'] = actions
    if contentSelector is not None and 'contentSelector' in meta.keys():
        meta['contentSelector'] = contentSelector
    if description is not None:
        meta['description'] = description
    if domain is not None and 'domain' in meta.keys():
        meta['domain'] = domain
    if format is not None and 'format' in meta.keys():
        meta['format'] = format
    if repository is not None and 'repository' in meta.keys():
        meta['repository'] = repository
    if pattern is not None and 'pattern' in meta.keys():
        meta['pattern'] = pattern
    if scriptName is not None and 'scriptName' in meta.keys():
        meta['scriptName'] = scriptName
    nc = nexus3.NexusClient()
    resp = nc.put(path, meta)
    if resp['status'] == 204:
        ret['comment'] = 'updated privilege {}.'.format(name)
        ret['privilege'] = describe(name)['privilege']
    else:
        ret['comment'] = 'could not update privilege {}.'.format(name)
        ret['error'] = {
            'code': resp['status'],
            'msg': resp['body']
        }
    return ret
#from GoogleImageScrapper import *
import random
from GoogleImageScrapper import GoogleImageScraper
from patch import *
import os
from os import listdir
from os.path import join, isdir, isfile
from flask import Flask, render_template, send_from_directory, redirect, Response, url_for
import SVN.trunk.Code.Python.lara_utils as lara_utils
import subprocess
# to install
# pip install requests
# pip install python-docx
# pip install nltk
# compiled from lara -html folder
# define LARA
# Root of the LARA SVN checkout and derived paths.
LARA= './SVN/trunk/'
mypath = LARA + 'Content'
compiled_path = LARA + 'Content'
# NOTE(review): runs at import time -- the app fails to start if Content/ is missing.
onlydir = [f for f in listdir(mypath) if isdir(join(mypath, f))]
html_path = './SVN/trunk/compiled/'
content_loc = './SVN/trunk/Content/'
corpus_suffix = '/corpus/local_config.json'
# Command fragments used to invoke the LARA compiler via os.system.
lara_builder = LARA + 'Code/Python/lara_run.py '
lara_builder_creator = 'word_pages '
compiled_loc = LARA + 'compiled/'
folder_sufix = 'vocabpages'
index_folder_sufix='vocabpages'
multimedia_folder= "/multimedia/"
py_ver = 'python '
py_ver_w='python'
main_page_hyper = '_from_abstract_htmlvocabpages/_hyperlinked_text_.html'
hyper_page_html='vocabpages/_hyperlinked_text_.html'
pic_loc = html_path + 'pic'
slash='./'
slash_clean='/'
# Flask serves templates straight out of the compiled LARA output tree.
app = Flask(__name__, template_folder=html_path)
"""TODO extarct from meta data JASON the right language"""
# NOTE(review): hard-coded alphabet (looks Icelandic); per the TODO above it
# should come from the story metadata JSON ("JASON" is presumably JSON).
alphaBet="'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','r','s','t','u','v','x','w','z','a','á','b','d','ð','e','é','f','g','h','i','í','j','k','l','m','n','o','ó','p','r','s','t','u','ú','v','x','y','ý','þ','æ','ö','ð'"
# import folder names
# location of the dir of files to open
# name of content availble right now
# create an abstract html
# x='/home/david/LaraProject/SVN/trunk/Content/mangiri/corpus/local_config.json'
# os.system('python3.9 /home/david/LaraProject/SVN/trunk/Code/Python/lara_run.py '+'word_pages '+x)
# returning a list with files in dir_path
def filesinDir(dir_path):
    """Return the names of the regular files directly inside ``dir_path``.

    Args:
        dir_path: directory path (absolute or relative).

    Returns:
        List of file names (not full paths); an empty list when the
        directory cannot be read.
    """
    try:
        return [f for f in listdir(dir_path) if isfile(join(dir_path, f))]
    except OSError:
        # The original used a bare except whose body was a no-op string and
        # then crashed in ``finally`` on an unbound variable; fail soft.
        return []
# returning a list with sub dir to dir_path
def dirinDir(dir_path):
    """Return the names of the sub-directories directly inside ``dir_path``.

    Args:
        dir_path: directory path (absolute or relative).

    Returns:
        List of directory names (not full paths); an empty list when the
        directory cannot be read.
    """
    try:
        return [f for f in listdir(dir_path) if isdir(join(dir_path, f))]
    except OSError:
        # The original swallowed the error with a bare except and then hit
        # UnboundLocalError in ``finally``; return an empty list instead.
        return []
def getWords(dir_path, prefix_word):
    """Extract the word embedded in file names like ``word_<w>.<ext>``.

    Args:
        dir_path: directory containing the compiled word pages.
        prefix_word: substring a file name must contain (e.g. ``'word_'``).

    Returns:
        List of the ``<w>`` parts of matching file names; empty list when
        the directory cannot be read.
    """
    try:
        files_in_dir = [f for f in listdir(dir_path) if isfile(join(dir_path, f))]
    except OSError:
        # Originally a bare except followed by an UnboundLocalError in the
        # ``finally`` block; an unreadable directory now just yields no words.
        return []
    word_list = []
    for file_name in files_in_dir:
        if prefix_word in file_name:
            # 'word_cat.html' -> 'cat' (text between first '_' and next '.')
            word = file_name.split('_')[1].split('.')[0]
            word_list.append(word)
    return word_list
# routing
@app.route('/')
def home():
    """Render the landing page listing every available story."""
    page = render_template('index.html', content=onlydir,
                           link_html=main_page_hyper)
    return page
@app.route('/index/<name>', methods=['GET','POST'])
def index(name):
    """Compile the story on first visit, then redirect to its reader page."""
    already_compiled = False
    # Only recompile when no hyperlinked page exists in the content folder.
    for folder in dirinDir(content_loc):
        if name in folder and '_hyperlinked_text_.html' in filesinDir(content_loc + slash + folder):
            already_compiled = True
            break
    if not already_compiled:
        config_path = content_loc + name + corpus_suffix
        os.system("python "+ lara_builder + lara_builder_creator + config_path)
    return redirect(url_for('surf', story_name=name, name=name))
@app.route('/<path:story_name>/<name>')
def surf(name, story_name):
    """Serve a story asset, defaulting to the story's hyperlinked text page.

    Fix vs. the original: the unused local ``lema = '_lemmas'`` (dead code)
    is removed. The substring test (not endswith) is kept on purpose to
    preserve the original matching behaviour.
    """
    if ".html" in name or ".css" in name or ".js" in name:
        return render_template(slash + story_name + index_folder_sufix + slash + name)
    else:
        return render_template(slash + story_name + hyper_page_html)
@app.route('/<story_name>/multimedia/<path:filename>', methods=['GET'])
def loading_file(filename, story_name):
return send_from_directory(html_path + slash + story_name + index_folder_sufix + multimedia_folder+slash,filename)
@app.route('/game1/<story_name>', methods=['GET'])
def generate_game1(story_name):
    """Build game 1 (alphabet/word game) from the story's word pages."""
    vocab_dir = compiled_loc + story_name + folder_sufix
    story_words = getWords(vocab_dir, 'word_')
    # TODO find NOUNS
    return render_template('game1_template.html', alphabet=alphaBet,
                           alphabet_full=alphaBet, words1=story_words)
@app.route('/game1/<path:filename>', methods=['GET'])
def loading_file_pic_g1(filename):
    """Serve static assets referenced by the game 1 template."""
    return send_from_directory(html_path, filename)
@app.route('/game1/index.html', methods=['GET'])
def g1_back_home():
    """Route the game 1 'home' link back to the landing page."""
    return redirect(url_for('home'))
@app.route('/game2/<story_name>', methods=['GET'])
def generate_game2(story_name):
    """Build game 2 from four (partly overlapping) slices of the word list."""
    vocab_dir = compiled_loc + story_name + folder_sufix
    story_words = getWords(vocab_dir, 'word_')
    # TODO find NOUNS
    # NOTE: the overlapping slices below reproduce the original behaviour.
    return render_template('game2_template.html', words1=story_words[:4],
                           words2=story_words[4:8], words3=story_words[3:10],
                           words4=story_words[4:11])
@app.route('/game2/<path:filename>', methods=['GET'])
def loading_file_pic_g2(filename):
    """Serve static assets referenced by the game 2 template."""
    return send_from_directory(html_path, filename)
@app.route('/game2/index.html', methods=['GET'])
def g2_back_home():
    """Route the game 2 'home' link back to the landing page."""
    return redirect(url_for('home'))
"""GAME 3"""
@app.route('/game3/<story_name>', methods=['GET','POST'])
def generate_game3(story_name):
    """Build game 3 (alphabet/word game variant) from the story's word pages."""
    vocab_dir = compiled_loc + story_name + folder_sufix
    story_words = getWords(vocab_dir, 'word_')
    # TODO find NOUNS
    return render_template('game3_template.html', alphabet=alphaBet,
                           alphabet_full=alphaBet, words1=story_words)
@app.route('/game3/<path:filename>', methods=['GET'])
def loading_file_pic_g3(filename):
    """Serve static assets referenced by the game 3 template."""
    return send_from_directory(html_path, filename)
@app.route('/game3/index.html', methods=['GET'])
def g3_back_home():
    """Route the game 3 'home' link back to the landing page."""
    return redirect(url_for('home'))
"""IMAGE SCRAPER"""
"""
@app.route('/game3/<story_name>',methods=['GET'])
def generate_game3(story_name):
story_dir=compiled_loc + story_name+folder_sufix
words=getWords(story_dir,'word_')
#TODO find NOUNS
#get_pic(words)#image scrapper
return render_template('game3_template.html',words=words)
@app.route('/game3/<pic_name>/<path:filename>', methods=['GET'])
def loading_file_pic_g3(filename,pic_name):
return send_from_directory(pic_loc+'/'+pic_name,filename)
"""
"""pic scrapper"""
'''def get_pic(words):
webdriver_path = os.path.normpath(os.path.join(os.getcwd(), 'webdriver', webdriver_executable()))
image_path = pic_loc
# Add new search key into array ["cat","t-shirt","apple","orange","pear","fish"]
search_keys = words
# Parameters
number_of_images = 1
headless = False
min_resolution = (0, 0)
max_resolution = (9999, 9999)
# Main program
for search_key in search_keys:
# TODO if its already exist then skip
image_scrapper = GoogleImageScraper(webdriver_path, image_path, search_key, number_of_images, headless,
min_resolution, max_resolution)
image_urls = image_scrapper.find_image_urls()
image_scrapper.save_images(image_urls)
# Release resources
del image_scrapper
'''
"""GAME 4"""
@app.route('/game4/<story_name>', methods=['GET'])
def generate_game4(story_name, file=None):
    """Game 4: play one sound clip, ask which sentence matches it.

    Fixes vs. the original:
    - ``random.randint(0, n)`` is inclusive and could return ``n``,
      raising IndexError; ``random.randrange`` is used instead.
    - the 4x resampling loop kept only its last sample; it is removed.
    - decoy sentences can no longer include the correct one.
    - unused ``fake_answer`` local and debug prints removed.
    """
    audio_root = mypath + slash_clean + story_name + slash_clean + 'audio' + slash_clean
    audio_versions = dirinDir(audio_root)
    # TODO add error handling for stories without any audio version
    meta_file = audio_root + audio_versions[0] + slash_clean + 'metadata_help.json'
    metadata = lara_utils.read_json_file(meta_file)
    # dict keeps the original de-duplication of repeated sentence texts
    text_to_sound = {}
    for m in metadata:
        text_to_sound[m['text']] = m['file']
    sentance = list(text_to_sound.keys())
    sounds = list(text_to_sound.values())
    size_of_story = len(sentance)
    rand_index = random.randrange(size_of_story)
    # four wrong answers, never the correct index
    other_indexes = [i for i in range(size_of_story) if i != rand_index]
    rand_i = random.sample(other_indexes, 4)
    true_match = [sentance[rand_index], sounds[rand_index]]
    bad_match = [sentance[i] for i in rand_i]
    return render_template('game4_template.html', t_answer=true_match[0], question=true_match[1],
                           fake_answer_0=bad_match[0], fake_answer_1=bad_match[1],
                           fake_answer_2=bad_match[2], fake_answer_3=bad_match[3],
                           name=story_name)
@app.route('/game4/<story_name>/<path:filename>', methods=['GET','POST'])
def loading_file_pic_g4(filename, story_name):
    """Serve an audio asset for game 4 from the story's first audio version."""
    audio_root = mypath + slash_clean + story_name + slash_clean + 'audio' + slash_clean
    versions = dirinDir(audio_root)
    # TODO add error handling for stories without any audio version
    return send_from_directory(audio_root + slash_clean + versions[0], filename)
"""GAME 5"""
@app.route('/game5/<story_name>', methods=['GET'])
def generate_game5(story_name, file=None):
    """Game 5: show one sentence, ask which sound clip matches it.

    Fixes vs. the original (same as game 4):
    - ``random.randint(0, n)`` off-by-one -> ``random.randrange(n)``.
    - the 4x resampling loop kept only its last sample; removed.
    - decoy sounds can no longer include the correct one.
    - unused ``fake_answer`` local and debug prints removed.
    """
    audio_root = mypath + slash_clean + story_name + slash_clean + 'audio' + slash_clean
    audio_versions = dirinDir(audio_root)
    # TODO add error handling for stories without any audio version
    meta_file = audio_root + audio_versions[0] + slash_clean + 'metadata_help.json'
    metadata = lara_utils.read_json_file(meta_file)
    # dict keeps the original de-duplication of repeated sentence texts
    text_to_sound = {}
    for m in metadata:
        text_to_sound[m['text']] = m['file']
    sentance = list(text_to_sound.keys())
    sounds = list(text_to_sound.values())
    size_of_story = len(sentance)
    rand_index = random.randrange(size_of_story)
    other_indexes = [i for i in range(size_of_story) if i != rand_index]
    rand_i = random.sample(other_indexes, 4)
    true_match = [sentance[rand_index], sounds[rand_index]]
    bad_match = [sounds[i] for i in rand_i]
    return render_template('game5_template.html', t_answer=true_match[1], question=true_match[0],
                           fake_answer_0=bad_match[0], fake_answer_1=bad_match[1],
                           fake_answer_2=bad_match[2], fake_answer_3=bad_match[3],
                           name=story_name)
@app.route('/game4/index.html', methods=['GET'])
def g4_back_home():
    """Route the game 4 'home' link back to the landing page.

    Fix: the original rule was ``/game5/index.html``, duplicating
    ``g5_back_home``'s rule and leaving game 4 with no back-home route.
    """
    return redirect(url_for('home'))
@app.route('/game5/<story_name>/<path:filename>', methods=['GET','POST'])
def loading_file_pic_g5(filename, story_name):
    """Serve an audio asset for game 5 from the story's first audio version."""
    audio_root = mypath + slash_clean + story_name + slash_clean + 'audio' + slash_clean
    versions = dirinDir(audio_root)
    # TODO add error handling for stories without any audio version
    return send_from_directory(audio_root + slash_clean + versions[0], filename)
@app.route('/game5/index.html', methods=['GET'])
def g5_back_home():
    """Route the game 5 'home' link back to the landing page."""
    return redirect(url_for('home'))
"""GAME 6"""
@app.route('/game6/<story_name>', methods=['GET'])
def generate_game6(story_name, file=None):
    """Game 6: hide one word of a sentence; offer 4 decoy words.

    Fixes vs. the original:
    - ``random.randint(0, n)`` off-by-one -> ``random.randrange(n)``.
    - decoy sentences can no longer be the correct sentence.
    - hidden-word selection (``sample(range(1, words_ct), 1)``) crashed on
      sentences of fewer than three words; a safe fallback is used.
    - decoy-word selection crashed on one-word sentences and could never
      pick the last word (off-by-one from counting spaces).
    """
    audio_root = mypath + slash_clean + story_name + slash_clean + 'audio' + slash_clean
    audio_versions = dirinDir(audio_root)
    # TODO add error handling for stories without any audio version
    meta_file = audio_root + audio_versions[0] + slash_clean + 'metadata_help.json'
    metadata = lara_utils.read_json_file(meta_file)
    # dict keeps the original de-duplication of repeated sentence texts
    text_to_sound = {}
    for m in metadata:
        text_to_sound[m['text']] = m['file']
    sentance = list(text_to_sound.keys())
    sounds = list(text_to_sound.values())
    size_of_story = len(sentance)
    rand_index = random.randrange(size_of_story)
    other_indexes = [i for i in range(size_of_story) if i != rand_index]
    rand_i = random.sample(other_indexes, 4)
    true_match = [sentance[rand_index], sounds[rand_index]]
    bad_match = [sentance[i] for i in rand_i]
    # hide an inner word (never first/last), falling back for short sentences
    words_arr = true_match[0].split(' ')
    if len(words_arr) > 2:
        hide_at = random.randrange(1, len(words_arr) - 1)
    else:
        hide_at = 0
    true_word = words_arr[hide_at]
    words_arr[hide_at] = "[--------]"
    missing_sent = " ".join(words_arr)
    # one random word from each decoy sentence
    bad_words = []
    for decoy in bad_match:
        decoy_words = decoy.split(' ')
        bad_words.append(decoy_words[random.randrange(len(decoy_words))])
    return render_template('game6_template.html', t_answer=true_word, question0=missing_sent,
                           question1=true_match[1], fake_answer_0=bad_words[0],
                           fake_answer_1=bad_words[1], fake_answer_2=bad_words[2],
                           fake_answer_3=bad_words[3], name=story_name)
@app.route('/game6/<story_name>/<path:filename>', methods=['GET','POST'])
def loading_file_pic_g6(filename, story_name):
    """Serve an audio asset for game 6 from the story's first audio version."""
    audio_root = mypath + slash_clean + story_name + slash_clean + 'audio' + slash_clean
    versions = dirinDir(audio_root)
    # TODO add error handling for stories without any audio version
    return send_from_directory(audio_root + slash_clean + versions[0], filename)
"""GAME 7"""
@app.route('/game7/<story_name>', methods=['GET'])
def generate_game7(story_name, file=None):
    """Game 7: same mechanics as game 6 (hidden word + 4 decoys), other template.

    Fixes vs. the original:
    - ``random.randint(0, n)`` off-by-one -> ``random.randrange(n)``.
    - decoy sentences can no longer be the correct sentence.
    - hidden-word selection crashed on sentences of fewer than three words.
    - decoy-word selection crashed on one-word sentences and could never
      pick the last word.
    """
    audio_root = mypath + slash_clean + story_name + slash_clean + 'audio' + slash_clean
    audio_versions = dirinDir(audio_root)
    # TODO add error handling for stories without any audio version
    meta_file = audio_root + audio_versions[0] + slash_clean + 'metadata_help.json'
    metadata = lara_utils.read_json_file(meta_file)
    # dict keeps the original de-duplication of repeated sentence texts
    text_to_sound = {}
    for m in metadata:
        text_to_sound[m['text']] = m['file']
    sentance = list(text_to_sound.keys())
    sounds = list(text_to_sound.values())
    size_of_story = len(sentance)
    rand_index = random.randrange(size_of_story)
    other_indexes = [i for i in range(size_of_story) if i != rand_index]
    rand_i = random.sample(other_indexes, 4)
    true_match = [sentance[rand_index], sounds[rand_index]]
    bad_match = [sentance[i] for i in rand_i]
    # hide an inner word (never first/last), falling back for short sentences
    words_arr = true_match[0].split(' ')
    if len(words_arr) > 2:
        hide_at = random.randrange(1, len(words_arr) - 1)
    else:
        hide_at = 0
    true_word = words_arr[hide_at]
    words_arr[hide_at] = "[--------]"
    missing_sent = " ".join(words_arr)
    # one random word from each decoy sentence
    bad_words = []
    for decoy in bad_match:
        decoy_words = decoy.split(' ')
        bad_words.append(decoy_words[random.randrange(len(decoy_words))])
    return render_template('game7_template.html', t_answer=true_word, question0=missing_sent,
                           question1=true_match[1], fake_answer_0=bad_words[0],
                           fake_answer_1=bad_words[1], fake_answer_2=bad_words[2],
                           fake_answer_3=bad_words[3], name=story_name)
@app.route('/game7/<story_name>/<path:filename>', methods=['GET','POST'])
def loading_file_pic_g7(filename, story_name):
    """Serve an audio asset for game 7 from the story's first audio version."""
    audio_root = mypath + slash_clean + story_name + slash_clean + 'audio' + slash_clean
    versions = dirinDir(audio_root)
    # TODO add error handling for stories without any audio version
    return send_from_directory(audio_root + slash_clean + versions[0], filename)
"""GAME 8"""
@app.route('/game8/<story_name>', methods=['GET'])
def generate_game8(story_name, file=None):
    """Game 8: shuffle up to five chunks of a sentence for reordering.

    Fixes vs. the original:
    - ``random.randint(0, n)`` off-by-one -> ``random.randrange(n)``.
    - sentences with fewer than five words raised IndexError when the
      template's five slots were filled; the split is now padded with
      empty strings.
    - dead code computing unused decoy sentences removed.
    """
    audio_root = mypath + slash_clean + story_name + slash_clean + 'audio' + slash_clean
    audio_versions = dirinDir(audio_root)
    # TODO add error handling for stories without any audio version
    meta_file = audio_root + audio_versions[0] + slash_clean + 'metadata_help.json'
    metadata = lara_utils.read_json_file(meta_file)
    # dict keeps the original de-duplication of repeated sentence texts
    text_to_sound = {}
    for m in metadata:
        text_to_sound[m['text']] = m['file']
    sentance = list(text_to_sound.keys())
    sounds = list(text_to_sound.values())
    rand_index = random.randrange(len(sentance))
    true_match = [sentance[rand_index], sounds[rand_index]]
    split_setance = true_match[0].split(" ", 4)  # at most 5 chunks
    while len(split_setance) < 5:
        split_setance.append("")  # pad: the template expects five slots
    random.shuffle(split_setance)
    return render_template('game8_template.html', t_answer=true_match[0], question1=true_match[1],
                           split_a0=split_setance[0], split_a1=split_setance[1],
                           split_a2=split_setance[2], split_a3=split_setance[3],
                           split_a4=split_setance[4], name=story_name)
@app.route('/game8/<story_name>/<path:filename>', methods=['GET','POST'])
def loading_file_pic_g8(filename, story_name):
    """Serve an audio asset for game 8 from the story's first audio version."""
    audio_root = mypath + slash_clean + story_name + slash_clean + 'audio' + slash_clean
    versions = dirinDir(audio_root)
    # TODO add error handling for stories without any audio version
    return send_from_directory(audio_root + slash_clean + versions[0], filename)
if __name__ == '__main__':
    # NOTE(review): debug=True plus host='0.0.0.0' exposes the Werkzeug
    # debugger to the whole network -- acceptable for local development only.
    app.run(debug=True,host='0.0.0.0')
| 19,169 | 6,858 |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
def find_LCAs(parent):
    """Build a lookup of lowest common ancestors for every pair of nodes.

    Args:
        parent: mapping node -> iterable of that node's parents (a DAG).

    Returns:
        Nested dict where ``LCA[u][v]`` is the set of lowest common
        ancestors of ``u`` and ``v``.
    """
    LCA = dict()  # This is the nested dictionary: LCA[u][v] -> set of LCAs
    def lca(u, v):
        # memoisation: skip pairs that were already computed
        if u in list(LCA.keys()):
            if v in list(LCA[u].keys()):
                return
        # recurse on u's parents first so ancestor pairs are filled in
        for i in list(parent[u]):
            lca(i,v)
        # ul: u plus all of u's ancestors. Appending to ul while iterating
        # it below makes isu run on every newly found ancestor too
        # (a transitive closure).
        ul = [u]
        def isu(u):
            for i in list(parent.keys()):
                if i in parent[u]:
                    ul.append(i)
        isu(u)
        for i in ul:
            isu(i)
        # NOTE(review): the placeholder value is set(v) -- for string node
        # labels that is the set of v's CHARACTERS, which looks unintended
        # (perhaps {v} was meant). It is overwritten with set(fl) below,
        # so it only matters if a later lookup happens first. Confirm.
        for i in ul:
            if u in LCA.keys():
                LCA[u].update({v : set(v)})
            else:
                LCA[u] = ({v : set(v)})
        # vl: v plus all of v's ancestors, same closure technique
        vl = [v]
        def isv(v):
            for i in list(parent.keys()):
                if i in parent[v]:
                    vl.append(i)
        isv(v)
        for i in vl:
            isv(i)
        for i in vl:
            if v in LCA.keys():
                LCA[v].update({u : set(u)})
            else:
                LCA[v] = ({u : set(u)})
        # cal: common ancestors of u and v
        cal = list((set(ul) & set(vl)))
        # sl: every node that is a parent of some common ancestor
        sl = []
        for i in cal:
            sl.extend(parent[i])
        # fl: common ancestors that are not a parent of another common
        # ancestor -- i.e. the LOWEST common ancestors
        fl = []
        for i in cal:
            if i not in sl:
                fl.append(i)
        # store the result symmetrically for (u, v) and (v, u)
        if u in LCA.keys():
            LCA[u].update({v : set(fl)})
        else:
            LCA[u] = ({v : set(fl)})
        if v in LCA.keys():
            LCA[v].update({u : set(fl)})
        else:
            LCA[v] = ({u : set(fl)})
    # This calls the recursive "lca" function on all pairs of nodes to populate the "LCA" dictionary
    for u in parent:
        for v in parent:
            lca(u,v)
    return LCA
# In[ ]:
| 1,708 | 572 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
import time
from djitellopy import Tello
# Connect to a DJI Tello drone, start the video stream and save one frame.
tello = Tello()
tello.connect()
tello.streamon()
frame_read = tello.get_frame_read()
#tello.takeoff()
# give the stream a moment to deliver a first frame before grabbing it
time.sleep(1)
cv2.imwrite("picture.png", frame_read.frame)
#tello.land()
# release the connection and the video stream
tello.end()
# Copyright (c) 2020 Mobvoi Inc. (authors: Fangjun Kuang)
#
# See ../../../../LICENSE for clarification regarding multiple authors
from typing import Tuple, Optional
from typing import Union
import torch
import _k2
def index(src: Union[_k2.RaggedArc, _k2.RaggedInt],
          indexes: torch.Tensor,
          need_value_indexes: bool = True
          ) -> Tuple[Union[_k2.RaggedArc, _k2.RaggedInt],  # noqa
                     Optional[torch.Tensor]]:  # noqa
    '''Select sub-lists of a ragged tensor: returns ``src[indexes]``.

    The entries of `indexes` are interpreted as indexes into axis 0
    of `src`.

    Caution:
      `indexes` must be a 1-D tensor with ``indexes.dtype == torch.int32``.

    Args:
      src:
        Source ragged tensor to index.
      indexes:
        Array of indexes into axis 0 of `src`, i.e. with
        0 <= indexes[i] < src.dim0().
      need_value_indexes:
        If True, also return the indexes into ``src.values()`` that
        ``ans.values()`` holds, i.e.
        ``ans.values() = src.values()[value_indexes]``.

    Returns:
      A tuple ``(ans, value_indexes)``: `ans` has the same type as `src`;
      `value_indexes` is None when `need_value_indexes` is False,
      otherwise a 1-D torch.int32 tensor.
    '''
    # _k2.index already produces the (ans, value_indexes) pair.
    return _k2.index(src=src,
                     indexes=indexes,
                     need_value_indexes=need_value_indexes)
| 1,654 | 536 |
# CAP-378 Trabalho: PAD em PDI
# Uso: mpiexec -n <NTASKS> python3 padempdi.py
import numpy as np
from mpi4py import MPI
# CAP-378 assignment: parallel grayscale conversion of an image set with MPI.
# Usage: mpiexec -n <NTASKS> python3 padempdi.py
wt = MPI.Wtime()  # wall-clock start, for the elapsed-time report
comm = MPI.COMM_WORLD  # global communicator (could also be used for groups)
cpu = comm.Get_size()  # total number of MPI ranks
rank = comm.Get_rank()  # rank id of this process
xlen = 118  # total number of images in the dataset
sseg = int( xlen / cpu )  # regular segment size
mseg = sseg + ( xlen % cpu )  # largest segment (the remainder goes to the last rank)
# Input images are 256x256 with 4 channels (RGBA PNG);
# the grayscale output is 256x256 with no channel axis.
xsub = np.zeros((mseg, 256, 256, 4), dtype=np.float32)  # per-rank work area
xprocessed = np.zeros((xlen, 256, 256), dtype=np.float32)  # final result
# Rank 0 reads the dataset and scatters the segments (it also keeps one).
if rank == 0 :
    x = np.load("data/map01.npy")  # read the dataset
    xbatches = np.array_split(x, cpu)  # split the data across the ranks
    xsub[0:len(xbatches[0])] = xbatches[0]  # segment processed by rank 0 itself
    for i in range(1, cpu) :
        # upper-case Send/Recv work on buffers (no pickling)
        comm.Send(xbatches[i], dest=i, tag=0)
else :  # every other rank receives its segment
    comm.Recv(xsub, source=0, tag=0)
# Start/end indexes of this rank's segment; the last rank owns the biggest one.
start = 0
if rank == cpu - 1 :
    end = mseg
else :
    end = sseg
# Grayscale output drops the channel axis: shape (mseg, 256, 256).
xprocessedsub = np.zeros(xsub.shape[:-1])
# The outer 10x loop exists purely for timing measurements.
for j in range(0, 10) :
    for i in range(start, end) :
        # grayscale: weighted sum of the RGB channels (alpha is dropped)
        img_gray = np.dot(xsub[i][..., :3], [0.299, 0.587, 0.114])
        # normalisation to [0, 1)
        img_gray_norm = img_gray / (img_gray.max() + 1)
        xprocessedsub[i, ...] = img_gray_norm
# Rank 0 copies its own segment straight into the final dataset;
# every other rank sends its processed segment back.
if rank == 0 :
    xprocessed[0:len(xprocessedsub)] = xprocessedsub
else :
    comm.Send(xprocessedsub, dest=0, tag=rank)  # the tag identifies the sender
# Rank 0 gathers the remaining segments and places them in the final dataset
# (its own xprocessedsub now doubles as the receive buffer).
if rank == 0 :
    for i in range(1, cpu) :
        status = MPI.Status()
        comm.Recv(xprocessedsub, source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
        rnk_sender = status.Get_source()
        start = rnk_sender * sseg  # destination offset of this segment
        # NOTE(review): every rank sends a buffer of mseg rows, so this
        # copies mseg rows even for ranks that only own sseg -- trailing
        # rows may be overwritten with padding depending on arrival order;
        # the original comment already flagged this part for improvement.
        xprocessed[start : start + len(xprocessedsub)] = xprocessedsub
    # Fix: ndarray.reshape returns a NEW array -- the original discarded
    # the result, so the channel axis was never actually added.
    xprocessed = xprocessed.reshape(xprocessed.shape + (1,))
    # save for later use
    #np.save("data/map03.npy",xprocessed)
# every rank reports its elapsed time
print('Rank =', rank, ' Elapsed time =', MPI.Wtime() - wt, 's')
| 3,849 | 1,411 |
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""App for testing the DRAGON plugin."""
import armi
class DragonTestingApp(armi.apps.App):
    """App that registers only the DRAGON plugin, for test runs."""
    def __init__(self):
        super().__init__()
        # Imported locally so the plugin package is only needed when the
        # testing app is actually instantiated.
        from terrapower.physics.neutronics.dragon.plugin import DragonPlugin
        self._pm.register(DragonPlugin)
    @property
    def splashText(self):
        return """
================================
== DRAGON Testing Application ==
================================
"""
| 1,167 | 343 |
import FWCore.ParameterSet.Config as cms
import DQM.HLTEvF.hltMonBTagIPSource_cfi
import DQM.HLTEvF.hltMonBTagMuSource_cfi
# definition of the Sources for 8E29
#hltMonBTagIP_Jet50U_Source = DQM.HLTEvF.hltMonBTagIPSource_cfi.hltMonBTagIPSource.clone()
#hltMonBTagMu_Jet10U_Source = DQM.HLTEvF.hltMonBTagMuSource_cfi.hltMonBTagMuSource.clone()
#hltMonBTag = cms.Path( hltMonBTagIP_Jet50U_Source + hltMonBTagMu_Jet10U_Source )
# simple b-tag monitor (can also just be included through HLTMonSimpleBTag_cff)
#import DQM.HLTEvF.HLTMonSimpleBTag_cfi
from DQM.HLTEvF.HLTMonSimpleBTag_cfi import *
# Wrap the simple b-tag monitor module in a DQM path.
hltMonBTag = cms.Path(hltMonSimpleBTag)
| 635 | 302 |
# 4. Text Filter
# Write a program that takes a text and a string of banned words.
# All words included in the ban list should be replaced with asterisks "*", equal to the word's length. ' \
# 'The entries in the ban list will be separated by a comma and space ", ".
# The ban list should be entered on the first input line and the text on the second input line.
# Read the ban list (comma-space separated) and a text, then mask every
# banned word with asterisks of the same length.
ban_list = input().split(', ')
message = input()
for banned in ban_list:
    message = message.replace(banned, '*' * len(banned))
print(message)
| 510 | 149 |
import config.funcoes as cfunc
import config.saveload as csave
import config.janela as cjane
import config.gatos_ascii as cga
import objs.gatinho as ogato
import objs.geladeira as ogela
import objs.bau as obau
from random import randint, choice
from time import sleep
# Possible moods a cat can be in (Portuguese labels used throughout the game).
humores = ['feliz', 'triste', 'quieto', 'brincalhão', 'carinhoso', 'assustado', 'irritado']
class Main:
    def __init__(self):
        """Run the startup flow: title screen, then create/load a cat and save.

        Sets self.voltar (player backed out), self.gato/self.gela/self.bau
        (the loaded game objects) and self.salvo (save state flag).
        """
        cfunc.ajustes_iniciais()
        op = self.tela_inicial()
        self.voltar = False
        if op == '1':
            objs = self.novo_gato()
        elif op == '2':
            objs = self.tela_carregar_gato()
        elif op == '3':
            # tela_inicial only returns '1'/'2'/'3', so objs is always bound
            exit()
        if objs:
            self.gato, self.gela, self.bau = objs
            csave.salvar_jogo([self.gato, self.gela, self.bau])
        else:
            # the player backed out of the load screen
            self.voltar = True
        self.salvo = True
    @staticmethod
    def tela_inicial():
        """Print the game's title screen and return the chosen menu option.

        Returns:
            str: '1' (new game), '2' (load game) or '3' (quit).
        """
        # ASCII-art title banner with a cat on top
        fonte = [' /) ',
                 ' |\\---/|(( ',
                 " | ° ° | )) ",
                 ' \\_T_/_// ',
                 ' ________ _______ _____ _{_}_ {_}____ ______ _______ ________ ',
                 '|_ _|| _ || | | || _ || ___|| _ ||_ _|',
                 ' | | | |_| || - || |_| || | | |_| | | | ',
                 ' | | | _ || _ _ || _ || |___ | _ | | | ',
                 ' |____| |__| |__||__| |_| |__||__| |__||______||__| |__| |____| ']
        botao = ['.-----------------------------.',
                 '| Aperte ENTER para jogar! |',
                 "'-----------------------------'"]
        janela = cjane.Janela()
        for i in range(len(fonte)):
            janela.muda_linha(i + 1, fonte[i])
            try:
                # the button block is shorter than the banner; ignore extra rows
                janela.muda_linha(i + 15, botao[i])
            except IndexError:
                pass
        janela.muda_linha(11, 'O MELHOR JOGO DO MUNDO!')
        janela.muda_linha(21, '© RaGhu 2021 ', alin='rjust')
        print(janela)
        input()  # only ENTER should advance; anything typed is discarded
        # replace the button with the actual menu options
        janela.muda_linha(15, '(1) Novo Jogo ')
        janela.muda_linha(16, '(2) Carregar Jogo ')
        janela.muda_linha(17, '(3) Sair ')
        print(janela)
        op = input('Digite a opção desejada: ')
        while op not in ['1', '2', '3']:
            print(janela)
            op = input('Digite uma opção válida: ')
        return op
def tela_carregar_gato(self):
gatos = csave.listar_saves()
if len(gatos) == 0:
janela = cjane.Janela()
janela.muda_linha(11, 'Você não possui nenhum gato, deseja criar um? (S)im ou (N)ão')
print(janela)
esc = input('>>> ').lower()
while esc != 's' and esc != 'n' and esc != 'sim' and esc != 'não' and esc != 'nao':
janela.muda_linha(12, 'Digite uma opção válida!')
print(janela)
esc = input('>>> ').lower()
if 's' in esc:
return self.novo_gato()
elif 'n':
return None
elif len(gatos) == 1:
save = csave.carregar_jogo(gatos[0].split(".")[0])
return save
elif len(gatos) > 1:
janela = cjane.JanelaTable({'##': 4, 'Gato': 54, 'Idade': 18})
gatitos = []
for i in range(len(gatos)):
ga, ge, ba = csave.carregar_jogo(gatos[i].split(".")[0])
gatitos.append([ga, ge, ba])
janela.add_linha([i+1, ga.nome, ga.mostrar_idade()])
janela.mostrar_janela(False)
esc = input('Digite o número do gato para carregar (ENTER para voltar): ').lower()
while esc != '' and (not esc.isnumeric() or int(esc) not in range(1, len(gatos)+1)):
janela.mostrar_janela(False)
esc = input('Digite uma opção válida: ').lower()
if esc != '':
return gatitos[int(esc)-1]
else:
return None
    @staticmethod
    def novo_gato():
        """Create and return a starter (Gatinho, Geladeira, Bau) triple.

        The player chooses between buying, rescuing or adopting a cat;
        each path rolls different starting stats.
        """
        gen_c = choice(['F', 'M'])  # gender of the cat for sale
        gen_r = choice(['F', 'M'])  # gender of the stray cat
        # Portuguese gender inflections for the "bought" cat texts
        if gen_c == 'F':
            um_c = 'a'
            letra_c = um_c
            pron_c = um_c
        else:
            um_c = ''
            letra_c = 'o'
            pron_c = 'e'
        # ...and for the "rescued" cat texts
        if gen_r == 'F':
            um_r = 'a'
            letra_r = um_r
            pron_r = um_r
        else:
            um_r = ''
            letra_r = 'o'
            pron_r = 'e'
        textos1 = [' Você está pensando em ter um gato.',
                   f' Um amigo seu conhece alguém que está vendendo um{um_c} gat{letra_c} bonitinh{letra_c}.',
                   f' Mas também tem um{um_r} gat{letra_r} que sempre têm andado pela vizinhança,',
                   f' e el{pron_r} parece muito simpátic{letra_r}.',
                   ' Por outro lado, também existe um abrigo de gatos perto da sua casa.']
        cfunc.limpar_tela()
        janela = cjane.Janela()
        j = 1
        i = 0
        # show the intro one line at a time (lines 3 and 4 appear together)
        while i < len(textos1):
            janela.muda_linha(j, textos1[i], 'ljust')
            if i == 2:
                j += 1
                janela.muda_linha(j, textos1[i+1], 'ljust')
            print(janela)
            input('(Aperte ENTER para continuar...)')
            j += 2
            i += 1 if i != 2 else 2
        janela.muda_linha(10, ' Você deseja (C)omprar, (R)esgatar ou (A)dotar o gato?', 'ljust')
        print(janela)
        escolha = input('>>> ')
        while escolha.lower() != 'c' and escolha.lower() != 'r' and escolha.lower() != 'a' \
                and escolha.lower() != 'comprar' and escolha.lower() != 'resgatar' and escolha.lower() != 'adotar':
            janela.muda_linha(11, ' Digite uma opção válida!', 'ljust')
            print(janela)
            escolha = input('>>> ')
        janela.limpar_janela()
        v = 0  # extra line offset used by the adoption branch
        if escolha[0] in 'Cc':
            # bought cats: young, well fed, healthy and vaccinated
            janela.muda_linha(1, f' Você conversou com o conhecido do seu amigo e comprou {letra_c} gatinh{letra_c}!', 'ljust')
            idade = randint(2, 12)
            fome = 100
            energia = randint(75, 100)
            saude = 100
            feliz = randint(80, 100)
            vac = True
            ga = ogato.Comprado('', idade, fome, energia, saude, feliz, gen_c, vac)
        elif escolha[0] in 'Rr':
            # rescued cats: any age, rough shape, not vaccinated
            janela.muda_linha(1, f' Você resgatou {letra_r} gatinh{letra_r}. Agora el{pron_r} tem um dono!', 'ljust')
            idade = randint(0, 180)
            fome = randint(10, 100)
            energia = randint(10, 90)
            saude = randint(10, 50)
            feliz = randint(10, 90)
            vac = False
            ga = ogato.Resgatado('', idade, fome, energia, saude, feliz, gen_r, vac)
        else:
            # adoption: the player picks gender and age group
            v = 1
            janela.muda_linha(1, ' Você quer adotar um gatinh(o) ou uma gatinh(a)?', 'ljust')
            print(janela)
            i = input('>>> ')
            while i.lower() != 'o' and i.lower() != 'a' and i.lower() != 'gatinho' and i.lower() != 'gatinha':
                janela.muda_linha(2, ' Digite uma opção válida!', 'ljust')
                print(janela)
                i = input('>>> ')
            # the input is validated above, so gen_a is always bound here
            if i[-1].lower() == 'a':
                gen_a = 'F'
                um_a = 'a'
                letra_a = um_a
                pron_a = um_a
            elif i[-1].lower() == 'o':
                gen_a = 'M'
                um_a = ''
                letra_a = 'o'
                pron_a = 'e'
            janela.muda_linha(2, f' - Gatinh{letra_a}', 'ljust')
            print(janela)
            sleep(1)
            janela.muda_linha(4, f' Você vai adotar um{um_a} gat{letra_a} (F)ilhote, (A)dult{letra_a} ou (I)dos{letra_a}?', 'ljust')
            print(janela)
            i = input('>>> ')
            while i.lower() != 'f' and i.lower() != 'a' and i.lower() != 'i' \
                    and i.lower() != 'filhote' and i.lower() != 'adulto' and i.lower() != 'idoso':
                janela.muda_linha(5, ' Digite uma opção válida!', 'ljust')
                print(janela)
                i = input('>>>')
            # age ranges (in months) per age group
            if i[0].lower() == 'f':
                idade = randint(3, 12)
                janela.muda_linha(5, ' - Filhote', 'ljust')
            elif i[0].lower() == 'a':
                idade = randint(13, 84)
                janela.muda_linha(5, f' - Adult{letra_a}', 'ljust')
            elif i[0].lower() == 'i':
                idade = randint(85, 180)
                janela.muda_linha(5, f' - Idos{letra_a}', 'ljust')
            print(janela)
            sleep(2)
            janela.limpar_janela()
            janela.muda_linha(1, f' Você foi até o abrigo e escolheu um{um_a} gatinh{letra_a}.', 'ljust')
            janela.muda_linha(2, f' Ou será que foi el{pron_a} quem te escolheu?', 'ljust')
            fome = randint(60, 100)
            energia = randint(70, 100)
            saude = randint(70, 90)
            feliz = randint(80, 100)
            vac = choice([True, True, True, False, False])  # True: 60%, False: 40%
            ga = ogato.Adotado('', idade, fome, energia, saude, feliz, gen_a, vac)
        print(janela)
        input('(Aperte ENTER para continuar...)')
        l = ga.gens['letra']
        p = ga.gens['pron']
        # ask for the cat's name; it must be valid and not collide with a save
        janela.muda_linha(3+v, f' Hora de uma decisão difícil... Qual vai ser o nome del{p}?', 'ljust')
        print(janela)
        nome = input('>>> ')
        while not cfunc.verificar_nome(nome):
            if cfunc.existe_save(nome):
                gatolino = csave.carregar_jogo(nome)[0]
                l_antigo = gatolino.gens['letra']
                p_antigo = gatolino.gens['pron']
                janela.muda_linha(4+v, f' Ess{p_antigo} gatinh{l_antigo} já existe! Escolha outro nome.', 'ljust')
            else:
                janela.muda_linha(4+v, ' Digite um nome válido (e com tamanho menor que 32)!', 'ljust')
            print(janela)
            nome = input('>>> ')
        ga.nome = nome
        ge = ogela.Geladeira()
        ba = obau.Bau()
        return ga, ge, ba
def menu(self, gato_img):
    """Render the cat's stats window together with both action menus."""
    letra = self.gato.gens["letra"]
    acoes_gato = ['', 'Ver geladeira', 'Comer', '', 'Ver baú', 'Brincar']
    acoes_partida = ['Salvar o jogo', f'Abandonar {letra} gat{letra} :(', 'Sair']
    print(cjane.JanelaMenu(gato_img, acoes_gato, acoes_partida, self.gato))
def mostra_gela(self):
    """List every food in the fridge, sorted by descending satiation value."""
    cfunc.mudar_titulo('Geladeira')
    tabela = cjane.JanelaTable({'QTE.': 6, 'Nome': 36, 'Tipo': 15, 'Fome': 8, 'Saúde': 9})
    for item in self.gela.comidasort():
        tabela.add_linha([self.gela[item.nome][1], item.nome,
                          item.__class__.__name__, item.saciar, item.saude])
    tabela.mostrar_janela()
def mostrar_bau(self):
    """List every toy in the chest.

    Toy types come in descending order of happiness; copies of the same
    type are listed in ascending order of durability.
    """
    cfunc.mudar_titulo('Baú')
    tabela = cjane.JanelaTable({'Nome': 32, 'Felicidade': 22, 'Usos restantes': 22})
    for tipo in self.bau.brinquedosort():
        for exemplar in sorted(self.bau[tipo.nome]):
            tabela.add_linha([exemplar.nome, exemplar.feliz, exemplar.dura])
    tabela.mostrar_janela()
def brincar(self):
    """Main flow of the 'play' menu action.

    Shows the available toy types sorted by happiness, asks the player to
    pick one, then plays with the least-durable copy of that type.

    Returns:
        bool: True if the cat actually played (state changed), False if
        the player backed out by pressing ENTER.
    """
    cfunc.mudar_titulo('Escolher brinquedo')
    janela = cjane.JanelaTable({'##': 4, 'Nome': 58, 'Felicidade': 14})
    # List the toys available to play with, ordered by happiness.
    brinqs = self.bau.brinquedosort()
    for i in range(len(brinqs)):
        janela.add_linha([i+1, brinqs[i].nome, brinqs[i].feliz])
    janela.mostrar_janela(show_input=False)
    brinq = input('Digite o número do brinquedo para jogar (ENTER para voltar): ')
    # Re-prompt until the input is empty (back out) or a valid 1-based index.
    while brinq != '' and (not brinq.isnumeric() or int(brinq) not in range(1, len(brinqs)+1)):
        janela.mostrar_janela(show_input=False)
        if not brinq.isnumeric():
            brinq = input('Digite um valor numérico (ENTER para voltar): ')
        else:
            brinq = input('Digite um número válido (ENTER para voltar): ')
    if brinq != '':
        # Of the chosen toy type, play with the copy that has the least
        # durability remaining.
        brinq_nome = brinqs[int(brinq) - 1].nome
        menor_dura = min(self.bau[brinq_nome])
        cfunc.mudar_titulo(f'Brincando com {brinq_nome}')
        self.gato.brincar(self.bau, menor_dura)
        return True
    else:
        return False
def comer(self):
    """Main flow of the 'eat' menu action.

    Asks for a food type, then for a specific food of that type, and feeds
    it to the cat. Either prompt accepts an empty ENTER to back out.

    Returns:
        bool: True if the cat ate (state changed), False if the player
        backed out at either prompt.
    """
    cfunc.mudar_titulo('Escolher comida')
    comidas_tipos = self.gela.comida_por_classe()
    tipos = list(comidas_tipos.keys())
    janela_tipos = cjane.JanelaTable({'##': 4, 'Tipo': 73})
    for i in range(len(tipos)):
        janela_tipos.add_linha([i+1, tipos[i]])
    janela_tipos.mostrar_janela(show_input=False)
    tipo_index = input('Digite o número do tipo de comida para comer (ENTER para voltar): ')
    while tipo_index != '' and (not tipo_index.isnumeric() or int(tipo_index) not in range(1, len(tipos)+1)):
        janela_tipos.mostrar_janela(show_input=False)
        if not tipo_index.isnumeric():
            tipo_index = input('Digite um valor numérico (ENTER para voltar): ')
        else:
            tipo_index = input('Digite um número válido (ENTER para voltar): ')
    if tipo_index != '':
        tipo = tipos[int(tipo_index)-1]
        comidas = comidas_tipos[tipo]
        janela = cjane.JanelaTable({'##': 4, 'Nome': 50, 'Fome': 10, 'Saúde': 11})
        for i in range(len(comidas)):
            janela.add_linha([i+1, comidas[i].nome, comidas[i].saciar, comidas[i].saude])
        janela.mostrar_janela(show_input=False)
        comida_index = input('Digite o número da comida para comer (ENTER para voltar ao menu): ')
        while comida_index != '' and (not comida_index.isnumeric() or int(comida_index) not in range(1, len(comidas)+1)):
            # BUG FIX: this used to redraw the *type* table (janela_tipos)
            # while re-prompting for a food, hiding the food list that the
            # requested index refers to. Redraw the food table instead.
            janela.mostrar_janela(show_input=False)
            if not comida_index.isnumeric():
                comida_index = input('Digite um valor numérico (ENTER para voltar ao menu): ')
            else:
                comida_index = input('Digite um número válido (ENTER para voltar ao menu): ')
        if comida_index != '':
            comida = comidas[int(comida_index)-1]
            cfunc.mudar_titulo(f'Comendo {comida.nome}')
            self.gato.comer(self.gela, comida)
            return True
        else:
            return False
    else:
        return False
def run_game(self):
    """Main game loop: render the menu, read a choice, dispatch the action.

    Loops until the player abandons the cat (option 6) or quits (option 7).
    Actions that change game state clear the ``salvo`` (saved) flag.
    """
    while True:
        cfunc.mudar_titulo('Menu')
        cfunc.limpar_tela()
        self.menu(gato_img=cga.gatitos['Padrão'])
        esc = input('>>> ')
        if esc == '1':
            # Show the fridge contents.
            cfunc.limpar_tela()
            self.mostra_gela()
        elif esc == '2':
            # Eat; on success the game state changed and is unsaved.
            cfunc.limpar_tela()
            if self.comer():
                self.salvo = False
        elif esc == '3':
            # Show the toy chest.
            cfunc.limpar_tela()
            self.mostrar_bau()
        elif esc == '4':
            # Play; on success the game state changed and is unsaved.
            cfunc.limpar_tela()
            if self.brincar():
                self.salvo = False
        elif esc == '5':
            # Save the game and show the confirmation window briefly.
            cfunc.limpar_tela()
            csave.salvar_jogo([self.gato, self.gela, self.bau])
            self.salvo = True
            cfunc.janela_salvar()
            sleep(1)
        elif esc == '6':
            # Abandon the cat (delete the save); exit the loop if confirmed.
            cfunc.limpar_tela()
            if cfunc.janela_deletar():
                break
        elif esc == '7':
            # Quit; exit the loop if the exit window confirms.
            cfunc.limpar_tela()
            if cfunc.janela_sair(self.salvo, self.gato, self.gela, self.bau):
                break
        elif esc.lower() == 'creditos' or esc.lower() == 'créditos':
            cfunc.limpar_tela()
            cfunc.janela_creditos()
        else:
            # Unknown input: just redraw the menu.
            continue
if __name__ == '__main__':
    # NOTE(review): assuming Main.__init__ runs the start-up/new-game flow
    # and sets `voltar` when the player asks to go back — TODO confirm.
    # While the player keeps backing out, a fresh Main is constructed;
    # only the final one enters the main game loop.
    game = Main()
    while game.voltar:
        game = Main()
    game.run_game()
| 18,009 | 6,150 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
# Copyright 1998-2018 by authors (see AUTHORS.txt)
#
# This file is part of LuxCoreRender.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import logging
import socket
import threading
import functools
import pyluxcoretools.utils.loghandler as loghandler
logger = logging.getLogger(loghandler.loggerName + ".netbeacon")
BROADCAST_PORT = 18019
class NetBeaconSender:
    """Periodically broadcasts a LUXNETPING UDP datagram advertising a node.

    The payload carries the advertised IP address and port so that
    NetBeaconReceiver instances on the LAN can discover this node.
    """

    def __init__(self, ipAddress, port, broadCastAddress, period=3.0):
        # ipAddress / port: the endpoint advertised inside the ping payload.
        # broadCastAddress: destination address of the UDP broadcasts.
        # period: seconds between successive pings.
        self.socket = None
        self.thread = None
        self.ipAddress = ipAddress
        self.port = port
        self.broadCastAddress = broadCastAddress
        self.period = period

    def Start(self):
        """Open the broadcast socket and start the sender thread."""
        # Create the socket
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        # Create the thread
        self.thread = threading.Thread(target=functools.partial(NetBeaconSender.__BeaconThread, self))
        self.thread.name = "NetBeaconSenderThread"
        # Run the thread
        self.stopEvent = threading.Event()
        self.thread.start()

    def Stop(self):
        """Signal the thread to stop, wait up to 5s, then close the socket."""
        self.stopEvent.set()
        self.thread.join(5.0)
        self.socket.close()

    def __BeaconThread(self):
        """Thread body: broadcast the ping, then sleep `period` seconds."""
        logger.info("NetBeaconSender thread started.")
        # Payload format: "LUXNETPING\n<ip>\n<port>\n" (utf-8).
        pingMsg = bytearray((
            "LUXNETPING\n" +
            str(self.ipAddress) + "\n" +
            str(self.port) + "\n"
        ).encode("utf-8"))
        while not self.stopEvent.is_set():
            logger.debug("NetBeaconSender LUXNETPING sent: " + str(pingMsg))
            self.socket.sendto(pingMsg, (self.broadCastAddress, BROADCAST_PORT))
            # wait() doubles as an interruptible sleep: Stop() wakes it early.
            self.stopEvent.wait(self.period)
        logger.info("NetBeaconSender thread done.")
class NetBeaconReceiver:
    """Listens for LUXNETPING UDP broadcasts and reports discovered nodes.

    For every valid beacon, ``callBack(ipAddress, port)`` is invoked.
    """

    def __init__(self, callBack):
        self.socket = None
        self.thread = None
        self.callBack = callBack

    def Start(self):
        """Bind the broadcast port and start the receiver thread."""
        # Create the socket
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        # A 1s timeout lets the thread poll stopEvent instead of blocking
        # forever in recvfrom().
        self.socket.settimeout(1)
        self.socket.bind(('', BROADCAST_PORT))
        # Create the thread
        self.thread = threading.Thread(target=functools.partial(NetBeaconReceiver.__BeaconThread, self))
        self.thread.name = "NetBeaconReceiverThread"
        # Run the thread
        self.stopEvent = threading.Event()
        self.thread.start()

    def Stop(self):
        """Signal the thread to stop, wait for it, then close the socket."""
        self.stopEvent.set()
        self.thread.join()
        # Shutdown can not be used with UDP sockets so I can not wakeup
        # the thread form the socket.recvfrom()
        #self.socket.shutdown(socket.SHUT_RDWR)
        self.socket.close()

    def __BeaconThread(self):
        """Thread body: receive datagrams and dispatch valid beacons."""
        logger.info("NetBeaconReceiver thread started.")
        try:
            while not self.stopEvent.is_set():
                try:
                    data, whereFrom = self.socket.recvfrom(4096)
                    if (not data):
                        break
                except socket.timeout:
                    continue
                logger.debug("NetBeaconReceiver LUXNETPING received from " + str(whereFrom) + ": " + str(data))
                # ROBUSTNESS FIX: a malformed datagram (wrong field count,
                # non-utf8 bytes, non-numeric port) used to raise out of the
                # loop and silently kill the receiver thread via the outer
                # except. Skip bad packets instead.
                try:
                    tag, ipAddress, port, _ = data.decode("utf-8").split("\n")
                    portNumber = int(port)
                except (UnicodeDecodeError, ValueError):
                    logger.debug("NetBeaconReceiver ignored malformed datagram from " + str(whereFrom))
                    continue
                if (tag != "LUXNETPING"):
                    continue
                # An empty advertised address means "use the sender address".
                if (ipAddress == ""):
                    ipAddress = str(whereFrom[0])
                self.callBack(ipAddress, portNumber)
        except Exception as e:
            logger.info("BeaconThread exception:")
            logger.exception(e)
        logger.info("NetBeaconReceiver thread done.")
| 3,837 | 1,412 |
from flask import render_template
from .src.app import create_app
import os

# SECURITY: the MongoDB DSN — including a privileged username/password — was
# hard-coded here. Prefer the LMS_DB_URI environment variable; the literal is
# kept only as a backward-compatible fallback and the credential should be
# rotated and removed from source control.
db_uri = os.environ.get(
    "LMS_DB_URI",
    "mongodb+srv://dbAdmin:Ve08ByJJOk5RNhWK@clusterlms.k10xd.mongodb.net/lms",
)
app = create_app(db_uri)


# Catch-all route: every path serves the SPA shell; client-side routing
# handles the rest.
@app.route("/", defaults={"path": ""})
@app.route("/<path:path>")
def index(path):
    return render_template("index.html")


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000, debug=True)
| 389 | 166 |
import json
import torch
import sys
from common_utils import transform_audio
from engine.data import load_wav, log_mel_spectrogram, plot_mel, plot_attn
from engine.models import load_pretrained_wav2vec
from vocoder.env import AttrDict
sys.path.append("./vocoder")
from vocoder.models import Generator
# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Checkpoint and config locations (relative to the working directory).
ckpt_path = "./fragmentvc.pt"
wav2vec_path = "facebook/wav2vec2-base"
vocoder_path = "./generator.pt"
vocoder_config_path = "./generator_config.json"
# Mel-spectrogram extraction parameters passed to log_mel_spectrogram().
# NOTE(review): presumably these must match the settings the models were
# trained with — confirm before changing.
preemph = 0.97
sample_rate = 16000
n_mels = 80
n_fft = 1280
hop_len = 320
win_len = 1280
f_min = 50
f_max = None
def convert(src_wav, tgt_wav):
    """Convert *src_wav* to the target speaker's voice via FragmentVC.

    Args:
        src_wav: source waveform samples (presumably 1-D; a batch
            dimension is added before inference — TODO confirm).
        tgt_wav: target-speaker waveform, used only to extract the
            reference mel-spectrogram.

    Returns:
        numpy.ndarray: the converted waveform.

    NOTE(review): all three models are re-loaded from disk on every call;
    hoisting the loads would speed up repeated conversions.
    """
    wav2vec = load_pretrained_wav2vec(wav2vec_path).to(device)
    print("[INFO] Wav2Vec is loaded from", wav2vec_path)
    model = torch.jit.load(ckpt_path).to(device).eval()
    print("[INFO] FragmentVC is loaded from", ckpt_path)
    # FIX: the config file handle used to be leaked via open(...).read();
    # read it inside a context manager instead.
    with open(vocoder_config_path) as config_file:
        vocoder_config = json.load(config_file)
    vocoder = Generator(AttrDict(vocoder_config)).to(device).eval()
    vocoder_state_dict = torch.load(vocoder_path, map_location=device)
    vocoder.load_state_dict(vocoder_state_dict['generator'])
    print("[INFO] Vocoder is loaded from", vocoder_path)
    src_wav = torch.FloatTensor(src_wav).unsqueeze(0).to(device)
    print("[INFO] source waveform shape:", src_wav.shape)
    tgt_mel = log_mel_spectrogram(
        tgt_wav, preemph, sample_rate, n_mels, n_fft, hop_len, win_len, f_min, f_max
    )
    tgt_mel = torch.FloatTensor(tgt_mel.T).unsqueeze(0).to(device)
    print("[INFO] target spectrograms shape:", tgt_mel.shape)
    # Inference only: no gradients needed.
    with torch.no_grad():
        src_feat = wav2vec.extract_features(src_wav, None)[0]
        print("[INFO] source Wav2Vec feature shape:", src_feat.shape)
        out_mel, _ = model(src_feat, tgt_mel)
        print("[INFO] converted spectrogram shape:", out_mel.shape)
        out_wav = vocoder(out_mel).squeeze()
        out_wav = out_wav.cpu().numpy()
        print("[INFO] generated waveform shape:", out_wav.shape)
    return out_wav
def get_prediction(src, tgt):
    """Convert waveform *src* to the voice of *tgt* and return the result.

    Thin public wrapper around :func:`convert`, kept as the entry point
    for external callers. (A stale commented-out try/except that silently
    swallowed all exceptions was removed.)
    """
    return convert(src, tgt)
| 2,189 | 894 |
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi_sqlalchemy import DBSessionMiddleware
from .router import router
from .settings import get_settings
app = FastAPI()
# Attach a SQLAlchemy session to each request, using the DSN from settings.
app.add_middleware(DBSessionMiddleware, db_url=get_settings().DB_DSN)
# NOTE(review): allow_origins=["*"] together with allow_credentials=True is
# a maximally permissive CORS policy — confirm it is intended for production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Mount all application routes.
app.include_router(router)
| 452 | 150 |
""" OpenMDAO GUI
This graphical user interface for OpenMDAO is implemented as a web application.
Running this file will start a tornado web server on a local port and open a
browser on that port. An up-to-date version of Chrome or Firefox with support
for WebSockets is required.
"""
import os
import signal
import socket
import sys
import threading
import time
from argparse import ArgumentParser
from zmq.eventloop import ioloop
# tornado
from tornado import httpserver, web
# openmdao
from openmdao.util.network import get_unused_ip_port
from openmdao.util.fileutil import get_ancestor_dir, is_dev_build
from openmdao.util.log import enable_console
from openmdao.gui.util import ensure_dir, launch_browser
from openmdao.gui.projectdb import Projects
from openmdao.gui.session import TornadoSessionManager
from openmdao.gui.zmqservermanager import ZMQServerManager
from openmdao.gui.handlers import LoginHandler, LogoutHandler, \
ExitHandler, PluginDocsHandler
import openmdao.gui.handlers_projectdb as proj
import openmdao.gui.handlers_workspace as wksp
debug = True
def DEBUG(msg):
    """Print *msg* to stdout (flushed) when the module-level `debug` flag is set."""
    if debug:
        # Single parenthesized expression: produces identical output on
        # Python 2 (where this file's other prints are statements) and
        # Python 3, unlike the old `print a, b` statement form.
        print('<<<%s>>> OMG -- %s' % (os.getpid(), msg))
        sys.stdout.flush()
def get_user_dir():
    """Return the per-user GUI directory, creating it if necessary."""
    path = os.path.expanduser('~/.openmdao/gui/')
    ensure_dir(path)
    return path
class App(web.Application):
    ''' Openmdao web application.
    Extends tornado web app with URL mappings, settings and server manager.
    '''

    def __init__(self, secret=None, external=False):
        # secret: persistent cookie secret (random if None).
        # external: expose the console server beyond localhost.
        # locate the docs, so that the /docs url will point to the appropriate
        # docs, either for the current release or the current development build
        if is_dev_build():
            docpath = os.path.join(get_ancestor_dir(sys.executable, 3), 'docs',
                                   '_build', 'html')
        else:
            import openmdao.main
            docpath = os.path.join(os.path.dirname(openmdao.main.__file__), 'docs')
        # URL routing table; project and workspace handlers are appended below.
        handlers = [
            web.url(r'/', web.RedirectHandler, {'url': '/projects', 'permanent': False}),
            web.url(r'/login', LoginHandler),
            web.url(r'/logout', LogoutHandler),
            web.url(r'/exit', ExitHandler),
            web.url(r'/docs/plugins/(.*)', PluginDocsHandler, {'route': '/docs/plugins/'}),
            web.url(r'/docs/(.*)', web.StaticFileHandler, {'path': docpath, 'default_filename': 'index.html'})
        ]
        handlers.extend(proj.handlers)
        handlers.extend(wksp.handlers)
        # Random cookie secret unless the caller supplies a persistent one.
        if secret is None:
            secret = os.urandom(1024)
        app_path = os.path.dirname(os.path.abspath(__file__))
        app_settings = {
            'login_url': '/login',
            'static_path': os.path.join(app_path, 'static'),
            'template_path': os.path.join(app_path, 'templates'),
            'cookie_secret': secret,
            'debug': True,
        }
        # Per-user project and session directories under ~/.openmdao/gui/.
        user_dir = get_user_dir()
        self.project_dir = os.path.join(user_dir, 'projects')
        ensure_dir(self.project_dir)
        session_dir = os.path.join(user_dir, 'sessions')
        ensure_dir(session_dir)
        self.session_manager = TornadoSessionManager(secret, session_dir)
        self.server_manager = ZMQServerManager('openmdao.gui.consoleserver.ConsoleServer', external)
        # External termination normally only used during GUI testing.
        if sys.platform == 'win32':
            # Fake SIGTERM by polling for a .sigterm file.
            # NOTE(review): the file actually polled below is 'SIGTERM.txt',
            # not '.sigterm' — confirm which name the test harness writes.
            self._exit_requested = False
            self._poller = threading.Thread(target=self._sigterm_poller,
                                            name='SIGTERM poller')
            self._poller.daemon = True
            self._poller.start()
        else:
            signal.signal(signal.SIGTERM, self._sigterm_handler)
        super(App, self).__init__(handlers, **app_settings)

    def _sigterm_poller(self):
        """ On Windows, poll for an external termination request file. """
        sigfile = os.path.join(os.getcwd(), 'SIGTERM.txt')
        while not self._exit_requested:
            time.sleep(1)
            if os.path.exists(sigfile):
                DEBUG('Detected SIGTERM, shutting down...')
                self._shutdown()
                break

    def _sigterm_handler(self, signum, frame):
        """ On Linux/OS X, handle SIGTERM signal. """
        DEBUG('Received SIGTERM, shutting down...')
        self._shutdown()

    def exit(self):
        """ Shutdown. """
        DEBUG('Exit requested, shutting down...')
        if sys.platform == 'win32':
            # Let the poller thread notice the flag and finish (3s grace).
            self._exit_requested = True
            self._poller.join(3)
        self._shutdown()

    def _shutdown(self):
        """ Stop all subprocesses and exit. """
        self.server_manager.cleanup()
        # Give pending tornado operations up to 5 seconds, then exit.
        ioloop.IOLoop.instance().add_timeout(time.time() + 5, sys.exit)
class AppServer(object):
    ''' Openmdao web application server.
    Wraps tornado web app, runs http server, and opens browser.
    '''

    def __init__(self, options):
        # options: argparse namespace from get_argument_parser().
        self.options = options
        user_dir = get_user_dir()
        # initialize some settings
        # Recreate the project database when requested or missing.
        database = os.path.join(user_dir, 'projects.db')
        if options.reset or not os.path.exists(database):
            print "Resetting project database..."
            if os.path.exists(database):
                print "Deleting existing project database..."
                os.remove(database)
            pdb = Projects(database)
            pdb.create()
        # Remember the requested port; < 1 means "pick any free port".
        options.orig_port = options.port
        if (options.port < 1):
            options.port = get_unused_ip_port()
        # save secret between restarts
        secret_file = os.path.join(user_dir, 'secret')
        if os.path.exists(secret_file):
            secret = open(secret_file, 'rb').read()
        else:
            secret = os.urandom(1024)
            open(secret_file, 'wb').write(secret)
        self.app = App(secret, options.external)

    def serve(self):
        ''' Start server listening on port, launch browser if requested,
        and start the ioloop.
        '''
        self.http_server = httpserver.HTTPServer(self.app)
        # Retry a few times: the probed free port may get taken between the
        # probe and the actual bind.
        for retry in range(3):
            try:
                if self.options.external:
                    self.http_server.listen(self.options.port)
                else:
                    # Bind localhost-only unless external access was requested.
                    self.http_server.listen(self.options.port, address='localhost')
            except socket.error:
                # Possibly 'Address already in use', try finding another port.
                if self.options.orig_port < 1 and retry < 2:
                    self.options.port = get_unused_ip_port()
                else:
                    raise
            else:
                break
        if not self.options.serveronly:
            launch_browser(self.options.port, self.options.browser)
        if self.options.external:
            print '***********************************************************'
            print '** WARNING: You have exposed the server to the external **'
            print '** network. THIS IS NOT SAFE!! Clients will **'
            print '** have access to a command prompt on the host **'
            print '** computer with the identity and privileges of **'
            print '** the userid under which the server was run. **'
            print '** **'
            print '** This is very dangerous and you should NOT do it. **'
            print '** You exercise this option at your own risk!!! **'
            print '** (Ctrl-C to terminate server) **'
            print '***********************************************************'
        DEBUG('Serving on port %d' % self.options.port)
        try:
            ioloop.IOLoop.instance().start()
        except KeyboardInterrupt:
            DEBUG('interrupt received, shutting down.')

    @staticmethod
    def get_argument_parser():
        ''' create a parser for command line arguments
        '''
        parser = ArgumentParser(description='launch the graphical user interface')
        parser.add_argument('-p', '--port', type=int, dest='port', default=0,
                            help='port to run server on (defaults to any available port)')
        parser.add_argument('-b', '--browser', dest='browser', default='chrome',
                            help='preferred browser')
        parser.add_argument('-s', '--server', action='store_true', dest='serveronly',
                            help="don't launch browser, just run server")
        parser.add_argument('-r', '--reset', action='store_true', dest='reset',
                            help='reset project database')
        parser.add_argument('-x', '--external', action='store_true', dest='external',
                            help='allow access to server from external clients (WARNING: Not Safe or Secure!!)')
        return parser
def get_argument_parser():
    """Module-level convenience wrapper around AppServer.get_argument_parser()."""
    parser = AppServer.get_argument_parser()
    return parser
def run(parser=None, options=None, args=None):
    ''' Launch the GUI with specified options.
    '''
    # NOTE(review): `parser` and `args` are accepted but unused here —
    # presumably kept for interface compatibility with other launchers.
    # install zmq ioloop before creating any tornado objects
    ioloop.install()
    # create the server and kick it off
    server = AppServer(options)
    server.serve()
def main():
    ''' Process command line arguments and run.
    '''
    enable_console()
    parser = AppServer.get_argument_parser()
    # parse_known_args tolerates extra arguments passed through by a launcher.
    options, args = parser.parse_known_args()
    run(parser, options, args)


if __name__ == '__main__':
    main()
| 9,761 | 2,749 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import binascii
import csv
import datetime
import os
import struct
import sys
import nfc
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/nfcpy')
num_blocks = 20
service_code = 0x090f
class StationRecord(object):
    """One row of the station-code CSV: numeric keys plus display names."""

    db = None  # class-level cache of parsed StationRecord rows

    def __init__(self, row):
        """Parse one CSV row: [area, line, station, company, line name, station name]."""
        self.area_key = int(row[0], 10)
        self.line_key = int(row[1], 10)
        self.station_key = int(row[2], 10)
        self.company_value = row[3]
        self.line_value = row[4]
        self.station_value = row[5]

    @classmethod
    def get_none(cls):
        """Placeholder record returned when a station lookup fails."""
        return cls(["0", "0", "0", "None", "None", "None"])

    @classmethod
    def get_db(cls, filename):
        """Read the station CSV once and cache the parsed records.

        BUG FIX: mode 'rU' was removed in Python 3.11 and the file handle
        was never closed. Open with newline='' (as the csv docs require)
        inside a context manager, and use `is None` instead of `== None`.
        """
        if cls.db is None:
            cls.db = []
            with open(filename, newline='') as csvfile:
                for row in csv.reader(csvfile, delimiter=',',
                                      dialect=csv.excel_tab):
                    cls.db.append(cls(row))
        return cls.db

    @classmethod
    def get_station(cls, line_key, station_key):
        """Return the StationRecord matching (line_key, station_key), or the placeholder."""
        # (A redundant local `import os` was removed; os is imported at module level.)
        station_code_path = os.path.dirname(os.path.abspath(__file__)) + "/StationCode.csv"
        for station in cls.get_db(station_code_path):
            if station.line_key == line_key and station.station_key == station_key:
                return station
        return cls.get_none()
class HistoryRecord(object):
    """One 16-byte Suica usage-history block decoded into named fields."""

    def __init__(self, data):
        """Decode *data* (16 bytes) into console/process/date/stations/balance."""
        # The same bytes are unpacked twice: most fields are big-endian,
        # but the balance field is little-endian.
        big = struct.unpack('>2B2H4BH4B', data)
        little = struct.unpack('<2B2H4BH4B', data)
        self.db = None
        self.console = self.get_console(big[0])
        self.process = self.get_process(big[1])
        self.year = self.get_year(big[3]) + 2000
        self.month = self.get_month(big[3])
        self.day = self.get_day(big[3])
        self.balance = little[8]
        self.in_station = StationRecord.get_station(big[4], big[5])
        self.out_station = StationRecord.get_station(big[6], big[7])

    @classmethod
    def get_console(cls, key):
        """Map a console-type byte to its label (common types only; None otherwise)."""
        consoles = {
            0x03: "精算機",
            0x04: "携帯型端末",
            0x05: "車載端末",
            0x12: "券売機",
            0x16: "改札機",
            0x1c: "乗継精算機",
            0xc8: "自販機",
        }
        return consoles.get(key)

    @classmethod
    def get_process(cls, key):
        """Map a process-type byte to its label (common types only; None otherwise)."""
        processes = {
            0x01: "運賃支払",
            0x14: "運賃支払(入場時オートチャージ)",
            0x15: "運賃支払(退場時オートチャージ)",
            0x02: "チャージ",
            0x0f: "バス",
            0x46: "物販",
        }
        return processes.get(key)

    @classmethod
    def get_year(cls, date):
        """Year is packed into bits 9-15 of the date halfword."""
        return (date >> 9) & 0x7f

    @classmethod
    def get_month(cls, date):
        """Month is packed into bits 5-8 of the date halfword."""
        return (date >> 5) & 0x0f

    @classmethod
    def get_day(cls, date):
        """Day is packed into bits 0-4 of the date halfword."""
        return (date >> 0) & 0x1f
class Station():
    """Plain value object pairing a station name with its company and line."""

    def __init__(self, station, company, line):
        self.station, self.company, self.line = station, company, line
class SuicaRecord():
    """Flattened, display-oriented view of a HistoryRecord."""

    def __init__(self, history):
        def as_station(rec):
            return Station(rec.station_value, rec.company_value, rec.line_value)

        self.console = history.console
        self.process = history.process
        self.date = datetime.datetime(history.year, history.month, history.day)
        self.in_station = as_station(history.in_station)
        self.out_station = as_station(history.out_station)
        self.balance = history.balance
        # Filled in afterwards by Suica.__calculate_payment().
        self.payment = 0
class Suica():
    """Connect to a USB NFC reader, read the card's history blocks, and
    expose them (with per-record payments) in ``self.data``."""

    def __init__(self):
        clf = nfc.ContactlessFrontend('usb')
        self.data = []
        try:
            clf.connect(rdwr={'on-connect': self.__connected})
        finally:
            # FIX: the frontend was never closed, leaking the USB device
            # handle until interpreter exit.
            clf.close()
        self.__calculate_payment()
        # Drop the boundary record whose payment cannot be derived, then
        # reverse the list. NOTE(review): assumes the card returns records
        # newest-first so the result is oldest-first — TODO confirm.
        self.data = self.data[1:]
        self.data = self.data[::-1]

    def __calculate_payment(self):
        """Derive each record's payment as the balance delta to its neighbor."""
        for record_, record in zip(self.data[:-1], self.data[1:]):
            record.payment = record.balance - record_.balance

    def __connected(self, tag):
        """nfcpy on-connect callback: read history blocks into self.data."""
        if not isinstance(tag, nfc.tag.tt3.Type3Tag):
            print("error: tag isn't Type3Tag")
            return
        try:
            sc = nfc.tag.tt3.ServiceCode(service_code >> 6,
                                         service_code & 0x3f)
            for i in range(num_blocks):
                bc = nfc.tag.tt3.BlockCode(i, service=0)
                data = tag.read_without_encryption([sc], [bc])
                history = HistoryRecord(bytes(data))
                self.data.append(SuicaRecord(history))
        except Exception as e:
            print("error: %s" % e)
if __name__ == "__main__":
    # Read the card and dump each history record to stdout. Labels are in
    # Japanese: payment, console type, process, date, entry line/station,
    # exit line/station, balance.
    suica = Suica()
    for d in suica.data:
        print()
        print("支払い: %s円" % d.payment)
        print("端末種: %s" % d.console)
        print("処理: %s" % d.process)
        print("日付: %02d-%02d-%02d" % (d.date.year, d.date.month, d.date.day))
        print("入線区: %s-%s" % (d.in_station.company, d.in_station.line))
        print("入駅順: %s" % d.in_station.station)
        print("出線区: %s-%s" % (d.out_station.company, d.out_station.line))
        print("出駅順: %s" % d.out_station.station)
        print("残高: %d" % d.balance)
| 5,412 | 2,069 |
from datetime import datetime
import marshmallow as ma
import sqlalchemy as sa
from marshmallow import fields as f
from sqlalchemy import MetaData
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session
metadata = MetaData()
Session = scoped_session(sessionmaker())
Base = declarative_base(metadata=metadata)
def reflect_field_to_column(fd: f.Field):
    """Translate a marshmallow field into an equivalent SQLAlchemy column.

    Field types without a specific mapping fall back to String. The
    isinstance chain preserves the original check order.
    """
    if isinstance(fd, f.Decimal):
        col_type = sa.Numeric(scale=fd.places, decimal_return_scale=fd.places)
    elif isinstance(fd, f.Bool):
        col_type = sa.Boolean()
    elif isinstance(fd, f.DateTime):
        col_type = sa.DateTime()
    elif isinstance(fd, f.Date):
        col_type = sa.Date()
    elif isinstance(fd, f.Float):
        col_type = sa.Float()
    elif isinstance(fd, f.Int):
        col_type = sa.Integer()
    else:
        col_type = sa.String()
    return sa.Column(fd.name, col_type, nullable=not fd.required, default=fd.default)
def extract_columns_from_schema(schema: ma.Schema):
    """Build one SQLAlchemy column per field declared on *schema*."""
    return [reflect_field_to_column(field) for field in schema.fields.values()]
def make_table_from_schema(name, schema: ma.Schema):
    """Create a sa.Table named *name*: bookkeeping columns plus one column
    per schema field (via extract_columns_from_schema)."""
    basic_model_columns = (
        sa.Column('pk', sa.String(), primary_key=True, nullable=False, unique=True),
        # If the insert date and the last-update date differ, that is a
        # reason to surface the operation in the reconciliation results.
        sa.Column('date_create', sa.DateTime, default=datetime.now),
        sa.Column('last_update', sa.DateTime, onupdate=datetime.now),
    )
    additional_cols = extract_columns_from_schema(schema)
    # The table registers itself on the module-level `metadata`.
    table = sa.Table(name, metadata, *basic_model_columns, *additional_cols)
    return table
| 1,760 | 579 |
#!/usr/bin/python3.7
import sys, json, os, stripe
from datetime import timedelta, datetime
from flask import Flask, render_template, redirect, request, escape, jsonify, flash, current_app
from flask_login import LoginManager, UserMixin, login_required, login_user, logout_user, current_user
from flask_wtf import CSRFProtect
# Import all the things
from setup_app import app
from frontend_action import FrontendAction
from service_calls.call_notifications_service import notification_api
from service_calls.call_user_service import user_api
from service_calls.call_stripe_service import stripe_api
# Enable CSRF protection for all form posts.
csrf = CSRFProtect(app)
# Mount the service-call blueprints.
app.register_blueprint(notification_api)
app.register_blueprint(user_api)
app.register_blueprint(stripe_api)
# Shared helper object used by the route handlers below.
action = FrontendAction(app)
@app.route("/")
def home():
    """Public landing page."""
    return render_template('index.html',
                           is_authenticated=current_user.is_authenticated)
@app.route("/login_page")
def login_page():
    """Login form; already-authenticated users go straight to the dashboard."""
    if not current_user.is_authenticated:
        return render_template('login_page.html')
    return redirect('/dashboard', code=302)
@app.route("/dashboard")
@login_required
def dashboard():
    """User dashboard: trial expiry, subscription state, unread messages."""
    trial_period = timedelta(days=app.config['TRIAL_LENGTH_DAYS'])
    sub_active = action.is_user_subscription_active(False)
    notifications, notifications_for_display = action.get_unread_notifications(current_user.id)
    variables = dict(name=current_user.name,
                     expire_date=current_user.created_date + trial_period,
                     user_is_paying=sub_active,
                     notifications=notifications_for_display,
                     n_messages=len(notifications))
    return render_template('dashboard.html', **variables)
@app.route("/billing")
@login_required
def billing():
    """Billing page: subscription status, Stripe subscriptions, unread messages."""
    sub_active, show_reactivate, sub_cancelled_at = action.is_user_subscription_active()
    stripe_objs = action.get_all_stripe_subscriptions_by_user_id(current_user.id)
    # Convert the Stripe objects into a template-friendly JSON structure.
    sub_dict = action.subscriptions_to_json(stripe_objs)
    notifications, notifications_for_display = action.get_unread_notifications(current_user.id)
    variables = dict(subscription_active=sub_active,
                     name=current_user.name,
                     show_reactivate=show_reactivate,
                     subscription_cancelled_at=sub_cancelled_at,
                     subscription_data=sub_dict,
                     notifications=notifications_for_display,
                     n_messages=len(notifications))
    return render_template('billing.html', **variables)
@app.route("/notifications")
@login_required
def notifications_center():
    """Notification center: full history plus the unread subset for the badge."""
    all_notifications = action.get_all_notifications_by_user_id(current_user.id)
    notifications, notifications_for_display = action.get_unread_notifications(current_user.id)
    variables = dict(name=current_user.name,
                     notifications=notifications_for_display,
                     all_notifications=all_notifications,
                     n_messages=len(notifications))
    return render_template('notifications.html', **variables)
@app.route("/tos")
def terms_of_service():
    """Static terms-of-service page."""
    return render_template('terms_of_service.html',
                           is_authenticated=current_user.is_authenticated)
@app.route("/logout")
def logout():
    """Log the current user out (no-op when not logged in) and redirect home."""
    # IDIOM FIX: was `if current_user.is_authenticated == True:` — compare
    # truthiness directly instead of `== True`.
    if current_user.is_authenticated:
        # NOTE(review): mutating is_authenticated looks redundant —
        # logout_user() already handles the session; confirm before removing.
        current_user.is_authenticated = False
        logout_user()
    return redirect('/', code=302)
@app.errorhandler(401)
def not_logged_in(e):
    """401 handler: show the login page with a prompt."""
    return render_template('login_page.html', message='Please login first')
@app.errorhandler(404)
def not_found(e):
    """404 handler: render the generic error page."""
    context = {
        'is_authenticated': current_user.is_authenticated,
        'message': '404 Page Not Found',
        'stacktrace': str(e),
    }
    return render_template('error.html', **context)
if __name__ == '__main__':
    # Listen on all interfaces at the configured frontend port.
    app.run(host='0.0.0.0', port=app.config['FRONTEND_PORT'])
"""Create a sphere."""
import pathlib
import sys
import petibmpy
rootdir = pathlib.Path(__file__).absolute().parents[5]
sys.path.insert(0, str(rootdir / 'misc'))
import icosphere
R = 0.5  # sphere radius

# Build an icosphere and scale its unit vertices to radius R.
# NOTE(review): 25 is presumably the subdivision/refinement parameter of
# create_icosphere — confirm against the icosphere helper.
sphere = icosphere.create_icosphere(25)
sphere.vertices *= R
sphere.print_info()
x, y, z = sphere.vertices.T
# Center the sphere at (-5.0, 0.0, 0.0)
x += -5.0
# Write the body coordinates next to the simulation directory.
simudir = pathlib.Path(__file__).absolute().parents[1]
filepath = simudir / 'sphere.body'
petibmpy.write_body(filepath, x, y, z)
| 483 | 205 |
import context as HP
import os
OT = '2018_1101_B'  # work-order tag: selects the data folder and plot names

# Pick data/plot paths per platform: Windows uses the network share,
# anything else (Raspberry Pi) uses local result directories.
# NOTE(review): on Windows, pathData and pathPlot are built identically —
# confirm whether plots were meant to go to a separate folder.
if (os.name == 'nt'):
    pathData = '{}{}/{}'.format(
        '//SERVIDORSQL/Datos/Desarrollos y pruebas/',
        'Automatitzacio/Dades Proves/Termoparell',
        OT)
    pathPlot = '{}{}/{}'.format(
        '//SERVIDORSQL/Datos/Desarrollos y pruebas/',
        'Automatitzacio/Dades Proves/Termoparell',
        OT)
else:
    pathData = '/home/pi/results/therm/{}'.format(OT)
    pathPlot = '/home/pi/results/plots/{}'.format(OT)
try:
    plotter = HP.HandyPlotter()
    allPlots = True
    whatTc = {
        'L1': 'Termopar A',
    }
    if (not allPlots):
        # NOTE(review): whatTc has no 'A' key (only 'L1'), so this branch
        # would raise KeyError if allPlots were ever set to False —
        # confirm the intended key.
        plotter.plot_all(
            pathData=pathData,
            pathPlot=pathPlot,
            find=whatTc['A'],
        )
    else:
        # One plot per work-order tag; the value is the plot title.
        find = {
            '2018_1101_B': 'Primera Hornada Ejercicio 2018 Orden Trabajo 1101 (B)',
        }
        for i in find:
            plotter.plot_all(
                pathData=pathData,
                pathPlot=pathPlot,
                find={'tag': i, 'title': find[i]},
                naming='column',
                xPos=1,
                yPos=[i for i in range(2, 14)],  # [2, 5, 8, 12],
                xLabel='Tiempo [min]',
                yLabel='Temperatura [ºC]',
                yLim=(0, 180.05),
                xTicks=(0, 300.05, 20),
                yTicks=(0, 180.05, 10),
            )
except KeyboardInterrupt:
    print('Cancel')
| 1,129 | 587 |
import os
from nose import with_setup
from nose.tools import assert_true
import numpy as np
import nibabel
from nilearn.datasets.tests import test_utils as tst
from nilearn._utils.testing import assert_raises_regex
from nilearn._utils.niimg_conversions import _check_same_fov
from sammba import testing_data
from sammba.registration.template_registrator import TemplateRegistrator
def crop_and_oblique(in_file, out_file):
    """Write a cropped, slightly oblique copy of *in_file* to *out_file*.

    The first slice along axis 0 is dropped and a scaled affine with small
    off-diagonal (oblique) terms replaces the original one.
    """
    source = nibabel.load(in_file)
    affine = .2 * np.eye(4)
    affine[0, 1] = .01
    affine[1, 0] = .01
    affine[3, 3] = 1
    cropped = source.get_data()[1:]
    nibabel.Nifti1Image(cropped, affine).to_filename(out_file)
def empty_img_like(in_file, out_file):
    """Save an all-zero image with the same shape and affine as *in_file*."""
    reference = nibabel.load(in_file)
    zeros = np.zeros(reference.get_data().shape)
    nibabel.Nifti1Image(zeros, reference.affine).to_filename(out_file)
@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata)
def test_segment():
    """Smoke test: TemplateRegistrator.segment must produce a brain file."""
    anat_file = os.path.join(os.path.dirname(testing_data.__file__),
                             'anat.nii.gz')
    registrator = TemplateRegistrator(anat_file, 400, output_dir=tst.tmpdir,
                                      use_rats_tool=False, verbose=False)
    # NOTE(review): anat_file is recomputed identically here — redundant.
    anat_file = os.path.join(os.path.dirname(testing_data.__file__),
                             'anat.nii.gz')
    _, brain_file = registrator.segment(anat_file)
    assert_true(os.path.isfile(brain_file))
@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata)
def test_fit_anat_and_transform_anat_like():
    """fit_anat registers the anat to the template; transform_anat_like
    then maps an anat-like image into the template field of view."""
    anat_file = os.path.join(os.path.dirname(testing_data.__file__),
                             'anat.nii.gz')
    template_file = os.path.join(tst.tmpdir, 'template.nii.gz')
    # Create template
    crop_and_oblique(anat_file, template_file)
    registrator = TemplateRegistrator(template_file, 400,
                                      output_dir=tst.tmpdir,
                                      use_rats_tool=False, verbose=False,
                                      registration_kind='affine')
    # Using the registrator before fitting must raise a clear error.
    assert_raises_regex(
        ValueError, 'has not been anat fitted',
        registrator.transform_anat_like, anat_file)
    # test fit_anat
    registrator.fit_anat(anat_file)
    assert_true(_check_same_fov(nibabel.load(registrator.registered_anat_),
                                nibabel.load(template_file)))
    # test transform_anat_like
    anat_like_file = os.path.join(tst.tmpdir, 'anat_like.nii.gz')
    empty_img_like(anat_file, anat_like_file)
    registrator.fit_anat(anat_file)
    transformed_file = registrator.transform_anat_like(anat_like_file)
    assert_true(_check_same_fov(nibabel.load(transformed_file),
                                nibabel.load(template_file)))
@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata)
def test_fit_transform_and_inverse_modality_with_func():
    """Round-trip checks for the 'func' modality: fit, transform, inverse."""
    data_dir = os.path.dirname(testing_data.__file__)
    anat_file = os.path.join(data_dir, 'anat.nii.gz')
    func_file = os.path.join(data_dir, 'func.nii.gz')
    template_file = os.path.join(tst.tmpdir, 'template.nii.gz')
    crop_and_oblique(anat_file, template_file)
    registrator = TemplateRegistrator(template_file, 400,
                                      output_dir=tst.tmpdir,
                                      use_rats_tool=False, verbose=False,
                                      registration_kind='affine')
    registrator.fit_anat(anat_file)
    # Invalid modality names and missing options must raise early.
    assert_raises_regex(
        ValueError, "Only 'func' and 'perf' ", registrator.fit_modality,
        func_file, 'diffusion')
    assert_raises_regex(
        ValueError, "'t_r' is needed for slice ", registrator.fit_modality,
        func_file, 'func')
    assert_raises_regex(
        ValueError, 'has not been func fitted',
        registrator.transform_modality_like, func_file, 'func')
    # fit_modality for func registers into template space.
    registrator.fit_modality(func_file, 'func', slice_timing=False)
    registered_func_img = nibabel.load(registrator.registered_func_)
    template_img = nibabel.load(template_file)
    np.testing.assert_array_almost_equal(registered_func_img.affine,
                                         template_img.affine)
    np.testing.assert_array_equal(registered_func_img.shape[:-1],
                                  template_img.shape)
    # transform_modality_like maps a func-shaped image into template space.
    func_like_file = os.path.join(tst.tmpdir, 'func_like.nii.gz')
    empty_img_like(func_file, func_like_file)
    transformed_file = registrator.transform_modality_like(func_like_file,
                                                           'func')
    transformed_img = nibabel.load(transformed_file)
    assert_true(_check_same_fov(transformed_img, nibabel.load(template_file)))
    # transform followed by inverse transform recovers the original image.
    inverse_transformed_file = registrator.inverse_transform_towards_modality(
        transformed_file, 'func')
    inverse_transformed_img = nibabel.load(inverse_transformed_file)
    func_like_img = nibabel.load(func_like_file)
    assert_true(_check_same_fov(inverse_transformed_img, func_like_img))
    np.testing.assert_array_equal(inverse_transformed_img.get_data(),
                                  func_like_img.get_data())
    # inverse transform followed by transform is also the identity.
    transformed_file2 = registrator.transform_modality_like(
        inverse_transformed_file, 'func')
    transformed_img2 = nibabel.load(transformed_file2)
    assert_true(_check_same_fov(transformed_img2, transformed_img))
    np.testing.assert_array_equal(transformed_img2.get_data(),
                                  transformed_img.get_data())
@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata)
def test_fit_and_transform_modality_with_perf():
    """fit_modality / transform_modality_like checks for the 'perf' modality."""
    data_dir = os.path.dirname(testing_data.__file__)
    anat_file = os.path.join(data_dir, 'anat.nii.gz')
    func_file = os.path.join(data_dir, 'func.nii.gz')
    template_file = os.path.join(tst.tmpdir, 'template.nii.gz')
    crop_and_oblique(anat_file, template_file)
    registrator = TemplateRegistrator(template_file, 400,
                                      output_dir=tst.tmpdir,
                                      use_rats_tool=False, verbose=False,
                                      registration_kind='affine')
    registrator.fit_anat(anat_file)
    # Transforming before the perf fit must raise.
    assert_raises_regex(
        ValueError, 'has not been perf fitted',
        registrator.transform_modality_like, func_file, 'perf')
    # Build an M0 image from the first func volume.
    func_img = nibabel.load(func_file)
    m0_img = nibabel.Nifti1Image(func_img.get_data()[..., 0], func_img.affine)
    m0_file = os.path.join(tst.tmpdir, 'm0.nii.gz')
    m0_img.to_filename(m0_file)
    # fit_modality for perf registers into template space.
    registrator.fit_modality(m0_file, 'perf')
    assert_true(_check_same_fov(nibabel.load(registrator.registered_perf_),
                                nibabel.load(template_file)))
    # transform_modality_like maps an m0-shaped image into template space.
    m0_like_file = os.path.join(tst.tmpdir, 'm0_like.nii.gz')
    empty_img_like(m0_file, m0_like_file)
    transformed_file = registrator.transform_modality_like(m0_like_file,
                                                           'perf')
    assert_true(_check_same_fov(nibabel.load(transformed_file),
                                nibabel.load(template_file)))
| 7,526 | 2,491 |
from multiprocessing import Process
import botapi
from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from pyrogram.handlers import MessageHandler
import json
import database
from pyrogram.errors import (PeerIdInvalid, UserIsBlocked, MessageTooLong)
from pyrogram.types import (InlineQueryResultArticle, InputTextMessageContent,
InlineKeyboardMarkup, InlineKeyboardButton)
from config import apiID, apiHASH, botTOKEN
from pyrogram import filters
async def func(_, __, m):
    """Pyrogram update filter: accept updates from users in "mtproto" mode.

    Also makes sure the sender exists in the database, scraping the update
    on first contact. Returns True only when the stored mode is "mtproto".
    """
    if m.from_user.is_self:
        return False
    json_object = json.loads(f"{m}")
    instance = json_object["_"]
    if instance == "Message":
        user = m.chat.id
        chattype = m.chat.type
    elif instance == "CallbackQuery":
        user = m.message.chat.id
        chattype = m.message.chat.type
    elif instance == "InlineQuery":
        user = m.from_user.id
        chattype = "private"
    else:
        # BUG FIX: previously fell through with `user`/`chattype` unbound,
        # raising NameError on unknown update types. Log and reject instead.
        print(instance)
        return False
    if not database.user_exist(user, chattype):
        database.scrape(m)
    mode = database.find_mode(user)
    return mode == "mtproto"
# Custom filter instance: only let updates through for users in "mtproto" mode.
mode_filter = filters.create(func)
# MTProto bot client; credentials are supplied by config.py.
ostrich = Client("ostrich", api_id=apiID, api_hash=apiHASH, bot_token=botTOKEN)
@ostrich.on_message(filters.command(["button"]) & mode_filter)
async def buttons(client, message):
    """Reply to /button with a two-row sample inline keyboard."""
    sample_keyboard = InlineKeyboardMarkup([
        [InlineKeyboardButton("Button1", callback_data="Button1")],
        [InlineKeyboardButton("Button2", callback_data="Button2")],
    ])
    await message.reply_text(
        text=f'''
**Sample Inline buttons:
**''',
        disable_web_page_preview=True,
        reply_markup=sample_keyboard,
        reply_to_message_id=message.message_id)
@ostrich.on_message(filters.command(["help"]) & mode_filter)
async def help(client, message):
    """Reply to /help with usage instructions and a support-link button.

    NOTE: the handler name shadows the builtin ``help``; kept as-is since
    pyrogram registers the function via the decorator, not by name lookup.
    """
    support_keyboard = InlineKeyboardMarkup([[
        InlineKeyboardButton(
            "Get Help",
            url="https://t.me/ostrichdiscussion/"),
    ]])
    await message.reply_text(
        text=f'''
Here is a detailed guide to use me.
You can use me to get JSON responses of your messages.
**Supports:**
- `Messages`
- `Inline Query`
- `Callback Query`
Use /set to switch between `bot API` and `MTProto` mode and /button to generate sample inline keyboard buttons.''',
        disable_web_page_preview=True,
        reply_markup=support_keyboard,
        reply_to_message_id=message.message_id)
@ostrich.on_message(filters.command(["start"]) & mode_filter)
async def start(client, message):
    """Greet the user on /start, offer a HELP button, and record the user."""
    help_keyboard = InlineKeyboardMarkup([[
        InlineKeyboardButton("HELP",
                             callback_data="getHELP"),
    ]])
    await message.reply_text(
        text=f'''
**Hi {message.from_user.mention}!
I return JSON responses of both bot api and MTProto for your messages.
Hit help to know more about how to use me.
**''',
        disable_web_page_preview=True,
        reply_markup=help_keyboard,
        reply_to_message_id=message.message_id)
    # Make sure the user is stored in the database.
    database.scrape(message)
@ostrich.on_message(filters.command(["copy"]))
async def copy(client, message):
    """Copy the replied-to message back into the chat.

    NOTE: the handler name shadows the builtin ``copy``; kept as-is since
    pyrogram registers the function via the decorator, not by name lookup.
    """
    # BUG FIX: /copy sent without replying to a message used to crash with
    # AttributeError on ``message.reply_to_message`` (None); guard instead.
    if not message.reply_to_message:
        await message.reply_text("Reply to a message with /copy to copy it.")
        return
    await client.copy_message(message.chat.id,
                              message.reply_to_message.chat.id,
                              message.reply_to_message.message_id)
@ostrich.on_message(filters.command(["set"]) & mode_filter)
async def set(client, message):
    """Ask the user to choose between bot API and MTProto response modes."""
    mode_keyboard = InlineKeyboardMarkup([[
        InlineKeyboardButton("bot API", callback_data="set_botapi"),
    ], [
        InlineKeyboardButton("MTProto", callback_data="set_mtproto"),
    ]])
    await message.reply_text(
        text=f"**Select an option**",
        disable_web_page_preview=True,
        reply_markup=mode_keyboard,
        reply_to_message_id=message.message_id)
@ostrich.on_message(mode_filter)
async def new_message(client, message):
    """Reply with the pretty-printed MTProto JSON of the incoming message.

    Falls back to sending the JSON as a document when the formatted text
    exceeds Telegram's message length limit.
    """
    json_object = json.loads(f"{message}")
    formatted = json.dumps(json_object, indent=4)
    try:
        await message.reply_text(
            f"```{formatted}```",
            disable_web_page_preview=True,
            disable_notification=True,
        )
    except MessageTooLong:
        # FIX: use a context manager so the file handle is always closed,
        # even if the write fails.
        with open("json.txt", "w+") as file:
            file.write(formatted)
        await client.send_document(message.chat.id,
                                   document="json.txt",
                                   caption="responseJSONbot",
                                   disable_notification=True)
@ostrich.on_chosen_inline_result(mode_filter)
async def inline_result(client, inline_query):
    """Send the JSON of a chosen inline result to the user who picked it."""
    mode = database.find_mode(inline_query.from_user.id)
    if mode != "mtproto":
        # BUG FIX: ``from_user.id.first_name`` raised AttributeError
        # (an int has no ``first_name``); log the user id itself.
        print(
            f"ignoring non mtproto request by user {inline_query.from_user.id}"
        )
        return
    json_object = json.loads(f"{inline_query}")
    formatted = json.dumps(json_object, indent=4)
    try:
        await client.send_message(
            chat_id=inline_query.from_user.id,
            text=f"```{formatted}```",
            # parse_mode=,
            disable_web_page_preview=True,
            disable_notification=True,
            # reply_to_message_id=,
        )
    except MessageTooLong:
        # FIX: context manager guarantees the temp file is closed.
        with open("json.txt", "w+") as file:
            file.write(formatted)
        # BUG FIX: send_document was called without the required chat id
        # (and with ``quote=True``, which only applies to message replies);
        # send the document to the requesting user instead.
        await client.send_document(inline_query.from_user.id,
                                   document="json.txt",
                                   caption="responseJSONbot",
                                   disable_notification=True)
@ostrich.on_inline_query(mode_filter)
async def inline_query(client, inline_query):
    """Answer inline queries with the raw MTProto JSON plus an about card."""
    updates_keyboard = InlineKeyboardMarkup([[
        InlineKeyboardButton(
            "Updates",
            url="https://t.me/ostrichdiscussion")
    ]])
    response_article = InlineQueryResultArticle(
        title="MTProto API response",
        input_message_content=InputTextMessageContent(f"{inline_query}"),
        description="@responseJSONbot",
        thumb_url="https://i.imgur.com/JyxrStE.png")
    about_article = InlineQueryResultArticle(
        title="About",
        input_message_content=InputTextMessageContent(
            "**Response JSON BOT - @ theostrich**"),
        url="https://t.me/theostrich",
        description="About bot",
        thumb_url="https://imgur.com/DBwZ2y9.png",
        reply_markup=updates_keyboard)
    await inline_query.answer(results=[response_article, about_article])
@ostrich.on_callback_query(mode_filter)
async def cb_handler(client, query):
    """Route callback queries: mode selection, help screen, close, or JSON echo."""
    if query.data.startswith('set'):
        # Mode selection buttons: "set_botapi" / "set_mtproto".
        await query.answer()
        user = query.message.reply_to_message.chat.id
        mode = query.data.split("_")[1]
        database.set_mode(user, mode)
        await query.message.reply_text(
            text=f"**Mode set to {mode} successfully**")
    elif query.data == "getHELP":
        await query.answer()
        # BUG FIX: the markdown around "bot API" was garbled (``|bot API```),
        # rendering broken formatting to the user; repaired to ```bot API```.
        await query.message.edit_text(
            text=f'''
Here is a detailed guide to use me.
You can use me to get JSON responses of your messages.
**Supports:**
- ```Messages```
- ```Inline Query```
- ```Callback Query```
Use /set to switch between ```bot API``` and ```MTProto``` mode and /button to generate sample inline keyboard buttons.
''',
            reply_markup=InlineKeyboardMarkup([[
                InlineKeyboardButton("Get Help",
                                     url="https://t.me/ostrichdiscussion"),
            ]]),
            disable_web_page_preview=True)
    elif query.data == "closeInline":
        await query.answer("done")
        await query.message.delete()
    else:
        # Any other callback: echo its JSON back to the user.
        await query.answer()
        if query.message:
            user = query.message.chat.id
        else:
            user = query.from_user.id
        json_object = json.loads(f"{query}")
        formatted = json.dumps(json_object, indent=4)
        try:
            await client.send_message(user, text=f"```{formatted}```")
        except MessageTooLong:
            # FIX: context manager guarantees the temp file is closed.
            with open("json.txt", "w+") as file:
                file.write(formatted)
            await client.send_document(
                user,
                document="json.txt",
                caption="responseJSONbot",
                disable_notification=True,
            )
if __name__ == '__main__':
    # Run the MTProto bot and the bot-API bot in parallel processes and
    # block until both terminate.
    mtproto_process = Process(target=ostrich.run)
    mtproto_process.start()
    botapi_process = Process(target=botapi.main)
    botapi_process.start()
    mtproto_process.join()
    botapi_process.join()
| 8,939 | 2,513 |
from typing import Dict
from dbnd import Config, parameter
class Algorithm(object):
    """Canonical names of SageMaker built-in algorithms.

    Several attribute names were historically misspelled; correctly spelled
    attributes are now the primary names and the old misspellings are kept
    as backward-compatible aliases. The string values were always correct.
    """

    PCA = "pca"
    KMEANS = "kmeans"
    LINEAR_LEARNER = "linear-learner"
    FACTORIZATION_MACHINES = "factorization-machines"
    NTM = "ntm"
    RANDOMCUTFOREST = "randomcutforest"
    KNN = "knn"
    OBJECT2VEC = "object2vec"
    IPINSIGHTS = "ipinsights"
    LDA = "lda"
    FORECASTING_DEEPAR = "forecasting-deepar"
    XGBOOST = "xgboost"
    SEQ2SEQ = "seq2seq"
    IMAGE_CLASSIFICATION = "image-classification"
    BLAZINGTEXT = "blazingtext"
    OBJECT_DETECTION = "object-detection"
    SEMANTIC_SEGMENTATION = "semantic-segmentation"
    IMAGE_CLASSIFICATION_NEO = "image-classification-neo"
    XGBOOST_NEO = "xgboost-neo"

    # Backward-compatible aliases for the original misspelled names.
    IPUNSIGHTS = IPINSIGHTS
    FORCATSING_DEEPAR = FORECASTING_DEEPAR
    IMAGE_CLASIFICATION = IMAGE_CLASSIFICATION
    BLAZONGTEXT = BLAZINGTEXT
    IMAGE_CLASIFICATION_NEO = IMAGE_CLASSIFICATION_NEO
class BaseEstimatorConfig(Config):
    """AWS SageMaker (-s [TASK].estimator.[PARAM]=[VAL] for specific tasks)"""

    # we don't want spark class to inherit from this one, as it should has Config behaviour
    _conf__task_family = "estimator"

    train_instance_count = parameter(default=1)[int]
    train_instance_type = parameter(
        default="ml.c5.4xlarge", description="EC2 instance type to use for a training"
    )[str]
    train_volume_size = parameter(default=30)[int]
    train_max_run = parameter(default=3600)[int]
    base_job_name = parameter.c(default="trng-recommender")
    hyperparameters = parameter(
        empty_default=True, description="Hyperparameter tuning configuration"
    )[Dict]

    def get_input_dict(self, train, test, validate):
        """Map train/test/validate targets to SageMaker channel names."""
        # BUG FIX: ``raise NotImplemented(...)`` raises TypeError because
        # ``NotImplemented`` is a sentinel, not an exception; the correct
        # exception type is NotImplementedError.
        raise NotImplementedError("Subclass should implement")

    def get_estimator(self, task):
        """Build the concrete SageMaker estimator for ``task``."""
        raise NotImplementedError("Subclass should implement")

    def get_estimator_ctrl(self):
        """Build the estimator controller; optional for subclasses."""
        raise NotImplementedError("Subclass should implement")
class GenericEstimatorConfig(BaseEstimatorConfig):
    """Configuration for SageMaker's built-in (image-based) algorithms."""

    _conf__task_family = "generic_estimator"

    algorithm = parameter(
        default=Algorithm.FACTORIZATION_MACHINES, description="algorithm name"
    )

    def get_input_dict(self, train, test, validate):
        """Return the channel mapping; optional channels are skipped when falsy."""
        channels = {"train": str(train)}
        if test:
            channels["test"] = str(test)
        if validate:
            channels["validate"] = str(validate)
        return channels

    def _to_estimator_conf(self, task):
        """Translate task/config attributes into Estimator keyword arguments."""
        from sagemaker.amazon.amazon_estimator import get_image_uri

        cfg = task.estimator_config
        return {
            "image_name": get_image_uri(task.region, cfg.algorithm),
            "role": task.sagemaker_role,
            "train_instance_count": cfg.train_instance_count,
            "train_instance_type": cfg.train_instance_type,
            "train_volume_size": cfg.train_volume_size,
            "output_path": str(task.output_path),
            "base_job_name": cfg.base_job_name,
            "hyperparameters": cfg.hyperparameters,
        }

    def get_estimator(self, task):
        """Instantiate a generic SageMaker Estimator from the task settings."""
        from sagemaker.estimator import Estimator

        return Estimator(**self._to_estimator_conf(task))
class PyTorchEstimatorConfig(BaseEstimatorConfig):
    """Configuration for SageMaker PyTorch (script-mode) estimators."""

    entry_point = parameter.description("Path to a source file")[str]
    source_dir = parameter.description("Path to a additional source files").none()[str]
    py_version = parameter(default="py3", description="Python version")[str]
    # NOTE(review): the two parameters below use ``.none`` without
    # parentheses while ``source_dir`` above uses ``.none()`` — confirm
    # against the dbnd parameter-factory API which form is intended.
    framework_version = parameter.description("PyTorch version").none[str]
    image_name = parameter.description("Custom Image").none[str]

    def get_input_dict(self, train, test, validate):
        """Return channel mapping; PyTorch names the train channel 'training'."""
        channels = {"training": str(train)}
        if test:
            channels["test"] = str(test)
        if validate:
            channels["validate"] = str(validate)
        return channels

    def _to_estimator_conf(self, task):
        """Translate task/config attributes into PyTorch keyword arguments."""
        cfg = task.estimator_config
        return {
            "entry_point": cfg.entry_point,
            "framework_version": cfg.framework_version,
            "image_name": cfg.image_name,
            "role": task.sagemaker_role,
            "train_instance_count": cfg.train_instance_count,
            "train_instance_type": cfg.train_instance_type,
            "train_volume_size": cfg.train_volume_size,
            "output_path": str(task.output_path),
            "hyperparameters": cfg.hyperparameters,
        }

    def get_estimator(self, task):
        """Instantiate a SageMaker PyTorch estimator from the task settings."""
        from sagemaker.pytorch import PyTorch

        return PyTorch(**self._to_estimator_conf(task))
| 4,566 | 1,450 |
# Heap Tree Implementation
from __future__ import annotations
import warnings
from typing import Optional, Any
from datastax.errors import DeletionFromEmptyTreeWarning
from datastax.trees.private_trees.binary_tree import BinaryTree, TreeNode
class HeapNode(TreeNode):
    """Binary-tree node carrying the extra links heap maintenance needs."""

    def __init__(self, data: Any,
                 left: Optional[HeapNode] = None,
                 right: Optional[HeapNode] = None):
        # FIX: ``left: HeapNode = None`` relied on implicit-Optional, which
        # is invalid for static type checkers; annotate Optional explicitly.
        super().__init__(data, left, right)
        # Parent link, used when sifting values up after insertion.
        self.parent: Optional[HeapNode] = None
        # Previous insertion point, used to step back during heappop.
        self.prev_leaf: Optional[HeapNode] = None
class HeapTree(BinaryTree):
    """Max-heap implemented on top of an explicit linked binary tree.

    ``_leaf`` tracks the node under which the next element is inserted,
    which keeps the tree complete (filled level by level, left to right).
    """

    def __init__(self, array: list[Any] = None, root: HeapNode = None):
        self._root: Optional[HeapNode] = root
        self._leaf: Optional[HeapNode] = root
        super().__init__(array, root)

    def _construct(self, array: list[Any] = None) -> Optional[HeapTree]:
        # Build the heap by pushing the items one at a time.
        if not array or array[0] is None:
            return None
        for element in array:
            try:
                self.heappush(element)
            except TypeError as error:
                raise error
        return self

    @property
    def leaf(self):
        """Current insertion point (rightmost node of the deepest level)."""
        return self._leaf

    # Function to push an element inside a tree
    def heappush(self, data: Any) -> None:
        """Insert ``data`` keeping the tree complete and heap-ordered."""
        root = self.root
        if data is None:
            return
        fresh = HeapNode(data)
        if root is None:  # empty heap: new node is both root and leaf
            self._root = self._leaf = fresh
        elif self.leaf and self.leaf.left is None:
            # The insertion point's left slot is free: fill it.
            self.leaf.left = fresh
            fresh.parent = self.leaf
        else:
            if not self.leaf:
                return
            # Fill the right slot, then advance the insertion point and
            # remember where we came from for heappop backtracking.
            self.leaf.right = fresh
            previous_leaf = self.leaf
            fresh.parent = self.leaf
            self._update_leaf(self.leaf)
            self.leaf.prev_leaf = previous_leaf
        self._heapify(fresh)

    # Private function to convert a subtree to heap
    def _heapify(self, node: HeapNode) -> None:
        # Sift the value up while it is larger than its parent's.
        if node.parent and node.parent.data < node.data:
            node.parent.data, node.data = node.data, node.parent.data
            self._heapify(node.parent)

    # Private Helper method of heappush function to
    # update rightmost node in deepest level
    def _update_leaf(self, node: HeapNode) -> None:
        # Move the insertion point to the next slot in level order.
        if node.parent is None:
            self._leaf = node
        elif node.parent.left is node:
            self._leaf = node.parent.right
        elif node.parent.right is node:
            self._update_leaf(node.parent)
        # reach extreme left of next level if current level is full
        while self.leaf and self.leaf.left:
            self._leaf = self.leaf.left

    # Function to pop the largest element in the tree
    def heappop(self) -> Optional[Any]:
        """Remove and return the largest element, or None when empty."""
        if not self.root:
            warnings.warn(
                "Deletion Unsuccessful. Can't delete when"
                "tree is Already Empty", DeletionFromEmptyTreeWarning
            )
            return None
        deleted_data = self.root.data
        if self.root is self.leaf and not any(
                [self.leaf.left, self.leaf.right]):
            # Last remaining node: the heap becomes empty.
            self._root = self._leaf = None
        else:
            if self.leaf.right and self.root:
                # Replace the root value with the last inserted one.
                self.root.data = self.leaf.right.data
                self.leaf.right = None
                self._shift_up(self.root)
            elif self.leaf.left and self.root:
                self.root.data = self.leaf.left.data
                self.leaf.left = None
                self._shift_up(self.root)
            else:  # We have reached the end of a level
                self._leaf = self.leaf.prev_leaf
                return self.heappop()
        return deleted_data

    # Private helper method of heappop function
    def _shift_up(self, node: HeapNode) -> None:
        # Sift the value down by swapping with the larger child.
        largest = node
        left_child = largest.left
        right_child = largest.right
        if left_child and left_child.data > largest.data:
            largest = left_child
        if right_child and right_child.data > largest.data:
            largest = right_child
        if largest is node:
            return
        largest.data, node.data = node.data, largest.data
        self._shift_up(largest)

    def insert(self, item: Any):
        """Alias for :meth:`heappush`."""
        self.heappush(item)
| 4,301 | 1,200 |
import json
import logging
import urllib.parse
import phonenumbers
import math
from pympesa import Pympesa
from django import forms
from django.contrib import messages
from django.core import signing
from django.template.loader import get_template
from django.utils.translation import ugettext as __, ugettext_lazy as _
from django.utils.functional import cached_property
from collections import OrderedDict
from django.http import HttpRequest
from pretix.base.decimal import round_decimal
from pretix.base.models import Order, Quota, RequiredAction,OrderPayment, OrderRefund
from pretix.base.payment import BasePaymentProvider, PaymentException
from pretix.base.services.mail import SendMailException
from pretix.base.services.orders import mark_order_paid, mark_order_refunded
from pretix.helpers.urls import build_absolute_uri as build_global_uri
from pretix.multidomain.urlreverse import build_absolute_uri
from pretix.plugins.paypal.models import ReferencedPayPalObject
from pretix.presale.views.cart import (
cart_session, create_empty_cart_id, get_or_create_cart_id,
)
from .tasks import send_stk
# Module-level logger for the pretix M-Pesa payment plugin.
logger = logging.getLogger('pretix.plugins.mpesa')
# Pretix payment provider that charges customers via Safaricom M-Pesa
# STK push (Lipa na M-Pesa Online).
class Mpesa(BasePaymentProvider):
# Unique provider id used in pretix settings keys.
identifier = 'mpesa'
# Human-readable provider name shown in the backend.
verbose_name = _('Mpesa')
# No extra checkout form fields; the phone number comes from contact data.
payment_form_fields = OrderedDict([
])
@property
def abort_pending_allowed(self):
    """Customers may not abort an M-Pesa payment that is already pending."""
    return False
@cached_property
def cart_session(self):
    """Session-backed cart data for the request stored by checkout_prepare."""
    return cart_session(self.request)
@property
def settings_form_fields(self):
    """Plugin settings form: Safaricom credentials, shortcode, callback URL."""
    mpesa_fields = [
        ('endpoint',
         forms.ChoiceField(
             label=_('Endpoint'),
             initial='sandbox',
             choices=(
                 ('production', 'Live'),
                 ('sandbox', 'Sandbox'),
             ),
         )),
        ('safaricom_consumer_key',
         forms.CharField(
             label=_('Safaricom Consumer Key'),
             required=True,
             help_text=_('<a target="_blank" rel="noopener" href="{docs_url}">{text}</a>').format(
                 text=_('Go to the safaricom developer portal to obtain developer keys a get guidance on going live'),
                 docs_url='https://developer.safaricom.co.ke'
             )
         )),
        ('safaricom_consumer_secret',
         forms.CharField(
             label=_('Safaricom Consumer Secret'),
             required=True,
         )),
        ('mpesa_shortcode',
         forms.CharField(
             label=_('Lipa na Mpesa Online shortcode'),
             required=True,
             help_text=_('Apply for this from safaricom')
         )),
        ('encryption_password',
         forms.CharField(
             label=_('Encription Password'),
             required=True,
             help_text=_('The password for encrypting the request')
         )),
        ('stk_callback_url',
         forms.CharField(
             label=_('Mpesa STK Callback'),
             required=True,
             help_text=_('This is the callback url for mpesa stk')
         )),
        ('mpesa_phone_number_field_required',
         forms.BooleanField(
             label=_('Will the mpesa phone number be required to place an order'),
             help_text=_("If this is not checked, entering a mpesa phone number is optional and the mpesa payment my not work."),
             required=False,
         )),
    ]
    # Keep the provider-specific fields first, then the generic ones.
    return OrderedDict(mpesa_fields + list(super().settings_form_fields.items()))
def checkout_confirm_render(self, request) -> str:
    """Render the snippet shown when this provider is selected on the
    'confirm order' page."""
    context = {'request': request, 'event': self.event,
               'settings': self.settings}
    template = get_template('pretix_mpesa/checkout_payment_confirm.html')
    return template.render(context)
def order_pending_render(self, request, order) -> str:
    """Render the info box shown while an order's M-Pesa payment is pending."""
    context = {'request': request, 'event': self.event,
               'settings': self.settings, 'order': order}
    return get_template('pretix_mpesa/pending.html').render(context)
def payment_form_render(self, request) -> str:
    """Render the payment form snippet shown during checkout."""
    context = {'request': request, 'event': self.event,
               'settings': self.settings}
    return get_template('pretix_mpesa/checkout_payment_form.html').render(context)
def checkout_prepare(self, request, cart):
    """Validate the M-Pesa phone number and stash it in the session.

    Returns True when a valid Kenyan number was supplied; otherwise adds
    a user-facing error message and returns False.
    """
    self.request = request
    raw_number = self.cart_session.get('contact_form_data', {}).get(
        'mpesa_phone_number', '')
    try:
        parsed_num = phonenumbers.parse(raw_number, 'KE')
    except phonenumbers.NumberParseException:
        messages.error(request, _('Please check to confirm that you entered the mpesa phone number and that it was a valid phone number'))
        return False
    if not phonenumbers.is_valid_number(parsed_num):
        messages.error(request, _('The Mpesa number is not a valid phone number'))
        return False
    # Normalise to the 2547XXXXXXXX form M-Pesa expects.
    request.session['mpesa_phone_number'] = '254' + str(parsed_num.national_number)
    return True
def payment_is_valid_session(self, request):
    """Session state never invalidates an M-Pesa payment selection."""
    return True
def order_can_retry(self, order):
    """Allow retrying payment as long as this provider is still available."""
    return self._is_still_available(order=order)
def execute_payment(self, request: HttpRequest, payment: OrderPayment):
    """Kick off an STK push for ``payment`` via the ``send_stk`` task.

    Reads the Safaricom credentials from the plugin settings and the
    customer's phone number from the session (stored by checkout_prepare),
    then dispatches the asynchronous task. Returns None to continue with
    pretix's default flow.

    :param request: the active HTTP request
    :param payment: the OrderPayment being executed
    """
    kwargs = {}
    resolver = request.resolver_match
    if resolver and 'cart_namespace' in resolver.kwargs:
        kwargs['cart_namespace'] = resolver.kwargs['cart_namespace']
    phone = request.session.get('mpesa_phone_number', '')
    logger.debug(phone)
    mode = self.settings.get('endpoint')
    consumer_key = self.settings.get('safaricom_consumer_key')
    consumer_secret = self.settings.get('safaricom_consumer_secret')
    business_short_code = self.settings.get('mpesa_shortcode')
    password = self.settings.get('encryption_password')
    # M-Pesa only accepts whole-currency amounts, so round up.
    amount = math.ceil(payment.amount)
    callback_url = self.settings.get('stk_callback_url')
    logger.debug(amount)
    logger.debug(callback_url)
    send_stk.apply_async(kwargs={
        'consumer_key': consumer_key,
        'consumer_secret': consumer_secret,
        'business_short_code': business_short_code,
        'password': password,
        'amount': str(amount),
        'phone': phone,
        'order_number': str(payment.id),
        'callback_url': callback_url,
        'mode': mode,
    })
    return None
| 7,443 | 1,966 |