hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e2a3a5e193462cd9cfc3f8e77576ef311f0d6d70 | 1,050 | py | Python | async_sched/client/request_schedules.py | justengel/async_sched | f980722d51d15025522b2265426b0188ff368418 | [
"MIT"
] | 1 | 2020-10-19T13:36:20.000Z | 2020-10-19T13:36:20.000Z | async_sched/client/request_schedules.py | justengel/async_sched | f980722d51d15025522b2265426b0188ff368418 | [
"MIT"
] | null | null | null | async_sched/client/request_schedules.py | justengel/async_sched | f980722d51d15025522b2265426b0188ff368418 | [
"MIT"
] | null | null | null | """
Module to run with the -m flag:
python -m async_sched.client.request_schedules
"""
import argparse
from async_sched.client.client import request_schedules
from async_sched.utils import DEFAULT_HOST, DEFAULT_PORT
__all__ = ['NAME', 'get_argparse', 'main']
NAME = 'request_schedules'
def get_argparse(host: str = DEFAULT_HOST, port: int = DEFAULT_PORT, parent_parser=None):
if parent_parser is None:
p = argparse.ArgumentParser(description='Request and list the running schedules')
else:
p = parent_parser.add_parser(NAME, help='Request and list the running schedules')
p.add_argument('--host', type=str, default=host)
p.add_argument('--port', type=int, default=port)
return p
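# A minimal usage sketch (not part of the original module): `parent_parser`
# is assumed to be the object returned by `ArgumentParser.add_subparsers()`,
# since `add_parser` is called on it above.
def _example_subcommand_usage():
    root = argparse.ArgumentParser(description='async_sched client')
    subparsers = root.add_subparsers(dest='command')
    get_argparse(parent_parser=subparsers)
    return root.parse_args(['request_schedules', '--port', '8000'])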
def main(host: str = DEFAULT_HOST, port: int = DEFAULT_PORT, **kwargs):
request_schedules((host, port))
if __name__ == '__main__':
P = get_argparse()
ARGS = P.parse_args()
KWARGS = {n: getattr(ARGS, n) for n in dir(ARGS) if not n.startswith('_') and getattr(ARGS, n, None) is not None}
main(**KWARGS)
| 26.25 | 117 | 0.70381 | 151 | 1,050 | 4.655629 | 0.357616 | 0.091038 | 0.059744 | 0.051209 | 0.196302 | 0.196302 | 0.102418 | 0.102418 | 0 | 0 | 0 | 0 | 0.175238 | 1,050 | 39 | 118 | 26.923077 | 0.811778 | 0.074286 | 0 | 0 | 0 | 0 | 0.139148 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.15 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e2a4885ec926f3d640fc765abc70c06983834ccb | 2,156 | py | Python | sns/api/reddit/reddit.py | kylepw/panner | 482ef8e8c1e8d9464d7dc8e4df5b5d9b58e83d35 | [
"MIT"
] | 2 | 2019-07-20T01:48:20.000Z | 2019-11-15T06:50:54.000Z | sns/api/reddit/reddit.py | kylepw/panner | 482ef8e8c1e8d9464d7dc8e4df5b5d9b58e83d35 | [
"MIT"
] | 5 | 2020-02-12T08:58:06.000Z | 2021-09-22T17:56:42.000Z | sns/api/reddit/reddit.py | kylepw/panner | 482ef8e8c1e8d9464d7dc8e4df5b5d9b58e83d35 | [
"MIT"
] | null | null | null | from datetime import datetime
import logging
import os
from praw import Reddit as PrawReddit
from prawcore.exceptions import NotFound
import pytz
logger = logging.getLogger(__name__)
class Reddit:
def __init__(self, client_id=None, client_secret=None, user_agent=None):
self.client_id = client_id or os.getenv('REDDIT_CLIENT_ID')
self.client_secret = client_secret or os.getenv('REDDIT_CLIENT_SECRET')
self.user_agent = user_agent or os.getenv('REDDIT_USER_AGENT')
self.api = PrawReddit(
client_id=self.client_id,
client_secret=self.client_secret,
user_agent=self.user_agent,
read_only=True,
)
def get_comments_submissions(self, username, num=5):
"""Return max `num` of comments and submissions by `username`."""
coms = [
dict(
title=comment.link_title,
text=comment.body_html,
subreddit=comment.subreddit_name_prefixed,
url=comment.link_url,
created=datetime.fromtimestamp(comment.created_utc, pytz.utc),
)
for comment in self.api.redditor(username).comments.new(limit=num)
]
subs = [
dict(
title=submission.title,
text=submission.selftext_html,
subreddit=submission.subreddit_name_prefixed,
url=submission.url,
created=datetime.fromtimestamp(submission.created_utc, pytz.utc),
)
for submission in self.api.redditor(username).submissions.new(limit=num)
]
        return (coms + subs)[:num]
def profile_image_url(self, username):
"""Return URL of user's avatar image."""
try:
return self.api.redditor(username).icon_img
except NotFound:
logger.exception('Failed to fetch Reddit profile image of %s', username)
return None
@staticmethod
def profile_url(username):
"""Return URL of user's profile."""
return 'https://www.reddit.com/user/%s' % username
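# Usage sketch (not part of the original module); assumes the three REDDIT_*
# environment variables are set so the constructor arguments can be omitted,
# and that 'some_user' is a hypothetical existing redditor.
def _example_reddit_usage():
    reddit = Reddit()
    for item in reddit.get_comments_submissions('some_user', num=3):
        print(item['subreddit'], item['title'], item['created'])
    print(Reddit.profile_url('some_user'))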
| 35.933333 | 84 | 0.621521 | 251 | 2,156 | 5.155378 | 0.334661 | 0.037094 | 0.027821 | 0.037094 | 0.140649 | 0.037094 | 0 | 0 | 0 | 0 | 0 | 0.000651 | 0.28757 | 2,156 | 59 | 85 | 36.542373 | 0.841797 | 0.057514 | 0 | 0.040816 | 0 | 0 | 0.062004 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081633 | false | 0 | 0.122449 | 0 | 0.306122 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e2a8d2bec3d6270cbb856eb6bae1fa43dddb9949 | 1,736 | py | Python | tests/test_lsp.py | WeixinYang/DatasetLoader | d6e800d6a9cca8809d7dab88a6a13a7916ef272d | [
"Apache-2.0"
] | 1 | 2021-08-16T14:14:40.000Z | 2021-08-16T14:14:40.000Z | tests/test_lsp.py | WeixinYang/DatasetLoader | d6e800d6a9cca8809d7dab88a6a13a7916ef272d | [
"Apache-2.0"
] | 1 | 2020-10-12T20:56:21.000Z | 2020-10-12T20:56:21.000Z | tests/test_lsp.py | WeixinYang/DatasetLoader | d6e800d6a9cca8809d7dab88a6a13a7916ef272d | [
"Apache-2.0"
] | 1 | 2021-06-14T10:30:08.000Z | 2021-06-14T10:30:08.000Z | import os.path
import pytest
from datasetloader import LSP
from datasetloader import LSPExtended
from . import DS_PATH
class TestLSP():
def test_LSP(self):
        # test loading both full-sized and small images
folders = ("lsp", "lsp_small")
for ds in folders:
lsp = LSP(os.path.join(DS_PATH, ds))
# check dataset sizes and get_data accessors on different elements
assert lsp.get_data("image-filenames").shape == (2000, )
assert lsp.get_data("keypoints", "train").shape == (1000, 14, 3)
d = lsp.get_data(("image-filenames", "keypoints"), "test")
assert d[0].shape == (1000, )
assert d[1].shape == (1000, 14, 3)
# test iterator access
it = lsp.get_iterator(("image-filenames", "keypoints"), "train")
filename, keypoints = next(it)
# check we got the correct first element
assert isinstance(filename, str)
assert keypoints.shape == (14, 3)
def test_LSPExtended(self):
lsp = LSPExtended(os.path.join(DS_PATH, "lsp_extended"))
# check dataset sizes and get_data accessors on different elements
assert lsp.get_data("image-filenames").shape == (10000, )
assert lsp.get_data("keypoints").shape == (10000, 14, 3)
with pytest.raises(Exception):
lsp.get_data("image-filenames", "train")
lsp = LSPExtended(os.path.join(DS_PATH, "lsp_extended_improved"),
improved=True)
# check dataset sizes and get_data accessors on different elements
assert lsp.get_data("image-filenames").shape == (9428, )
assert lsp.get_data("keypoints").shape == (9428, 14, 3)
| 41.333333 | 78 | 0.611751 | 215 | 1,736 | 4.837209 | 0.316279 | 0.074038 | 0.076923 | 0.092308 | 0.481731 | 0.396154 | 0.338462 | 0.338462 | 0.338462 | 0.259615 | 0 | 0.040253 | 0.270161 | 1,736 | 41 | 79 | 42.341463 | 0.780584 | 0.173387 | 0 | 0 | 0 | 0 | 0.139356 | 0.014706 | 0 | 0 | 0 | 0 | 0.344828 | 1 | 0.068966 | false | 0 | 0.172414 | 0 | 0.275862 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e2aefdb5d4c4918146034a0834daf3d3d9bd181b | 1,173 | py | Python | website/python/app.py | man-r/DimensionsLab | c94c3aec0d52326ad522a6fa41d43ec3bde87d74 | [
"MIT"
] | null | null | null | website/python/app.py | man-r/DimensionsLab | c94c3aec0d52326ad522a6fa41d43ec3bde87d74 | [
"MIT"
] | 1 | 2022-03-24T06:13:52.000Z | 2022-03-24T06:13:52.000Z | website/python/app.py | man-r/DimensionsLab | c94c3aec0d52326ad522a6fa41d43ec3bde87d74 | [
"MIT"
] | null | null | null | from flask import Flask
from flask_restful import Api, Resource, reqparse
app = Flask(__name__)
api = Api(app)
users = [
{
"name": "Nicholas",
"age": 42,
"occupation": "Network Engineer"
},
{
"name": "Elvin",
"age": 32,
"occupation": "Doctor"
},
{
"name": "Jass",
"age": 22,
"occupation": "Web Developer"
}
]
class User(Resource):
def get(self, name):
for user in users:
            if name == user["name"]:
return user, 200
return "User not found", 404
def post(self, name):
parser = reqparse.RequestParser()
parser.add_argument("age")
parser.add_argument("occupation")
args = parser.parse_args()
for user in users:
            if name == user["name"]:
                return "User with the name {} already exists".format(name), 400
user = {
"name": name,
"age": args["age"],
"occupation": args["occupation"]
}
users.append(user)
return user, 201
def delete(self, name):
global users
users = [user for user in users if user["name"] != name]
return "{} is deleted.".format(name), 200
api.add_resource(User, "/user/<string:name>")
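# Example interactions with this API (a sketch, assuming the default Flask
# development server at http://127.0.0.1:5000):
#   GET    /user/Nicholas                          -> 200 with the user record
#   POST   /user/Ann   (age=30, occupation=Nurse)  -> 201 with the new record
#   DELETE /user/Ann                               -> 200, "Ann is deleted."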
app.run(debug=True) | 19.881356 | 66 | 0.58994 | 146 | 1,173 | 4.678082 | 0.40411 | 0.046852 | 0.039531 | 0.061493 | 0.1347 | 0.111274 | 0.111274 | 0.111274 | 0.111274 | 0.111274 | 0 | 0.023864 | 0.249787 | 1,173 | 59 | 67 | 19.881356 | 0.752273 | 0 | 0 | 0.083333 | 0 | 0 | 0.204429 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.041667 | 0 | 0.229167 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e2af09249eabdf6c6cfac1108013ff5630a16a85 | 371 | py | Python | src/applications/reviews/urls.py | Alex-T13/sxw_data_conversion | ae70198960af1af004ad28b73d6e885c5afa74c2 | [
"MIT"
] | null | null | null | src/applications/reviews/urls.py | Alex-T13/sxw_data_conversion | ae70198960af1af004ad28b73d6e885c5afa74c2 | [
"MIT"
] | null | null | null | src/applications/reviews/urls.py | Alex-T13/sxw_data_conversion | ae70198960af1af004ad28b73d6e885c5afa74c2 | [
"MIT"
] | null | null | null | from django.urls import path
from applications.reviews import views
urlpatterns = [
path('', views.AllPostView.as_view(), name='reviews'),
path('add_post', views.AddPostView.as_view(), name='add_post'),
path('post/<int:pk>', views.ShowPostView.as_view(), name='post'),
path('update_post/<int:pk>', views.UpdatePostView.as_view(), name='update_post'),
]
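# Sketch of a hypothetical project-level urls.py mounting these routes under
# a "reviews/" prefix (illustrative only):
#   from django.urls import include, path
#   urlpatterns = [path('reviews/', include('applications.reviews.urls'))]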
| 33.727273 | 85 | 0.700809 | 50 | 371 | 5.04 | 0.42 | 0.095238 | 0.15873 | 0.111111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.113208 | 371 | 10 | 86 | 37.1 | 0.765957 | 0 | 0 | 0 | 0 | 0 | 0.191375 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e2af6e967078f9ead2d03be919925056b558ae89 | 13,207 | py | Python | tests/tests_with_server_and_cached_results/test_planner/test_canvas.py | aquariumbio/trident | d1712cae544103fb145e3171894e4b35141f6813 | [
"MIT"
] | 5 | 2019-01-21T11:12:05.000Z | 2020-03-05T20:52:14.000Z | tests/tests_with_server_and_cached_results/test_planner/test_canvas.py | aquariumbio/pydent | d1712cae544103fb145e3171894e4b35141f6813 | [
"MIT"
] | 28 | 2020-11-18T02:07:09.000Z | 2021-06-08T15:49:41.000Z | tests/tests_with_server_and_cached_results/test_planner/test_canvas.py | aquariumbio/trident | d1712cae544103fb145e3171894e4b35141f6813 | [
"MIT"
] | 2 | 2021-02-27T19:23:45.000Z | 2021-09-14T10:29:07.000Z | import pytest
from pydent.planner import Planner
from pydent.planner import PlannerException
from pydent.planner.utils import get_subgraphs
def test_canvas_create(session):
canvas = Planner(session)
canvas.create()
assert canvas.plan.id
def test_raises_exception_wiring_with_no_afts(session):
canvas = Planner(session)
op1 = canvas.create_operation_by_name("Make PCR Fragment", category="Cloning")
op2 = canvas.create_operation_by_name("Check Plate", category="Cloning")
with pytest.raises(PlannerException):
canvas._set_wire(op1.outputs[0], op2.inputs[0])
def test_add_wire(session):
canvas = Planner(session)
assert len(canvas.plan.wires) == 0
op1 = canvas.create_operation_by_name("Make PCR Fragment", category="Cloning")
op2 = canvas.create_operation_by_name("Rehydrate Primer", category="Cloning")
canvas.add_wire(op2.outputs[0], op1.input("Forward Primer"))
assert len(canvas.plan.wires) == 1
wire = canvas.plan.wires[0]
assert (
wire.source.allowable_field_type.sample_type_id
== wire.destination.allowable_field_type.sample_type_id
)
assert (
wire.source.allowable_field_type.object_type_id
== wire.destination.allowable_field_type.object_type_id
)
def test_add_wire_sets_sample_from_destination(session):
"""When adding a wire, the sample should be set from the destination to the
source."""
session.set_verbose(True)
canvas = Planner(session)
assert len(canvas.plan.wires) == 0
p = session.Sample.one(
query=dict(sample_type_id=session.SampleType.find_by_name("Primer").id)
)
destination = canvas.create_operation_by_name(
"Make PCR Fragment", category="Cloning"
)
source = canvas.create_operation_by_name("Rehydrate Primer", category="Cloning")
canvas.set_field_value(destination.input("Forward Primer"), sample=p)
assert destination.input("Forward Primer").sample is p
canvas.add_wire(source.outputs[0], destination.input("Forward Primer"))
assert source.outputs[0].sample.id == p.id
def test_add_wire_sets_sample_from_source(session):
session.set_verbose(True)
canvas = Planner(session)
assert len(canvas.plan.wires) == 0
p = session.Sample.one(
query=dict(sample_type_id=session.SampleType.find_by_name("Primer").id)
)
destination = canvas.create_operation_by_name(
"Make PCR Fragment", category="Cloning"
)
source = canvas.create_operation_by_name("Rehydrate Primer", category="Cloning")
canvas.set_field_value(source.outputs[0], sample=p)
canvas.add_wire(source.outputs[0], destination.input("Forward Primer"))
assert destination.input("Forward Primer").sample.id == p.id
def test_collect_matching_afts(session):
canvas = Planner(session)
op1 = canvas.create_operation_by_name("Check Plate", category="Cloning")
op2 = canvas.create_operation_by_name("E Coli Lysate", category="Cloning")
afts = canvas._collect_matching_afts(op1, op2)
print(afts)
def test_raise_exception_if_wiring_two_inputs(session):
canvas = Planner(session)
assert len(canvas.plan.wires) == 0
op1 = canvas.create_operation_by_name("Check Plate", category="Cloning")
op2 = canvas.create_operation_by_name("Check Plate", category="Cloning")
with pytest.raises(PlannerException):
canvas.add_wire(op1.inputs[0], op2.inputs[0])
def test_raise_exception_if_wiring_two_outputs(session):
canvas = Planner(session)
assert len(canvas.plan.wires) == 0
op1 = canvas.create_operation_by_name("Check Plate", category="Cloning")
op2 = canvas.create_operation_by_name("Check Plate", category="Cloning")
with pytest.raises(PlannerException):
canvas.add_wire(op1.outputs[0], op2.outputs[0])
def test_canvas_add_op(session):
canvas = Planner(session)
canvas.create_operation_by_name("Yeast Transformation")
canvas.create_operation_by_name("Yeast Antibiotic Plating")
canvas.quick_wire_by_name("Yeast Transformation", "Yeast Antibiotic Plating")
canvas.create()
p = session.Plan.find(canvas.plan.id)
pass
def test_canvas_quick_create_chain(session):
canvas = Planner(session)
canvas.chain(
"Yeast Transformation", "Check Yeast Plate", "Yeast Overnight Suspension"
)
assert len(canvas.plan.operations) == 3
    assert len(canvas.plan.wires) == 2, "There should be two wires"
def test_chain_run_gel(session):
canvas = Planner(session)
canvas.chain("Make PCR Fragment", "Run Gel", category="Cloning")
def test_quick_chain_to_existing_operation(session):
canvas = Planner(session)
op = canvas.create_operation_by_name("Yeast Transformation")
canvas.chain(op, "Check Yeast Plate")
assert len(canvas.plan.wires) == 1
def test_quick_chain_to_existing_operation_too_many_times(session):
canvas = Planner(session)
op = canvas.create_operation_by_name("Yeast Transformation")
op1 = canvas.chain(op, "Check Yeast Plate")[-1]
with pytest.raises(PlannerException):
canvas.chain("Yeast Transformation", op1)
assert len(canvas.plan.wires) == 1
def test_canvas_chaining(session):
canvas = Planner(session)
canvas.browser.log.set_verbose(True)
ops = canvas.chain(
"Assemble Plasmid",
"Transform Cells",
"Plate Transformed Cells",
"Check Plate",
category="Cloning",
)
assert len(canvas.plan.wires) == 3
new_ops = []
for i in range(3):
new_ops += canvas.chain(
ops[-1], ("E Coli Lysate", "Cloning"), "E Coli Colony PCR"
)[1:]
assert len(canvas.plan.wires) == 2 * 3 + 3
def test_layout_edges_and_nodes(session):
canvas = Planner(session)
canvas.chain(
"Yeast Transformation", "Check Yeast Plate", "Yeast Overnight Suspension"
)
G = canvas.layout.nxgraph
edges = list(G.edges)
assert len(edges) == 2, "There should only be 2 edges/wires in the graph/plan"
assert (
len(G.nodes) == 3
), "There should only be 3 nodes/Operations in the graph/plan"
assert edges[0][1] == edges[1][0], "Check Yeast Plate should be in both wires"
def test_load_canvas(session):
canvas = Planner(session.Plan.one())
assert canvas is not None
assert canvas.plan is not None
assert canvas.plan.operations is not None
def test_proper_setting_of_object_types(session):
canvas = Planner(session)
yeast = session.Sample.where(
{"sample_type_id": session.SampleType.find_by_name("Yeast Strain").id},
opts={"limit": 10},
)[-1]
streak = canvas.create_operation_by_name("Streak Plate", category="Yeast")
glycerol = canvas.create_operation_by_name("Yeast Glycerol Stock", category="Yeast")
canvas.set_field_value(glycerol.inputs[0], sample=yeast)
canvas.set_field_value(streak.inputs[0], sample=yeast)
mating = canvas.create_operation_by_name("Yeast Mating")
canvas.add_wire(streak.outputs[0], mating.inputs[0])
canvas.add_wire(glycerol.outputs[0], mating.inputs[1])
assert (
mating.inputs[0].allowable_field_type.object_type.name == "Divided Yeast Plate"
)
assert (
mating.inputs[1].allowable_field_type.object_type.name == "Yeast Glycerol Stock"
)
def test_annotate(session):
canvas = Planner(session)
a = canvas.annotate("This is my annotation", 10, 20, 110, 100)
assert a["x"] == 10
assert a["y"] == 20
anchor = a["anchor"]
assert anchor["x"] == 110
assert anchor["y"] == 100
def test_annotate_layout(session):
canvas = Planner(session)
ops = canvas.chain("Make PCR Fragment", "Run Gel", category="Cloning")
canvas.layout.topo_sort()
canvas.layout.move(100, 200)
a = canvas.annotate_above_layout("This is an annotation", 100, 50)
anchor = a["anchor"]
xmidpoint = a["x"] + anchor["x"] / 2
ybottom = a["y"] + anchor["y"]
assert xmidpoint == 100 + canvas.layout.BOX_WIDTH / 2
assert ybottom == 200 - canvas.layout.BOX_DELTA_Y / 2
canvas.plan.name = "annotation test"
canvas.create()
print(canvas.url)
def test_routing_graph(session):
canvas = Planner(session)
ops = canvas.chain(
"Rehydrate Primer",
"Make PCR Fragment",
"Run Gel",
"Extract Gel Slice",
"Purify Gel Slice",
"Assemble Plasmid",
category="Cloning",
)
routing_graph = canvas._routing_graph()
print(get_subgraphs(routing_graph))
def test_quick_wire_to_input_array(session):
canvas = Planner(session)
ops = canvas.chain("Purify Gel Slice", "Assemble Plasmid", category="Cloning")
canvas.chain("Purify Gel Slice", ops[-1], category="Cloning")
assert len(canvas.plan.operations) == 3
assert len(canvas.plan.wires) == 2
def test_quick_wire_to_input_array_and_then_set_sample(session):
canvas = Planner(session)
frags = session.Sample.where(
{"sample_type_id": session.SampleType.find_by_name("Fragment").id},
opts={"limit": 10},
)
purify1 = canvas.create_operation_by_name("Purify Gel Slice", category="Cloning")
purify2 = canvas.create_operation_by_name("Purify Gel Slice", category="Cloning")
assemble = canvas.create_operation_by_name("Assemble Plasmid", category="Cloning")
canvas.quick_wire(purify1, assemble)
canvas.quick_wire(purify2, assemble)
canvas.set_field_value_and_propogate(purify1.inputs[0], sample=frags[0])
input_array = assemble.input_array("Fragment")
print("purify1: " + str(purify1.rid))
print("purify2: " + str(purify2.rid))
for i in input_array:
print(
i.operation.operation_type.name
+ " "
+ i.name
+ " "
+ str(i.sample)
+ " "
+ str(canvas.get_incoming_wires(i)[0].source.operation.rid)
)
print("ljljklj")
print(purify2.outputs[0].sample)
assert (
assemble.input_array("Fragment")[0].sample == frags[0]
), "Setting a wire should propogate to a field value"
assert assemble.input_array("Fragment")[1].sample is None, (
"Setting a wire should not propogate sample to other field"
"values in the input array."
)
def test_quick_wire_to_input_array_with_set_sample(session):
canvas = Planner(session)
frags = session.Sample.where(
{"sample_type_id": session.SampleType.find_by_name("Fragment").id},
opts={"limit": 10},
)
purify1 = canvas.create_operation_by_name("Purify Gel Slice", category="Cloning")
purify2 = canvas.create_operation_by_name("Purify Gel Slice", category="Cloning")
canvas.set_field_value(purify1.inputs[0], sample=frags[0])
canvas.set_field_value(purify2.inputs[0], sample=frags[1])
assemble = canvas.create_operation_by_name("Assemble Plasmid", category="Cloning")
canvas.quick_wire(purify1, assemble)
canvas.quick_wire(purify2, assemble)
canvas.chain("Purify Gel Slice", assemble, category="Cloning")
input_array = assemble.input_array("Fragment")
assert len(input_array) == 3, "There should be 3 field values"
assert input_array[0].sample == frags[0]
assert input_array[1].sample == frags[1]
assert input_array[2].sample is None
# TODO: this test is not finished.
def test_set_output_and_propogate(session):
session.set_verbose(True)
canvas = Planner(session)
ops = canvas.chain(
"Rehydrate Primer",
"Make PCR Fragment",
"Run Gel",
"Extract Gel Slice",
"Purify Gel Slice",
"Assemble Plasmid",
category="Cloning",
)
example_fragment = session.Sample.find_by_name("SV40-dCas9-split")
canvas.set_output_sample(
ops[1].outputs[0],
sample=example_fragment,
setter=canvas.set_field_value_and_propogate,
)
canvas.validate()
def test_set_input_array(session):
canvas = Planner(session)
op = canvas.create_operation_by_name("Assemble Plasmid", category="Cloning")
frags = session.Sample.where(
{"sample_type_id": session.SampleType.find_by_name("Fragment").id},
opts={"limit": 10},
)
canvas.set_input_field_value_array(op, "Fragment", sample=frags[0])
canvas.set_input_field_value_array(op, "Fragment", sample=frags[1])
input_array = op.input_array("Fragment")
assert (
len(op.input_array("Fragment")) == 2
), "There should be exactly 2 field values in the input array"
assert (
input_array[0] != input_array[1]
), "Input array field values should be different"
assert len(op.input_array("Fragment")) == 2
assert (
op.input_array("Fragment")[0].sample == frags[0]
), "Input array 0 should have fragment 0"
assert (
op.input_array("Fragment")[1].sample == frags[1]
), "Input array 1 should have fragment 1"
def test_plan_validate_with_no_errors(session):
"""An easy to pass test.
A plan that is complete should always pass the validation method.
"""
session.set_verbose(True)
plan = session.Plan.one(query='status != "planning"')
assert plan
canvas = Planner(plan)
canvas.validate()
| 31.595694 | 88 | 0.687211 | 1,728 | 13,207 | 5.05787 | 0.118056 | 0.024714 | 0.067277 | 0.073684 | 0.645538 | 0.595881 | 0.484668 | 0.423455 | 0.398627 | 0.384439 | 0 | 0.016307 | 0.192095 | 13,207 | 417 | 89 | 31.671463 | 0.802812 | 0.015446 | 0 | 0.424342 | 0 | 0 | 0.173665 | 0 | 0 | 0 | 0 | 0.002398 | 0.151316 | 1 | 0.085526 | false | 0.003289 | 0.013158 | 0 | 0.098684 | 0.026316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e2b0056c08e76535e1062630476c66ecd0573a56 | 1,021 | py | Python | icekit/plugins/image/migrations/0015_auto_20170310_2004.py | ic-labs/django-icekit | c507ea5b1864303732c53ad7c5800571fca5fa94 | [
"MIT"
] | 52 | 2016-09-13T03:50:58.000Z | 2022-02-23T16:25:08.000Z | icekit/plugins/image/migrations/0015_auto_20170310_2004.py | ic-labs/django-icekit | c507ea5b1864303732c53ad7c5800571fca5fa94 | [
"MIT"
] | 304 | 2016-08-11T14:17:30.000Z | 2020-07-22T13:35:18.000Z | icekit/plugins/image/migrations/0015_auto_20170310_2004.py | ic-labs/django-icekit | c507ea5b1864303732c53ad7c5800571fca5fa94 | [
"MIT"
] | 12 | 2016-09-21T18:46:35.000Z | 2021-02-15T19:37:50.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('icekit_plugins_image', '0014_image_external_ref'),
]
operations = [
migrations.RenameField(
model_name='image',
old_name='maximum_dimension',
new_name='maximum_dimension_pixels',
),
migrations.AlterField(
model_name='image',
name='maximum_dimension_pixels',
field=models.PositiveIntegerField(blank=True, help_text='If this image is to be limited to a particular pixel size for distribution, note it here.', null=True),
),
migrations.AlterField(
model_name='image',
name='alt_text',
field=models.CharField(max_length=255, blank=True, help_text="A description of the image for users who don't see images visually. Leave blank if the image has no informational value."),
),
]
| 34.033333 | 197 | 0.643487 | 116 | 1,021 | 5.456897 | 0.62931 | 0.042654 | 0.066351 | 0.082149 | 0.120063 | 0.120063 | 0 | 0 | 0 | 0 | 0 | 0.010596 | 0.260529 | 1,021 | 29 | 198 | 35.206897 | 0.827815 | 0.020568 | 0 | 0.347826 | 0 | 0.043478 | 0.340681 | 0.071142 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.086957 | 0 | 0.217391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e2b14a40b24c80800d570e25ab804585deb5e038 | 3,531 | py | Python | b3j0f/conf/parser/resolver/base.py | b3j0f/configuration | 18dd6d5d6560f9b202793739e2330a2181163511 | [
"MIT"
] | 3 | 2016-02-18T18:58:24.000Z | 2017-03-14T08:40:01.000Z | b3j0f/conf/parser/resolver/base.py | b3j0f/configuration | 18dd6d5d6560f9b202793739e2330a2181163511 | [
"MIT"
] | 1 | 2016-02-18T15:27:35.000Z | 2016-04-02T10:36:43.000Z | b3j0f/conf/parser/resolver/base.py | b3j0f/configuration | 18dd6d5d6560f9b202793739e2330a2181163511 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# --------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2015 Jonathan Labéjof <jonathan.labejof@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# --------------------------------------------------------------------
"""Programming language expression resolver module.
This module defines expression resolvers for dedicated programming languages.
An expression resolver is a function which takes in parameters:
- expr: string expression to resolve.
- safe: boolean flag for a safe execution context.
- tostr: boolean flag indicating whether to format the result as a string
  representation.
- scope: dict of variables available in the execution context.
A resolver is registered with the function ``register`` and is loaded in three
ways:
- in setting the environment variable 'B3J0F_EXPRRES_PATH' (for example,
`b3j0f.conf.parser.lang.js,custom.c` could load both modules `js` and `c`
containing programming language parsers).
- in using the function `loadresolvers`.
- in simply importing a dedicated module with the import keyword.
"""
__all__ = ['ExprResolver']
from six import add_metaclass
from .registry import register
from .core import (
DEFAULT_BESTEFFORT, DEFAULT_SAFE, DEFAULT_TOSTR, DEFAULT_SCOPE
)
class _MetaExprResolver(type):
"""Expression Resolver meta class.
Register automatically ExprResolver classes."""
def __new__(cls, *args, **kwargs):
result = super(_MetaExprResolver, cls).__new__(cls, *args, **kwargs)
if result.__register__:
register(exprresolver=result)
return result
@add_metaclass(_MetaExprResolver)
class ExprResolver(object):
"""Expression resolver class.
All sub classes are automatically registered."""
__register__ = False #: class registration flag.
def __call__(
self, expr,
safe=DEFAULT_SAFE, tostr=DEFAULT_TOSTR, scope=DEFAULT_SCOPE,
besteffort=DEFAULT_BESTEFFORT
):
"""Resolve input expression.
:param str expr: configuration expression to resolve in this language.
:param bool safe: safe run execution context (True by default).
:param bool tostr: format the result.
:param dict scope: execution scope (contains references to expression
objects).
:param bool besteffort: try to resolve unknown variable name from
runtime context.
"""
raise NotImplementedError()
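# A minimal concrete resolver sketch (not part of this module); the use of
# ``eval`` is illustrative only. Subclasses with ``__register__ = True`` are
# picked up automatically by the metaclass above; this toy keeps it False to
# stay out of the real registry.
class _ExampleEvalResolver(ExprResolver):
    """Toy resolver that evaluates expressions with Python's ``eval``."""
    __register__ = False
    def __call__(
        self, expr,
        safe=DEFAULT_SAFE, tostr=DEFAULT_TOSTR, scope=DEFAULT_SCOPE,
        besteffort=DEFAULT_BESTEFFORT
    ):
        variables = dict(scope or {})
        result = eval(expr, {'__builtins__': {}} if safe else None, variables)
        return str(result) if tostr else result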
| 35.666667 | 79 | 0.704616 | 436 | 3,531 | 5.616972 | 0.479358 | 0.035933 | 0.010617 | 0.013067 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003149 | 0.190598 | 3,531 | 98 | 80 | 36.030612 | 0.853744 | 0.74823 | 0 | 0 | 0 | 0 | 0.015584 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.142857 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e2b187b27bc8c59eec3870b18d05e4350c4e270b | 1,670 | py | Python | statement_renamer/extractors/capitalone360.py | mkazin/StatementRenamer | ef03c71f0e627a15a4bba08e45bfa90ecacd28fc | [
"Apache-2.0"
] | null | null | null | statement_renamer/extractors/capitalone360.py | mkazin/StatementRenamer | ef03c71f0e627a15a4bba08e45bfa90ecacd28fc | [
"Apache-2.0"
] | 15 | 2018-05-01T12:48:30.000Z | 2021-05-14T02:52:48.000Z | statement_renamer/extractors/capitalone360.py | mkazin/StatementRenamer | ef03c71f0e627a15a4bba08e45bfa90ecacd28fc | [
"Apache-2.0"
] | 1 | 2019-07-09T22:59:50.000Z | 2019-07-09T22:59:50.000Z | from datetime import datetime
from .extractor import DateExtractor, ExtractedData, ExtractorException
class CapitalOne360ExtractorException(ExtractorException):
def __init__(self, *args, **kwargs):
ExtractorException.__init__(self, *args, **kwargs)
class CapitalOne360DateExtractor(DateExtractor):
EXCEPTION = CapitalOne360ExtractorException
DATE_FORMAT = '%m/%d/%Y'
SEARCH_TEXT = "Opening Balance"
END_TEXT = 'Closing Balance'
FILE_FORMAT = '{:02}-{:02}-CapitalOne360.pdf'
@staticmethod
def match(text):
return 'My Info section.capitalone360.comInteractive' in text
def extract(self, text):
start_date = None
end_date = None
start = 0
while True:
start = text.find(self.__class__.SEARCH_TEXT, start + 1)
self.__handle_search_failure__(start < 0)
start += len(self.__class__.SEARCH_TEXT)
try:
int(text[start])
parts = text[start:].strip().split('$')
start_date = datetime.strptime(
parts[0], self.__class__.DATE_FORMAT)
end = text.find(self.__class__.END_TEXT)
end_text = text[end + len(self.__class__.END_TEXT):]
int(end_text[0])
parts = end_text.replace(' ', '').split('$')
end_date = datetime.strptime(
parts[0], self.__class__.DATE_FORMAT)
break
except ValueError:
print("ValueError at index: {} - [{}]".format(start, text[start:]))
return ExtractedData(start_date, end_date)
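# Usage sketch (not part of the original module): run `match` before `extract`.
# Assumes the base DateExtractor needs no constructor arguments and that
# ``ExtractedData`` exposes the two dates passed to its constructor.
def _example_extract(pdf_text):
    if CapitalOne360DateExtractor.match(pdf_text):
        return CapitalOne360DateExtractor().extract(pdf_text)
    raise CapitalOne360ExtractorException('not a CapitalOne 360 statement')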
| 29.821429 | 83 | 0.592814 | 166 | 1,670 | 5.620482 | 0.385542 | 0.052519 | 0.025723 | 0.038585 | 0.096463 | 0.096463 | 0.096463 | 0.096463 | 0.096463 | 0 | 0 | 0.021386 | 0.3 | 1,670 | 55 | 84 | 30.363636 | 0.776732 | 0 | 0 | 0.052632 | 0 | 0 | 0.086228 | 0.038922 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078947 | false | 0.026316 | 0.052632 | 0.026316 | 0.368421 | 0.026316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e2b274a05a49e36d3f5f1623c359598acd291590 | 1,335 | py | Python | setup.py | oeg-upm/ya2ro | 799b59046c77a9277b92f2adfa1521d353dfe93d | [
"Apache-2.0"
] | 1 | 2021-10-06T09:34:48.000Z | 2021-10-06T09:34:48.000Z | setup.py | oeg-upm/ya2ro | 799b59046c77a9277b92f2adfa1521d353dfe93d | [
"Apache-2.0"
] | 30 | 2021-11-17T18:35:31.000Z | 2022-03-28T10:46:45.000Z | setup.py | PavelAntonia/EELISA-research-object | 799b59046c77a9277b92f2adfa1521d353dfe93d | [
"Apache-2.0"
] | 1 | 2021-11-22T17:15:53.000Z | 2021-11-22T17:15:53.000Z | from setuptools import find_packages, setup
version = {}
with open("src/ya2ro/ya2ro.py") as fp:
exec(fp.read(), version)
setup(
name='ya2ro',
author='Antonia Pavel',
author_email='floriana.antonia.pavel@gmail.com',
    description='Tool that takes basic information about a project or research article (such as its datasets, software, contributors, and bibliography) and generates two files of structured information, intended to ease readability for both machines and people: a webpage with all the relevant information and a Research Object.',
version=version['__version__'],
url='https://github.com/oeg-upm/ya2ro',
packages=find_packages(where="src",),
package_dir={"": "src"},
package_data={'ya2ro': ['images/*', 'resources/*']},
license='Apache License 2.0',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
entry_points={
'console_scripts': [
'ya2ro = ya2ro.ya2ro:main',
],
},
install_requires=[
'PyYAML>=5.0.0',
'bs4>=0.0.1',
'requests>=2.22.0',
'bibtexparser>=1.2.0',
'Pygments>=2.11.2',
'somef',
'soca @ git+https://github.com/oeg-upm/soca',
'metadata-parser'
]
)
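# Usage sketch (comments only): from a source checkout,
#   pip install .        # regular install
#   pip install -e .     # editable install for development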
| 37.083333 | 393 | 0.64794 | 175 | 1,335 | 4.851429 | 0.622857 | 0.035336 | 0.03298 | 0.040047 | 0.047114 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02644 | 0.206742 | 1,335 | 35 | 394 | 38.142857 | 0.77526 | 0 | 0 | 0 | 0 | 0.030303 | 0.546816 | 0.02397 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.030303 | 0.030303 | 0 | 0.030303 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e2b8e2b89a1d109f4bd53134df5660df9751af92 | 545 | py | Python | actstream/tests/test_apps.py | kimbugp/django-activity-stream | 4e53e62adf2b82cd01ab70a033839ab61b2d087b | [
"BSD-3-Clause"
] | 1,489 | 2015-01-02T02:46:30.000Z | 2022-03-30T07:32:45.000Z | actstream/tests/test_apps.py | kimbugp/django-activity-stream | 4e53e62adf2b82cd01ab70a033839ab61b2d087b | [
"BSD-3-Clause"
] | 277 | 2015-01-02T19:54:09.000Z | 2022-03-28T12:07:20.000Z | actstream/tests/test_apps.py | kimbugp/django-activity-stream | 4e53e62adf2b82cd01ab70a033839ab61b2d087b | [
"BSD-3-Clause"
] | 345 | 2015-01-13T01:02:42.000Z | 2022-03-21T09:39:26.000Z | from unittest import TestCase
from django.apps.registry import apps
class ActstreamConfigTestCase(TestCase):
def test_data_field_is_added_to_action_class_only_once_even_if_app_is_loaded_again(self):
actstream_config = apps.get_app_config('actstream')
actstream_config.ready()
actstream_config.ready()
from actstream.models import Action
data_fields = [field for field in Action._meta.fields if field.name == 'data']
self.assertEqual(
len(data_fields),
1
)
| 28.684211 | 93 | 0.702752 | 68 | 545 | 5.294118 | 0.558824 | 0.125 | 0.111111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002381 | 0.229358 | 545 | 18 | 94 | 30.277778 | 0.854762 | 0 | 0 | 0.153846 | 0 | 0 | 0.023853 | 0 | 0 | 0 | 0 | 0 | 0.076923 | 1 | 0.076923 | false | 0 | 0.230769 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c3378c740ae8e44f9721f70acf8526419bc1dcc | 26,445 | py | Python | viz_platform/dynamic_datasets/model/classes.py | alexanderzimmerman/smart-vp-server | 3aa57b5fa32e90a8406684d0d0a2860e224d7916 | [
"Apache-2.0"
] | null | null | null | viz_platform/dynamic_datasets/model/classes.py | alexanderzimmerman/smart-vp-server | 3aa57b5fa32e90a8406684d0d0a2860e224d7916 | [
"Apache-2.0"
] | null | null | null | viz_platform/dynamic_datasets/model/classes.py | alexanderzimmerman/smart-vp-server | 3aa57b5fa32e90a8406684d0d0a2860e224d7916 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import scipy
import scipy.special
import scipy.interpolate
import pickle
import sklearn
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import MySQLdb
import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base
import sqlalchemy.ext.mutable
from sqlalchemy import Table, Column, Integer, String, Binary, Float, Boolean, Enum, ForeignKey, PickleType, DateTime, LargeBinary
from sqlalchemy import create_engine, inspect
from sqlalchemy.orm import sessionmaker, relationship
import sqlalchemy.types as types
Base = declarative_base()
scenario_x_ensemble = Table('scenario_x_ensemble', Base.metadata,
Column('scenario_id', Integer, ForeignKey('scenario.id')),
Column('ensemble_id', Integer, ForeignKey('ensemble.id')))
deployment_x_instrument = Table('deployment_x_instrument', Base.metadata,
Column('deployment_id', Integer, ForeignKey('deployment.id')),
Column('instrument_id', Integer, ForeignKey('instrument.id')))
deployment_x_platform = Table('deployment_x_platform', Base.metadata,
Column('deployment_id', Integer, ForeignKey('deployment.id')),
Column('platform_id', Integer, ForeignKey('platform.id')))
opt_x_optParamType = Table('opt_x_optParamType', Base.metadata,
Column('optimization_id', Integer, ForeignKey('optimization.id')),
Column('optParamType_id', Integer, ForeignKey('operationalParameterType.id')))
opt_x_decisionType = Table('opt_x_decisionType', Base.metadata,
Column('optimization_id', Integer, ForeignKey('optimization.id')),
Column('decisionType_id', Integer, ForeignKey('decisionType.id')))
opt_x_modelParamType = Table('opt_x_modelParamType', Base.metadata,
Column('optimization_id', Integer, ForeignKey('optimization.id')),
Column('modelParamType_id', Integer, ForeignKey('modelParameterType.id')))
opt_x_stateVarType = Table('opt_x_stateVarType', Base.metadata,
Column('optimization_id', Integer, ForeignKey('optimization.id')),
Column('stateVarType_id', Integer, ForeignKey('stateVarType.id')))
opt_x_instrumentType = Table('opt_x_instrumentType', Base.metadata,
Column('optimization_id', Integer, ForeignKey('optimization.id')),
Column('instrumentType_id', Integer, ForeignKey('instrumentType.id')))
class Decision(Base):
__tablename__ = 'decision'
id = Column(Integer, primary_key=True, autoincrement=True)
id_type = Column(Integer, ForeignKey('decisionType.id'), primary_key=True )
id_schedule = Column(Integer, ForeignKey('schedule.id') )
time = Column(Float)
value = Column(PickleType)
decisionType = relationship( 'DecisionType', back_populates='decisions' )
class DecisionType(Base):
__tablename__ = 'decisionType'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(128))
decisions = relationship( 'Decision', back_populates='decisionType' )
opts = relationship( 'Optimization', back_populates='decisionTypes', secondary=opt_x_decisionType )
class DecisionDependency(Base):
__tablename__ = 'decisionDependency'
id = Column(Integer, primary_key=True, autoincrement=True)
id_subject = Column(Integer, ForeignKey('decisionType.id') )
id_object = Column(Integer, ForeignKey('decisionType.id') )
dec_subject = relationship( 'DecisionType', foreign_keys=[id_subject] )
dec_object = relationship( 'DecisionType', foreign_keys=[id_object] )
class Deployment(Base):
__tablename__ = 'deployment'
id = Column(Integer, primary_key=True, autoincrement=True)
instruments = relationship( 'Instrument', back_populates='deployment', secondary=deployment_x_instrument )
platforms = relationship( 'Platform', back_populates='deployment', secondary=deployment_x_platform )
predictions = relationship( 'PredictedDataset', back_populates='deployment' )
observations = relationship( 'ObservedDataset', back_populates='deployment' )
class Ensemble(Base):
__tablename__ = 'ensemble'
id = Column(Integer, primary_key=True, autoincrement=True)
scenarios = relationship( 'Scenario', back_populates='ensembles', secondary=scenario_x_ensemble )
class Instrument(Base):
__tablename__ = 'instrument'
id = Column(Integer, primary_key=True, autoincrement=True)
id_type = Column(Integer, ForeignKey('instrumentType.id') )
name = Column(String(256))
type = relationship( 'InstrumentType' )
deployment = relationship( 'Deployment', back_populates='instruments', secondary=deployment_x_instrument )
objFuncs = relationship( 'ObjectiveFunction', back_populates='instrument' )
class InstrumentType(Base):
__tablename__ = 'instrumentType'
id = Column(Integer, primary_key=True, autoincrement=True)
id_type = Column(Integer, ForeignKey('stateVarType.id') )
type = relationship( 'StateVariableType' )
opts = relationship( 'Optimization', back_populates='instrumentTypes', secondary=opt_x_instrumentType )
class ModelParameter(Base):
__tablename__ = 'modelParameter'
id = Column(Integer, primary_key=True, autoincrement=True)
id_type = Column(Integer, ForeignKey('modelParameterType.id'), primary_key=True )
id_realization = Column(Integer, ForeignKey('realization.id') )
value = Column(Float)
modelParamType = relationship( 'ModelParameterType', back_populates='modelParams' )
realization = relationship( 'Realization', back_populates='modelParams' )
class ModelParameterType(Base):
__tablename__ = 'modelParameterType'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(64)) # pressure, aqueous CO2 content, total dissolved solids
abvr = Column(String(64)) # pf, co2, tds
unit = Column(String(64)) # kPa, %, kg/m3
modelParams = relationship( 'ModelParameter', back_populates='modelParamType' )
opts = relationship( 'Optimization', back_populates='modelParamTypes', secondary=opt_x_modelParamType )
class ObjectiveFunction(Base):
__tablename__ = 'objFunction'
id = Column(Integer, primary_key=True, autoincrement=True)
id_instrument = Column(Integer, ForeignKey('instrument.id') )
instrument = relationship( 'Instrument', back_populates='objFuncs' )
objVals = relationship( 'ObjectiveValue', back_populates='objFunction' )
class ObjectiveValue(Base):
__tablename__ = 'objValue'
id = Column(Integer, primary_key=True, autoincrement=True)
id_objectF = Column(Integer, ForeignKey('objFunction.id') )
id_simulation = Column(Integer, ForeignKey('simulation.id') )
value = Column(Float)
objFunction = relationship( 'ObjectiveFunction', back_populates='objVals' )
simulation = relationship( 'Simulation', back_populates='objVals' )
def compute(self,time,end):
observation = self.objFunction.instrument.deployment[0].observations[0]
for prediction in self.objFunction.instrument.deployment[0].predictions:
            if prediction.simulation == self.simulation:
                # interpolate the matching prediction onto the observation
                # times and accumulate the squared misfit up to time `end`
                # (rows of `observation.data` are assumed aligned with `time`)
                f = scipy.interpolate.interp1d(time, prediction.data)
                err = 0
                for i in range(observation.data.shape[0]):
                    if time[i] < end:
                        err += (f(observation.data[i, 0]) - observation.data[i, 1])**2
                return err
class ObservedDataset(Base):
__tablename__ = 'observedDataset'
id = Column(Integer, primary_key=True, autoincrement=True)
id_deployment = Column(Integer, ForeignKey('deployment.id') )
id_stateVarType = Column(Integer, ForeignKey('stateVarType.id') )
deployment = relationship( 'Deployment', back_populates='observations' )
stateVarType = relationship( 'StateVariableType' )
data = Column(String(2**24))
class OperationalParameterType(Base):
__tablename__ = 'operationalParameterType'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(64))
abvr = Column(String(64))
unit = Column(String(64))
schedules = relationship( 'Schedule', back_populates='optParamType' )
opts = relationship( 'Optimization', back_populates='optParamTypes', secondary=opt_x_optParamType )
class Optimization(Base):
__tablename__ = 'optimization'
id = Column(Integer, primary_key=True, autoincrement=True)
X = Column(String(2**24))
Y = Column(String(2**24))
Z = Column(String(2**24))
t = Column(String(2**24))
end = Column(Float)
optParamTypes = relationship( 'OperationalParameterType', back_populates='opts', secondary=opt_x_optParamType )
decisionTypes = relationship( 'DecisionType', back_populates='opts', secondary=opt_x_decisionType )
modelParamTypes = relationship( 'ModelParameterType', back_populates='opts', secondary=opt_x_modelParamType )
stateVarTypes = relationship( 'StateVariableType', back_populates='opts', secondary=opt_x_stateVarType )
instrumentTypes = relationship( 'InstrumentType', back_populates='opts', secondary=opt_x_instrumentType )
def generate_scenario(self,realization):
scenario = Scenario(realization=realization)
nInj = np.random.randint(1,3+1)
nObs = np.random.randint(1,3+1)
nInj = 2
nObs = 2
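        # NOTE: these fixed counts override the random draws just above.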
for iInj in range(nInj):
pump_rates = np.random.uniform(400,1000)
start_time = np.random.uniform(0.48*np.max(self.t),0.52*np.max(self.t))
if iInj==0: start_time=0
easting = np.random.uniform(np.min(self.X),np.max(self.X))
northing = np.random.uniform(np.min(self.Y),np.max(self.Y))
depth = np.random.uniform(np.min(self.Z),np.max(self.Z))
injWell = Well(easting=easting,northing=northing,depth=depth,time=start_time)
schedule = Schedule(optParamType=self.optParamTypes[0],value=pump_rates,well=injWell)
decisions = [Decision(decisionType=self.decisionTypes[0])]
scenario.schedules += [schedule]
simulation = Simulation(scenario=scenario)
simulation.run(self.X,self.Y,self.Z,self.t,self.stateVarTypes[0])
for iObs in range(nObs):
if iObs==0:
drill_time = np.random.uniform(0.00*np.max(self.t),0.05*np.max(self.t))
elif iObs==1:
drill_time = np.random.uniform(0.35*np.max(self.t),0.40*np.max(self.t))
else:
drill_time = np.random.uniform(0.45*np.max(self.t),0.50*np.max(self.t))
easting = np.random.uniform(np.min(self.X),np.max(self.X))
northing = np.random.uniform(np.min(self.Y),np.max(self.Y))
depth = np.random.uniform(np.min(self.Z),np.max(self.Z))
obsWell = Well(easting=easting,northing=northing,depth=depth,time=drill_time)
platform = Platform(wells=[obsWell])
sensor = Instrument(type=self.instrumentTypes[0])
deployment = Deployment(platforms=[platform],instruments=[sensor])
prediction = PredictedDataset(deployment=deployment,stateVarType=self.stateVarTypes[0],simulation=simulation)
prediction.compute(self.X,self.Y,self.Z,self.t)
observed = ObservedDataset(deployment=deployment,stateVarType=self.stateVarTypes[0],data=prediction.noisy_data(self.t,0.0005))
objectiveFunction = ObjectiveFunction(instrument=sensor)
return scenario
def add_to_scenario(self,scenario,easting,northing,depth,drill_time):
obsWell = Well(easting=easting,northing=northing,depth=depth,time=drill_time)
platform = Platform(wells=[obsWell])
sensor = Instrument(type=self.instrumentTypes[0])
deployment = Deployment(platforms=[platform],instruments=[sensor])
prediction = PredictedDataset(deployment=deployment,stateVarType=self.stateVarTypes[0],simulation=scenario.simulations[0])
prediction.compute(self.X,self.Y,self.Z,self.t)
observed = ObservedDataset(deployment=deployment,stateVarType=self.stateVarTypes[0],data=prediction.noisy_data(self.t,0.0005))
objFunc = ObjectiveFunction(instrument=sensor)
return scenario
def monte_carlo(self,schedules,deployments,full=False):
T = np.random.uniform(0.2e+4,2.6e+4)
S = np.random.uniform(0.5e-5,0.5e-2)
#print T,S
modelParams = []
modelParams += [ModelParameter(value=T,modelParamType=self.modelParamTypes[0])]
modelParams += [ModelParameter(value=S,modelParamType=self.modelParamTypes[1])]
realization = Realization(modelParams=modelParams)
scenario = Scenario(realization=realization)
for true_schedule in schedules:
pump_rates = true_schedule.value
start_time = true_schedule.well.time
easting = true_schedule.well.easting
northing = true_schedule.well.northing
depth = true_schedule.well.depth
injWell = Well(easting=easting,northing=northing,depth=depth,time=start_time)
schedule = Schedule(optParamType=self.optParamTypes[0],value=pump_rates,well=injWell)
decisions = [Decision(decisionType=self.decisionTypes[0])]
scenario.schedules += [schedule]
simulation = Simulation(scenario=scenario)
if full: simulation.run(self.X,self.Y,self.Z,self.t,self.stateVarTypes[0])
for true_deployment in deployments:
obsWell = true_deployment.platforms[0].wells[0]
platform = true_deployment.platforms[0]
sensor = true_deployment.instruments[0]
deployment = true_deployment
prediction = PredictedDataset(deployment=deployment,stateVarType=self.stateVarTypes[0],simulation=simulation)
prediction.compute(self.X,self.Y,self.Z,self.t)
objFunc = sensor.objFuncs[0]
objVal = ObjectiveValue(objFunction=objFunc,simulation=simulation)
return scenario
def mcmc(self,schedules,deployments,nn,full=False):
scenarios = [self.monte_carlo(schedules,deployments)]
nRej = 0
while len(scenarios)<nn:
print(len(scenarios))
while True:
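                # Adaptive proposal step: a logistic shrink that keeps the
                # step near its full size while proposals are being accepted
                # and contracts it toward 25% after repeated rejections.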
step = 1-0.75/(1+np.exp(-2*(nRej-4)))
T = scenarios[-1].realization.modelParams[0].value+np.random.normal(0,step*0.025e+4)
S = scenarios[-1].realization.modelParams[1].value+np.random.normal(0,step*0.025e-3)
if (0.2e+4<T<2.6e+4) and (0.5e-5<S<0.5e-2): break
#print T,S
modelParams = []
modelParams += [ModelParameter(value=T,modelParamType=self.modelParamTypes[0])]
modelParams += [ModelParameter(value=S,modelParamType=self.modelParamTypes[1])]
realization = Realization(modelParams=modelParams)
scenario = Scenario(realization=realization)
for true_schedule in schedules:
pump_rates = true_schedule.value
start_time = true_schedule.well.time
easting = true_schedule.well.easting
northing = true_schedule.well.northing
depth = true_schedule.well.depth
injWell = Well(easting=easting,northing=northing,depth=depth,time=start_time)
schedule = Schedule(optParamType=self.optParamTypes[0],value=pump_rates,well=injWell)
decisions = [Decision(decisionType=self.decisionTypes[0])]
scenario.schedules += [schedule]
simulation = Simulation(scenario=scenario)
if full: simulation.run(self.X,self.Y,self.Z,self.t,self.stateVarTypes[0])
for true_deployment in deployments:
obsWell = true_deployment.platforms[0].wells[0]
platform = true_deployment.platforms[0]
sensor = true_deployment.instruments[0]
deployment = true_deployment
prediction = PredictedDataset(deployment=deployment,stateVarType=self.stateVarTypes[0],simulation=simulation)
prediction.compute(self.X,self.Y,self.Z,self.t)
objFunc = sensor.objFuncs[0]
objVal = ObjectiveValue(objFunction=objFunc,simulation=simulation)
            # misfits for the proposed scenario
e1new = scenario.simulations[0].objVals[0].compute(self.t,self.end)**0.5
e2new = scenario.simulations[0].objVals[1].compute(self.t,self.end)**0.5
            # misfits for the current (last accepted) scenario
e1old = scenarios[-1].simulations[0].objVals[0].compute(self.t,self.end)**0.5
e2old = scenarios[-1].simulations[0].objVals[1].compute(self.t,self.end)**0.5
#print e1,e3, e2,e4
#print e1<=e3, e2<=e4
acc1 = e1new<e1old and e2new<e2old
acc2 = e1new<e1old and e2new>e2old and np.random.uniform(0,1)<(0.1+0.9*(e2new-e2old)/e2old)
acc3 = e2new<e2old and e1new>e1old and np.random.uniform(0,1)<(0.1+0.9*(e1new-e1old)/e1old)
acc4 = e1new>e1old and e2new>e2old and np.random.uniform(0,1)<(0.1+0.9*((e1new-e1old)/e1old+(e2new-e2old)/e2old))
if acc1 or acc2 or acc3 or acc4:
scenarios += [scenario]
nRej = 0
else:
nRej+=1
return scenarios
def mcmc2(self,schedules,deployments,nn):
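        # Variant of `mcmc` that always runs the full simulation and uses a
        # simpler acceptance rule: accept if any of the three misfits improves.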
scenarios = [self.monte_carlo(schedules,deployments)]
nRej = 0
while len(scenarios)<nn:
print(len(scenarios))
while True:
step = 1-0.75/(1+np.exp(-2*(nRej-4)))
T = scenarios[-1].realization.modelParams[0].value+np.random.normal(0,step*0.025e+4)
S = scenarios[-1].realization.modelParams[1].value+np.random.normal(0,step*0.025e-3)
if (0.2e+4<T<2.6e+4) and (0.5e-5<S<0.5e-2): break
#print T,S
modelParams = []
modelParams += [ModelParameter(value=T,modelParamType=self.modelParamTypes[0])]
modelParams += [ModelParameter(value=S,modelParamType=self.modelParamTypes[1])]
realization = Realization(modelParams=modelParams)
scenario = Scenario(realization=realization)
for true_schedule in schedules:
pump_rates = true_schedule.value
start_time = true_schedule.well.time
easting = true_schedule.well.easting
northing = true_schedule.well.northing
depth = true_schedule.well.depth
injWell = Well(easting=easting,northing=northing,depth=depth,time=start_time)
schedule = Schedule(optParamType=self.optParamTypes[0],value=pump_rates,well=injWell)
decisions = [Decision(decisionType=self.decisionTypes[0])]
scenario.schedules += [schedule]
simulation = Simulation(scenario=scenario)
simulation.run(self.X,self.Y,self.Z,self.t,self.stateVarTypes[0])
for true_deployment in deployments:
obsWell = true_deployment.platforms[0].wells[0]
platform = true_deployment.platforms[0]
sensor = true_deployment.instruments[0]
deployment = true_deployment
prediction = PredictedDataset(deployment=deployment,stateVarType=self.stateVarTypes[0],simulation=simulation)
prediction.compute(self.X,self.Y,self.Z,self.t)
objFunc = sensor.objFuncs[0]
objVal = ObjectiveValue(objFunction=objFunc,simulation=simulation)
e1 = scenario.simulations[0].objVals[0].compute(self.t,self.end)**0.5
e2 = scenario.simulations[0].objVals[1].compute(self.t,self.end)**0.5
e3 = scenario.simulations[0].objVals[2].compute(self.t,self.end)**0.5
e4 = scenarios[-1].simulations[0].objVals[0].compute(self.t,self.end)**0.5
e5 = scenarios[-1].simulations[0].objVals[1].compute(self.t,self.end)**0.5
e6 = scenarios[-1].simulations[0].objVals[2].compute(self.t,self.end)**0.5
#print e1<=e3, e2<=e4
if (e1<=e4 or e2<=e5 or e3<=e6):
scenarios += [scenario]
nRej = 0
else:
nRej+=1
return scenarios
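
# Aside: the acceptance tests acc1..acc4 in mcmc() above implement a
# hand-rolled, two-objective random-walk sampler. A proposal is always
# kept when it lowers both misfits, and is otherwise kept with probability
# 0.1 + 0.9 * (relative misfit increase); the proposal width `step` shrinks
# logistically with the number of consecutive rejections (nRej). A minimal,
# self-contained sketch of that acceptance rule (the helper name is an
# assumption, and exact ties are not distinguished here):
def _accept_pair(e1new, e1old, e2new, e2old, rng=np.random):
    d1 = (e1new - e1old) / e1old           # relative change of misfit 1
    d2 = (e2new - e2old) / e2old           # relative change of misfit 2
    if d1 < 0 and d2 < 0:                  # improves both objectives: accept
        return True
    rel = max(d1, 0.0) + max(d2, 0.0)      # penalise only the worsened ones
    return rng.uniform(0, 1) < 0.1 + 0.9 * rel
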
class Platform(Base):
__tablename__ = 'platform'
id = Column(Integer, primary_key=True, autoincrement=True)
deployment = relationship( 'Deployment', back_populates='platforms', secondary=deployment_x_platform )
wells = relationship( 'Well', back_populates='platform' )
class PredictedDataset(Base):
__tablename__ = 'predictedDataset'
id = Column(Integer, primary_key=True, autoincrement=True)
id_deployment = Column(Integer, ForeignKey('deployment.id') )
id_stateVarType = Column(Integer, ForeignKey('stateVarType.id') )
id_simulation = Column(Integer, ForeignKey('simulation.id') )
deployment = relationship( 'Deployment', back_populates='predictions' )
stateVarType = relationship( 'StateVariableType' )
simulation = relationship( 'Simulation', back_populates='predictions' )
data = Column(String(2**24))
def stateVarField(self):
for field in self.simulation.fields:
if field.type==self.stateVarType: return field
def compute(self,X,Y,Z,ts):
#print type(self.stateVarField())
self.data = np.zeros(ts.shape,dtype='float')
if self.stateVarField() is None:
#print 'compute just the sensor response'
for schedule in self.simulation.scenario.schedules:
Q = schedule.value
T = self.simulation.scenario.realization.modelParams[0].value
S = self.simulation.scenario.realization.modelParams[1].value
xw = schedule.well.easting
yw = schedule.well.northing
zw = schedule.well.depth
t0 = schedule.well.time
xo = self.deployment.platforms[0].wells[0].easting
yo = self.deployment.platforms[0].wells[0].northing
zo = self.deployment.platforms[0].wells[0].depth
r = ( (xw-xo)**2+(yw-yo)**2+(zw-zo)**2 )**0.5
for it in range(len(ts)):
self.data[it] += self.simulation.theis(Q,T,S,r,ts[it],t0)
else:
#print 'compute the full 4d response'
data = pickle.loads(self.stateVarField().data)
xw = self.deployment.platforms[0].wells[0].easting
yw = self.deployment.platforms[0].wells[0].northing
zw = self.deployment.platforms[0].wells[0].depth
pts = np.array(list(zip(X.ravel(),Y.ravel(),Z.ravel())),dtype='float')
for it in range(len(ts)):
f = scipy.interpolate.LinearNDInterpolator(pts,data[:,:,:,it].ravel())
self.data[it] = f(xw,yw,zw)
def noisy_data(self,time,level):
ii = np.where(time>self.deployment.platforms[0].wells[0].time)[0]
return np.concatenate([ time[ii].reshape([len(ii),1]), (self.data[ii]+np.cumsum(np.random.normal(0,level,self.data[ii].shape))).reshape([len(ii),1]) ], axis=1)
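
# Aside: noisy_data() above perturbs the predicted series with *cumulative*
# Gaussian noise, i.e. a random-walk (drifting) sensor error rather than
# independent white noise. In miniature (toy sizes and noise level assumed):
#
#     clean = np.zeros(100)                              # stand-in for self.data
#     drift = np.cumsum(np.random.normal(0, 0.01, 100))  # accumulated error
#     noisy = clean + drift
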
class Realization(Base):
__tablename__ = 'realization'
id = Column(Integer, primary_key=True, autoincrement=True)
modelParams = relationship( 'ModelParameter', back_populates='realization' )
scenarios = relationship( 'Scenario', back_populates='realization' )
class Scenario(Base):
__tablename__ = 'scenario'
id = Column(Integer, primary_key=True, autoincrement=True)
id_realization = Column(Integer, ForeignKey('realization.id') )
realization = relationship( 'Realization', back_populates='scenarios' )
schedules = relationship( 'Schedule', back_populates='scenario' )
ensembles = relationship( 'Ensemble', back_populates='scenarios', secondary=scenario_x_ensemble )
simulations = relationship( 'Simulation', back_populates='scenario' )
class Schedule(Base):
__tablename__ = 'schedule'
id = Column(Integer, primary_key=True, autoincrement=True)
id_optParamType = Column(Integer, ForeignKey('operationalParameterType.id') )
id_scenario = Column(Integer, ForeignKey('scenario.id') )
id_well = Column(Integer, ForeignKey('well.id') )
value = Column(PickleType)
optParamType = relationship( 'OperationalParameterType', back_populates='schedules' )
scenario = relationship( 'Scenario', back_populates='schedules' )
well = relationship( 'Well', back_populates='schedules' )
class Simulation(Base):
__tablename__ = 'simulation'
id = Column(Integer, primary_key=True, autoincrement=True)
id_scenario = Column(Integer, ForeignKey('scenario.id') )
scenario = relationship( 'Scenario', back_populates='simulations' )
fields = relationship( 'StateVariableField', back_populates='simulation' )
predictions = relationship( 'PredictedDataset', back_populates='simulation' )
objVals = relationship( 'ObjectiveValue', back_populates='simulation' )
def theis(self,Q,T,S,r,t,t0):
if t>t0:
u = (r**2*S)/(4*T*(t-t0))
W = -scipy.special.expi(-u)
return Q / (4*np.pi*T) * W
else: return 0.0
def run(self,X,Y,Z,time,stateVarType):
T = self.scenario.realization.modelParams[0].value
S = self.scenario.realization.modelParams[1].value
s = np.zeros([X.shape[0],Y.shape[1],Z.shape[2],time.shape[0]],dtype='float')
for schedule in self.scenario.schedules:
Q = schedule.value
x = schedule.well.easting
y = schedule.well.northing
z = schedule.well.depth
t0 = schedule.well.time
for i in range(X.shape[0]):
for j in range(Y.shape[1]):
for k in range(Z.shape[2]):
r = ((X[i,j,k]-x)**2+(Y[i,j,k]-y)**2+(Z[i,j,k]-z)**2)**0.5
for l in range(time.shape[0]):
s[i,j,k,l] += self.theis(Q,T,S,r,time[l],t0)
StateVariableField(type=stateVarType,simulation=self,data=pickle.dumps(s))
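
# Aside: theis() above evaluates the classical Theis (1935) solution for
# drawdown around a fully penetrating well in a confined aquifer,
#     s(r, t) = Q / (4*pi*T) * W(u),  u = r**2 * S / (4 * T * (t - t0)),
# where the well function W(u) = -Ei(-u) is computed via scipy.special.expi.
# A standalone restatement for reference (the helper name and the example
# argument values are illustrative only):
def _theis_drawdown(Q, T, S, r, t, t0=0.0):
    if t <= t0:
        return 0.0
    u = (r ** 2 * S) / (4.0 * T * (t - t0))
    return Q / (4.0 * np.pi * T) * (-scipy.special.expi(-u))
# e.g. _theis_drawdown(Q=500.0, T=1.0e4, S=1.0e-4, r=100.0, t=1.0)
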
class StateVariableField(Base):
__tablename__ = 'stateVarField'
id = Column(Integer, primary_key=True, autoincrement=True)
id_type = Column(Integer, ForeignKey('stateVarType.id') )
id_simulation = Column(Integer, ForeignKey('simulation.id') )
data = Column(String(2**26))
type = relationship( 'StateVariableType' )
simulation = relationship( 'Simulation', back_populates='fields' )
class StateVariableType(Base):
__tablename__ = 'stateVarType'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(64))
abvr = Column(String(64))
unit = Column(String(64))
opts = relationship( 'Optimization', back_populates='stateVarTypes', secondary=opt_x_stateVarType )
class Well(Base):
__tablename__ = 'well'
id = Column(Integer, primary_key=True, autoincrement=True)
id_platform = Column(Integer, ForeignKey('platform.id') )
easting = Column(Float)
northing = Column(Float)
depth = Column(Float)
time = Column(Float)
platform = relationship( 'Platform', back_populates='wells' )
schedules = relationship( 'Schedule', back_populates='well' )
class Forecast(Base):
__tablename__ = 'forecast'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(64))
abvr = Column(String(64))
unit = Column(String(64))
# %%
f = Forecast()
print(f)
| 49.615385 | 163 | 0.690187 | 3,100 | 26,445 | 5.775484 | 0.09129 | 0.035579 | 0.020331 | 0.029491 | 0.618968 | 0.529156 | 0.495588 | 0.461852 | 0.440684 | 0.416778 | 0 | 0.019792 | 0.180374 | 26,445 | 533 | 164 | 49.615385 | 0.806228 | 0.010739 | 0 | 0.450855 | 0 | 0 | 0.091525 | 0.008108 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023504 | false | 0 | 0.036325 | 0 | 0.465812 | 0.00641 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
2c3545b94818bca3c82a497be99bd356be44a3e7 | 7,592 | py | Python | lokii/table.py | dorukerenaktas/lok | 0fce198cfc3e5293a2666a66a5d1ee80b81fca48 | [
"MIT"
] | 1 | 2021-02-07T09:57:28.000Z | 2021-02-07T09:57:28.000Z | lokii/table.py | dorukerenaktas/lokii | 0fce198cfc3e5293a2666a66a5d1ee80b81fca48 | [
"MIT"
] | null | null | null | lokii/table.py | dorukerenaktas/lokii | 0fce198cfc3e5293a2666a66a5d1ee80b81fca48 | [
"MIT"
] | null | null | null | import random
from typing import Dict, Callable, List, Any, Optional
import pandas as pd
class Table:
def __init__(self,
name: str,
outfile: str,
index_cache_size: int,
random_cache_size: int,
debug: bool):
"""
Database-table-like data structure that holds column definitions and the
general configuration used to shape the generated data.
:param name: name of the table
:param outfile: path of the CSV file that generated rows are written to
:param index_cache_size: number of rows to cache for index lookups
:param random_cache_size: number of rows to cache for random relation lookups
:param debug: whether to run in debug mode
"""
self.name = name
self.outfile = outfile
self.columns = None
self.relations: List["Table"] = []
self.defaults: List[Dict] = []
# Determine if table is product of a multiplication
self.is_product = False
# Multiplicand table, for each row in this table multiplier length of rows will be generated
self.multiplicand: Optional["Table"] = None
# Multiplier of the pivot table
self.multiplier: List[Any] = [1]
self.gen_func = lambda x: x
# The number of rows to be created
self.target_count = 0
# Processed number of target rows
self.row_count = 0
# Number of generated rows
self.gen_row_count = 0
self._index_cache_size = index_cache_size
self._random_cache_size = random_cache_size
self._debug = debug
self._row_cache = []
self._row_cache_start = -1
self._row_cache_end = -1
def cols(self, *cols: str) -> "Table":
"""
Adds columns to table. Generated output will be ordered by given columns order.
:param cols: name of the columns
"""
if len(cols) <= 1:
# In order to use the row cache (pandas to_dict(orient='records')) there must be two or more columns
raise KeyError('Table {} must have 2 or more columns'.format(self.name))
dup = {x for x in cols if cols.count(x) > 1}
if len(dup) > 0:
raise KeyError('Columns {} are duplicated for table {}'.format(dup, self.name))
self.columns = cols
return self
def rels(self, *tables: "Table") -> "Table":
"""
Adds relations to the table. For every generated row, a random row will be selected from
relation tables.
:param tables: the relation tables
"""
dup = {x for x in tables if tables.count(x) > 1}
if len(dup) > 0:
raise KeyError('Relations {} are duplicated for table {}'.format(dup, self.name))
self.relations = tables
return self
def defs(self, defaults: List[Dict]) -> "Table":
"""
Adds default rows to the table. Every default row must have all required columns.
:param defaults: default rows for the table
"""
for i, d in enumerate(defaults):
if not all(k in self.columns for k in d):
raise KeyError('Default row at index {} does not have all required columns for table {}'
.format(i, self.name))
self.defaults = defaults
return self
def simple(self, count: int, gen: Callable[[int, Dict], Dict]) -> "Table":
self.target_count = count
def generate_row(index: int, rel_dict: Dict) -> Dict:
return gen(index, rel_dict)
# self._write_async(100 if self._debug else count, generate_row)
self.gen_func = generate_row
return self
def multiply(self, table: "Table", gen: Callable[[int, Any, Dict], Dict],
multiplier: List) -> "Table":
if len(multiplier) == 0:
raise KeyError('Table {} has a multiplier with no items'.format(self.name))
self.is_product = True
self.multiplicand = table
self.multiplier = multiplier if multiplier else [1]
def generate_row(index: int, rel_dict: Dict) -> Dict:
return gen(index, multiplier[index % len(multiplier)], rel_dict)
# self._write_async(count, generate_row)
self.gen_func = generate_row
return self
def prepare(self):
if self.is_product:
self.target_count = self.multiplicand.gen_row_count * len(self.multiplier)
def load_index_cache(self, start: int, end: int) -> None:
"""
Index cache is used for multiplying. Before starting batch jobs, the pivot
table needs to cache the full range of required indexes.
:param start: start index of the range
:param end: end index of the range
"""
if self._row_cache_start <= start and end <= self._row_cache_end:
# Already have all required indexes, do nothing
return
if start + self._index_cache_size > self.gen_row_count:
# Remaining range is smaller than cache size, cache all remaining
self._row_cache_start = start
self._row_cache_end = self.gen_row_count
else:
# Cache range from start to start + cache size
self._row_cache_start = start
self._row_cache_end = start + self._index_cache_size
dfs = pd.read_csv(self.outfile, sep=',', header=0, names=self.columns,
skiprows=self._row_cache_start,
chunksize=self._row_cache_end - self._row_cache_start, squeeze=True)
df = pd.concat(dfs)
self._row_cache = df.to_dict(orient='records')
def load_random_cache(self, process: float):
"""
Random cache is used for relations. Before starting batch jobs, the relation
table needs to cache a range of random indexes.
:param process: completion ratio of the process
"""
curr = int(self.gen_row_count * process)
if self._row_cache_start <= curr <= self._row_cache_end:
# Already have all required indexes, do nothing
return
if curr + self._random_cache_size > self.gen_row_count:
# Remaining range is smaller than cache size, cache all remaining
self._row_cache_start = curr
self._row_cache_end = self.gen_row_count
else:
# Cache range from start to start + cache size
self._row_cache_start = curr
self._row_cache_end = curr + self._random_cache_size
dfs = pd.read_csv(self.outfile, sep=',', header=0, names=self.columns,
skiprows=self._row_cache_start,
chunksize=self._row_cache_end - self._row_cache_start, squeeze=True)
df = pd.concat(dfs)
self._row_cache = df.to_dict(orient='records')
random.shuffle(self._row_cache)
def purge_cache(self):
"""
Purge cache after generation process end.
"""
self._row_cache = []
self._row_cache_start = -1
self._row_cache_end = -1
def get_row(self, index: int):
if index >= self.gen_row_count:
raise IndexError('Index {} is not valid for table {}'.format(index, self.name))
if self._row_cache_start > index or self._row_cache_end < index:
raise IndexError('Index {} is not cached for table {}, cache range {}-{} of {}'
.format(index, self.name, self._row_cache_start, self._row_cache_end,
self.gen_row_count))
return self._row_cache[index - self._row_cache_start]
def get_rand(self, seed: int):
index = seed % self.gen_row_count \
if self._random_cache_size > self.gen_row_count \
else seed % self._random_cache_size
return self._row_cache[index]
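
# Illustrative usage of the fluent API above (the file name and generator
# body are assumptions, not taken from this project): declare columns,
# optionally relations and defaults, then attach a generator that receives
# the row index and a dict of randomly drawn related rows.
if __name__ == "__main__":
    users = Table(name="users", outfile="users.csv",
                  index_cache_size=1000, random_cache_size=1000, debug=True)
    users.cols("id", "name").simple(
        count=10,
        gen=lambda index, rels: {"id": index, "name": "user_{}".format(index)},
    )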
| 37.584158 | 107 | 0.601159 | 979 | 7,592 | 4.467824 | 0.180797 | 0.056013 | 0.093278 | 0.058299 | 0.386145 | 0.328304 | 0.328304 | 0.328304 | 0.318244 | 0.262003 | 0 | 0.00403 | 0.31362 | 7,592 | 201 | 108 | 37.771144 | 0.835348 | 0.221417 | 0 | 0.318966 | 0 | 0 | 0.066407 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12069 | false | 0 | 0.025862 | 0.017241 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c36eb0bb22cd8a2ed48443d1c87d8de5ed364ca | 13,428 | py | Python | powerspectrum.py | ronniyjoseph/Beam-Perturbations | 0122fed7e3018f2e188e12b62ad760e11f6eb158 | [
"MIT"
] | null | null | null | powerspectrum.py | ronniyjoseph/Beam-Perturbations | 0122fed7e3018f2e188e12b62ad760e11f6eb158 | [
"MIT"
] | 4 | 2019-06-25T02:02:56.000Z | 2019-10-24T08:12:41.000Z | powerspectrum.py | ronniyjoseph/Beam-Perturbations | 0122fed7e3018f2e188e12b62ad760e11f6eb158 | [
"MIT"
] | null | null | null | import numpy
import powerbox
import matplotlib
from matplotlib import pyplot
import matplotlib.colors as colors
from plottools import colorbar
from generaltools import symlog_bounds
from radiotelescope import beam_width
"""
This file contains all relevant power spectrum functions, i.e. data gridding,
(frequency tapering), frequency FFT, angular averaging, and plotting.
"""
class PowerSpectrumData:
def __init__(self, visibility_data = None, u_coordinate = None, v_coordinate = None, frequency_coordinate = None):
self.data_raw = visibility_data
self.u_raw = u_coordinate
self.v_raw = v_coordinate
self.f_raw = frequency_coordinate
self.data_regrid = None
self.u_regrid = None
self.v_regrid = None
self.f_regrid = None
self.eta = None
return
def append_frequency_slice(self, new_data, new_u, new_v, new_frequency):
if self.data is None:
self.data = new_data
self.u = new_u
self.v = new_v
self.f = numpy.array([new_frequency])
else:
current_data = self.data
current_u = self.u
current_v = self.v
current_f = self.f
self.data = numpy.vstack((current_data, new_data))
self.u = numpy.vstack((current_u, new_u))
self.v = numpy.vstack((current_v, new_v))
self.f = numpy.vstack((current_f, numpy.array([new_frequency])))
return
def regrid_data(self, keep_raw = True):
return
def serialised_gridding():
return
def parallelised_gridding():
return
def regrid_visibilities(measured_visibilities, baseline_u, baseline_v, u_grid):
u_shifts = numpy.diff(u_grid) / 2.
u_bin_edges = numpy.concatenate((numpy.array([u_grid[0] - u_shifts[0]]), u_grid[1:] - u_shifts,
numpy.array([u_grid[-1] + u_shifts[-1]])))
weights_regrid, u_bins, v_bins = numpy.histogram2d(baseline_u, baseline_v,
                                                   bins=(u_bin_edges, u_bin_edges))
real_regrid, u_bins, v_bins = numpy.histogram2d(baseline_u, baseline_v,
                                                bins=(u_bin_edges, u_bin_edges),
                                                weights=numpy.real(measured_visibilities))
imag_regrid, u_bins, v_bins = numpy.histogram2d(baseline_u, baseline_v,
                                                bins=(u_bin_edges, u_bin_edges),
                                                weights=numpy.imag(measured_visibilities))
regridded_visibilities = real_regrid + 1j*imag_regrid
normed_regridded_visibilities = numpy.nan_to_num(regridded_visibilities/weights_regrid)
return normed_regridded_visibilities, weights_regrid
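
# Aside: the gridder above calls numpy.histogram2d three times: once
# unweighted (sample counts per (u, v) cell), once weighted by the real
# part and once by the imaginary part of the visibilities, then divides
# by the counts to obtain cell-averaged complex visibilities. In
# miniature (variable names and sizes are assumed):
#
#     w,  _, _ = numpy.histogram2d(u, v, bins=(edges, edges))
#     re, _, _ = numpy.histogram2d(u, v, bins=(edges, edges), weights=vis.real)
#     im, _, _ = numpy.histogram2d(u, v, bins=(edges, edges), weights=vis.imag)
#     gridded = numpy.nan_to_num((re + 1j * im) / w)
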
def regrid_visibilities_gaussian(measured_visibilities, baseline_u, baseline_v, u_grid, frequency):
u_shifts = numpy.diff(u_grid) / 2.
u_bin_edges = numpy.concatenate((numpy.array([u_grid[0] - u_shifts[0]]), u_grid[1:] - u_shifts,
numpy.array([u_grid[-1] + u_shifts[-1]])))
gridded_data = numpy.zeros((len(u_grid), len(u_grid)), dtype = complex)
gridded_weights = numpy.zeros((len(u_grid), len(u_grid)))
#calculate the kernel
kernel_pixel_size = 51
if kernel_pixel_size % 2 == 0:
dimension = kernel_pixel_size/2
else:
dimension = (kernel_pixel_size + 1)/2
grid_midpoint = int(len(u_grid)/2)
kernel_width = beam_width(frequency)
# print(kernel_width)  # debug output
kernel_grid = u_grid[int(grid_midpoint-dimension):int(grid_midpoint+dimension+1)]
uu, vv = numpy.meshgrid(kernel_grid, kernel_grid)
kernel = (numpy.exp(-kernel_width**2*(uu ** 2. + vv ** 2.)).flatten())
kernel_coordinates = numpy.arange(-dimension, dimension + 1, 1, dtype = int)
kernel_mapx, kernel_mapy = numpy.meshgrid(kernel_coordinates, kernel_coordinates)
for i in range(len(measured_visibilities)):
u_index = numpy.digitize(numpy.array(baseline_u[i]), u_bin_edges)
v_index = numpy.digitize(numpy.array(baseline_v[i]), u_bin_edges)
kernel_x = kernel_mapx.flatten() + u_index
kernel_y = kernel_mapy.flatten() + v_index
#filter indices which are beyond array range
indices = numpy.where((kernel_x > 0) & (kernel_x < len(u_grid)) & (kernel_y > 0) & (kernel_y < len(u_grid)))[0]
# print(indices)  # debug output (would fire once per visibility)
gridded_data[kernel_x[indices], kernel_y[indices]] += measured_visibilities[i]*kernel[indices]
gridded_weights[kernel_x[indices], kernel_y[indices]] += kernel[indices]
normed_gridded_data = numpy.nan_to_num(gridded_data/gridded_weights)
return normed_gridded_data, gridded_weights
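
# Aside: regrid_visibilities_gaussian() performs convolutional gridding:
# each visibility is spread over a (2*dimension + 1)**2 patch of cells with
# Gaussian weights exp(-width**2 * (u**2 + v**2)), where the kernel width
# tracks the primary-beam size at the given frequency, and the accumulated
# kernel weights normalise the grid at the end.
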
def get_power_spectrum(frequency_range, radio_telescope, ideal_measured_visibilities, broken_measured_visibilities,
faulty_tile, plot_file_name, gaussian_kernel = False, verbose = False):
baseline_table = radio_telescope.baseline_table
# Determine maximum resolution
max_frequency = frequency_range[-1]
max_u = numpy.max(numpy.abs(baseline_table.u(max_frequency)))
max_v = numpy.max(numpy.abs(baseline_table.v(max_frequency)))
max_b = max(max_u, max_v)
re_gridding_resolution = 0.5 # lambda
n_regridded_cells = int(numpy.ceil(2 * max_b / re_gridding_resolution))
#ensure gridding cells are always odd numbered
if n_regridded_cells % 2 == 0:
n_regridded_cells += 1
else:
pass
regridded_uv = numpy.linspace(-max_b, max_b, n_regridded_cells)
if verbose:
print("Gridding data for Power Spectrum Estimation")
#Create empty_uvf_cubes:
ideal_regridded_cube = numpy.zeros((n_regridded_cells,n_regridded_cells, len(frequency_range)), dtype = complex)
broken_regridded_cube= ideal_regridded_cube.copy()
ideal_regridded_weights = numpy.zeros((n_regridded_cells,n_regridded_cells, len(frequency_range)))
broken_regridded_weights= ideal_regridded_weights.copy()
for frequency_index in range(len(frequency_range)):
if gaussian_kernel:
ideal_regridded_cube[..., frequency_index], ideal_regridded_weights[
..., frequency_index] = regrid_visibilities_gaussian(
ideal_measured_visibilities[:, frequency_index], baseline_table.u(frequency_range[frequency_index]),
baseline_table.v(frequency_range[frequency_index]), regridded_uv, frequency_range[frequency_index])
broken_regridded_cube[..., frequency_index], broken_regridded_weights[
..., frequency_index] = regrid_visibilities_gaussian(
broken_measured_visibilities[:, frequency_index], baseline_table.u(frequency_range[frequency_index]),
baseline_table.v(frequency_range[frequency_index]), regridded_uv, frequency_range[frequency_index])
else:
ideal_regridded_cube[..., frequency_index], ideal_regridded_weights[..., frequency_index] = regrid_visibilities(
ideal_measured_visibilities[:, frequency_index], baseline_table.u(frequency_range[frequency_index]),
baseline_table.v(frequency_range[frequency_index]), regridded_uv)
broken_regridded_cube[..., frequency_index], broken_regridded_weights[..., frequency_index] = regrid_visibilities(
broken_measured_visibilities[:, frequency_index], baseline_table.u(frequency_range[frequency_index]),
baseline_table.v(frequency_range[frequency_index]), regridded_uv)
# debug visualisation of the gridding weights for this frequency slice:
# pyplot.imshow(numpy.abs(ideal_regridded_weights[..., frequency_index]))
# pyplot.savefig("blaah1.pdf")
# visibilities have now been re-gridded
if verbose:
print("Taking Fourier Transform over frequency and averaging")
ideal_shifted = numpy.fft.ifftshift(ideal_regridded_cube, axes=2)
broken_shifted = numpy.fft.ifftshift(broken_regridded_cube, axes=2)
ideal_uvn, eta_coords = powerbox.dft.fft(ideal_shifted,
L=numpy.max(frequency_range) - numpy.min(frequency_range), axes=(2,))
broken_uvn, eta_coords = powerbox.dft.fft(broken_shifted,
L=numpy.max(frequency_range) - numpy.min(frequency_range), axes=(2,))
ideal_PS, uv_bins = powerbox.tools.angular_average_nd(numpy.abs(ideal_uvn) ** 2,
coords=[regridded_uv, regridded_uv,
eta_coords], bins=75,
n=2, weights=numpy.sum(ideal_regridded_weights, axis=2))
broken_PS, uv_bins = powerbox.tools.angular_average_nd(numpy.abs(broken_uvn) ** 2,
coords=[regridded_uv, regridded_uv,
eta_coords], bins=75,
n=2, weights=numpy.sum(broken_regridded_weights, axis=2))
diff_PS, uv_bins = powerbox.tools.angular_average_nd(numpy.abs(broken_uvn - ideal_uvn) ** 2,
coords=[regridded_uv, regridded_uv,
eta_coords], bins=75,
n=2, weights=numpy.sum(broken_regridded_weights, axis=2))
#diff_PS = (broken_PS - ideal_PS)/ideal_PS
selection = int(len(eta_coords[0]) / 2) + 1
if verbose:
print("Making 2D PS Plots")
power_spectrum_plot(uv_bins, eta_coords[0, selection:], ideal_PS[:, selection:], broken_PS[:, selection:],
diff_PS[:, selection:],plot_file_name, faulty_tile)
return
def power_spectrum_plot(uv_bins, eta_coords, ideal_PS, broken_PS, diff_PS, plot_file_name, faulty_tile = -1, ):
fontsize = 25
tickfontsize = 20
figure = pyplot.figure(figsize=(30, 10))
ideal_axes = figure.add_subplot(131)
broken_axes = figure.add_subplot(132)
difference_axes = figure.add_subplot(133)
ideal_plot = ideal_axes.pcolor(uv_bins, eta_coords, numpy.real(ideal_PS.T),
cmap='Spectral_r',
norm=colors.LogNorm(vmin=numpy.nanmin(numpy.real(ideal_PS.T)),
vmax=numpy.nanmax(numpy.real(ideal_PS.T))))
broken_plot = broken_axes.pcolor(uv_bins, eta_coords, numpy.real(broken_PS.T),
cmap='Spectral_r',
norm=colors.LogNorm(vmin=numpy.nanmin(numpy.real(broken_PS.T)),
vmax=numpy.nanmax(numpy.real(broken_PS.T))))
symlog_min, symlog_max, symlog_threshold, symlog_scale = symlog_bounds(numpy.real(diff_PS))
diff_plot = difference_axes.pcolor(uv_bins, eta_coords, numpy.real(diff_PS.T),
norm=colors.SymLogNorm(linthresh=10**-5, linscale=symlog_scale,
vmin=symlog_min, vmax=symlog_max), cmap='coolwarm')
ideal_axes.set_xscale("log")
ideal_axes.set_yscale("log")
broken_axes.set_xscale("log")
broken_axes.set_yscale("log")
difference_axes.set_xscale("log")
difference_axes.set_yscale("log")
x_labeling = r"$ k_{\perp} \, [\mathrm{h}\,\mathrm{Mpc}^{-1}]$"
y_labeling = r"$k_{\parallel} $"
x_labeling = r"$ |u |$"
y_labeling = r"$ \eta $"
ideal_axes.set_xlabel(x_labeling, fontsize=fontsize )
broken_axes.set_xlabel(x_labeling, fontsize=fontsize )
difference_axes.set_xlabel(x_labeling, fontsize=fontsize)
ideal_axes.set_ylabel(y_labeling, fontsize=fontsize )
ideal_axes.tick_params(axis='both', which='major', labelsize=tickfontsize)
broken_axes.tick_params(axis='both', which='major', labelsize=tickfontsize)
difference_axes.tick_params(axis='both', which='major', labelsize=tickfontsize)
figure.suptitle(f"Tile {faulty_tile}")
ideal_axes.set_title("Ideal Array", fontsize = fontsize)
broken_axes.set_title("Broken Array", fontsize = fontsize)
difference_axes.set_title("(Ideal - Broken)/Ideal", fontsize = fontsize)
# ideal_axes.set_xlim(10**-2.5, 10**-0.5)
# broken_axes.set_xlim(10**-2.5, 10**-0.5)
# difference_axes.set_xlim(10**-2.5, 10**-0.5)
# print(uv_bins)  # debug output
ideal_axes.set_xlim(numpy.nanmin(uv_bins), 2*1e2)
broken_axes.set_xlim(numpy.nanmin(uv_bins), 2*1e2)
difference_axes.set_xlim(numpy.nanmin(uv_bins), 2*1e2)
ideal_cax = colorbar(ideal_plot)
broken_cax = colorbar(broken_plot)
diff_cax = colorbar(diff_plot)
diff_cax.set_label(r"$[Jy^2]$", fontsize=fontsize)
ideal_cax.ax.tick_params(axis='both', which='major', labelsize=tickfontsize)
broken_cax.tick_params(axis='both', which='major', labelsize=tickfontsize)
diff_cax.ax.tick_params(axis='both', which='major', labelsize=tickfontsize)
print(plot_file_name)
figure.savefig(plot_file_name)
return
| 44.611296 | 126 | 0.629803 | 1,624 | 13,428 | 4.886084 | 0.153941 | 0.042344 | 0.011342 | 0.035287 | 0.470951 | 0.427095 | 0.393321 | 0.362067 | 0.325646 | 0.284814 | 0 | 0.012316 | 0.26832 | 13,428 | 301 | 127 | 44.611296 | 0.795318 | 0.027703 | 0 | 0.232227 | 0 | 0 | 0.028948 | 0.002484 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042654 | false | 0.004739 | 0.037915 | 0.014218 | 0.127962 | 0.033175 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
2c3a2df09b00838929b47a69b48dbd4d97a14392 | 3,089 | py | Python | banditpylib/protocols/single_player.py | XiGYmax/banditpylib | 07698a1c6b17720a8199dea76580546fe3dfb9be | [
"MIT"
] | null | null | null | banditpylib/protocols/single_player.py | XiGYmax/banditpylib | 07698a1c6b17720a8199dea76580546fe3dfb9be | [
"MIT"
] | null | null | null | banditpylib/protocols/single_player.py | XiGYmax/banditpylib | 07698a1c6b17720a8199dea76580546fe3dfb9be | [
"MIT"
] | null | null | null | from typing import List, Dict
import numpy as np
from absl import logging
from banditpylib.bandits import Bandit
from banditpylib.learners import Learner
from .utils import Protocol
class SinglePlayerProtocol(Protocol):
"""Single player protocol
This protocol is used to simulate the ordinary single-player game. It runs in
rounds. During each round, the protocol runs the following steps in sequence.
* fetch the state of the environment and ask the learner for actions
* send the actions to the enviroment for execution
* update the learner with the feedback of the environment
The simulation stops when the learner returns `None` as its actions.
.. note::
The total number of rounds shows how adaptive the learner is and it is at
most the total number of actions.
"""
def __init__(self,
bandit: Bandit,
learners: List[Learner],
intermediate_regrets: List[int] = None):
"""
Args:
bandit: bandit environment
learner: learners to be compared with
intermediate_regrets: a list of intermediate times to record
intermediate regrets
"""
super().__init__(bandit=bandit, learners=learners)
self.__intermediate_regrets = \
intermediate_regrets if intermediate_regrets is not None else []
@property
def name(self) -> str:
"""default protocol name"""
return 'single_player_protocol'
def _one_trial(self, random_seed: int, debug: bool) -> List[Dict]:
"""One trial of the game
This method defines how to run one trial of the game.
Args:
random_seed: random seed
debug: whether to run the trial in debug mode
Returns:
result of one trial
"""
if debug:
logging.set_verbosity(logging.DEBUG)
np.random.seed(random_seed)
# reset the bandit environment and the learner
self.bandit.reset()
self.current_learner.reset()
one_trial_data = []
# number of rounds to communicate with the bandit environment
adaptive_rounds = 0
# total actions executed by the bandit environment
total_actions = 0
def record_data():
one_trial_data.append(
dict({
'bandit': self.bandit.name,
'learner': self.current_learner.name,
'rounds': adaptive_rounds,
'total_actions': total_actions,
'regret': self.bandit.regret(self.current_learner.goal)
}))
while True:
context = self.bandit.context()
actions = self.current_learner.actions(context)
# stop the game if actions returned by the learner is None
if actions is None:
break
# record intermediate regrets
if adaptive_rounds in self.__intermediate_regrets:
record_data()
feedback = self.bandit.feed(actions)
self.current_learner.update(feedback)
if feedback:
# information update
for (_, times) in actions:
total_actions += int(times)
adaptive_rounds += 1
# record final regret
record_data()
return one_trial_data
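
# Aside: the round structure above (fetch context, ask the learner for
# actions, feed them to the bandit, stop when the learner returns None)
# reduces to the following generic loop. Duck-typed stand-ins only; this
# is a sketch, not part of the banditpylib API:
def _generic_trial(bandit, learner) -> int:
    rounds = 0
    while True:
        actions = learner.actions(bandit.context())
        if actions is None:  # the learner signals that it is finished
            break
        learner.update(bandit.feed(actions))
        rounds += 1
    return rounds
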
| 28.601852 | 79 | 0.672062 | 388 | 3,089 | 5.224227 | 0.311856 | 0.074988 | 0.044401 | 0.019734 | 0.049334 | 0.03256 | 0.03256 | 0 | 0 | 0 | 0 | 0.001313 | 0.260602 | 3,089 | 107 | 80 | 28.869159 | 0.886165 | 0.390418 | 0 | 0.04 | 0 | 0 | 0.033708 | 0.01236 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.12 | 0 | 0.26 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c3ba3a7de0cd2d1f09de245212daad861a07972 | 2,873 | py | Python | 3pxnet-inference/scripts/runValidation.py | SRavit1/3pxnet | 1f81a2bdcbb97c42163e914b01dba4e6c73ade60 | [
"MIT"
] | 7 | 2020-12-11T16:06:03.000Z | 2022-02-13T20:56:06.000Z | 3pxnet-inference/scripts/runValidation.py | SRavit1/3pxnet | 1f81a2bdcbb97c42163e914b01dba4e6c73ade60 | [
"MIT"
] | 4 | 2021-07-13T10:50:49.000Z | 2021-08-13T16:06:20.000Z | 3pxnet-inference/scripts/runValidation.py | SRavit1/3pxnet | 1f81a2bdcbb97c42163e914b01dba4e6c73ade60 | [
"MIT"
] | 1 | 2021-07-06T03:41:55.000Z | 2021-07-06T03:41:55.000Z | #!/usr/bin/env python
################################################################################
# MIT License
#
# Copyright (c) 2019 UCLA NanoCAD Laboratory
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
################################################################################
"""Tests XNOR/3PXnet implementation against reference
Author: Wojciech Romaszkan
Organization: NanoCAD Laboratory, University of California, Los Angeles
License: MIT
"""
import subprocess
__author__ = "Wojciech Romaszkan, NanoCAD Laboratory, UCLA"
__license__ = "MIT"
class runValidation(object):
def __init__(self):
self.f = open("logfile", "w")
# Iterations
self.iters = 100
# Layers to run
self.layers = [" -f ", " -c ", " -c -d ", " -c -l 2 ", " -c -l 2 -d "]
# batch norm
self.bnorm = [" ", " -b "]
# output binarization
self.outbin = [" ", " -n "]
# sparsity
self.sparse = [" ", " -s 90 -p "]
def run(self):
# Failure counter
fail = 0
for layer in self.layers:
for bn in self.bnorm:
for ob in self.outbin:
for sp in self.sparse:
cmdString = "./validation" + layer + ob + bn + sp + " -i " + str(self.iters)
print("Running: " + cmdString)
result = subprocess.call(cmdString, shell=True, stdout=self.f)
if result:
print("FAILED")
fail = 1
else:
print("PASSED")
# Check if any of the tests failed
if fail:
print("Some tests failed")
return True
def main():
validator = runValidation()
validator.run()
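# One of the 40 generated commands (5 layer options x 2 batch-norm x
# 2 output-binarization x 2 sparsity settings), for illustration:
#   ./validation -c -d -n -b -s 90 -p -i 100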
if __name__ == '__main__':
main()
| 34.614458 | 95 | 0.577445 | 334 | 2,873 | 4.907186 | 0.535928 | 0.053691 | 0.015863 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006803 | 0.283676 | 2,873 | 82 | 96 | 35.036585 | 0.789602 | 0.485207 | 0 | 0 | 0 | 0 | 0.145121 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0.030303 | 0.030303 | 0 | 0.181818 | 0.121212 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c3c00e8f94657bbf831d69554620cc2f14d7279 | 978 | py | Python | setup.py | nous-consulting/basecamp-next | 33851de091056a33663c9370f564cf3d41fe868f | [
"MIT"
] | 2 | 2015-03-28T21:50:36.000Z | 2015-09-10T22:29:17.000Z | setup.py | GetBlimp/basecamp-next | f7ebdd3da97ee13cd5ca18f440506fbbc84e7800 | [
"MIT"
] | null | null | null | setup.py | GetBlimp/basecamp-next | f7ebdd3da97ee13cd5ca18f440506fbbc84e7800 | [
"MIT"
] | 4 | 2015-04-14T16:18:26.000Z | 2021-03-28T19:00:21.000Z | import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
required = ['requests>=1.0.0',
'requests-oauth2>=0.2.0']
setup(
name='basecampx',
version='0.1.8',
author='Rimvydas Naktinis',
author_email='naktinis@gmail.com',
description=('Wrapper for Basecamp Next API.'),
license="MIT",
keywords="basecamp bcx api",
url='https://github.com/nous-consulting/basecamp-next',
packages=['basecampx'],
install_requires=required,
long_description=read('README.rst'),
include_package_data=True,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7'
],
)
| 27.166667 | 70 | 0.641104 | 112 | 978 | 5.517857 | 0.705357 | 0.035599 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016905 | 0.213701 | 978 | 35 | 71 | 27.942857 | 0.786736 | 0 | 0 | 0 | 0 | 0 | 0.408998 | 0.022495 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0 | 0.129032 | 0.032258 | 0.193548 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c3c2a730b5f9ebaec5a9cf2153e02f5e709d39e | 2,602 | py | Python | crop_align/crop_align_affectnet.py | AutoLV/NoisyFER | 353ff60bad90dd346cd6a8fc54d7a6acd5897044 | [
"MIT"
] | 15 | 2020-11-09T16:35:08.000Z | 2022-02-12T14:53:11.000Z | crop_align/crop_align_affectnet.py | AutoLV/NoisyFER | 353ff60bad90dd346cd6a8fc54d7a6acd5897044 | [
"MIT"
] | 1 | 2021-07-21T03:33:46.000Z | 2021-08-08T20:24:12.000Z | crop_align/crop_align_affectnet.py | AutoLV/NoisyFER | 353ff60bad90dd346cd6a8fc54d7a6acd5897044 | [
"MIT"
] | 3 | 2021-03-30T10:21:52.000Z | 2021-09-12T15:55:32.000Z | import sys
rootPath = '/Users/siwei/Desktop/noisyFER'
sys.path.append(rootPath)
import os
from tqdm import tqdm
import cv2
import csv
import numpy as np
import argparse
from crop_align.align import MyFaceAligner
parser = argparse.ArgumentParser()
parser.add_argument("--root", type=str, default='datasets/affectnet')
args = parser.parse_args()
def lms_to_np(lms):
lms = lms.split(';')
x_cor_list, y_cor_list = [], []
for i in range(len(lms)):
if i % 2 == 0:
x_cor_list.append(float(lms[i]))
else:
y_cor_list.append(float(lms[i]))
lms = [x_cor_list, y_cor_list]
lms = np.asarray(lms) # [2, 68]
return lms
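
# Aside: AffectNet stores the 68 facial landmarks as one semicolon-separated
# string of alternating x;y values; lms_to_np() unpacks them into a (2, 68)
# array of [x_coords, y_coords]. Worked toy example (2 points, not real data):
#   lms_to_np('10.0;20.0;30.0;40.0')
#   -> array([[10., 30.],    # x coordinates
#             [20., 40.]])   # y coordinates
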
# training.csv
def crop_align_affectnet(csv_file, root):
img_root = os.path.join(root, 'Manually_Annotated_Images')
save_root = os.path.join(root, 'myaligned')
if not os.path.exists(save_root):
os.makedirs(save_root)
my_fa = MyFaceAligner(desiredLeftEye=(0.3, 0.3), desiredFaceWidth=256)
cnt = 0
with open(csv_file, 'r') as csvfile:
reader = csv.DictReader(csvfile)
for row in tqdm(reader):
cur_sample = {}
cur_sample['img_path'] = os.path.join(img_root, row['subDirectory_filePath'].split('/')[1])
lms = row['facial_landmarks']
cur_sample['lms'] = lms_to_np(lms)
cur_sample['expression'] = int(row['expression'])
# for the Uncertain and No-Face categories the value is -2
cur_sample['valence'] = float(row['valence'])
cur_sample['arousal'] = float(row['arousal'])
# affectnet emotion label:
# 0: Neutral, 1: Happy, 2: Sad, 3: Surprise, 4: Fear, 5: Disgust, 6: Anger, 7: Contempt
# 8: None, 9: Uncertain, 10: No-Face
if cur_sample['valence'] != -2 and 0 <= cur_sample['expression'] <= 7:
img = cv2.imread(cur_sample['img_path'])
img_name = row['subDirectory_filePath'].split('/')[1]
save_path = os.path.join(save_root, img_name)
# use 68 lms provided by AffectNet
result = my_fa.align(img, cur_sample['lms'])
cv2.imwrite(save_path, result)
cnt += 1
print('num of saved images:', cnt)
if __name__ == '__main__':
print('crop and align for affectnet training set...')
crop_align_affectnet(csv_file=os.path.join(args.root, 'training.csv'), root=args.root)
print('crop and align for affectnet validation set...')
crop_align_affectnet(csv_file=os.path.join(args.root, 'validate.csv'), root=args.root)
| 35.162162 | 103 | 0.618755 | 359 | 2,602 | 4.300836 | 0.367688 | 0.05829 | 0.03886 | 0.040803 | 0.223446 | 0.146373 | 0.059585 | 0.059585 | 0.059585 | 0.059585 | 0 | 0.019309 | 0.243659 | 2,602 | 74 | 104 | 35.162162 | 0.765244 | 0.097233 | 0 | 0 | 0 | 0 | 0.161401 | 0.040991 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.148148 | 0 | 0.203704 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
2c3c89d30724c38d60dae13286e13556b05ebcb3 | 640 | py | Python | bot/cli.py | LukasForst/toggl-wire-bot | 1a242ef281b3cb501f30a1acee9cda7fd2cb2a84 | [
"MIT"
] | null | null | null | bot/cli.py | LukasForst/toggl-wire-bot | 1a242ef281b3cb501f30a1acee9cda7fd2cb2a84 | [
"MIT"
] | null | null | null | bot/cli.py | LukasForst/toggl-wire-bot | 1a242ef281b3cb501f30a1acee9cda7fd2cb2a84 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import argparse
from Toggl.togglApi import getTogglReport
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Obtain report from Toggl.')
parser.add_argument("--toggl-token", "-tt", help="Set Toggl token.")
parser.add_argument("--toggl-workspace", "-w", help="Set Toggl workspace")
parser.add_argument("--since", "-s", help="Start date for the report.")
parser.add_argument("--until", "-u", help="End date for the report.")
args = parser.parse_args()
report = getTogglReport(args.toggl_token, int(args.toggl_workspace), args.since, args.until)
print(report)
| 32 | 96 | 0.698438 | 83 | 640 | 5.204819 | 0.46988 | 0.083333 | 0.157407 | 0.101852 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.142188 | 640 | 19 | 97 | 33.684211 | 0.786885 | 0.03125 | 0 | 0 | 0 | 0 | 0.276252 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.181818 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c3e24b4b6d91fcd05c76dfb86067fae1b3b498e | 3,746 | py | Python | a_storage/codec/codec_agroup.py | praefrontalis/Anfisa-Annotations | b4127c68e3696b75b2972f6759437034cc56f8e3 | [
"Apache-2.0"
] | null | null | null | a_storage/codec/codec_agroup.py | praefrontalis/Anfisa-Annotations | b4127c68e3696b75b2972f6759437034cc56f8e3 | [
"Apache-2.0"
] | 3 | 2022-03-28T13:44:24.000Z | 2022-03-28T13:53:57.000Z | a_storage/codec/codec_agroup.py | praefrontalis/Anfisa-Annotations | b4127c68e3696b75b2972f6759437034cc56f8e3 | [
"Apache-2.0"
] | 3 | 2019-02-18T17:05:06.000Z | 2022-03-22T19:42:38.000Z | from ._codec_data import _CodecData
#===============================================
class CodecAGroup(_CodecData):
def __init__(self, master, parent, schema_instr, default_name):
self.mGroupName = "?"
_CodecData.__init__(self, master, parent, schema_instr, default_name)
self.mGroup = self._getProperty("group")
self.mGroupName = self._getProperty("group-name")
self.mItemCodecs = [
_CodecData.create(self.getMaster(), self, it_instr, "?")
for it_instr in self._getProperty("items")]
if not self.mGroupName.startswith('<'):
self.mGroupName = "<%s>" % self.mGroupName
self._updateProperty("group-name", self.mGroupName)
self._updateProperty("items",
[it.getSchemaDescr() for it in self.mItemCodecs])
used_names = set()
for it in self.mItemCodecs:
it._checkNameUsage(used_names)
stat_info = self._getProperty("stat", dict())
self.mStatValCount = stat_info.get("val", 0)
self.mStatGrpCount = stat_info.get("groups")
if self.mStatGrpCount is None:
self.mStatGrpCount = {name: 0 for name in self.mGroup}
stat_info["groups"] = self.mStatGrpCount
self._onDuty()
def _checkNameUsage(self, used_names):
for name in self.mGroup:
assert name not in used_names, (
"Duplication name in group for codec %s" % self.getPath())
used_names.add(name)
def getType(self):
return "attr-group"
def isAtomic(self):
return False
def isAggregate(self):
return True
def getPath(self):
if self.mParent is None:
return "/" + self.mGroupName
return self.mParent.getPath() + "/" + self.mGroupName
def encode(self, value, encode_env):
self.mStatValCount += 1
ret_repr = []
for name_idx, name in enumerate(self.mGroup):
it_dict = value.get(name)
if it_dict is None:
continue
self.mStatGrpCount[name] += 1
items_repr = [str(name_idx)]
for it in self.mItemCodecs:
it_repr = "null"
if it.isAggregate():
it_repr = it.encode(it_dict, encode_env)
else:
it_val = it_dict.get(it.getName())
if it_val is not None:
it_repr = it.encode(it_val, encode_env)
items_repr.append(it_repr)
while len(items_repr) > 0 and items_repr[-1] == "null":
del items_repr[-1]
ret_repr.append('[' + ','.join(items_repr) + ']')
return '[' + ','.join(ret_repr) + ']'
def updateWStat(self):
stat_info = self._getProperty("stat")
stat_info["groups"] = self.mStatGrpCount
stat_info["val"] = self.mStatValCount
for it in self.mItemCodecs:
it.updateWStat()
def decode(self, group_obj, decode_env):
ret = dict()
for int_obj in group_obj:
name = self.mGroup[int_obj[0]]
grp_obj = dict()
for idx, it in enumerate(self.mItemCodecs):
it_obj = None
if idx + 1 < len(int_obj):
it_obj = int_obj[idx + 1]
if it.isAggregate():
if it_obj is not None:
grp_obj.update(it.decode(it_obj, decode_env))
else:
if it_obj is not None:
grp_obj[it.getName()] = it.decode(it_obj, decode_env)
else:
grp_obj[it.getName()] = None
ret[name] = grp_obj
return ret
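
# Aside: the wire format produced by encode() above is a JSON-style nested
# list: one inner list per group member that is present, beginning with the
# member's index in mGroup, followed by each item codec's representation,
# with trailing "null" entries trimmed. Schematically (the member names and
# item layout here are made up for illustration):
#   mGroup = ["A", "B"], value = {"B": {"x": 1, "y": 2}}
#   encode(value, env) -> "[[1,<repr of x>,<repr of y>]]"
# decode() walks the same structure back into a dict keyed by member name.
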
| 38.618557 | 77 | 0.540577 | 422 | 3,746 | 4.592417 | 0.203791 | 0.057792 | 0.014448 | 0.022704 | 0.241486 | 0.134159 | 0.097007 | 0.070175 | 0.047472 | 0 | 0 | 0.00405 | 0.340897 | 3,746 | 96 | 78 | 39.020833 | 0.780883 | 0.012547 | 0 | 0.137931 | 0 | 0 | 0.037317 | 0 | 0 | 0 | 0 | 0 | 0.011494 | 1 | 0.103448 | false | 0 | 0.011494 | 0.034483 | 0.206897 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c3ea335989982f0ae8f5847cf419f798c2e40fa | 3,528 | py | Python | app/customer/models/bottle_message.py | B-ROY/TESTGIT | 40221cf254c90d37d21afb981635740aebf11949 | [
"Apache-2.0"
] | 2 | 2017-12-02T13:58:30.000Z | 2018-08-02T17:07:59.000Z | app/customer/models/bottle_message.py | B-ROY/TESTGIT | 40221cf254c90d37d21afb981635740aebf11949 | [
"Apache-2.0"
] | null | null | null | app/customer/models/bottle_message.py | B-ROY/TESTGIT | 40221cf254c90d37d21afb981635740aebf11949 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
import random
from mongoengine import *
from base.settings import CHATPAMONGO
import datetime
connect(CHATPAMONGO.db, host=CHATPAMONGO.host, port=CHATPAMONGO.port, username=CHATPAMONGO.username,
password=CHATPAMONGO.password)
class BottleMessageText(Document):
USER_TYPE = [
(0, "主播发送"),
(1, "用户发送")
]
GENDER = [
(0, "Both"),
(1, "男用户"),
(2, "女用户")
]
DELETE_STATUS = [
(0, "已删除"),
(2, "正在使用")
]
label = IntField(verbose_name=u"标签", unique=True)
message = StringField(verbose_name=u"消息")
sender_type = IntField(verbose_name=u"发送者类型", choices=USER_TYPE)
gender = IntField(verbose_name=u"性别")
delete_status = IntField(verbose_name=u"是否删除", )
create_time = DateTimeField(verbose_name=u"创建时间")
def normal_info(self):
return {
"lable": self.label,
"message": self.message,
"sender_type": self.sender_type,
"gender": self.gender
}
@classmethod
def create_message_text(cls, label, message, sender_type, to_gender):
obj_ = cls()
obj_.label = label
obj_.message = message
obj_.sender_type = sender_type
obj_.gender = to_gender
obj_.create_time = datetime.datetime.now()
obj_.save()
@classmethod
def get_message_text(cls, sender_type, gender=0):
return cls.objects.filter(sender_type=sender_type)
@classmethod
def get_one_message_text(cls, sender_type):
messages = cls.objects.filter(sender_type=sender_type)
num = random.randint(0,messages.count()-1)
return messages[num]
@classmethod
def get_two_message(cls):
to_male_messages = cls.objects.filter(gender=2) # delete_status=2
to_female_messages = cls.objects.filter(gender=1)
message_list = []
if to_male_messages:
to_male_num = random.randint(0, to_male_messages.count()-1)
message_list.append(to_male_messages[to_male_num])
if to_female_messages:
to_female_num = random.randint(0, to_female_messages.count()-1)
message_list.append(to_female_messages[to_female_num])
return message_list
@classmethod
def delete_message_text(cls):
# TODO: not yet implemented
pass
@classmethod
def update_message_test(cls):
# TODO: not yet implemented
pass
class BottleRecord(Document):
SEND_STATUS = [
(0, "开始发送"),
(1, "发送成功"),
(2, "发送失败")
]
SENDER_TYPE = [
(0, "主播发送"),
(1, "用户发送")
]
user_id = IntField(verbose_name=u"发送者id")
label = IntField(verbose_name=u"消息标签")
messages = StringField(verbose_name=u"消息内容")
sender_type = IntField(verbose_name=u"发送者类型", choices=SENDER_TYPE)
send_time = DateTimeField(verbose_name=u"发送时间")
count = IntField(verbose_name=u"发送人数")
status = IntField(verbose_name=u"发送状态", choices=SEND_STATUS)
@classmethod
def create_bottle_record(cls, user_id, label, messages, sender_type, count):
obj_ = cls()
obj_.user_id = user_id
obj_.label = label
obj_.messages = messages
obj_.sender_type = sender_type
obj_.send_time = datetime.datetime.now()
obj_.count = count
# TODO: status is temporarily hard-coded to "sent successfully"
obj_.status = 1
obj_.save()
@classmethod
def update(cls, id, status):
record = cls.objects.get(id=id)
record.update(set__status=status)
| 23.837838 | 100 | 0.622166 | 426 | 3,528 | 4.894366 | 0.246479 | 0.086331 | 0.07482 | 0.086331 | 0.336691 | 0.177458 | 0.106475 | 0.040288 | 0 | 0 | 0 | 0.009277 | 0.266723 | 3,528 | 147 | 101 | 24 | 0.796676 | 0.016723 | 0 | 0.22 | 0 | 0 | 0.035673 | 0 | 0 | 0 | 0 | 0.006803 | 0 | 1 | 0.09 | false | 0.03 | 0.04 | 0.02 | 0.37 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c43035692fa6a7c9793606b200857a4997d78db | 3,646 | py | Python | names_generator/__init__.py | glentner/names_generator | c3526a90f3c0f8d2b388542dc3770baa4cc455fe | [
"Apache-2.0"
] | 1 | 2020-12-03T08:52:58.000Z | 2020-12-03T08:52:58.000Z | names_generator/__init__.py | glentner/names_generator | c3526a90f3c0f8d2b388542dc3770baa4cc455fe | [
"Apache-2.0"
] | null | null | null | names_generator/__init__.py | glentner/names_generator | c3526a90f3c0f8d2b388542dc3770baa4cc455fe | [
"Apache-2.0"
] | 2 | 2021-01-16T08:52:33.000Z | 2022-02-24T13:53:58.000Z | # This program is free software: you can redistribute it and/or modify it under the
# terms of the Apache License (v2.0) as published by the Apache Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the Apache License for more details.
#
# You should have received a copy of the Apache License along with this program.
# If not, see <https://www.apache.org/licenses/LICENSE-2.0>.
"""API and entry-point for names_generator."""
# type annotations
from typing import Tuple, List, Dict, Callable
# standard libs
import sys
import random
import logging
# internal libs
from .__meta__ import __version__, __description__, __authors__, __contact__
from . import names
# external libs
from cmdkit.app import Application
from cmdkit.cli import Interface
# In the interest of keeping with the original implementation :)
restricted_names: List[Tuple[str, str]] = [
('boring', 'wozniak') # Steve Wozniak is not boring.
]
def random_names() -> Tuple[str, str]:
"""Select a random choice of names from `names.LEFT` and `names.RIGHT`."""
_names = random.choice(names.LEFT), random.choice(names.RIGHT)
return _names if _names not in restricted_names else random_names()
def _format_plain(pair: Tuple[str, str]) -> str:
return f'{pair[0]} {pair[1]}'
def _format_capital(pair: Tuple[str, str]) -> str:
return f'{pair[0].capitalize()} {pair[1].capitalize()}'
def _format_hyphen(pair: Tuple[str, str]) -> str:
return f'{pair[0]}-{pair[1]}'
def _format_underscore(pair: Tuple[str, str]) -> str:
return f'{pair[0]}_{pair[1]}'
_formatting_methods: Dict[str, Callable[[Tuple[str, str]], str]] = {
'plain': _format_plain,
'capital': _format_capital,
'hyphen': _format_hyphen,
'underscore': _format_underscore,
}
def format_names(pair: Tuple[str, str], style: str = 'underscore') -> str:
"""Format a pair of names in one of several styles."""
try:
return _formatting_methods[style](pair)
except KeyError as error:
raise NotImplementedError(f'No style \'{style}\'') from error
def generate_name(style: str = 'underscore', seed: int = None) -> str:
"""Generate a random name."""
if seed is not None:
random.seed(seed)
return format_names(random_names(), style=style)
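
# Example (deterministic for a fixed seed; the exact pairing depends on the
# word lists shipped in `names`):
#   generate_name(style='capital', seed=42)  # -> e.g. 'Adjective Scientist'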
# Command-line interface implementation
PROGRAM = 'generate_name'
USAGE = f"""\
usage: {PROGRAM} [-h] [-v] [--style NAME]
Generate random name pairing.\
"""
EPILOG = f"""\
Documentation and issue tracking at:
https://github.com/glentner/names_generator\
"""
HELP = f"""\
{USAGE}
options:
-s, --style NAME Formatting (default: underscore).
-h, --help Show this message and exit.
-v, --version Show the version and exit.
{EPILOG}\
"""
class NamesGeneratorApp(Application):
"""Top-level application class for `generate_name` console application."""
interface = Interface(PROGRAM, USAGE, HELP)
interface.add_argument('-v', '--version', action='version', version=__version__)
style: str = 'underscore'
interface.add_argument('-s', '--style', default=style, choices=list(_formatting_methods))
# run even without arguments (do not print usage)
ALLOW_NOARGS = True
def run(self) -> None:
"""Generate a random name and print it."""
print(generate_name(style=self.style))
def main() -> int:
"""Entry-point for `generate_name` console application."""
logging.basicConfig(format='%(msg)s')
return NamesGeneratorApp.main(sys.argv[1:])
| 28.708661 | 93 | 0.694185 | 491 | 3,646 | 5.01833 | 0.356415 | 0.031656 | 0.035714 | 0.030438 | 0.08888 | 0.062094 | 0.062094 | 0.062094 | 0.062094 | 0.049919 | 0 | 0.004326 | 0.175809 | 3,646 | 126 | 94 | 28.936508 | 0.815641 | 0.302523 | 0 | 0.046154 | 0 | 0 | 0.232035 | 0.017664 | 0 | 0 | 0 | 0 | 0 | 1 | 0.138462 | false | 0 | 0.123077 | 0.061538 | 0.446154 | 0.015385 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c431d85cef6b81fa14e9c6e9f5d7057ee4c9176 | 849 | py | Python | cotd/plugins/motivationv2.py | 5h4d0w4rt/cotd-telegram-bot | 2185353047557aa0864d64d464597993d0b5eb02 | [
"MIT"
] | 1 | 2021-01-14T10:03:49.000Z | 2021-01-14T10:03:49.000Z | cotd/plugins/motivationv2.py | 5h4d0w4rt/cotd-telegram-bot | 2185353047557aa0864d64d464597993d0b5eb02 | [
"MIT"
] | 2 | 2020-09-13T00:47:54.000Z | 2021-09-25T16:14:35.000Z | cotd/plugins/motivationv2.py | 5h4d0w4rt/cotd-telegram-bot | 2185353047557aa0864d64d464597993d0b5eb02 | [
"MIT"
] | 1 | 2020-09-12T22:34:04.000Z | 2020-09-12T22:34:04.000Z | import io
import typing
import uuid
import ratelimit
import telegram
import telegram.ext
from cotd.plugins.helpers import make_image
from PIL import Image
ONE_SECOND = 1
def motivation_inline(
update: telegram.Update, context: telegram.ext.CallbackContext
) -> telegram.InlineQueryResultCachedPhoto:
db = context.dispatcher._cotd_db
query = update.inline_query.query
if query == "":
return
motivation_image = make_image(Image.open("static/motivator.jpg"), query, "top")
msg = context.bot.send_photo(
chat_id=db,
photo=motivation_image,
)
photo_id = msg.photo[0].file_id
context.bot.delete_message(chat_id=db, message_id=msg.message_id)
return telegram.InlineQueryResultCachedPhoto(
id=str(uuid.uuid4()),
title="CachedPhoto",
photo_file_id=photo_id,
)
| 24.257143 | 83 | 0.714959 | 107 | 849 | 5.485981 | 0.448598 | 0.0477 | 0.027257 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004373 | 0.191991 | 849 | 34 | 84 | 24.970588 | 0.851312 | 0 | 0 | 0 | 0 | 0 | 0.040047 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.285714 | 0 | 0.392857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c46ebea0fe0e050587aad5c5ac6a903aa168e9c | 21,090 | py | Python | renderSDK/Rayvision.py | wangshunhui/renderSDK | b512c43fd0111c114b6abc6398c4609b758436e4 | [
"Apache-2.0"
] | 2 | 2020-02-12T09:57:46.000Z | 2020-04-03T07:40:07.000Z | renderSDK/Rayvision.py | wangshunhui/renderSDK | b512c43fd0111c114b6abc6398c4609b758436e4 | [
"Apache-2.0"
] | null | null | null | renderSDK/Rayvision.py | wangshunhui/renderSDK | b512c43fd0111c114b6abc6398c4609b758436e4 | [
"Apache-2.0"
] | 1 | 2020-04-16T11:19:58.000Z | 2020-04-16T11:19:58.000Z | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Main
"""
from .compat import *
import os
import sys
import logging
import codecs
import time
from .RayvisionUtil import get_os, hump2underline, cg_id_name_dict, decorator_use_in_class, format_time
from .RayvisionAPI import RayvisionAPI
from .RayvisionJob import RayvisionJob
from .RayvisionTransfer import RayvisionTransfer
from .RayvisionException import RayvisionError
from .RayvisionManageJob import RayvisionManageJob
from .analyse import RayvisionAnalyse
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
SDK_LOG = logging.getLogger('sdk_log')
class Rayvision(object):
def __init__(self, domain_name, platform, access_id, access_key, workspace=None, *args, **kwargs):
"""
:param str domain_name: domain name, such as: task.renderbus.com
:param str platform: platform number, such as: 2
:param str access_id: Authorization id to identify the API caller
:param str access_key: authorization key used to sign requests; the server verifies the signature with the same key
:param str workspace: working directory, used to store configuration files and logs generated in the analysis, etc.
:param kwargs:
"""
domain_name = str(domain_name)
platform = str(platform)
access_id = str(access_id)
access_key = str(access_key)
if workspace is None:
workspace = os.path.join(CURRENT_DIR, 'workspace') # default workspace
else:
workspace = str(workspace)
# init log
self.G_SDK_LOG = SDK_LOG
sdk_log_filename = 'run_{0}.log'.format(format_time('%Y%m%d'))
sdk_log_path = os.path.join(workspace, 'log', 'sdk', sdk_log_filename)
self._init_log(self.G_SDK_LOG, sdk_log_path)
self.G_SDK_LOG.info('='*50)
self._user_info = {
'domain_name': domain_name,
'platform': platform,
'access_id': access_id,
'access_key': access_key,
'local_os': get_os(),
'workspace': workspace
}
self._api_obj = RayvisionAPI(domain_name, platform, access_id, access_key, log_obj=self.G_SDK_LOG)
self._login() # update self._user_info
self._manage_job_obj = RayvisionManageJob(self._api_obj)
self._transfer_obj = RayvisionTransfer(self._user_info, self._api_obj, self._manage_job_obj, log_obj=self.G_SDK_LOG)
@decorator_use_in_class(SDK_LOG)
def set_render_env(self, cg_name, cg_version, plugin_config={}, edit_name=None, label_name=None):
"""
Set the job rendering environment, label (optional)
:param str cg_name: Software name, such as 3ds Max, Maya, Houdini
:param str cg_version: software version
:param dict plugin_config: {"3dhippiesterocam":"2.0.13"}
:param str edit_name: The unique identifier name of the rendering environment, temporarily unused
:param str label_name: label name, is project name, optional
"""
cg_name = str(cg_name)
cg_version = str(cg_version)
if edit_name is not None:
edit_name = str(edit_name)
if label_name is not None:
label_name = str(label_name)
self.G_SDK_LOG.info('INPUT:')
self.G_SDK_LOG.info('='*20)
self.G_SDK_LOG.info('cg_name:{0}'.format(cg_name))
self.G_SDK_LOG.info('cg_version:{0}'.format(cg_version))
self.G_SDK_LOG.info('plugin_config:{0}'.format(plugin_config))
self.G_SDK_LOG.info('edit_name:{0}'.format(edit_name))
self.G_SDK_LOG.info('label_name:{0}'.format(label_name))
self.G_SDK_LOG.info('='*20)
# initialize the variables
self.is_analyse = False # Whether to call the analysis method
self.errors_number = 0 # number of errors in tips.json
self.error_warn_info_list = [] # error, warning message
# self.cg_name = str(cg_name) # Software name (3ds Max, Maya, Houdini)
cg_id = cg_id_name_dict.get(cg_name, None) # Software id
if cg_id is None:
raise RayvisionError(1000000, r'Please input correct cg_name!') # Please enter the correct cg_name
# Generate job ID
job_id = str(self._api_obj.create_task().get(r'taskIdList', [''])[0])
if job_id == '':
raise RayvisionError(1000000, r'Failed to create task number!') # task ID creating failed
self.G_SDK_LOG.info('JOB ID:{0}'.format(job_id))
# Instantiate the RayvisionJob object
self._job_info = RayvisionJob(self._user_info, job_id)
self._job_info._task_info['task_info']['cg_id'] = cg_id
# Set up label
self.set_label(label_name)
# Set the task rendering environment (that is, the software configuration of the task)
software_config_dict = {}
software_config_dict['cg_name'] = cg_name
software_config_dict['cg_version'] = cg_version
software_config_dict['plugins'] = plugin_config
self._job_info._task_info['software_config'] = software_config_dict
return job_id
@decorator_use_in_class(SDK_LOG)
def analyse(self, cg_file, project_dir=None, software_path=None):
"""
Analyse cg file.
:param str cg_file: scene file path
:param str project_dir: The project path of the scene. If set, all assets are searched from the project path when rendering.
:param str software_path: Local rendering software path, read from the registry by default, user-definable
:return:
"""
cg_file = str(cg_file)
if project_dir is not None:
project_dir = str(project_dir)
self.G_SDK_LOG.info('INPUT:')
self.G_SDK_LOG.info('='*20)
self.G_SDK_LOG.info('cg_file:{0}'.format(cg_file))
self.G_SDK_LOG.info('project_dir:{0}'.format(project_dir))
self.G_SDK_LOG.info('='*20)
self.is_analyse = True
# Pass self.job_info, directly modify job_info
self._job_info._task_info['task_info']['input_cg_file'] = cg_file.replace('\\', '/')
self._job_info._task_info['task_info']['scenefile'] = cg_file.replace('\\', '/')
self._job_info._task_info['task_info']['cgfile'] = cg_file.replace('\\', '/')
self._job_info._task_info['task_info']['original_cg_file'] = cg_file.replace('\\', '/')
if project_dir is not None:
self._job_info._task_info['task_info']['input_project_path'] = project_dir
RayvisionAnalyse.execute(cg_file, self._job_info, exe_path=software_path)
scene_info_data = self._job_info._task_info['scene_info']
# add frames to scene_info_render.<layer>.common.frames
if self._job_info._task_info['task_info']['cg_id'] == '2000': # Maya
for layer_name, layer_dict in scene_info_data.items():
start_frame = layer_dict['common']['start']
end_frame = layer_dict['common']['end']
by_frame = layer_dict['common']['by_frame']
frames = '{0}-{1}[{2}]'.format(start_frame, end_frame, by_frame)
scene_info_data[layer_name]['common']['frames'] = frames
self._job_info._task_info['scene_info_render'] = scene_info_data
return_scene_info_render = self._job_info._task_info['scene_info_render']
return_task_info = self._job_info._task_info['task_info']
return return_scene_info_render, return_task_info
@decorator_use_in_class(SDK_LOG)
def check_error_warn_info(self, language='0'):
"""
Get the analyzed error and warning information
:param str language: Return language 0: Chinese (default) 1: English
"""
if len(self._job_info._tips_info) > 0:
for code, value in self._job_info._tips_info.items():
code_info_list = self._api_obj.query_error_detail(code, language=language)
for code_info in code_info_list:
code_info['details'] = value
if str(code_info['type']) == '1': # 0:warning 1:error
self.errors_number += 1
self.error_warn_info_list.append(code_info)
self.G_SDK_LOG.info('error_warn_info_list:{0}'.format(self.error_warn_info_list))
return self.error_warn_info_list
@decorator_use_in_class(SDK_LOG)
def submit_job(self, scene_info_render=None, task_info=None, upload_info=None, max_speed=None):
"""
Submit job
(1) Determine if there are any errors or warnings
(2) Edit rendering parameters
(3) Upload configuration files and assets
(4) Submit the job ID
:param dict scene_info_render: rendering parameters
:param dict task_info: task parameters
        :param dict upload_info: upload file information
        :param int max_speed: upload speed limit in KB/s; the default value is 1048576 KB/s (1 GB/s)
"""
self._is_scene_have_error() # check error
self._edit_param(scene_info_render, task_info, upload_info)
self._upload(max_speed)
self._submit_job()
@decorator_use_in_class(SDK_LOG)
def download(self, job_id_list, local_dir, max_speed=None, print_log=True):
"""
Download
        :param list<int> job_id_list: job IDs
        :param str local_dir: local directory in which to store the downloads
        :param int max_speed: download speed limit in KB/s; the default value is 1048576 KB/s (1 GB/s)
        :param bool print_log: whether to display the download command line. True: display; False: do not display
"""
self.G_SDK_LOG.info('INPUT:')
self.G_SDK_LOG.info('='*20)
self.G_SDK_LOG.info('job_id_list:{0}'.format(job_id_list))
self.G_SDK_LOG.info('local_dir:{0}'.format(local_dir))
self.G_SDK_LOG.info('='*20)
self._transfer_obj._download(job_id_list, local_dir, max_speed, print_log)
return True
@decorator_use_in_class(SDK_LOG)
def auto_download(self, job_id_list, local_dir, max_speed=None, print_log=False, sleep_time=10):
"""
Auto download as long as any frame is complete.
        :param list<int> job_id_list: job IDs
        :param str local_dir: local directory in which to store the downloads
        :param int max_speed: download speed limit in KB/s; the default value is 1048576 KB/s (1 GB/s)
        :param bool print_log: whether to display the download command line. True: display; False: do not display
        :param int/float sleep_time: sleep time between download attempts, in seconds
"""
self.G_SDK_LOG.info('INPUT:')
self.G_SDK_LOG.info('='*20)
self.G_SDK_LOG.info('job_id_list:{0}'.format(job_id_list))
self.G_SDK_LOG.info('local_dir:{0}'.format(local_dir))
self.G_SDK_LOG.info('='*20)
while True:
if len(job_id_list) > 0:
time.sleep(float(sleep_time))
for job_id in job_id_list:
is_job_end = self._manage_job_obj.is_job_end(job_id)
self._transfer_obj._download([job_id], local_dir, max_speed, print_log)
if is_job_end is True:
self.G_SDK_LOG.info('The job end: {0}'.format(job_id))
job_id_list.remove(job_id)
else:
break
return True
@decorator_use_in_class(SDK_LOG)
def auto_download_after_job_completed(self, job_id_list, local_dir, max_speed=None, print_log=True, sleep_time=10):
"""
Auto download after the job render completed.
        :param list<int> job_id_list: job IDs
        :param str local_dir: local directory in which to store the downloads
        :param int max_speed: download speed limit in KB/s; the default value is 1048576 KB/s (1 GB/s)
        :param bool print_log: whether to display the download command line. True: display; False: do not display
        :param int/float sleep_time: sleep time between download attempts, in seconds
"""
self.G_SDK_LOG.info('INPUT:')
self.G_SDK_LOG.info('='*20)
self.G_SDK_LOG.info('job_id_list:{0}'.format(job_id_list))
self.G_SDK_LOG.info('local_dir:{0}'.format(local_dir))
self.G_SDK_LOG.info('='*20)
while True:
if len(job_id_list) > 0:
time.sleep(float(sleep_time))
for job_id in job_id_list:
is_job_end = self._manage_job_obj.is_job_end(job_id)
if is_job_end is True:
self.G_SDK_LOG.info('The job end: {0}'.format(job_id))
self._transfer_obj._download([job_id], local_dir, max_speed, print_log)
job_id_list.remove(job_id)
else:
break
return True
def _init_log(self, log_obj, log_path, is_print_log=True):
log_dir = os.path.dirname(log_path)
# If the log_dir path is a file, add timestamp after the log folder name.
if os.path.exists(log_dir):
if not os.path.isdir(log_dir):
log_dir = '{0}{1}'.format(log_dir, format_time())
if not os.path.exists(log_dir):
os.makedirs(log_dir)
        # If log_path points to a folder, add a timestamp after the log file
        # name so FileHandler receives a file path, not a directory.
        if os.path.isdir(log_path):
            log_path = '{0}{1}'.format(log_path, format_time())
log_obj.setLevel(logging.DEBUG)
# FileHandler
file_handler = logging.FileHandler(log_path, encoding='utf-8')
fm=logging.Formatter("%(asctime)s %(levelname)s - %(message)s","%Y-%m-%d %H:%M:%S")
file_handler.setFormatter(fm)
log_obj.addHandler(file_handler)
# StreamHandler
if is_print_log:
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
log_obj.addHandler(stream_handler)
def _login(self):
"""
Query user information and update to self._user_info
1. Get user details (query_user_profile)
2. Get user settings (query_user_setting)
3. Get user transfer BID (get_transfer_bid)
:return: True
"""
self.G_SDK_LOG.info('[Rayvision.login.start.....]')
data1 = self._api_obj.query_user_profile()
data2 = self._api_obj.query_user_setting()
data3 = self._api_obj.get_transfer_bid()
data1.update(data2)
data1.update(data3)
# Update the above interface results to self._user_info and convert all the keys into underscores.
for key, value in data1.items():
if isinstance(value, (int, long, float)):
value = str(value)
            key_underline = hump2underline(key)  # convert camelCase keys to snake_case
self._user_info[key_underline] = value
self.G_SDK_LOG.info('USER INFO:{0}'.format(self._user_info))
self.G_SDK_LOG.info('[Rayvision.login.end.....]')
return True
def set_label(self, label_name):
"""
Customize the label to the job, find the task by label
:param str label_name: label name
"""
if label_name is not None:
is_label_exist = False
label_id = ''
            for _ in range(3):  # retry up to three times
                label_dict_list = self._api_obj.get_label_list().get('projectNameList', [])  # get the list of existing labels
for label_dict in label_dict_list:
if label_dict['projectName'] == label_name:
is_label_exist = True
label_id = str(label_dict['projectId'])
break
if is_label_exist:
if label_id == '':
continue
break
                else:  # add the label if it does not exist yet
self._api_obj.add_label(label_name, '0')
is_label_exist = True
self._job_info._task_info['task_info']['project_name'] = label_name
self._job_info._task_info['task_info']['project_id'] = str(label_id)
def _edit_param(self, scene_info_render=None, task_info=None, upload_info=None):
"""
Modify rendering parameters, task parameters
:param dict scene_info_render: rendering parameters
:param dict task_info: task parameters
:param dict upload_info: upload path informations
:return: True
"""
self.G_SDK_LOG.info('INPUT:')
self.G_SDK_LOG.info('='*20)
self.G_SDK_LOG.info('scene_info_render:{0}'.format(scene_info_render))
self.G_SDK_LOG.info('task_info:{0}'.format(task_info))
self.G_SDK_LOG.info('='*20)
if scene_info_render is not None:
self._job_info._task_info['scene_info_render'] = scene_info_render
if not self.is_analyse:
self._job_info._task_info['scene_info'] = scene_info_render
        if task_info is not None:
            modifiable_param = [
                'input_cg_file',  # the scene file path
                'frames_per_task',  # number of frames rendered per machine
                'test_frames',  # frames used for the test render
                'job_stop_time',  # timeout after which a small task is stopped, in seconds; default is 8 hours
                'task_stop_time',  # timeout after which a big task is stopped, in seconds; default is 24 hours
                'time_out',  # time-out period after which the task is flagged yellow, in seconds; default is 12 hours
                'stop_after_test',  # whether to pause the task after priority (test) rendering completes; 1: pause, 2: do not pause
                'tiles_type',  # "block" (block-based) or "strip" (strip-based)
                'tiles',  # number of blocks or strips; a value of 1 means single-machine rendering
                'is_layer_rendering',  # whether Maya layer rendering is enabled; "0": off, "1": on
                'is_distribute_render',  # whether distributed rendering is enabled; "0": off, "1": on
                'distribute_render_node',  # number of distributed rendering machines
                'input_project_path',  # project path
                'render_layer_type',  # render layer mode; "0": renderlayer mode, "1": rendersetup mode
                'os_name',  # rendering OS; "0": Linux, "1": Windows
                'ram'  # render machine RAM; "64": 64 GB, "128": 128 GB
            ]  # list of modifiable parameters
for key, value in task_info.items():
if key in modifiable_param:
if isinstance(value, (int, long, float)):
value = str(value)
self._job_info._task_info['task_info'][key] = value
# write upload.json
if upload_info is not None:
self._job_info._upload_info = upload_info
with codecs.open(self._job_info._upload_json_path, 'w', 'utf-8') as f_upload_json:
json.dump(upload_info, f_upload_json, indent=4, ensure_ascii=False)
# write task.json
with codecs.open(self._job_info._task_json_path, 'w', 'utf-8') as f_task_json:
json.dump(self._job_info._task_info, f_task_json, indent=4, ensure_ascii=False)
# write tips.json
if not os.path.exists(self._job_info._tips_json_path):
with codecs.open(self._job_info._tips_json_path, 'w', 'utf-8') as f_tips_json:
json.dump(self._job_info._tips_info, f_tips_json, indent=4, ensure_ascii=False)
return True
def _upload(self, max_speed=None):
cfg_list = []
root = self._job_info._work_dir
for file_name in os.listdir(self._job_info._work_dir):
if file_name.endswith('.7z'):
continue
file_path = os.path.join(root, file_name)
cfg_list.append(file_path)
self._transfer_obj._upload(self._job_info._job_id, cfg_list, self._job_info._upload_info, max_speed) # upload assets and config files
return True
def _submit_job(self):
self._api_obj.submit_task(int(self._job_info._job_id))
return True
def _is_scene_have_error(self):
if self.errors_number > 0:
return_message = r'There are {0} errors. error_warn_info_list:{1}'.format(self.errors_number, self.error_warn_info_list)
raise RayvisionError(1000000, return_message) # Analysis completed with errors
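
# Usage sketch (hypothetical credentials, scene path, and output directory;
# the flow simply chains the public methods defined above):
#
#   r = Rayvision('task.renderbus.com', '2', '<access_id>', '<access_key>')
#   job_id = r.set_render_env(cg_name='Maya', cg_version='2018',
#                             plugin_config={}, label_name='my_project')
#   scene_info_render, task_info = r.analyse(r'D:/work/scene.ma')
#   r.check_error_warn_info()
#   r.submit_job(scene_info_render, task_info)
#   r.download([int(job_id)], r'D:/output')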
| 46.048035 | 233 | 0.625273 | 2,889 | 21,090 | 4.264105 | 0.131187 | 0.028736 | 0.029223 | 0.040182 | 0.419677 | 0.365695 | 0.322997 | 0.272181 | 0.248234 | 0.224694 | 0 | 0.012656 | 0.276908 | 21,090 | 457 | 234 | 46.148797 | 0.795148 | 0.258464 | 0 | 0.265734 | 0 | 0 | 0.089122 | 0.009694 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052448 | false | 0 | 0.045455 | 0 | 0.136364 | 0.027972 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c4810e4fa88798c039566c38fcfb8acd43be82c | 13,764 | py | Python | scqubits/io_utils/fileio_backends.py | dmtvanzanten/scqubits | d4d8a0f71ac91077594a6173348279aa490ed048 | [
"BSD-3-Clause"
] | null | null | null | scqubits/io_utils/fileio_backends.py | dmtvanzanten/scqubits | d4d8a0f71ac91077594a6173348279aa490ed048 | [
"BSD-3-Clause"
] | null | null | null | scqubits/io_utils/fileio_backends.py | dmtvanzanten/scqubits | d4d8a0f71ac91077594a6173348279aa490ed048 | [
"BSD-3-Clause"
] | null | null | null | # fileio_backends.py
#
# This file is part of scqubits.
#
# Copyright (c) 2019 and later, Jens Koch and Peter Groszkowski
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
############################################################################
"""
Helper routines for writing data to h5 files.
"""
import ast
import csv
import os
import re
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Tuple, Union
import numpy as np
from numpy import ndarray
import scqubits.io_utils.fileio as io
import scqubits.utils.misc as utils
try:
import h5py
from h5py import AttributeManager, File, Group
except ImportError:
_HAS_H5PY = False
else:
_HAS_H5PY = True
# from scqubits.core.discretization import Grid1d
# from scqubits.io_utils.fileio import IOData
class IOWriter(ABC):
"""
ABC for writing class instance data to file.
Parameters
----------
filename: str
file_handle: h5.Group, optional
"""
def __init__(self, filename: str, file_handle: Group = None) -> None:
self.filename = filename
self.io_data: io.IOData
self.file_handle = file_handle
@abstractmethod
def to_file(self, io_data: io.IOData, **kwargs):
pass
@abstractmethod
def write_attributes(self, *args, **kwargs):
pass
@abstractmethod
def write_ndarrays(self, *args, **kwargs):
pass
@abstractmethod
def write_objects(self, *args, **kwargs):
pass
class H5Writer(IOWriter):
"""Writes IOData to a custom-format h5 file"""
def write_attributes(self, h5file_group: Union[Group, File]) -> None: # type: ignore
"""
Attribute data consists of
1. `__init__` parameters that are of type str or numerical. These are
directly written into `h5py.Group.attrs` 2. lists are stored under
`<h5py.Group>/__lists` 3. dicts are stored under `<h5py.Group>/__dicts`
"""
h5file_group.attrs.create(
"__type", self.io_data.typename
) # Record the type of the current class instance
attributes = self.io_data.attributes
for attr_name, attr_value in attributes.items():
if isinstance(
attr_value, dict
): # h5py does not serialize dicts automatically, so have to do it manually
group_name = "__dicts/" + attr_name
h5file_group.create_group(group_name)
io.write(
attr_value, self.filename, file_handle=h5file_group[group_name]
)
elif isinstance(attr_value, (list, tuple)):
group_name = "__lists/" + attr_name
h5file_group.create_group(group_name)
io.write(
attr_value, self.filename, file_handle=h5file_group[group_name]
)
else:
h5file_group.attrs[attr_name] = attr_value
def write_ndarrays(self, h5file_group: Union[Group, File]) -> None: # type: ignore
"""
Writes ndarray (float or complex) data contained in `self.iodata` to the
provided `h5py.Group` as a `h5py.Dataset`, using gzip compression.
"""
data_group = h5file_group.file.require_group("__data")
for name, array in self.io_data.ndarrays.items():
array_id = hash(array.tobytes())
h5file_group.create_dataset(name, data=[array_id], dtype="int64")
if str(array_id) not in data_group:
data_group.create_dataset(
str(array_id), data=array, dtype=array.dtype, compression="gzip"
)
def write_objects(self, h5file_group: Union[Group, File]) -> None: # type: ignore
"""
Writes data representing a Python object other than ndarray, list and dict,
contained in `self.iodata` to the provided `h5py.Group` und
`<h5py.Group>/__objects`.
"""
h5file_group = h5file_group.create_group("__objects")
for obj_name in self.io_data.objects.keys():
new_h5group = h5file_group.create_group(obj_name)
io.write(
self.io_data.objects[obj_name], self.filename, file_handle=new_h5group
)
@utils.Required(h5py=_HAS_H5PY)
def to_file(self, io_data: io.IOData, file_handle: Group = None) -> None:
"""
Takes the serialized IOData and writes it either to a new h5 file with file
name given by `self.filename` to to the given h5py.Group of an open h5 file.
"""
self.io_data = io_data
if file_handle is None:
h5file_group = h5py.File(self.filename, "w", rdcc_nbytes=1024 ** 2 * 200)
_ = h5file_group.create_group("__data")
close_when_done = True
else:
h5file_group = file_handle
close_when_done = False
self.write_attributes(h5file_group)
self.write_ndarrays(h5file_group)
self.write_objects(h5file_group)
if close_when_done:
h5file_group.close()
class H5Reader:
"""
Enables reading h5 files generated with scqubits.
"""
def __init__(self, filename: str, file_handle: Group = None) -> None:
self.filename = filename
self.io_data = None
self.file_handle = file_handle
@staticmethod
def h5_attrs_to_dict(
h5_attrs: AttributeManager,
) -> Dict[str, Union[float, str, int]]:
"""
Converts h5 attribute data to a Python dictionary.
Parameters
----------
h5_attrs: h5py.AttributeManager
as obtained by accessing `<h5py.Group>.attrs`
"""
return {attr_name: attr_value for attr_name, attr_value in h5_attrs.items()}
def read_attributes(self, h5file_group: Union[Group, File]) -> Dict[str, Any]:
"""
Read data from h5 file group that is stored directly as `<h5py.Group>.attrs`,
or saved in subgroups titled `<h5py.Group>/__lists` and `<h5py.Group>/__dicts`.
"""
attributes = self.h5_attrs_to_dict(h5file_group.attrs)
if "__dicts" in h5file_group:
for dict_name in h5file_group["__dicts"]:
attributes[dict_name] = io.read(
self.filename, h5file_group["__dicts/" + dict_name]
)
if "__lists" in h5file_group:
for list_name in h5file_group["__lists"]:
attributes[list_name] = io.read(
self.filename, h5file_group["__lists/" + list_name]
)
return attributes
def read_ndarrays(self, h5file_group: Union[Group, File]) -> Dict[str, ndarray]:
"""
Read numpy array data from h5 file group.
"""
ndarrays = {}
if "__data" in h5file_group.file:
datagroup = h5file_group.file.require_group("__data")
for name, id_dataset in h5file_group.items():
if isinstance(id_dataset, h5py.Dataset):
id_int = id_dataset[:][0]
data = datagroup[str(id_int)][:]
ndarrays[name] = data
return ndarrays
# legacy support
ndarrays = {
name: array[:]
for name, array in h5file_group.items()
if isinstance(array, h5py.Dataset)
}
return ndarrays
def read_objects(self, h5file_group: Union[Group, File]) -> Dict[str, io.IOData]:
"""
Read data from the given h5 file group that represents a Python object other
than an ndarray, list, or dict.
"""
inner_objects = {}
h5file_group = h5file_group["__objects"]
for obj_name in h5file_group:
inner_objects[obj_name] = io.read(self.filename, h5file_group[obj_name])
return inner_objects
@utils.Required(h5py=_HAS_H5PY)
def from_file(self, filename: str, file_handle: Group = None) -> io.IOData:
"""
Either opens a new h5 file for reading or accesses an already opened file via
the given h5.Group handle. Reads all data from the three categories of
attributes (incl. lists and dicts), ndarrays, and objects.
"""
if file_handle is None:
h5file_group = h5py.File(filename, "r", rdcc_nbytes=1024 ** 2 * 200)
else:
h5file_group = file_handle
attributes = self.read_attributes(h5file_group)
typename = attributes["__type"]
assert isinstance(typename, str)
del attributes["__type"]
ndarrays = self.read_ndarrays(h5file_group)
inner_objects = self.read_objects(h5file_group)
return io.IOData(typename, attributes, ndarrays, inner_objects)
class CSVWriter(IOWriter):
"""
Given filename='somename.csv', write initdata into somename.csv Then, additional
csv files are written for each dataset, with filenames: 'somename_' + dataname0 +
'.csv' etc.
"""
def append_ndarray_info(self, attributes):
"""Add data set information to attributes, so that dataset names and
dimensions are available in attributes CSV file."""
for index, dataname in enumerate(self.io_data.ndarrays.keys()):
data = self.io_data.ndarrays[dataname]
attributes["dataset" + str(index)] = dataname
if data.ndim == 3:
slice_count = len(data)
else:
slice_count = 1
attributes["dataset" + str(index) + ".slices"] = slice_count
return attributes
def write_attributes(self, filename: str): # type: ignore
attributes = self.io_data.attributes
attributes["__type"] = self.io_data.typename
attributes = self.append_ndarray_info(attributes)
with open(filename, mode="w", newline="") as meta_file:
file_writer = csv.writer(meta_file, delimiter=",")
file_writer.writerow(attributes.keys())
file_writer.writerow(attributes.values())
def write_ndarrays(self, filename: str): # type: ignore
filename_stub, _ = os.path.splitext(filename)
for dataname, dataset in self.io_data.ndarrays.items():
filename = filename_stub + "_" + dataname + ".csv"
self.write_data(filename, dataset)
def write_data(self, filename: str, dataset: ndarray): # type: ignore
if dataset.ndim <= 2:
np.savetxt(filename, dataset)
elif dataset.ndim == 3:
np_savetxt_3d(dataset, filename)
else:
raise Exception("Dataset has dimensions > 3. Cannot write to CSV file.")
def write_objects(self, *args, **kwargs): # type: ignore
raise NotImplementedError
def to_file(self, io_data: io.IOData, **kwargs):
self.io_data = io_data
self.write_attributes(self.filename)
self.write_ndarrays(self.filename)
# no support for write_objects in CSV format
class CSVReader:
@staticmethod
def read_attributes(filename):
with open(filename, mode="r") as meta_file:
file_reader = csv.reader(meta_file, delimiter=",")
meta_keys = file_reader.__next__()
meta_values = file_reader.__next__()
return dict(zip(meta_keys, meta_values))
    def process_metadict(self, meta_dict: Dict) -> Tuple[Dict, List[str], List[int]]:
        attributes = {
            attr_name: utils.to_expression_or_string(attr_value)
            for attr_name, attr_value in meta_dict.items()
            if not re.match(r"dataset\d+", attr_name)
        }
        data_names = [
            dataname
            for datalabel, dataname in meta_dict.items()
            if re.match(r"dataset\d+$", datalabel)
        ]
        data_slices = [
            ast.literal_eval(value)
            for key, value in meta_dict.items()
            if re.match(r"dataset\d+\.slices", key)
        ]
        return attributes, data_names, data_slices
@staticmethod
def read_data(filename, slices):
try:
data_array = np.loadtxt(filename)
except ValueError:
data_array = np.loadtxt(filename, dtype=np.complex_)
if slices > 1:
nrows, ncols = data_array.shape
return data_array.reshape((slices, nrows // slices, ncols))
return data_array
def from_file(self, filename: str, **kwargs) -> io.IOData:
"""
Returns
-------
class instance generated from file data
"""
ext_attributes = self.read_attributes(filename)
typename = ext_attributes["__type"]
del ext_attributes["__type"]
attributes, data_names, data_slices = self.process_metadict(ext_attributes)
filename_stub, _ = os.path.splitext(filename)
ndarrays = {}
for index, dataname in enumerate(data_names):
data_filename = filename_stub + "_" + dataname + ".csv"
slices = data_slices[index]
ndarrays[dataname] = self.read_data(data_filename, slices)
return io.IOData(typename, attributes, ndarrays, objects=None)
def np_savetxt_3d(array3d: ndarray, filename: str):
"""
Helper function that splits a 3d numpy array into 2d slices for writing as csv
data to a new file. Slices are separated by a comment row `# New slice`.
Parameters
----------
array3d:
ndarray with ndim = 3
"""
with open(filename, mode="w", newline="") as datafile:
datafile.write("# Array shape: {0}\n".format(array3d.shape))
for data_slice in array3d:
np.savetxt(datafile, data_slice)
datafile.write("# New slice\n")
| 35.84375 | 89 | 0.613266 | 1,653 | 13,764 | 4.900181 | 0.174834 | 0.059753 | 0.020988 | 0.008889 | 0.328395 | 0.229136 | 0.169506 | 0.12963 | 0.095926 | 0.062716 | 0 | 0.013732 | 0.285745 | 13,764 | 383 | 90 | 35.937337 | 0.810192 | 0.204083 | 0 | 0.228814 | 0 | 0 | 0.029821 | 0 | 0 | 0 | 0 | 0 | 0.004237 | 1 | 0.110169 | false | 0.016949 | 0.055085 | 0 | 0.237288 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c4e1b3188875d9cc7ead2bb542721689bd51cbf | 2,107 | py | Python | util/trig.py | alemigliardi/talkbot | 371f2c9e7240a4438cbb202865cf18dbefb0c352 | [
"MIT"
] | 2 | 2021-08-07T12:01:26.000Z | 2022-01-31T13:48:25.000Z | util/trig.py | alemigliardi/talkbot | 371f2c9e7240a4438cbb202865cf18dbefb0c352 | [
"MIT"
] | null | null | null | util/trig.py | alemigliardi/talkbot | 371f2c9e7240a4438cbb202865cf18dbefb0c352 | [
"MIT"
] | 1 | 2021-08-07T12:01:28.000Z | 2021-08-07T12:01:28.000Z | import os
import re
import asyncio
import json
from pyrogram.types import Message
from util.message import send_media
class Trigger:
def __init__(self, regex:str,
response:str = "",
from_self:bool = False,
from_others:bool = True,
mention:bool = True,
media_path:str = "",
auto_vanish:int = -1,
):
self.regex = re.compile(regex)
self.response = response
self.from_self = from_self
self.from_others = from_others
self.mention = mention
self.path = media_path
self.vanish = auto_vanish
@staticmethod
def unserialize(obj):
return Trigger(obj["regex"],
response=obj["response"],
from_self=obj["from_self"],
from_others=obj["from_others"],
mention=obj["mention"],
media_path=obj["path"],
auto_vanish=obj["vanish"])
def check(self, message:Message) -> bool:
if message.from_user.is_self:
if not self.from_self:
return False
elif not self.from_others:
return False
if self.mention and message.chat.type != "private" and not message.mentioned:
return False
if self.regex.search(message.text):
return True
return False
async def fire(self, client, message:Message):
if self.path:
msg = await send_media(client, message.chat.id, self.path,
reply_to_message_id=message.message_id, caption=self.response)
else:
msg = await message.reply(self.response)
        if self.vanish >= 0:
            await asyncio.sleep(self.vanish)
            await msg.delete()
def serialize(self) -> dict:
return {
"regex": self.regex.pattern,
"response": self.response,
"from_self": self.from_self,
"from_others": self.from_others,
"mention": self.mention,
"path": self.path,
"vanish": self.vanish
}
class TriggerList:
def __init__(self):
if os.path.isfile("data/triggers.json"):
with open("data/triggers.json") as f:
self.data = json.load(f)
else:
self.data = []
with open("data/triggers.json", "w") as f:
json.dump(self.data, f)
def serialize(self):
with open("data/triggers.json", "w") as f:
json.dump(self.data, f)
def __iter__(self):
return self.data.__iter__()
# TRIGGERS = TriggerList()
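
# A minimal (de)serialization round-trip sketch (assumption: exercising
# `check`/`fire` against a live pyrogram Message is out of scope here).
if __name__ == "__main__":
    trig = Trigger(r"\bhello\b", response="hi there!", auto_vanish=5)
    data = trig.serialize()
    assert Trigger.unserialize(data).serialize() == data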
| 24.218391 | 79 | 0.688657 | 301 | 2,107 | 4.664452 | 0.252492 | 0.051282 | 0.049858 | 0.042735 | 0.08547 | 0.068376 | 0.068376 | 0.068376 | 0.068376 | 0.068376 | 0 | 0.001156 | 0.178927 | 2,107 | 86 | 80 | 24.5 | 0.810405 | 0.011391 | 0 | 0.135135 | 0 | 0 | 0.086977 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.094595 | false | 0 | 0.067568 | 0.040541 | 0.297297 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c4fccadd03cd528d853e80ad9d2e69c96250a99 | 3,185 | py | Python | back-app/research/form_vision/template_matching.py | AntoineAwaida/ad-covia | 1dd10a0361421149e6e318fee146ccf370a81074 | [
"MIT"
] | 1 | 2020-04-15T13:48:34.000Z | 2020-04-15T13:48:34.000Z | back-app/research/form_vision/template_matching.py | AntoineAwaida/ad-covia | 1dd10a0361421149e6e318fee146ccf370a81074 | [
"MIT"
] | 1 | 2022-02-13T09:58:43.000Z | 2022-02-13T09:58:43.000Z | back-app/research/form_vision/template_matching.py | AntoineAwaida/ad-covia | 1dd10a0361421149e6e318fee146ccf370a81074 | [
"MIT"
] | null | null | null | import cv2
from typing import Tuple, Generator
from research.form_vision.image import Image, NormalizedCoords
import numpy as np
class TemplateMatcher:
width = 800
def __init__(
self, template_image: Image, reference_region: Tuple[NormalizedCoords, NormalizedCoords]
):
self._template = template_image.resize(self.width)
self._reference_region = reference_region
self._reference = self._template.select_subimage(*reference_region)
self._ref_region_height = reference_region[1][0] - reference_region[0][0]
self._ref_region_width = reference_region[1][1] - reference_region[0][1]
    def match(self, img: Image) -> Image:
        img = img.crop_to_aspect_ratio(self._template.aspect_ratio)
        img = img.resize(width=self.width)
        region_to_search_coords = self._around_reference_region()
        region_to_search = img.select_subimage(*region_to_search_coords)
        # Scan small scale/rotation perturbations and keep the best match.
        best_top_left, most_similar = None, 0.0
        for (scaled_rotated_img, scale_factor, angle) in self._get_scaled_rotated_versions(img):
            top_left, similarity = self._match_template(scaled_rotated_img)
            if similarity > most_similar:
                most_similar = similarity
                best_top_left = top_left
        if best_top_left is None:
            # Fall back to matching the unperturbed search region.
            best_top_left, _ = self._match_template(region_to_search)
        return self._extract_match(region_to_search, best_top_left)
def _around_reference_region(
self, coeff: float = 0.5
) -> Tuple[NormalizedCoords, NormalizedCoords]:
start_vert = max(0, self._reference_region[0][0] - self._ref_region_height * coeff / 2)
start_horiz = max(0, self._reference_region[0][1] - self._ref_region_width * coeff / 2)
end_vert = min(1, self._reference_region[1][0] + self._ref_region_height * coeff / 2)
end_horiz = min(1, self._reference_region[1][1] + self._ref_region_width * coeff / 2)
return ((start_vert, start_horiz), (end_vert, end_horiz))
def _extract_match(self, searched_region: Image, top_left: NormalizedCoords) -> Image:
return searched_region[
top_left[0] : top_left[0] + int(self._ref_region_height * self._template.shape[0]),
top_left[1] : top_left[1] + int(self._ref_region_width * self._template.shape[1]),
]
def _get_scaled_rotated_versions(
self, image: Image
) -> Generator[Tuple[Image, float, float], None, None]:
for scaling_factor in np.arange(0.9, 1.1, 0.02):
for angle in np.arange(-3, 3, 0.3):
yield (
image.resize(int(scaling_factor * image.shape[1])).rotate(angle),
scaling_factor,
angle,
)
def _match_template(self, image_to_search: Image) -> Tuple[NormalizedCoords, float]:
res = cv2.matchTemplate(
image_to_search.image_data, self._reference.image_data, cv2.TM_CCOEFF
)
_, max_val, _, max_loc = cv2.minMaxLoc(res)
return max_loc[::-1], max_val
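
# Usage sketch (assumptions: `Image` wraps an OpenCV/numpy image per
# form_vision.image, and the reference region is a normalized
# ((top, left), (bottom, right)) pair):
#
#   template = Image(...)              # hypothetical constructor
#   matcher = TemplateMatcher(template, ((0.10, 0.10), (0.25, 0.40)))
#   found = matcher.match(Image(...))  # photo of the same form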
| 44.236111 | 96 | 0.663736 | 412 | 3,185 | 4.759709 | 0.218447 | 0.107088 | 0.053034 | 0.038756 | 0.164202 | 0.164202 | 0.123406 | 0.047935 | 0.047935 | 0.047935 | 0 | 0.022231 | 0.237363 | 3,185 | 71 | 97 | 44.859155 | 0.785097 | 0 | 0 | 0.066667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.066667 | 0.016667 | 0.283333 | 0.016667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c5166b8cabcb5a4f633be4f4e1a16e185d3520c | 763 | py | Python | src/run.py | ShaghayeghAmeri/test | 968eea52ce0ea309f020add61ab597ca402feb14 | [
"MIT"
] | null | null | null | src/run.py | ShaghayeghAmeri/test | 968eea52ce0ea309f020add61ab597ca402feb14 | [
"MIT"
] | null | null | null | src/run.py | ShaghayeghAmeri/test | 968eea52ce0ea309f020add61ab597ca402feb14 | [
"MIT"
] | null | null | null | import os
import emoji
import telebot
from loguru import logger
from src.utils.constant import keyboards
from src.utils.io import write_json
class But:
"""
    Telegram bot that randomly connects two strangers.
"""
def __init__(self):
self.bot = telebot.TeleBot(os.environ['BOT_TOKEN'])
self.echo = self.bot.message_handler(func=lambda m: True)(self.echo_all)
def run(self):
logger.info('bot is running...')
self.bot.infinity_polling()
def echo_all(self, message):
print(emoji.demojize(message.text))
self.bot.send_message(message.chat.id, message.text, reply_markup=keyboards.main)
write_json(message.json, 'message.json')
if __name__=='__main__':
bot = But()
bot.run()
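
# Usage sketch (assumption: a valid token issued by @BotFather):
#   export BOT_TOKEN="123456:ABC..."
#   python -m src.run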
| 23.84375 | 89 | 0.673657 | 104 | 763 | 4.740385 | 0.509615 | 0.056795 | 0.048682 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.209699 | 763 | 31 | 90 | 24.612903 | 0.817579 | 0.060288 | 0 | 0 | 0 | 0 | 0.065714 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15 | false | 0 | 0.3 | 0 | 0.5 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c52b06ffe9aba58e5600ed9c53a392211b57c2c | 713 | py | Python | PythonFiles/MCQGame.py | IamVaibhavsar/Python_Files | 283d73929a3e11955c71499407c4f8bff56e4273 | [
"MIT"
] | null | null | null | PythonFiles/MCQGame.py | IamVaibhavsar/Python_Files | 283d73929a3e11955c71499407c4f8bff56e4273 | [
"MIT"
] | null | null | null | PythonFiles/MCQGame.py | IamVaibhavsar/Python_Files | 283d73929a3e11955c71499407c4f8bff56e4273 | [
"MIT"
] | 1 | 2019-07-26T15:25:21.000Z | 2019-07-26T15:25:21.000Z | from MCQGame2 import Question
MCQ = [
    "What color are apples?\na) red\nb) blue\nc) green\nd) yellow",
    "How many inputs does a half adder have?\na) 1\nb) 2\nc) 3\nd) 4",
    "Which operator can be overloaded?\na) .\nb) .*\nc) ::\nd) +",
]

questions = [
    Question(MCQ[0], "a"),  # objects of class Question
    Question(MCQ[1], "b"),
    Question(MCQ[2], "d"),
]

def run_test(questions):  # loop through the Question objects
    score = 0
    for question in questions:
        answer = input(question.prompt)  # prompt with an element of MCQ
        if answer == question.answer:
            score += 1
    print("You got " + str(score) + " out of " + str(len(questions)) + " correct")
run_test(questions) | 31 | 82 | 0.601683 | 100 | 713 | 4.27 | 0.61 | 0.103045 | 0.074941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018587 | 0.245442 | 713 | 23 | 83 | 31 | 0.775093 | 0.106592 | 0 | 0 | 0 | 0.105263 | 0.30832 | 0.153344 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.052632 | 0 | 0.105263 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
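
# For reference, a minimal sketch of the imported helper (assumption: the real
# MCQGame2.Question may differ; only `prompt` and `answer` are used above):
#
#   class Question:
#       def __init__(self, prompt, answer):
#           self.prompt = prompt
#           self.answer = answer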
2c52e41b928f577557908dcb035f2bea35861221 | 5,286 | py | Python | gada/main.py | gadalang/gada | 2dd4f4dfd5b7390c06307040cad23203a015f7a4 | [
"MIT"
] | null | null | null | gada/main.py | gadalang/gada | 2dd4f4dfd5b7390c06307040cad23203a015f7a4 | [
"MIT"
] | null | null | null | gada/main.py | gadalang/gada | 2dd4f4dfd5b7390c06307040cad23203a015f7a4 | [
"MIT"
] | 1 | 2021-06-15T13:52:33.000Z | 2021-06-15T13:52:33.000Z | from __future__ import annotations
__all__ = ["run", "main"]
import os
import sys
import io
import argparse
from typing import Optional
from gada import component, runners, datadir
def split_unknown_args(argv: list[str]) -> tuple[list[str], list[str]]:
"""Separate known command-line arguments from unknown one.
Unknown arguments are separated from known arguments by
the special **--** argument.
:param argv: command-line arguments
:return: tuple (known_args, unknown_args)
"""
for i in range(len(argv)):
if argv[i] == "--":
return argv[:i], argv[i + 1 :]
return argv, []
def run(
node: str,
argv: Optional[list[str]] = None,
*,
stdin=None,
stdout=None,
stderr=None,
):
"""Run a gada node:
.. code-block:: python
>>> import gada
>>>
>>> # Overwrite "gada/test/testnodes/config.yml" for this test
>>> gada.test_utils.write_testnodes_config({
... 'nodes': {
... 'echo': {
... 'runner': 'generic',
... 'bin': 'echo'
... }
... }
... })
>>>
>>> # Need to create fake stdin and stdout for unittests
>>> with gada.test_utils.PipeStream() as stdin:
... with gada.test_utils.PipeStream() as stdout:
... # Run node with CLI arguments
... gada.run(
... 'testnodes.echo',
... ['hello'],
... stdin=stdin.reader,
... stdout=stdout.writer,
... stderr=stdout.writer
... )
...
... # Close writer end so we can read form it
... stdout.writer.close()
...
... # Read node output
... stdout.reader.read().decode().strip()
'hello'
>>>
The three parameters ``stdin``, ``stdout`` or ``stderr`` are provided as a convenience
for writing unit tests when you can't use ``sys.stdin`` or ``sys.stdout``, or simply
when you want to be able to read from the output.
:param node: node to run
:param argv: additional CLI arguments
:param stdin: input stream
:param stdout: output stream
:param stderr: error stream
"""
# Load gada configuration
gada_config = datadir.load_config()
# Check command format
node_argv = node.split(".")
if len(node_argv) != 2:
raise Exception(f"invalid command {node}")
# Load component module
comp = component.load(node_argv[0])
# Load node configuration
node_config = component.get_node_config(component.load_config(comp), node_argv[1])
# Load correct runner
runner = runners.load(node_config.get("runner", None))
# Run component
runner.run(
comp=comp,
gada_config=gada_config,
node_config=node_config,
argv=argv,
stdin=stdin,
stdout=stdout,
stderr=stderr,
)
def main(
argv: Optional[list[str]] = None,
*,
stdin=None,
stdout=None,
stderr=None,
):
"""Gada main:
.. code-block:: python
>>> import gada
>>>
>>> # Overwrite "gada/test/testnodes/config.yml" for this test
>>> gada.test_utils.write_testnodes_config({
... 'nodes': {
... 'echo': {
... 'runner': 'generic',
... 'bin': 'echo'
... }
... }
... })
>>>
>>> # Need to create fake stdin and stdout for unittests
>>> with gada.test_utils.PipeStream() as stdin:
... with gada.test_utils.PipeStream() as stdout:
... # Run node with CLI arguments
... gada.main(
... ['gada', 'testnodes.echo', 'hello'],
... stdin=stdin.reader,
... stdout=stdout.writer,
... stderr=stdout.writer
... )
...
... # Close writer end so we can read form it
... stdout.writer.close()
...
... # Read node output
... stdout.reader.read().decode().strip()
'hello'
>>>
The three parameters ``stdin``, ``stdout`` or ``stderr`` are provided as a convenience
for writing unit tests when you can't use ``sys.stdin`` or ``sys.stdout``, or simply
when you want to be able to read from the output.
:param argv: command line arguments
:param stdin: input stream
:param stdout: output stream
:param stderr: error stream
"""
argv = sys.argv if argv is None else argv
parser = argparse.ArgumentParser(prog="Service", description="Help")
parser.add_argument("node", type=str, help="command name")
parser.add_argument(
"argv", type=str, nargs=argparse.REMAINDER, help="additional CLI arguments"
)
parser.add_argument("-v", "--verbose", action="store_true", help="Verbosity level")
args = parser.parse_args(args=argv[1:])
node_argv, gada_argv = split_unknown_args(args.argv)
run(node=args.node, argv=node_argv, stdin=stdin, stdout=stdout, stderr=stderr)
if __name__ == "__main__":
main(sys.argv)
| 30.034091 | 90 | 0.541241 | 582 | 5,286 | 4.821306 | 0.252577 | 0.022808 | 0.027798 | 0.024234 | 0.560941 | 0.54526 | 0.54526 | 0.518175 | 0.518175 | 0.518175 | 0 | 0.001399 | 0.323685 | 5,286 | 175 | 91 | 30.205714 | 0.783497 | 0.588347 | 0 | 0.214286 | 0 | 0 | 0.075607 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053571 | false | 0 | 0.125 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c542dbe200f504a92d4395bd7676e95bb9c6b42 | 2,912 | py | Python | Example_Cases/Gypsum_Model/Scripts/external_gyp1d.py | koverholt/bayes-fire | 4333cdf7b93bf77d8e021f0c4a1931a77056534d | [
"BSD-3-Clause"
] | 6 | 2016-06-19T12:44:22.000Z | 2021-12-21T07:01:38.000Z | Example_Cases/Gypsum_Model/Scripts/external_gyp1d.py | koverholt/bayes-fire | 4333cdf7b93bf77d8e021f0c4a1931a77056534d | [
"BSD-3-Clause"
] | null | null | null | Example_Cases/Gypsum_Model/Scripts/external_gyp1d.py | koverholt/bayes-fire | 4333cdf7b93bf77d8e021f0c4a1931a77056534d | [
"BSD-3-Clause"
] | 2 | 2017-10-15T02:37:25.000Z | 2022-03-04T16:22:44.000Z | #!/usr/bin/env python
"""Module for gyp1d functions"""
import numpy as np
import platform
import subprocess
import os
import data_expt as de
# Detect operating system
op_sys = platform.system()
def gen_input( k1, k2, k3, k4, rho_0, c_p1, c_p2, c_p3, eps, Y1_0,
A1, A2, E1, E2, dh1, dh2 ):
"""Generate gyp1d input file from template.
Keyword arguments:
matl = [ k1, k2, k3, k4, rho_0, c_p1, c_p2, c_p3, eps, Y1_0,
A1, A2, E1, E2, dh1, dh2 ]
"""
template = """
&matl
k_temps = 273.15, 448.15, 1088.15, 1473.15
k_vals = %(k1)s, %(k2)s, %(k3)s, %(k4)s
rho_0 = %(rho_0)s
c_p = %(c_p1)s, %(c_p2)s, %(c_p3)s
eps = %(eps)s
Y1_0 = %(Y1_0)s
A = %(A1)s, %(A2)s
E = %(E1)s, %(E2)s
dh = %(dh1)s, %(dh2)s /
&scen
L = 0.0159
L_a = 0.092
t_end = 3601
H = 3.048 /
&numr
N = 30
N_t = 160000
N_sol = 100 /
"""
# ==================================================
# = Generate gyp1d input file =
# ==================================================
outcase = template % {'k1':str(k1),
'k2':str(k2),
'k3':str(k3),
'k4':str(k4),
'rho_0':str(rho_0),
'c_p1':str(c_p1),
'c_p2':str(c_p2),
'c_p3':str(c_p3),
'eps':str(eps),
'Y1_0':str(Y1_0),
'A1':str(A1),
'A2':str(A2),
'E1':str(E1),
'E2':str(E2),
'dh1':str(dh1),
'dh2':str(dh2)}
# =====================
# = Write gyp1d files =
# =====================
casename = 'case'
filename = '../' + casename + '.inp'
# Opens a new file, writes the gyp1d input file, and closes the file
f = open(filename, 'w')
f.write(outcase)
f.close()
return casename
def run_gyp1d(casename):
"""Run gyp1d on case file."""
os.chdir('../')
# Run appropriate executable depending on operating system
if op_sys == 'Linux':
p = subprocess.Popen(['./gyp1d_intel_linux_64', casename + '.inp'])
p.wait()
if op_sys == 'Darwin':
p = subprocess.Popen(['./gyp1d_gfortran_osx_64', casename + '.inp'])
p.wait()
os.chdir('./Scripts')
def read_gyp1d(casename):
"""Read in gyp1d output."""
temp_file = '../temp_nom.out'
temps = np.genfromtxt(temp_file)
#mlrs = np.genfromtxt(mlr_file, delimiter=',', skip_header=2)
time = temps[:,0]
T_1b = temps[:,1]
T_2b = temps[:,2]
# interpolate to experimental times
time_expt = de.time
T_1b_interp = np.interp(time_expt, time, T_1b)
os.remove('../temp_nom.out')
return T_1b_interp
| 26 | 76 | 0.457074 | 380 | 2,912 | 3.339474 | 0.363158 | 0.018913 | 0.014184 | 0.016548 | 0.092987 | 0.064618 | 0.064618 | 0.064618 | 0.064618 | 0.064618 | 0 | 0.082458 | 0.346154 | 2,912 | 111 | 77 | 26.234234 | 0.584034 | 0.246566 | 0 | 0.028571 | 0 | 0.014286 | 0.261538 | 0.020979 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042857 | false | 0 | 0.071429 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c54677ea98da9a5515de3a923f5ef0038257527 | 6,356 | py | Python | datacollector/collector/maincollector.py | samharju/5GDrones-data-collector | 331d8c433f5b46eaba62a55c39bbe12d365b2474 | [
"Apache-2.0"
] | 2 | 2021-04-26T07:08:26.000Z | 2021-05-01T16:01:26.000Z | datacollector/collector/maincollector.py | samharju/5GDrones-data-collector | 331d8c433f5b46eaba62a55c39bbe12d365b2474 | [
"Apache-2.0"
] | null | null | null | datacollector/collector/maincollector.py | samharju/5GDrones-data-collector | 331d8c433f5b46eaba62a55c39bbe12d365b2474 | [
"Apache-2.0"
] | 1 | 2021-05-01T15:04:46.000Z | 2021-05-01T15:04:46.000Z | # © 2021 Nokia
#
# Licensed under the Apache license, version 2.0
# SPDX-License-Identifier: Apache-2.0
"""Class for handling collector threads."""
import logging
import time
from datetime import datetime
from threading import Event, Thread
from datacollector.collector.connection_config_parser import ConnectionConfig
from datacollector.collector.memcpunodecollector import MemCpuNodeCollector
from datacollector.collector.sshconnection import SshConnection
class UnhandledException(Exception):
"""Raised when unable to collect properly."""
class NoNodesException(Exception):
"""Raised when no nodes are discovered."""
class MainCollector(Thread):
"""Maincollector class."""
def __init__(self, agent):
"""Initialize main collector."""
super().__init__(daemon=True)
self.agent = agent
self.stop = False
self.name = "{addition}-{default}".format(addition=type(self).__name__, default=self.name)
self._config = None
self._reconnect_lock = False
self._stop_event = Event()
self._collect_interval = agent.collect_interval
self._start_time = agent.collect_start_time
self._collect_start_time = datetime.min
self._node_collectors = []
@property
def collect_interval(self):
"""Public access for collect_interval."""
return self._collect_interval
@property
def start_time(self):
"""Public access for start_time."""
return self._start_time
@property
def lock_status(self):
"""Public access for lock_status"""
return self._reconnect_lock
@property
def config(self):
"""Public access for config."""
return self._config
def lock_connections(self):
"""Set reconnect_lock to True."""
self._reconnect_lock = True
def unlock_connections(self):
"""Set reconnect_lock to False."""
self._reconnect_lock = False
def node_finished(self):
"""Interface for NodeCollector threads to report when they have finished.
Last thread that finishes informs adapter that data has been collected.
"""
if self._all_nodes_finished():
elapsed_time = datetime.utcnow() - self._collect_start_time
logging.info("Time elapsed during collection: %ss", elapsed_time)
def run(self):
"""Check when we can stop. See _run method."""
logging.info("%s started.", self.name)
while not self.stop:
self._run()
logging.info("%s finished.", self.name)
def signal_stop(self):
"""Call after StopCollector message."""
logging.debug("%s Received stop signal", self.name)
self.stop = True
self._stop_event.set()
def _read_config(self):
self._config.read_config()
def _all_nodes_finished(self):
return all(not collector.collecting for
collector in self._node_collectors)
def _run(self):
"""Start collector threads and triggers first collection immediately.
Then triggers collection between every collect interval.
"""
try:
self._main_logic()
except Exception as e:
logging.error(self.name + " unhandled exception" + str(e))
self.stop = True
def _main_logic(self):
"""Run main logic."""
try:
logging.info("Running MainCollector _main_logic...")
self._create_node_collectors()
self._start_node_collectors()
self.agent._reconnect_start_time = None
self.agent._reconnect = False
self._collect() # Trigger first collection immediately.
while not self._stop_event.wait(timeout=self._collect_interval) and self._node_collectors != []:
self._collect()
self._collect() # Collect last time after stop event has been set.
time.sleep(self._collect_interval)
self._stop_node_collectors()
except Exception as e:
logging.warning("Collector ran into an issue. Shutting down...")
self.stop = True
def _create_node_collectors(self):
logging.info("Creating NodeCollectors...")
"""Create NodeCollectors with implemented ConnectionConfig- and IConnection-objects."""
self._config = ConnectionConfig('device')
self._node_collectors.append(MemCpuNodeCollector(self, SshConnection(self._config.hostname, self._config.port,
self._config.username,
self._config.password)))
def _start_node_collectors(self):
logging.info("Starting NodeCollectors...")
if not self._node_collectors:
raise NoNodesException
for collector in self._node_collectors:
collector.start()
def _collect(self):
"""Distribute collect command to all NodeCollector threads.
If last collection is still ongoing, skips the new
incoming request.
There is some delay between starting collectors,
so that ssh connections get time to authenticate themselves.
"""
if not self._check_alive_collectors():
self.signal_stop()
if not self._all_nodes_finished():
logging.warning(
"New collect ordered before last one was finished, skipping.")
return
logging.info("Triggering new collection for all nodes.")
self._collect_start_time = datetime.utcnow()
for collector in self._node_collectors:
collector.collect()
def _stop_node_collectors(self):
"""Set stop flag for each NodeCollector thread and then waits for them to join."""
logging.info("%s stopping node collector threads.", self.name)
for collector in self._node_collectors:
collector.connection.close_session()
collector.stop()
for collector in self._node_collectors:
collector.join()
logging.info("%s all node collectors stopped.", self.name)
def _check_alive_collectors(self):
for collector in self._node_collectors:
if not collector.isAlive():
return False
return True
| 35.50838 | 118 | 0.637823 | 695 | 6,356 | 5.620144 | 0.284892 | 0.060932 | 0.046083 | 0.02765 | 0.117256 | 0.075269 | 0.041987 | 0 | 0 | 0 | 0 | 0.001736 | 0.274858 | 6,356 | 178 | 119 | 35.707865 | 0.84552 | 0.188483 | 0 | 0.184211 | 0 | 0 | 0.086735 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0.008772 | 0.061404 | 0.008772 | 0.324561 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c55b9fde76dce6c7aa8c956dc28575c3c2afb98 | 5,959 | py | Python | utils/memory.py | alanmackey/DRL-for-BG-Control | a7c5015b828c250205b9ecb1bf4bdb928e0d975f | [
"MIT"
] | null | null | null | utils/memory.py | alanmackey/DRL-for-BG-Control | a7c5015b828c250205b9ecb1bf4bdb928e0d975f | [
"MIT"
] | null | null | null | utils/memory.py | alanmackey/DRL-for-BG-Control | a7c5015b828c250205b9ecb1bf4bdb928e0d975f | [
"MIT"
] | null | null | null | import numpy as np
import torch
class ReplayBuffer(object):
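    """Replay buffer for off-policy RL agents; when `recurrent=True` the LSTM
    hidden states (h, c) are stored alongside each transition."""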
def __init__(
self, state_dim, action_dim, hidden_size,
max_size=int(5e3), recurrent=False
):
self.max_size = int(max_size)
self.ptr = 0
self.size = 0
self.recurrent = recurrent
self.state = np.zeros((self.max_size, state_dim))
self.action = np.zeros((self.max_size, action_dim))
self.next_state = np.zeros((self.max_size, state_dim))
self.reward = np.zeros((self.max_size, 1))
self.not_done = np.zeros((self.max_size, 1))
if self.recurrent:
self.h = np.zeros((self.max_size, hidden_size))
self.nh = np.zeros((self.max_size, hidden_size))
self.c = np.zeros((self.max_size, hidden_size))
self.nc = np.zeros((self.max_size, hidden_size))
self.device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu")
def add(
self, state, action, next_state, reward, done, hiddens, next_hiddens
):
self.state[self.ptr] = state
self.action[self.ptr] = action
self.next_state[self.ptr] = next_state
self.reward[self.ptr] = reward
self.not_done[self.ptr] = 1. - done
if self.recurrent:
h, c = hiddens
nh, nc = next_hiddens
# Detach the hidden state so that BPTT only goes through 1 timestep
self.h[self.ptr] = h.detach().cpu()
self.c[self.ptr] = c.detach().cpu()
self.nh[self.ptr] = nh.detach().cpu()
self.nc[self.ptr] = nc.detach().cpu()
self.ptr = (self.ptr + 1) % self.max_size
self.size = min(self.size + 1, self.max_size)
def sample(self, batch_size=100):
# TODO: Clean this up. There's probably a cleaner way to seperate
# on-policy and off-policy sampling. Clean up extra-dimension indexing
# also
ind = np.random.randint(0, self.size, size=int(batch_size))
# TODO: Clean up indexing. RNNs needs batch shape of
# Batch size * Timesteps * Input size
if not self.recurrent:
return self._ff_sampling(ind)
h = torch.tensor(self.h[ind][None, ...],
requires_grad=True,
dtype=torch.float).to(self.device)
c = torch.tensor(self.c[ind][None, ...],
requires_grad=True,
dtype=torch.float).to(self.device)
nh = torch.tensor(self.nh[ind][None, ...],
requires_grad=True,
dtype=torch.float).to(self.device)
nc = torch.tensor(self.nc[ind][None, ...],
requires_grad=True,
dtype=torch.float).to(self.device)
# TODO: Return hidden states or not, or only return the
# first hidden state (although it's already been detached,
# so returning nothing might be better)
hidden = (h, c)
next_hidden = (nh, nc)
s = torch.FloatTensor(
self.state[ind][:, None, :]).to(self.device)
a = torch.FloatTensor(
self.action[ind][:, None, :]).to(self.device)
ns = torch.FloatTensor(
self.next_state[ind][:, None, :]).to(self.device)
r = torch.FloatTensor(
self.reward[ind][:, None, :]).to(self.device)
d = torch.FloatTensor(
self.not_done[ind][:, None, :]).to(self.device)
return s, a, ns, r, d, hidden, next_hidden
def on_policy_sample(self):
ind = np.arange(0, self.size)
# TODO: Clean up indexing. RNNs needs batch shape of
# Batch size * Timesteps * Input size
if not self.recurrent:
return self._ff_sampling(ind)
h = torch.tensor(self.h[ind][None, ...],
requires_grad=True,
dtype=torch.float).to(self.device)
c = torch.tensor(self.c[ind][None, ...],
requires_grad=True,
dtype=torch.float).to(self.device)
nh = torch.tensor(self.nh[ind][None, ...],
requires_grad=True,
dtype=torch.float).to(self.device)
nc = torch.tensor(self.nc[ind][None, ...],
requires_grad=True,
dtype=torch.float).to(self.device)
# TODO: Return hidden states or not, or only return the
# first hidden state (although it's already been detached,
# so returning nothing might be better)
hidden = (h, c)
next_hidden = (nh, nc)
s = torch.FloatTensor(
self.state[ind][:, None, :]).to(self.device)
a = torch.FloatTensor(
self.action[ind][:, None, :]).to(self.device)
ns = torch.FloatTensor(
self.next_state[ind][:, None, :]).to(self.device)
# reward and dones don't need to be "batched"
r = torch.FloatTensor(
self.reward[ind]).to(self.device)
d = torch.FloatTensor(
self.not_done[ind]).to(self.device)
return s, a, ns, r, d, hidden, next_hidden
def _ff_sampling(self, ind):
        # Feed-forward nets only need Batch size * Input size, on-policy or not
hidden = None
next_hidden = None
s = torch.FloatTensor(self.state[ind]).to(self.device)
a = torch.FloatTensor(self.action[ind]).to(self.device)
ns = \
torch.FloatTensor(self.next_state[ind]).to(self.device)
r = torch.FloatTensor(self.reward[ind]).to(self.device)
d = torch.FloatTensor(self.not_done[ind]).to(self.device)
return s, a, ns, r, d, hidden, next_hidden
def clear_memory(self):
self.ptr = 0
self.size = 0
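# Minimal usage sketch (illustrative only; the dimensions below are
# assumptions, not values taken from the original project).
if __name__ == "__main__":
    buf = ReplayBuffer(state_dim=4, action_dim=2, hidden_size=8, max_size=100)
    for _ in range(10):
        buf.add(np.ones(4), np.zeros(2), np.ones(4), 1.0, False, None, None)
    s, a, ns, r, d, h, nh = buf.sample(batch_size=5)
    print(s.shape, a.shape, r.shape)  # -> (5, 4), (5, 2), (5, 1)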
| 38.445161 | 80 | 0.538345 | 749 | 5,959 | 4.190921 | 0.158879 | 0.076457 | 0.087926 | 0.04014 | 0.670596 | 0.662313 | 0.630137 | 0.630137 | 0.58936 | 0.540299 | 0 | 0.004302 | 0.336801 | 5,959 | 154 | 81 | 38.694805 | 0.78998 | 0.129888 | 0 | 0.531532 | 0 | 0 | 0.001396 | 0 | 0 | 0 | 0 | 0.006494 | 0 | 1 | 0.054054 | false | 0 | 0.018018 | 0 | 0.126126 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c566a2dcb7cec78c109c0686b33ccf0da89a5b9 | 1,043 | py | Python | TilingDomains/main.py | ClathomasPrime/linear-prefs | e700589a82667ca0f307459816e4f5b211fbd7d2 | [
"BSD-3-Clause"
] | null | null | null | TilingDomains/main.py | ClathomasPrime/linear-prefs | e700589a82667ca0f307459816e4f5b211fbd7d2 | [
"BSD-3-Clause"
] | null | null | null | TilingDomains/main.py | ClathomasPrime/linear-prefs | e700589a82667ca0f307459816e4f5b211fbd7d2 | [
"BSD-3-Clause"
] | null | null | null | from GraphDraw import *
from Graph import *
from Implicit import *
from impExamples import *
import os
import errno
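# os.O_EXCL makes os.open fail with EEXIST when the file already exists, so an
# existing output file is never silently overwritten.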
flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY
# Scratch script for trying things out.
# Currently it just displays the alternating domain on 9 outcomes.
n = 9
impG = impAlternatingDomain(n)
G = impG.explicit()
d = G.d
G.computePoset()
G.completeInversionOrder()
size = G.sizeOfDomain()
snakes = G.getTrackSnakes()
try:
file_handle = os.open("test-output/best/info"+str(d)+".txt", flags)
except OSError as e:
if e.errno == errno.EEXIST: # Failed as the file already exists.
pass
else: # Something unexpected went wrong so reraise the exception.
raise
else: # No exception, so the file must have been created successfully.
with os.fdopen(file_handle, 'w') as F:
        F.write(str(G.getVRSystem()) + "\n")
        F.write("Size of domain: " + str(size) + "\n")
        F.write("\n")
        F.write(str(snakes) + "\n")
drawGraph(G, "best/graphs/bestgraph"+str(d))
drawPoset(G, "best/posets/bestposet"+str(d))
drawGraphAndPoset(G, "best/both/best"+str(d))
| 21.285714 | 71 | 0.700863 | 156 | 1,043 | 4.653846 | 0.557692 | 0.022039 | 0.024793 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002315 | 0.17162 | 1,043 | 48 | 72 | 21.729167 | 0.837963 | 0.223394 | 0 | 0.064516 | 0 | 0 | 0.121891 | 0.078358 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.032258 | 0.193548 | 0 | 0.193548 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c583e62e222fa52ab726c248c5d0731a9185e71 | 3,647 | py | Python | reinforcement-learning/deep-line-wars/game_1.py | cair/experiments | d19a9eda0e9e743e401f46eec358f0c815c64a7c | [
"MIT"
] | null | null | null | reinforcement-learning/deep-line-wars/game_1.py | cair/experiments | d19a9eda0e9e743e401f46eec358f0c815c64a7c | [
"MIT"
] | null | null | null | reinforcement-learning/deep-line-wars/game_1.py | cair/experiments | d19a9eda0e9e743e401f46eec358f0c815c64a7c | [
"MIT"
] | null | null | null | import random
import numpy as np
from PIL import Image
from DeepLineWars.Game import Game
import uuid
# https://github.com/cair/DeepRTS
# https://github.com/reinforceio/tensorforce
class GameInstance:
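    """Runs one self-playing DeepLineWars game and pushes each finished
    episode's state sequence onto a shared multiprocessing queue."""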
@staticmethod
def start(data_queue):
g = GameInstance(data_queue)
g.loop()
return True
def get_stacked_state(self, swapaxes=False):
if len(self.states) > self.stack:
if swapaxes:
return np.swapaxes(np.array(self.states[-1 * self.stack:]), 0, 2)
else:
return np.array(self.states[-1 * self.stack:])
return None
def __init__(self, data_queue):
self.id = uuid.uuid4()
print("Game %s - Start" % self.id)
self.data_queue = data_queue
self.game = Game({
"game": {
"width": 11,
"height": 11,
"tile_width": 32,
"tile_height": 32
},
"mechanics": {
"complexity": {
"build_anywhere": False
},
"start_health": 50,
"start_gold": 100,
"start_lumber": 0,
"start_income": 20,
"income_frequency": 10,
"ticks_per_second": 20,
"fps": 10,
"ups": 10008000,
"income_ratio": 0.20,
"kill_gold_ratio": 0.10
},
"gui": {
"enabled": True,
"draw_friendly": True,
"minimal": True
}
})
self.states = list()
self.experience_replay = list()
self.s0 = None
self.player_1 = self.game.players[0]
self.player_2 = self.game.players[1]
self.episode = 1
self.representation = "image_grayscaled"
self.running = False
self.stack = 4
self.num_ticks = 10
self.tick_limit = 30000
def loop(self):
self.running = True
t = 0
while self.running:
# Do action
self.player_1.do_action(random.randint(0, 12))
self.player_2.do_action(random.randint(0, 12))
# Process game
for i in range(self.num_ticks):
self.game.update()
t += 1
# Update image state
self.game.render()
            # Retrieve the state and append it to the list of states
s1 = self.game.get_state(representation=self.representation)
self.states.append(s1)
self.s0 = s1
# Terminal State, Reset Game
if self.game.is_terminal() or t >= self.tick_limit:
self.game.reset()
print("Game %s - %s#%s" % (self.id, self.episode, t))
self.episode += 1
if t < self.tick_limit:
self.data_queue.put(self.states)
self.states.clear()
t = 0
if __name__ == "__main__":
    import multiprocessing
    import threading
    n_proc = 10
    processes = []
    data_queue = multiprocessing.Queue()
    # Drain the shared queue on a background thread so the worker
    # processes never block on data_queue.put().
    def on_data():
        while True:
            data = data_queue.get(block=True)
            # print(data)
    t = threading.Thread(target=on_data, args=(), daemon=True)
    t.start()
    for n in range(n_proc):
        p = multiprocessing.Process(target=GameInstance.start, args=(data_queue,))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()
| 26.427536 | 87 | 0.502879 | 398 | 3,647 | 4.462312 | 0.344221 | 0.050676 | 0.016892 | 0.024775 | 0.108108 | 0.087838 | 0.030405 | 0 | 0 | 0 | 0 | 0.03278 | 0.389361 | 3,647 | 137 | 88 | 26.620438 | 0.764706 | 0.05292 | 0 | 0.058824 | 0 | 0 | 0.076722 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04902 | false | 0 | 0.068627 | 0 | 0.166667 | 0.019608 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c59f1356149160db7e39588bbaf937c38c8c634 | 790 | py | Python | src/back-django/scraper_api/tests/views/test_page_list_view.py | Arkko002/PyWalker | 9e8a02b74a1217cfed385898654815a218297cce | [
"MIT"
] | null | null | null | src/back-django/scraper_api/tests/views/test_page_list_view.py | Arkko002/PyWalker | 9e8a02b74a1217cfed385898654815a218297cce | [
"MIT"
] | null | null | null | src/back-django/scraper_api/tests/views/test_page_list_view.py | Arkko002/PyWalker | 9e8a02b74a1217cfed385898654815a218297cce | [
"MIT"
] | null | null | null | import json
import pytest
from django.urls import reverse
from scraper_api.models import ScrapedPage
class TestPageListView:
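    """Tests for the page list endpoint: GET listing and POST creation."""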
@pytest.mark.django_db
def test_get(self, client, fill_db, url, html):
url = reverse("pages/")
response = client.get(url)
        pages = json.loads(response.content)
        assert len(pages) == fill_db
@pytest.mark.django_db
def test_post(self, client):
url = reverse("pages/")
data = {"url": "url1"}
page1 = ScrapedPage(url="url1")
page2 = ScrapedPage(url="url2")
page1.save()
page2.save()
response = client.post(url, json.dumps(data))
        res_pages = json.loads(response.content)
        assert (res_pages[0]["url"] == "url1"
                and len(res_pages) == 1)
| 23.939394 | 53 | 0.605063 | 98 | 790 | 4.77551 | 0.418367 | 0.044872 | 0.068376 | 0.076923 | 0.24359 | 0.24359 | 0 | 0 | 0 | 0 | 0 | 0.017331 | 0.26962 | 790 | 32 | 54 | 24.6875 | 0.793761 | 0 | 0 | 0.173913 | 0 | 0 | 0.039241 | 0 | 0 | 0 | 0 | 0 | 0.086957 | 1 | 0.086957 | false | 0 | 0.173913 | 0 | 0.304348 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c5bc0aa11bc0c3b60a29a13ecd16e57a11921fb | 594 | py | Python | skfasttext/sk_ft_example.py | tatacoa/hatespeech | fab054d86848c2242443ae3ddf532e94f404f529 | [
"MIT"
] | 2 | 2020-03-15T13:46:46.000Z | 2020-05-26T06:56:52.000Z | skfasttext/sk_ft_example.py | tatacoa/hatespeech | fab054d86848c2242443ae3ddf532e94f404f529 | [
"MIT"
] | 1 | 2018-05-22T20:22:39.000Z | 2018-05-22T20:22:39.000Z | skfasttext/sk_ft_example.py | tatacoa/hatespeech | fab054d86848c2242443ae3ddf532e94f404f529 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 2 17:34:43 2018
@author: arndt
"""
# http://scikit-learn.org/stable/developers/contributing.html#rolling-your-own-estimator
# https://github.com/vishnumani2009/sklearn-fasttext
from os import chdir
chdir("/home/arndt/git-reps/hatespeech/")
from skfasttext import SimpleFastTextClassifier
# files were previously created in the HateSpeech.py script
train_file="data/train_data.txt"
test_file="data/test_data.txt"
clf=SimpleFastTextClassifier()
model = clf.fit(train_file)
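# k_best=2 asks the classifier for its two top-ranked labels per test example.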
predictions = clf.predict(test_file, k_best=2) | 25.826087 | 88 | 0.76936 | 86 | 594 | 5.232558 | 0.755814 | 0.04 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033457 | 0.094276 | 594 | 23 | 89 | 25.826087 | 0.802974 | 0.488215 | 0 | 0 | 0 | 0 | 0.236301 | 0.109589 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c5c4a72fd65572e3fc2aa97977ae16747547c5e | 15,366 | py | Python | src/niweb/apps/noclook/tests/schema/admin/test_schema.py | SUNET/ni | f652e230524346bf0801cdf8bbb6ee63f4985cc2 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | src/niweb/apps/noclook/tests/schema/admin/test_schema.py | SUNET/ni | f652e230524346bf0801cdf8bbb6ee63f4985cc2 | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2019-07-24T12:41:11.000Z | 2020-03-31T10:10:04.000Z | src/niweb/apps/noclook/tests/schema/admin/test_schema.py | SUNET/ni | f652e230524346bf0801cdf8bbb6ee63f4985cc2 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2019-02-25T14:58:20.000Z | 2019-02-25T14:58:20.000Z | # -*- coding: utf-8 -*-
__author__ = 'ffuentes'
from apps.noclook.tests.schema.base import Neo4jGraphQLGenericTest
from apps.noclook.models import NodeHandleContext
from django.contrib.auth.models import User
from niweb.schema import schema
from pprint import pformat
from . import BasicAdminTest
import apps.noclook.vakt.utils as sriutils
import graphene
class GenericUserPermissionTest(BasicAdminTest):
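    """GraphQL permission tests shared by the user/admin/superadmin cases;
    subclasses set ``test_type`` and the matching group permissions."""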
def test_contexts(self):
if not hasattr(self, 'test_type'):
return
# add adittional contexts
sriutils.set_nodehandle_context(self.network_ctxt, self.organization)
sriutils.set_nodehandle_context(self.contracts_ctxt, self.host)
sriutils.set_nodehandle_context(self.community_ctxt, self.address)
# query
context_t = "contexts: [{context_input}]"
query_t = """
{{
ninodes(filter: {{
type_in: [{types_str}]
with_context: {{
{context}
exclude: {exclude}
}}
}}){{
edges{{
node{{
__typename
name
contexts
}}
}}
}}
}}
"""
types_str = ", ".join([
'"{}"'.format(x) for x in \
["Organization", "Host", "Address", "Service", "Cable"]
])
context = context_t.format(context_input=', '.join(
[ '"{}"'.format(v.name) for k, v in sriutils.get_all_contexts().items()]
))
query = query_t.format(types_str=types_str, context=context,
exclude='false')
result = schema.execute(query, context=self.context)
assert not result.errors, pformat(result.errors, indent=1)
if self.test_type == "admin" or self.test_type == "superadmin":
            # check that the contexts are the expected ones
expected = [{'node': {'__typename': 'Organization',
'contexts': ['Community', 'Network'],
'name': 'organization1'}},
{'node': {'__typename': 'Organization',
'contexts': ['Community', 'Network'],
'name': 'organization1'}},
{'node': {'__typename': 'Host',
'contexts': ['Network', 'Contracts'],
'name': 'host1'}},
{'node': {'__typename': 'Host',
'contexts': ['Network', 'Contracts'],
'name': 'host1'}},
{'node': {'__typename': 'Address',
'contexts': ['Contracts', 'Community'],
'name': 'address1'}},
{'node': {'__typename': 'Address',
'contexts': ['Contracts', 'Community'],
'name': 'address1'}}]
self.assertEquals(result.data['ninodes']['edges'], expected)
else:
            # check that the contexts attribute comes back empty
for node in result.data['ninodes']['edges']:
self.assertEquals(node['node']['contexts'], None)
def test_node_list(self):
if not hasattr(self, 'test_type'):
return
context_t = "contexts: [{context_input}]"
query_t = """
{{
ninodes(filter: {{
type_in: [{types_str}]
with_context: {{
{context}
exclude: {exclude}
}}
}}
orderBy: name_ASC
){{
edges{{
node{{
__typename
id
name
}}
}}
}}
}}
"""
types_str = ", ".join([
'"{}"'.format(x) for x in \
["Organization", "Host", "Address", "Service", "Cable"]
])
organization_id = graphene.relay.Node.to_global_id(
str(self.organization.node_type), str(self.organization.handle_id))
host_id = graphene.relay.Node.to_global_id(
str(self.host.node_type), str(self.host.handle_id))
address_id = graphene.relay.Node.to_global_id(
str(self.address.node_type), str(self.address.handle_id))
service_id = graphene.relay.Node.to_global_id(
str(self.service.node_type), str(self.service.handle_id))
cable_id = graphene.relay.Node.to_global_id(
str(self.cable.node_type), str(self.cable.handle_id))
# test empty context (test empty parameter and invalid contexts):
for context_input in [None, '"Invalid Ctx", "Module"', ""]:
context_str = ""
            if context_input is not None:
context_str = context_t.format(context_input=context_input)
# test exclude true: only contexted nodes
exclude = str(True).lower()
query = query_t.format(
types_str=types_str, context=context_str, exclude=exclude,
)
result = schema.execute(query, context=self.context)
assert not result.errors, pformat(result.errors, indent=1)
expected = {'ninodes':
{'edges': [
{'node': {'__typename': 'Organization',
'id': organization_id,
'name': 'organization1'}},
{'node': {'__typename': 'Host',
'id': host_id,
'name': 'host1'}},
{'node': {'__typename': 'Address',
'id': address_id,
'name': 'address1'}}
]
}
}
self.assert_correct(result, expected)
# test exclude false: uncontexted nodes (only for superadmin)
exclude = str(False).lower()
query = query_t.format(
types_str=types_str, context=context_str, exclude=exclude,
)
result = schema.execute(query, context=self.context)
assert not result.errors, pformat(result.errors, indent=1)
expected = {'ninodes': {'edges': []}}
if self.test_type == "superadmin":
expected = {'ninodes': {'edges': [
{'node': {'__typename': 'Service',
'id': service_id,
'name': 'service1'}},
{'node': {'__typename': 'Cable',
'id': cable_id,
'name': 'cable1'}}
]}}
self.assert_correct(result, expected)
# test filled context:
context_input = ", ".join([
'"{}"'.format(x) for x in ["Community", "Network"]
])
context_str = context_t.format(context_input=context_input)
# test exclude true: show nodes out of those contexts
exclude = str(True).lower()
query = query_t.format(
types_str=types_str, context=context_str, exclude=exclude,
)
result = schema.execute(query, context=self.context)
assert not result.errors, pformat(result.errors, indent=1)
expected = {'ninodes':
{'edges': [
{'node': {'__typename': 'Address',
'id': address_id,
'name': 'address1'}}
]
}
}
self.assert_correct(result, expected)
# test exclude false: show nodes in of those contexts
exclude = str(False).lower()
query = query_t.format(
types_str=types_str, context=context_str, exclude=exclude,
)
result = schema.execute(query, context=self.context)
assert not result.errors, pformat(result.errors, indent=1)
expected = {'ninodes':
{'edges': [
{'node': {'__typename': 'Organization',
'id': organization_id,
'name': 'organization1'}},
{'node': {'__typename': 'Host',
'id': host_id,
'name': 'host1'}},
]
}
}
self.assert_correct(result, expected)
def test_user_list(self):
if not hasattr(self, 'test_type'):
return
query_t = """
{{
users( filter:{{ username_contains: "{name_contains}" }} ){{
edges{{
node{{
id
username
}}
}}
}}
}}
"""
# get both users
name_contains = "user"
query = query_t.format(name_contains=name_contains)
result = schema.execute(query, context=self.context)
assert not result.errors, pformat(result.errors, indent=1)
expected = {
'users': {
'edges': [
{'node': {
'id': str(self.user.id),
'username': 'test user'
}},
{'node': {
'id': str(self.another_user.id),
'username': 'another_user'
}},
{'node': {
'id': str(self.other_user.id),
'username': 'other_user'
}},
]
}
}
self.assert_correct(result, expected)
# get only one
name_contains = "test"
query = query_t.format(name_contains=name_contains)
result = schema.execute(query, context=self.context)
assert not result.errors, pformat(result.errors, indent=1)
expected = {
'users': {
'edges': [
{'node': {
'id': str(self.user.id),
'username': 'test user'
}},
]
}
}
self.assert_correct(result, expected)
def test_user_permissions(self):
# create a simple group with another user
test_user = self.user
self.user = self.another_user
self.group1 = self.create_node('group1', 'group', meta='Logical')
NodeHandleContext(
nodehandle=self.group1, context=self.community_ctxt).save()
self.user = test_user
query = """
{
all_groups{
name
modifier{
user_permissions{
community{
read
list
write
}
network{
read
list
write
}
contracts{
read
list
write
}
}
}
}
}
"""
result = schema.execute(query, context=self.context)
assert not result.errors, pformat(result.errors, indent=1)
expected = None
if hasattr(self, 'test_type'):
if self.test_type == "user":
expected = {
'all_groups': [{
'name': 'group1',
'modifier': {
'user_permissions': None,
}
}]
}
elif self.test_type == "admin" or self.test_type == "superadmin":
# check that an admin or superadmin can read permissions of
# another user
expected = {
'all_groups': [{
'name': 'group1',
'modifier': {
'user_permissions': {
'community': {
'read': False,
'list': False,
'write': False,
},
'network': {
'read': False,
'list': False,
'write': False,
},
'contracts': {
'read': False,
'list': False,
'write': False,
},
}
}
}]
}
self.assert_correct(result, expected)
class PlainUserPermissionsTest(GenericUserPermissionTest):
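    """Runs the generic tests as a plain user (read/list/write, no admin)."""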
def setUp(self, group_dict=None):
group_dict = {
'community': {
'admin': False,
'read': True,
'list': True,
'write': True,
},
'network': {
'admin': False,
'read': True,
'list': True,
'write': True,
},
'contracts': {
'admin': False,
'read': True,
'list': True,
'write': True,
},
}
self.test_type = "user"
super().setUp(group_dict=group_dict)
class AdminUserPermissionsTest(GenericUserPermissionTest):
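    """Runs the generic tests as a network admin."""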
def setUp(self, group_dict=None):
group_dict = {
'community': {
'admin': False,
'read': True,
'list': True,
'write': True,
},
'network': {
'admin': True,
'read': True,
'list': True,
'write': True,
},
'contracts': {
'admin': False,
'read': True,
'list': True,
'write': True,
},
}
self.test_type = "admin"
super().setUp(group_dict=group_dict)
class SuperAdminUserPermissionsTest(GenericUserPermissionTest):
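    """Runs the generic tests as a superadmin (admin on every module)."""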
def setUp(self, group_dict=None):
group_dict = {
'community': {
'admin': True,
'read': True,
'list': True,
'write': True,
},
'network': {
'admin': True,
'read': True,
'list': True,
'write': True,
},
'contracts': {
'admin': True,
'read': True,
'list': True,
'write': True,
},
}
self.test_type = "superadmin"
super().setUp(group_dict=group_dict)
| 32.486258 | 84 | 0.416829 | 1,166 | 15,366 | 5.327616 | 0.135506 | 0.030908 | 0.025113 | 0.023181 | 0.680779 | 0.634095 | 0.602061 | 0.591436 | 0.553928 | 0.489375 | 0 | 0.003561 | 0.470064 | 15,366 | 472 | 85 | 32.555085 | 0.759302 | 0.035923 | 0 | 0.638961 | 0 | 0 | 0.20815 | 0 | 0 | 0 | 0 | 0 | 0.044156 | 1 | 0.018182 | false | 0 | 0.020779 | 0 | 0.057143 | 0.002597 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c5e15ddaa18b77dfb245e91f9e636201f9744ac | 1,542 | py | Python | alphamind/tests/analysis/test_perfanalysis.py | rongliang-tech/alpha-mind | 39f720974c637d17e185e445dc05c9fc4863a241 | [
"MIT"
] | 186 | 2017-11-27T01:26:44.000Z | 2022-03-28T16:11:33.000Z | alphamind/tests/analysis/test_perfanalysis.py | atefar2/alpha-mind | 66d839affb5d81d31d5cac7e5e224278e3f99a8b | [
"MIT"
] | 2 | 2017-12-19T02:47:36.000Z | 2021-01-09T05:25:18.000Z | alphamind/tests/analysis/test_perfanalysis.py | atefar2/alpha-mind | 66d839affb5d81d31d5cac7e5e224278e3f99a8b | [
"MIT"
] | 65 | 2017-11-27T01:26:47.000Z | 2022-03-17T10:50:52.000Z | # -*- coding: utf-8 -*-
"""
Created on 2017-5-12
@author: cheng.li
"""
import unittest
import numpy as np
import pandas as pd
from alphamind.analysis.perfanalysis import perf_attribution_by_pos
class TestPerformanceAnalysis(unittest.TestCase):
@classmethod
def test_perf_attribution_by_pos(cls):
n_samples = 36000
n_dates = 20
n_risk_factors = 35
dates = np.sort(np.random.randint(n_dates, size=n_samples))
weights_series = pd.Series(data=np.random.randn(n_samples), index=dates)
bm_series = pd.Series(data=np.random.randn(n_samples), index=dates)
next_bar_return_series = pd.Series(data=np.random.randn(n_samples), index=dates)
risk_table = pd.DataFrame(data=np.random.randn(n_samples, n_risk_factors),
columns=list(range(n_risk_factors)),
index=dates)
explained_table = perf_attribution_by_pos(weights_series - bm_series,
next_bar_return_series,
risk_table)
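        # The per-date sums of the factor attribution should equal the
        # aggregated active returns they are meant to explain.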
to_explain = (weights_series - bm_series).multiply(next_bar_return_series, axis=0)
aggregated_to_explain = pd.Series(to_explain).groupby(dates).sum()
aggregated_explained = explained_table.sum(axis=1)
np.testing.assert_array_almost_equal(aggregated_to_explain.values,
aggregated_explained.values)
if __name__ == '__main__':
unittest.main()
| 33.521739 | 90 | 0.630999 | 186 | 1,542 | 4.897849 | 0.419355 | 0.052689 | 0.052689 | 0.074643 | 0.188804 | 0.188804 | 0.161361 | 0.161361 | 0.161361 | 0.161361 | 0 | 0.017086 | 0.278859 | 1,542 | 45 | 91 | 34.266667 | 0.802158 | 0.040208 | 0 | 0 | 0 | 0 | 0.005435 | 0 | 0 | 0 | 0 | 0 | 0.037037 | 1 | 0.037037 | false | 0 | 0.148148 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c5f7612ff8eec29cef986e54718511e7172e74e | 1,457 | py | Python | appengine/src/greenday_api/tests/test_distinct_channels_api.py | meedan/montage | 4da0116931edc9af91f226876330645837dc9bcc | [
"Apache-2.0"
] | 6 | 2018-07-31T16:48:07.000Z | 2020-02-01T03:17:51.000Z | appengine/src/greenday_api/tests/test_distinct_channels_api.py | meedan/montage | 4da0116931edc9af91f226876330645837dc9bcc | [
"Apache-2.0"
] | 41 | 2018-08-07T16:43:07.000Z | 2020-06-05T18:54:50.000Z | appengine/src/greenday_api/tests/test_distinct_channels_api.py | meedan/montage | 4da0116931edc9af91f226876330645837dc9bcc | [
"Apache-2.0"
] | 1 | 2018-08-07T16:40:18.000Z | 2018-08-07T16:40:18.000Z | """
Tests for :mod:`greenday_api.misc.distinct_channels_api <greenday_api.misc.distinct_channels_api>`
"""
from protorpc import message_types
from .base import ApiTestCase
from ..misc.distinct_channels_api import DistinctChannelsAPI
class DistinctChannelsAPITests(ApiTestCase):
"""
Test case for
    :mod:`greenday_api.misc.distinct_channels_api <greenday_api.misc.distinct_channels_api>`
"""
api_type = DistinctChannelsAPI
def setUp(self):
"""
Bootstrap test case
"""
super(DistinctChannelsAPITests, self).setUp()
self.video_1 = self.create_video(
channel_id="123", channel_name="foo")
self.video_2 = self.create_video(
channel_id="123", channel_name="fez")
self.video_3 = self.create_video(
channel_id="456", channel_name="bar")
def test_get_distinct_channels(self):
"""
Gets all distinct channels across all videos in Montage
"""
self._sign_in(self.admin)
request = message_types.VoidMessage()
response = self.api.get_distinct_channels(request)
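        # Two distinct channel ids (123 and 456) exist across the three videos.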
self.assertEqual(2, len(response.items))
channel_123_resp = next(r for r in response.items if r.id == "123")
self.assertEqual("fez", channel_123_resp.name)
channel_456_resp = next(r for r in response.items if r.id == "456")
self.assertEqual("bar", channel_456_resp.name)
| 29.734694 | 102 | 0.660261 | 178 | 1,457 | 5.157303 | 0.331461 | 0.139434 | 0.108932 | 0.125272 | 0.328976 | 0.302832 | 0.302832 | 0.302832 | 0.220044 | 0.220044 | 0 | 0.027828 | 0.235415 | 1,457 | 48 | 103 | 30.354167 | 0.79623 | 0.190803 | 0 | 0 | 0 | 0 | 0.027624 | 0 | 0 | 0 | 0 | 0 | 0.136364 | 1 | 0.090909 | false | 0 | 0.136364 | 0 | 0.318182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c6349a307102d7f6afaff901e97fb40d85722ea | 3,142 | py | Python | pypipegraph/utils/log_listen.py | bopopescu/pypipegraph-2 | 3a3f9ba565789d8d73c8cd503703a957de2be9d8 | [
"MIT"
] | 4 | 2017-05-24T16:57:42.000Z | 2017-09-21T19:55:27.000Z | pypipegraph/utils/log_listen.py | bopopescu/pypipegraph-2 | 3a3f9ba565789d8d73c8cd503703a957de2be9d8 | [
"MIT"
] | 2 | 2019-11-22T15:33:47.000Z | 2020-07-27T11:59:44.000Z | pypipegraph/utils/log_listen.py | bopopescu/pypipegraph-2 | 3a3f9ba565789d8d73c8cd503703a957de2be9d8 | [
"MIT"
] | 4 | 2015-08-26T15:43:00.000Z | 2020-07-20T03:36:40.000Z | """
The MIT License (MIT)
Copyright (c) 2012, Florian Finkernagel <finkernagel@imt.uni-marburg.de>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from twisted.internet.protocol import Protocol, Factory, ClientFactory
from twisted.internet.error import CannotListenError
from twisted.internet import reactor
import logging
import cPickle
import sys
try:
port = int(sys.argv[1])
except IndexError:
port = 5005
of = open("log.txt", "wb")
class Debug(Protocol):
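    """Receives log records over TCP (4-byte length prefix + pickled payload,
    the wire format used by logging's SocketHandler) and echoes them to
    stdout and log.txt."""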
def dataReceived(self, data):
global of
try:
if data == "please exit":
print("End log because other logger requested access")
of.write("End log because other logger requested access")
reactor.stop()
lr = logging.makeLogRecord(cPickle.loads(data[4:]))
if lr.getMessage().find("New Pipegraph") != -1:
of.close()
of = open("log.txt", "wb")
print("%s:%i: %s" % (lr.name, lr.lineno, lr.getMessage()))
of.write("%s:%i: %s\n" % (lr.name, lr.lineno, lr.getMessage()))
of.flush()
except cPickle.UnpicklingError:
print("a messages was missing")
# self.transport.write(data, (host, port))
class Killer(Protocol):
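    """Connects to an already-running listener and asks it to exit so this
    instance can take over the port."""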
def connectionMade(self):
print("sending please exit")
self.transport.write("please exit") # no need for address
self.transport.loseConnection()
def connectionLost(self, reason):
print("Other logger apperantly exited, now trying to listen again in 2 seconds")
reactor.callLater(2, start_listening)
class KillerFactory(ClientFactory):
protocol = Killer
def start_listening():
factory = Factory()
factory.protocol = Debug
def listening():
print("now listening")
factory.startFactory = listening
reactor.listenTCP(port, factory)
try:
start_listening()
except CannotListenError:
print("trying to send kill signal to already running instance")
d = reactor.connectTCP("localhost", port, KillerFactory())
# print "Going to listen on port %i, logging to %s" % (port, of.name)
reactor.run()
| 33.425532 | 88 | 0.693507 | 414 | 3,142 | 5.256039 | 0.478261 | 0.040441 | 0.026195 | 0.011029 | 0.074449 | 0.061581 | 0.061581 | 0 | 0 | 0 | 0 | 0.00528 | 0.216423 | 3,142 | 93 | 89 | 33.784946 | 0.878554 | 0.397518 | 0 | 0.098039 | 0 | 0 | 0.186405 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.098039 | false | 0 | 0.117647 | 0 | 0.294118 | 0.137255 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c68214d1193846a164314f13ae12a80f459ffd1 | 9,130 | py | Python | ldapauthenticator/ldapauthenticator.py | jbmarcille/ldapauthenticator | f6037d72bd8c76317b8741d96de1c7b1dee26298 | [
"BSD-3-Clause"
] | null | null | null | ldapauthenticator/ldapauthenticator.py | jbmarcille/ldapauthenticator | f6037d72bd8c76317b8741d96de1c7b1dee26298 | [
"BSD-3-Clause"
] | null | null | null | ldapauthenticator/ldapauthenticator.py | jbmarcille/ldapauthenticator | f6037d72bd8c76317b8741d96de1c7b1dee26298 | [
"BSD-3-Clause"
] | null | null | null | import ldap3
import re
from jupyterhub.auth import Authenticator
from tornado import gen
from traitlets import Unicode, Int, Bool, List, Union
class LDAPAuthenticator(Authenticator):
server_address = Unicode(
config=True,
help="""
Address of the LDAP server to contact.
Could be an IP address or hostname.
"""
)
server_port = Int(
config=True,
help="""
Port on which to contact the LDAP server.
Defaults to `636` if `use_ssl` is set, `389` otherwise.
"""
)
def _server_port_default(self):
if self.use_ssl:
return 636 # default SSL port for LDAP
else:
return 389 # default plaintext port for LDAP
use_ssl = Bool(
True,
config=True,
help="""
Use SSL to communicate with the LDAP server.
Highly recommended! Your LDAP server must be configured to support this, however.
"""
)
bind_dn_template = Union(
        [List(), Unicode()],
config=True,
help="""
Template from which to construct the full dn
when authenticating to LDAP. {username} is replaced
with the actual username used to log in.
If your LDAP is set in such a way that the userdn can not
be formed from a template, but must be looked up with an attribute
(such as uid or sAMAccountName), please see `lookup_dn`. It might
be particularly relevant for ActiveDirectory installs.
Unicode Example:
uid={username},ou=people,dc=wikimedia,dc=org
List Example:
[
uid={username},ou=people,dc=wikimedia,dc=org,
uid={username},ou=Developers,dc=wikimedia,dc=org
]
"""
)
allowed_groups = List(
config=True,
allow_none=True,
default=None,
help="""
List of LDAP group DNs that users could be members of to be granted access.
If a user is in any one of the listed groups, then that user is granted access.
Membership is tested by fetching info about each group and looking for the User's
dn to be a value of one of `member` or `uniqueMember`, *or* if the username being
used to log in with is value of the `uid`.
Set to an empty list or None to allow all users that have an LDAP account to log in,
without performing any group membership checks.
"""
)
# FIXME: Use something other than this? THIS IS LAME, akin to websites restricting things you
# can use in usernames / passwords to protect from SQL injection!
valid_username_regex = Unicode(
r'^[a-z][.a-z0-9_-]*$',
config=True,
help="""
Regex for validating usernames - those that do not match this regex will be rejected.
This is primarily used as a measure against LDAP injection, which has fatal security
considerations. The default works for most LDAP installations, but some users might need
to modify it to fit their custom installs. If you are modifying it, be sure to understand
the implications of allowing additional characters in usernames and what that means for
LDAP injection issues. See https://www.owasp.org/index.php/LDAP_injection for an overview
of LDAP injection.
"""
)
lookup_dn = Bool(
False,
config=True,
help="""
Form user's DN by looking up an entry from directory
By default, LDAPAuthenticator finds the user's DN by using `bind_dn_template`.
However, in some installations, the user's DN does not contain the username, and
hence needs to be looked up. You can set this to True and then use `user_search_base`
and `user_attribute` to accomplish this.
"""
)
user_search_base = Unicode(
config=True,
default=None,
allow_none=True,
help="""
Base for looking up user accounts in the directory, if `lookup_dn` is set to True.
        LDAPAuthenticator will search under this base for an entry whose `user_attribute`
        matches the current username, and use that entry's DN as the userdn.
For example, if all users objects existed under the base ou=people,dc=wikimedia,dc=org, and
the username users use is set with the attribute `uid`, you can use the following config:
```
c.LDAPAuthenticator.lookup_dn = True
c.LDAPAuthenticator.user_search_base = 'ou=people,dc=wikimedia,dc=org'
c.LDAPAuthenticator.user_attribute = 'uid'
```
"""
)
user_attribute = Unicode(
config=True,
default=None,
allow_none=True,
help="""
Attribute containing user's name, if `lookup_dn` is set to True.
See `user_search_base` for info on how this attribute is used.
For most LDAP servers, this is uid. For Active Directory, it is
sAMAccountName.
"""
)
@gen.coroutine
def authenticate(self, handler, data):
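        # Returns the username on success and None on failure, per the
        # JupyterHub Authenticator API.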
username = data['username']
password = data['password']
# Get LDAP Connection
def getConnection(userdn, username, password):
server = ldap3.Server(
self.server_address,
port=self.server_port,
use_ssl=self.use_ssl
)
self.log.debug('Attempting to bind {username} with {userdn}'.format(
username=username,
userdn=userdn
))
conn = ldap3.Connection(server, user=userdn, password=password)
return conn
# Protect against invalid usernames as well as LDAP injection attacks
if not re.match(self.valid_username_regex, username):
self.log.warn('username:%s Illegal characters in username, must match regex %s', username, self.valid_username_regex)
return None
# No empty passwords!
if password is None or password.strip() == '':
self.log.warn('username:%s Login denied for blank password', username)
return None
isBound = False
self.log.debug("TYPE= '%s'",isinstance(self.bind_dn_template, list))
# In case, there are multiple binding templates
if isinstance(self.bind_dn_template, list):
for dn in self.bind_dn_template:
userdn = dn.format(username=username)
conn = getConnection(userdn, username, password)
isBound = conn.bind()
self.log.debug('Status of user bind {username} with {userdn} : {isBound}'.format(
username=username,
userdn=userdn,
isBound=isBound
))
if isBound:
break
else:
userdn = self.bind_dn_template.format(username=username)
conn = getConnection(userdn, username, password)
isBound = conn.bind()
if isBound:
if self.allowed_groups:
if self.lookup_dn:
# In some cases, like AD, we don't bind with the DN, and need to discover it.
conn.search(
search_base=self.user_search_base,
search_scope=ldap3.SUBTREE,
search_filter='({userattr}={username})'.format(
userattr=self.user_attribute,
username=username
),
attributes=[self.user_attribute]
)
if len(conn.response) == 0:
self.log.warn('username:%s No such user entry found when looking up with attribute %s', username, self.user_attribute)
return None
userdn = conn.response[0]['dn']
self.log.debug('username:%s Using dn %s', username, userdn)
for group in self.allowed_groups:
groupfilter = (
'(|'
'(member={userdn})'
'(uniqueMember={userdn})'
'(memberUid={uid})'
')'
).format(userdn=userdn, uid=username)
groupattributes = ['member', 'uniqueMember', 'memberUid']
if conn.search(
group,
search_scope=ldap3.BASE,
search_filter=groupfilter,
attributes=groupattributes
):
return username
# If we reach here, then none of the groups matched
self.log.warn('username:%s User not in any of the allowed groups', username)
return None
else:
return username
else:
self.log.warn('Invalid password for user {username}'.format(
username=username,
))
return None
| 37.572016 | 142 | 0.571742 | 1,064 | 9,130 | 4.843985 | 0.265977 | 0.017462 | 0.016298 | 0.015522 | 0.123399 | 0.094684 | 0.082266 | 0.063252 | 0.063252 | 0.029492 | 0 | 0.003565 | 0.354874 | 9,130 | 242 | 143 | 37.727273 | 0.871477 | 0.053998 | 0 | 0.288557 | 0 | 0.014925 | 0.476119 | 0.039764 | 0 | 0 | 0 | 0.004132 | 0 | 1 | 0.014925 | false | 0.039801 | 0.024876 | 0 | 0.139303 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c6b0c96f51c49cf2ddaf1214e424470ce2b7534 | 23,329 | py | Python | PyText3.py | Shock9616/PyText3 | f16214060390ae8b457fc8dfbdb0943c7c34720b | [
"MIT"
] | null | null | null | PyText3.py | Shock9616/PyText3 | f16214060390ae8b457fc8dfbdb0943c7c34720b | [
"MIT"
] | null | null | null | PyText3.py | Shock9616/PyText3 | f16214060390ae8b457fc8dfbdb0943c7c34720b | [
"MIT"
] | 1 | 2021-11-06T13:45:11.000Z | 2021-11-06T13:45:11.000Z | #!/usr/bin/env python3
"""
The main file for a basic code editor written
entirely with the python standard library
main.py
PyText3 Text Editor
Created by Kaleb Rosborough on 10/23/2018
Copyright © Shock9616 2018 All rights reserved
"""
#region Imports
from tkinter import *
import tkinter as tk
from tkinter import filedialog, simpledialog, messagebox, END
from tkinter import ttk
import os
import sys
import prefs
import themes
try:
# Try to import third party modules.
import syntaxhighlighting
except ImportError:
pass
#endregion
#region Global Variables
COL_BG = "grey"
COL_FG = "white"
CURRENT_FILE = "untitled"
#endregion
#region Custom Classes
class TextLineNumbers(tk.Canvas):
""" Custom canvas class for creating line numbers """
def __init__(self, *args, **kwargs):
tk.Canvas.__init__(self, *args, **kwargs)
self.textwidget = None
def attach(self, text_widget):
self.textwidget = text_widget
def redraw(self, fillcolor):
""" redraw line numbers """
self.delete("all")
i = self.textwidget.index("@0,0")
while True:
dline = self.textwidget.dlineinfo(i)
if dline is None:
break
y = dline[1]
linenum = str(i).split(".")[0]
self.create_text(2, y, anchor="nw", text=linenum, fill=fillcolor)
i = self.textwidget.index("%s+1line" % i)
class CustomText(tk.Text):
""" A custom text field class that can have line numbers attatched to it """
def __init__(self, *args, **kwargs):
tk.Text.__init__(self, *args, **kwargs)
# create a proxy for the underlying widget
self._orig = self._w + "_orig"
self.tk.call("rename", self._w, self._orig)
self.tk.createcommand(self._w, self._proxy)
def _proxy(self, *args):
# let the actual widget perform the requested action
cmd = (self._orig,) + args
try:
result = self.tk.call(cmd)
except Exception:
return None
# generate an event if something was added or deleted,
# or the cursor position changed
if (args[0] in ("insert", "replace", "delete") or
args[0:3] == ("mark", "set", "insert") or
args[0:2] == ("xview", "moveto") or
args[0:2] == ("xview", "scroll") or
args[0:2] == ("yview", "moveto") or
args[0:2] == ("yview", "scroll")
):
self.event_generate("<<Change>>", when="tail")
# return what the actual widget returned
return result
##########################################
#endregion
#region Command Definitions
def onChange(event):
fillColor = "#FFFFFF"
linenumbers.redraw(fillcolor=fillColor)
cursorPos = (textField.index(INSERT)).split(".")
lineCount.config(text=("Line: " + cursorPos[0]))
columnCount.config(text=("Column: " + str(int(cursorPos[1]) + 1)))
# syntaxhighlighting.HighlightSyntax(textField, defaultTheme, language)
def closeWindow(event=None):
if messagebox.askyesno("Quit", "Are you sure you want to exit?", icon="question"):
if messagebox.askyesno("Save", "Would you like to save the current file?", icon="question"):
saveFile()
root.destroy()
quit(1)
#region File Menu Commands
def newFile(event=None):
# if len(textField.get("1.0", END + "-1c")) > 0: # If there is something in the file
if messagebox.askyesno("Save", "Would you like to save the current file?", icon="question"):
saveFile()
textField.delete("1.0", tk.END)
else:
textField.delete("1.0", tk.END)
def openFile(event=None):
    global CURRENT_FILE
file = filedialog.askopenfile(parent=root, mode="rb", title="Select a file to open")
if messagebox.askyesno("Save", "Would you like to save the current file?", icon="question"):
saveFile()
if file is not None:
contents = file.read()
textField.delete("1.0", END)
textField.insert("1.0", contents)
CURRENT_FILE = file.name
root.title(CURRENT_FILE)
file.close()
#######################################################
def saveFileAs(event=None):
    global CURRENT_FILE
selectedLanguage = language.get()
if selectedLanguage == prefs.LANGUAGES[0]:
file = filedialog.asksaveasfile(mode="w", defaultextension=".cpp", filetypes=(
("C++ File", "*.cpp"),
("HTML File", "*.html"),
("Java File", "*.java"),
("JavaScript File", "*.js"),
("Python File", "*.py"),
("Swift File", "*.swift"),
("Text File", "*.txt"),
("All Files", "*.*")
))
elif selectedLanguage == prefs.LANGUAGES[1]:
file = filedialog.asksaveasfile(mode="w", defaultextension=".html", filetypes=(
("HTML File", "*.html"),
("C++ File", "*.cpp"),
("Java File", "*.java"),
("JavaScript File", "*.js"),
("Python File", "*.py"),
("Swift File", "*.swift"),
("Text File", "*.txt"),
("All Files", "*.*")
))
elif selectedLanguage == prefs.LANGUAGES[2]:
file = filedialog.asksaveasfile(mode="w", defaultextension=".java", filetypes=(
("Java File", "*.java"),
("C++ File", "*.cpp"),
("HTML File", "*.html"),
("JavaScript File", "*.js"),
("Python File", "*.py"),
("Swift File", "*.swift"),
("Text File", "*.txt"),
("All Files", "*.*")
))
elif selectedLanguage == prefs.LANGUAGES[3]:
file = filedialog.asksaveasfile(mode="w", defaultextension=".js", filetypes=(
("JavaScript File", "*.js"),
("C++ File", "*.cpp"),
("HTML File", "*.html"),
("Java File", "*.java"),
("Python File", "*.py"),
("Swift File", "*.swift"),
("Text File", "*.txt"),
("All Files", "*.*")
))
elif selectedLanguage == prefs.LANGUAGES[4]:
file = filedialog.asksaveasfile(mode="w", defaultextension=".txt", filetypes=(
("Text File", "*.txt"),
("C++ File", "*.cpp"),
("HTML File", "*.html"),
("Java File", "*.java"),
("JavaScript File", "*.js"),
("Python File", "*.py"),
("Swift File", "*.swift"),
("All Files", "*.*")
))
elif selectedLanguage == prefs.LANGUAGES[5]:
file = filedialog.asksaveasfile(mode="w", defaultextension=".py", filetypes=(
("Python File", "*.py"),
("C++ File", "*.cpp"),
("HTML File", "*.html"),
("Java File", "*.java"),
("JavaScript File", "*.js"),
("Swift File", "*.swift"),
("Text File", "*.txt"),
("All Files", "*.*")
))
elif selectedLanguage == prefs.LANGUAGES[6]:
file = filedialog.asksaveasfile(mode="w", defaultextension=".swift", filetypes=(
("Swift File", "*.swift"),
("C++ File", "*.cpp"),
("HTML File", "*.html"),
("Java File", "*.java"),
("JavaScript File", "*.js"),
("Python File", "*.py"),
("Text File", "*.txt"),
("All Files", "*.*")
))
else:
file = filedialog.asksaveasfile(mode="w", defaultextension=".", filetypes=(
("All Files", "*.*"),
("Text File", "*.txt"),
("C++ File", "*.cpp"),
("HTML File", "*.html"),
("Java File", "*.java"),
("JavaScript File", "*.js"),
("Python File", "*.py"),
("Swift File", "*.swift")
))
if file is None:
return
fileContent = textField.get(1.0, "end")
file.write(fileContent)
CURRENT_FILE = file.name
root.title(CURRENT_FILE)
file.close()
def saveFile(event=None):
print("Saving file...")
exists = os.path.isfile(str(CURRENT_FILE))
if exists:
with open(CURRENT_FILE, "w") as file:
file.write(textField.get("1.0", "end"))
else:
saveFileAs()
#######################################################
#endregion
#region Edit Menu Commands
def undo(event=None):
textField.event_generate("<<Undo>>")
def redo(event=None):
textField.event_generate("<<Redo>>")
#######################################################
def copySelected(event=None):
selectedText = textField.selection_get()
root.clipboard_clear()
root.clipboard_append(selectedText)
def cutSelected(event=None):
textField.event_generate("<<Cut>>")
def paste(event=None):
textField.event_generate("<<Paste>>")
return "break"
def selectAll(event=None):
textField.tag_add(SEL, "1.0", END)
textField.mark_set(INSERT, "1.0")
textField.see(INSERT)
return "break"
#######################################################
def find(event=None):
textField.tag_remove("found", "1.0", END)
searchedText = simpledialog.askstring("Find", "Enter the text you want to find:")
if searchedText == "":
done = messagebox.showerror("Find", "Error: You did not enter any text")
if searchedText:
idx = "1.0"
while 1:
idx = textField.search(searchedText, idx, nocase=1, stopindex=END)
if not idx:
break
lastidx = "%s+%dc" % (idx, len(searchedText))
textField.tag_add("found", idx, lastidx)
idx = lastidx
done = messagebox.showinfo("Find", "Highlighting all instances of " + searchedText + ".")
if done:
textField.tag_remove("found", "1.0", END)
def replace(event=None):
    searchedText = simpledialog.askstring("Replace", "Enter the text you want to replace:")
    if not searchedText:
        return
    replaceText = simpledialog.askstring("Replace", "Enter the text you want to replace with:")
    if replaceText is None:
        return
    idx = "1.0"
    while 1:
        idx = textField.search(searchedText, idx, nocase=1, stopindex=END)
        if not idx:
            break
        # Replace this occurrence in place and continue searching after it.
        lastidx = "%s+%dc" % (idx, len(searchedText))
        textField.delete(idx, lastidx)
        textField.insert(idx, replaceText)
        idx = "%s+%dc" % (idx, len(replaceText))
#endregion
#region Options Menu Commands
def openPreferences():
def applySettings():
newFont = font.get()
fontSaveFile = open("font.sav", "w+")
fontSaveFile.write(newFont)
fontSaveFile.close()
if sys.platform.startswith("darwin"):
textField.configure(font=(newFont, 12))
else:
textField.configure(font=(newFont, 10))
newTheme = theme.get()
themeSaveFile = open("theme.sav", "w+")
themeSaveFile.write(newTheme)
themeSaveFile.close()
themes.setTheme(textField, linenumbers, newTheme)
def applyAndCloseSettings():
applySettings()
pw.destroy()
def cancelSettings():
pw.destroy()
pw = Toplevel()
pw.minsize(width=250, height=226)
pw.title("Preferences")
if sys.platform.startswith("darwin"):
pw.iconbitmap("images/settingsicon.icns")
else:
pw.iconbitmap("images/settingsicon.ico")
pw.wm_attributes("-topmost", 1)
labelColumn = 0
listColumn = 1
fontRow = 0
themeRow = 1
# ***** Font Settings *****
currentFont = open("font.sav", "r").readline()
font = StringVar(pw)
if currentFont in prefs.FONTS:
font.set(currentFont)
else:
font.set(prefs.FONTS[2]) # Default font
fontLabel = Label(pw, text="Font", padx=10)
fontLabel.grid(row=0, column=0)
fontList = OptionMenu(pw, font, *prefs.FONTS)
fontList.config(width=15)
fontList.grid(row=0, column=1)
# ***** Theme Settings *****
currentTheme = open("theme.sav", "r").readline()
theme = StringVar(pw)
    if currentTheme in prefs.THEMES:
        theme.set(currentTheme)
    else:
        theme.set(prefs.THEMES[26])  # Default theme
themeLabel = Label(pw, text="Theme", padx=10)
themeLabel.grid(row=1, column=0)
themeList = OptionMenu(pw, theme, *prefs.THEMES)
themeList.config(width=15)
themeList.grid(row=1, column=1)
# ***** Preview *****
PREVEIW_TEXT = Text(pw, width=48, height=11)
previewTextContent = prefs.PREVEIW_TEXT
PREVEIW_TEXT.insert(END, previewTextContent)
PREVEIW_TEXT.config(state="disabled")
PREVEIW_TEXT.grid(row=0, column=3, rowspan=3, columnspan=15, padx=5, pady=5)
# ***** Apply, Ok, and Cancel buttons *****
okButton = Button(pw, text="Ok", command=applyAndCloseSettings)
okButton.grid(row=5, column=13, padx=5, pady=5)
applyButton = Button(pw, text="Apply", command=applySettings)
applyButton.grid(row=5, column=14, padx=5, pady=5)
cancelButton = Button(pw, text="Cancel", command=cancelSettings)
cancelButton.grid(row=5, column=15, padx=5, pady=5)
pw.mainloop()
#endregion
#region Help Menu Commands
def aboutPyText3(event=None):
label = messagebox.showinfo("About PyText3", "Shock9616\nVersion: 1.0\n© 2018 Shock9616 All rights reserved",
icon="info")
def showCredits(event=None):
cw = messagebox.showinfo("PyText3 Credits", prefs.CREDITS_TEXT, icon="info")
#endregion
#endregion
#region UI Setup
if __name__ == "__main__":
print("Running PyText3 on " + sys.platform)
root = tk.Tk()
root.configure()
root.minsize(width=650, height=450)
root.title(CURRENT_FILE)
if sys.platform.startswith("darwin"):
root.iconbitmap("images/icon.icns")
else:
root.iconbitmap("images/icon.ico")
root.protocol("WM_DELETE_WINDOW", closeWindow)
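    # font.sav and theme.sav are expected to exist beside the script; each
    # holds a single line with the saved preference.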
textFont = open("font.sav", "r").readline()
#region Set up basic UI elements
defaultTheme = open("theme.sav", "r").readline()
defaultFont = open("font.sav", "r").readline()
toolBar = Frame(root, bd=1, relief="sunken")
toolBar.pack(side="top", fill="x")
textField = CustomText(root, wrap=NONE, undo=True, border=0)
hsb = tk.Scrollbar(orient="horizontal", command=textField.xview)
vsb = tk.Scrollbar(orient="vertical", command=textField.yview)
textField.configure(yscrollcommand=vsb.set, xscrollcommand=hsb.set, font=(textFont, 10))
textField.tag_configure("bigfont", font=("Helvetica", "10", "bold"))
textField.tag_configure("found", background="gray")
textField.tag_configure("replace", background="gray")
if sys.platform.startswith("darwin"):
textField.configure(font=(defaultFont, 12))
else:
textField.configure(font=(defaultFont, 10))
linenumbers = TextLineNumbers(width=30, highlightthickness=1)
linenumbers.attach(textField)
statusBar = Frame(root, bd=1, height=50, relief="sunken")
statusBar.pack(side="bottom", fill="x")
hsb.pack(side="bottom", fill="x")
vsb.pack(side="right", fill="y")
linenumbers.pack(side="left", fill="y")
textField.pack(side="right", fill="both", expand=True)
textField.bind("<<Change>>", onChange)
textField.bind("<Configure>", onChange)
linenumbers.configure(highlightthickness=0)
themes.setTheme(textField, linenumbers, defaultTheme)
cursorPos = str(textField.index(INSERT)).split(".")
#endregion
#region Set up menu bar
menuBar = Menu(root)
root.config(menu=menuBar)
#region Create Menu Bar Sub-Menus
fileMenu = Menu(menuBar, tearoff=False)
menuBar.add_cascade(label="File", menu=fileMenu)
editMenu = Menu(menuBar, tearoff=False)
menuBar.add_cascade(label="Edit", menu=editMenu)
optionsMenu = Menu(menuBar, tearoff=False)
menuBar.add_cascade(label="Options", menu=optionsMenu)
#endregion
#region Fill Sub-Menus for Windows and Linux
if sys.platform.startswith("win32") or sys.platform.startswith("linux"):
# ***** File Menu *****
fileMenu.add_command(label="New File", command=newFile, accelerator="Ctrl+N")
fileMenu.add_command(label="Open", command=openFile, accelerator="Ctrl+O")
fileMenu.add_separator()
fileMenu.add_command(label="Save", command=saveFile, accelerator="Ctrl+S")
fileMenu.add_command(label="Save As", command=saveFileAs, accelerator="Ctrl+^+N")
# ***** Edit Menu *****
editMenu.add_command(label="Undo", command=undo, accelerator="Ctrl+Z")
editMenu.add_command(label="Redo", command=redo, accelerator="Ctrl+^+Z")
editMenu.add_separator()
editMenu.add_command(label="Cut", command=cutSelected, accelerator="Ctrl+X")
editMenu.add_command(label="Copy", command=copySelected, accelerator="Ctrl+C")
editMenu.add_command(label="Paste", command=paste, accelerator="Ctrl+V")
editMenu.add_command(label="Select All", command=selectAll, accelerator="Ctrl+A")
editMenu.add_separator()
editMenu.add_command(label="Find", command=find, accelerator="Ctrl+F")
editMenu.add_command(label="Replace", command=replace, accelerator="Ctrl+H")
# ***** Options Menu *****
optionsMenu.add_command(label="Preferences", command=openPreferences)
#endregion
#region Fill Sub-Menus for Mac OS
elif sys.platform.startswith("darwin"):
# ***** File Menu *****
fileMenu.add_command(label="New File", command=newFile, accelerator="Cmd+N")
fileMenu.add_command(label="Open", command=openFile, accelerator="Cmd+O")
fileMenu.add_separator()
fileMenu.add_command(label="Save", command=saveFile, accelerator="Cmd+S")
fileMenu.add_command(label="Save As", command=saveFileAs, accelerator="Cm^+S")
# ***** Edit Menu *****
editMenu.add_command(label="Undo", command=undo, accelerator="Cmd+Z")
editMenu.add_command(label="Redo", command=redo, accelerator="Cmd+Shift+Z")
editMenu.add_separator()
editMenu.add_command(label="Cut", command=cutSelected, accelerator="Cmd+X")
editMenu.add_command(label="Copy", command=copySelected, accelerator="Cmd+C")
editMenu.add_command(label="Paste", command=paste, accelerator="Cmd+V")
editMenu.add_command(label="Select All", command=selectAll, accelerator="CtCmd")
editMenu.add_separator()
editMenu.add_command(label="Find", command=find, accelerator="Cmd+F")
editMenu.add_command(label="Replace", command=replace, accelerator="Cmd+H")
# ***** Options Menu *****
optionsMenu.add_command(label="Preferences", command=openPreferences, accelerator="Cmd+,")
#endregion
#region Help Menu
helpMenu = Menu(menuBar, tearoff=False)
menuBar.add_cascade(menu=helpMenu, label="Help")
helpMenu.add_command(label="About", command=aboutPyText3)
helpMenu.add_command(label="Credits", command=showCredits)
#endregion
#region Status Bar
    lineCount = Label(statusBar, text=("Line: " + cursorPos[0]), bd=1)
    lineCount.pack(side="left")
    columnCount = Label(statusBar, text=("Column: " + str(int(cursorPos[1]) + 1)), bd=1)
    columnCount.pack(side="left")
language = StringVar(root)
language.set(prefs.LANGUAGES[4])
languageSwitcher = OptionMenu(statusBar, language, *prefs.LANGUAGES)
languageSwitcher.config(indicator=False, compound="none", relief="flat")
languageSwitcher.pack(side="right", expand="no", fill="y")
separator = ttk.Separator(statusBar, orient="vertical")
separator.pack(side="right", fill="y")
#endregion
#endregion
#region Set up key bindings
#region Windows and Linux
if sys.platform.startswith("win32") or sys.platform.startswith("linux"):
textField.bind("<Control-n>", newFile)
textField.bind("<Control-N>", newFile)
textField.bind("<Control-o>", openFile)
textField.bind("<Control-O>", openFile)
textField.bind("<Control-s>", saveFile)
textField.bind("<Control-S>", saveFile)
textField.bind("<Control-Shift-s>", saveFileAs)
textField.bind("<Control-Shift-S>", saveFileAs)
textField.bind("<Control-n>", newFile)
textField.bind("<Control-n>", newFile)
textField.bind("<Control-q>", closeWindow)
textField.bind("<Control-Q>", closeWindow)
textField.bind("<Control-z>", undo)
textField.bind("<Control-Z>", undo)
textField.bind("<Control-Shift-z>", redo)
textField.bind("<Control-Shift-Z>", redo)
textField.bind("<Control-c>", copySelected)
textField.bind("<Control-C>", copySelected)
textField.bind("<Control-v>", paste)
textField.bind("<Control-V>", paste)
textField.bind("<Control-a>", selectAll)
textField.bind("<Control-A>", selectAll)
textField.bind("<Control-f>", find)
textField.bind("<Control-F>", find)
textField.bind("<Control-h>", replace)
textField.bind("<Control-H>", replace)
#endregion
#region Mac OS
elif sys.platform.startswith("darwin"):
textField.bind("<Command-n>", newFile)
textField.bind("<Command-N>", newFile)
textField.bind("<Command-o>", openFile)
textField.bind("<Command-O>", openFile)
textField.bind("<Command-s>", saveFile)
textField.bind("<Command-S>", saveFile)
textField.bind("<Command-Shift-s>", saveFileAs)
textField.bind("<Command-Shift-S>", saveFileAs)
textField.bind("<Command-n>", newFile)
textField.bind("<Command-n>", newFile)
textField.bind("<Command-q>", closeWindow)
textField.bind("<Command-Q>", closeWindow)
textField.bind("<Command-z>", undo)
textField.bind("<Command-Z>", undo)
textField.bind("<Command-Shift-z>", redo)
textField.bind("<Command-Shift-Z>", redo)
textField.bind("<Command-x>", cutSelected)
textField.bind("<Command-X>", cutSelected)
textField.bind("<Command-c>", copySelected)
textField.bind("<Command-C>", copySelected)
textField.bind("<Command-v>", paste)
textField.bind("<Command-V>", paste)
textField.bind("<Command-a>", selectAll)
textField.bind("<Command-A>", selectAll)
textField.bind("<Command-f>", find)
textField.bind("<Command-F>", find)
textField.bind("<Command-h>", replace)
textField.bind("<Command-H>", replace)
#endregion
#endregion
root.mainloop()
#endregion
| 37.207337 | 114 | 0.582365 | 2,506 | 23,329 | 5.378292 | 0.180367 | 0.054014 | 0.031162 | 0.027304 | 0.481451 | 0.433076 | 0.387298 | 0.374833 | 0.290473 | 0.225479 | 0 | 0.011048 | 0.251147 | 23,329 | 626 | 115 | 37.266773 | 0.760332 | 0.071885 | 0 | 0.311301 | 0 | 0 | 0.158422 | 0.002273 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053305 | false | 0.002132 | 0.021322 | 0 | 0.089552 | 0.004264 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c6b575ef99f368943f9457f0e564df734ee7fbc | 569 | py | Python | array/0075_sort_colors/0075_sort_colors.py | zdyxry/LeetCode | 33371285d0f3302158230f46e8b1b63b9f4639c4 | [
"Xnet",
"X11"
] | 6 | 2019-09-16T01:50:44.000Z | 2020-09-17T08:52:25.000Z | array/0075_sort_colors/0075_sort_colors.py | zdyxry/LeetCode | 33371285d0f3302158230f46e8b1b63b9f4639c4 | [
"Xnet",
"X11"
] | null | null | null | array/0075_sort_colors/0075_sort_colors.py | zdyxry/LeetCode | 33371285d0f3302158230f46e8b1b63b9f4639c4 | [
"Xnet",
"X11"
] | 4 | 2020-02-07T12:43:16.000Z | 2021-04-11T06:38:55.000Z | class Solution(object):
def sortColors(self, nums):
def triPartition(nums, target):
            i, j, n = 0, 0, len(nums) - 1
while j <= n:
if nums[j] < target:
nums[i], nums[j] = nums[j], nums[i]
i += 1
j += 1
elif nums[j] > target:
nums[j], nums[n] = nums[n], nums[j]
                    n -= 1
                else:
                    j += 1
triPartition(nums, 1)
nums = [2,0,2,1,1,0]
Solution().sortColors(nums)
print(nums) | 27.095238 | 55 | 0.391916 | 70 | 569 | 3.185714 | 0.3 | 0.134529 | 0.121076 | 0.134529 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.046358 | 0.469244 | 569 | 21 | 56 | 27.095238 | 0.692053 | 0 | 0 | 0.111111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0 | 0 | 0.166667 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
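triPartition above is the classic Dutch national flag single pass: at every step nums[:i] holds values below the target, nums[i:j] holds values equal to it, nums[n+1:] holds values above it, and nums[j:n+1] is still unscanned. A quick standalone check against the Solution class defined above:

nums = [2, 0, 2, 1, 1, 0]
Solution().sortColors(nums)
assert nums == [0, 0, 1, 1, 2, 2]  # one in-place pass, no counting sort needed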
2c6b6b00c0e5fe6ce8b9350c4a38df05692a0c5c | 2,545 | py | Python | transforms/h52h5.py | srujanm/ibex | ed8167b8b1573830bee39c469db7733fdfcb41d1 | [
"MIT"
] | 3 | 2018-08-10T21:11:09.000Z | 2019-07-26T13:47:24.000Z | transforms/h52h5.py | srujanm/ibex | ed8167b8b1573830bee39c469db7733fdfcb41d1 | [
"MIT"
] | null | null | null | transforms/h52h5.py | srujanm/ibex | ed8167b8b1573830bee39c469db7733fdfcb41d1 | [
"MIT"
] | 6 | 2018-03-05T20:14:11.000Z | 2020-07-23T18:39:16.000Z | # general functions for transforming h5 files
from ibex.utilities.constants import *
from numba import jit
import numpy as np
import math
# downsample the data by (z, y, x) ratio
@jit(nopython=True)
def DownsampleData(data, ratio=(1, 2, 2)):
# get the size of the current dataset
(zres, yres, xres) = data.shape
# create an empty array for downsampling
(down_zres, down_yres, down_xres) = (int(zres / ratio[IB_Z]), int(yres / ratio[IB_Y]), int(xres / ratio[IB_X]))
downsampled_data = np.zeros((down_zres, down_yres, down_xres), dtype=data.dtype)
# fill in the entries of the array
for iz in range(down_zres):
for iy in range(down_yres):
for ix in range(down_xres):
downsampled_data[iz,iy,ix] = data[int(iz * ratio[IB_Z]), int(iy * ratio[IB_Y]), int(ix * ratio[IB_X])]
return downsampled_data
@jit(nopython=True)
def MaskAndCropSegmentation(data, labels):
# create a set of valid segments
ids = set()
for label in labels:
ids.add(label)
# get the shape of the data
zres, yres, xres = data.shape
zmin, ymin, xmin = data.shape
zmax, ymax, xmax = (0, 0, 0)
masked_data = np.zeros((zres, yres, xres), dtype=np.int64)
# go through the entire data set
for iz in range(zres):
for iy in range(yres):
for ix in range(xres):
# skip masked out values
                if data[iz,iy,ix] not in ids: continue
masked_data[iz,iy,ix] = data[iz,iy,ix]
if iz < zmin: zmin = iz
if iy < ymin: ymin = iy
if ix < xmin: xmin = ix
if iz > zmax: zmax = iz
if iy > ymax: ymax = iy
if ix > xmax: xmax = ix
    return masked_data[zmin:zmax+1,ymin:ymax+1,xmin:xmax+1]  # +1: the recorded max indices are inclusive
# split the data to create training and validation data
@jit(nopython=True)
def SplitData(data, axis, threshold=0.5):
    assert 0 <= axis <= 2
# get the separation index
separation = int(threshold * data.shape[axis])
# split the data into two components
if (axis == 0):
training_data = data[0:separation,:,:]
validation_data = data[separation:,:,:]
elif (axis == 1):
training_data = data[:,0:separation,:]
validation_data = data[:,separation:,:]
else:
training_data = data[:,:,0:separation]
validation_data = data[:,:,separation:]
# return the training and validation data
return training_data, validation_data | 31.036585 | 118 | 0.604715 | 362 | 2,545 | 4.171271 | 0.28453 | 0.027815 | 0.021192 | 0.02649 | 0.25894 | 0.14106 | 0.109272 | 0.109272 | 0.109272 | 0 | 0 | 0.009896 | 0.285265 | 2,545 | 82 | 119 | 31.036585 | 0.820231 | 0.178782 | 0 | 0.061224 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020408 | 1 | 0.061224 | false | 0 | 0.081633 | 0 | 0.204082 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
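A small standalone usage sketch for the helpers above; it assumes the star-imported IB_Z/IB_Y/IB_X constants map the z, y, x axes to 0, 1, 2 (as in ibex), and note that numba compiles each function on its first call:

import numpy as np

volume = np.arange(4 * 8 * 8, dtype=np.int64).reshape(4, 8, 8)
small = DownsampleData(volume, ratio=(1, 2, 2))        # shape (4, 4, 4)
train, val = SplitData(volume, axis=0, threshold=0.5)  # shapes (2, 8, 8) each
print(small.shape, train.shape, val.shape)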
2c6ed73d803fe3c8ab6eab6f07fcada596456cd6 | 7,933 | py | Python | src/lingcomp/farm/prediction_head.py | CharlottePouw/interpreting-complexity | b9a73c0aff18e4c6b4209a6511d00639494c70da | [
"Apache-2.0"
] | 2 | 2020-12-18T12:26:22.000Z | 2020-12-19T18:47:07.000Z | src/lingcomp/farm/prediction_head.py | CharlottePouw/interpreting-complexity | b9a73c0aff18e4c6b4209a6511d00639494c70da | [
"Apache-2.0"
] | null | null | null | src/lingcomp/farm/prediction_head.py | CharlottePouw/interpreting-complexity | b9a73c0aff18e4c6b4209a6511d00639494c70da | [
"Apache-2.0"
] | 1 | 2021-05-19T13:39:45.000Z | 2021-05-19T13:39:45.000Z | import logging
import os
import torch
from farm.modeling.prediction_head import FeedForwardBlock, PredictionHead
from torch.nn import MSELoss
from torch.nn.functional import pad
from lingcomp.farm.utils import roll
logger = logging.getLogger(__name__)
# TokenRegressionHead
class TokenRegressionHead(PredictionHead):
def __init__(self, layer_dims=[768, 1], task_name="token_regression", spillover=0, mask_cls=True, **kwargs):
"""
:param layer_dims: The size of the layers in the feed forward component. The feed forward will have as many layers as there are ints in this list.
:type layer_dims: list
:param task_name:
:param spillover: If > 0, token values are summed with a kernel of this size before being passed to the feedforward layer.
:param mask_cls: If spillover is specified, defines if the initial token should be masked or not during averaging.
:param kwargs:
"""
super(TokenRegressionHead, self).__init__()
# num_labels could in most cases also be automatically retrieved from the data processor
self.layer_dims = layer_dims
# num_labels is being set to 2 since it is being hijacked to store the scaling factor and the mean
self.num_labels = 2
if spillover > 0:
logger.info(f"Spillover mode with size {spillover}, mask_cls: {mask_cls}")
logger.info(
f"Prediction head initialized with size [{self.layer_dims[0]} * {(spillover + 1)}, {self.layer_dims[1]}]"
)
self.feed_forward = FeedForwardBlock([self.layer_dims[0] * (spillover + 1), self.layer_dims[1]])
else:
logger.info(f"Prediction head initialized with size {self.layer_dims}")
self.feed_forward = FeedForwardBlock(self.layer_dims)
self.loss_fct = MSELoss(reduction="none")
self.ph_output_type = "per_token"
self.model_type = "token_regression"
self.task_name = task_name
self.spillover = spillover
self.mask_cls = mask_cls
self.generate_config()
@classmethod
def load(cls, pretrained_model_name_or_path):
"""
Load a prediction head from a saved FARM or transformers model. `pretrained_model_name_or_path`
can be one of the following:
a) Local path to a FARM prediction head config (e.g. my-bert/prediction_head_0_config.json)
:param pretrained_model_name_or_path: local path of a saved model or name of a publicly available model.
See https://huggingface.co/models for full list
"""
if (
os.path.exists(pretrained_model_name_or_path)
and "config.json" in pretrained_model_name_or_path
and "prediction_head" in pretrained_model_name_or_path
):
# a) FARM style
head = super(TokenRegressionHead, cls).load(pretrained_model_name_or_path)
else:
raise NotImplementedError("Load from Transformers not supported yet.")
return head
def forward(self, X):
if self.spillover > 0:
if self.mask_cls:
# Create mask on [CLS]
                cls_mask = torch.ones(X.size(), dtype=torch.bool, device=X.device)  # build the mask on X's device
cls_mask[:, 0, :] = False
# Apply mask
# [batch, seq_len, hidden] => [batch, seq_len - 1, hidden]
m = X[cls_mask].reshape(X.shape[0], X.shape[1] - 1, X.shape[2])
# Rolling concat of embeddings for spillover tokens
# [batch, seq_len - 1, hidden] => [batch, seq_len - 1, hidden * (spillover + 1)]
out = torch.cat([roll(m, shift, 1, 0) for shift in range(self.spillover, -1, -1)], dim=2)
ret = torch.cat((pad(X[:, 0, :].unsqueeze(1), (0, out.shape[2] - X.shape[2], 0, 0)), out), dim=1)
else:
# Rolling sum of the unmasked sequence
ret = torch.cat([roll(X, shift, 1, 0) for shift in range(self.spillover, -1, -1)], dim=2)
logits = self.feed_forward(ret)
else:
logits = self.feed_forward(X)
return logits
def logits_to_loss(self, logits, initial_mask, padding_mask=None, **kwargs):
label_ids = kwargs.get(self.label_tensor_name)
label_ids = label_ids.float()
# masking on padding and non-initial tokens
active_loss = (padding_mask.view(-1) == 1) & (initial_mask.view(-1) == 1)
active_logits = logits.view(-1)[active_loss]
active_labels = label_ids.view(-1)[active_loss]
loss = self.loss_fct(active_logits, active_labels) # loss is a 1 dimensional (active) token loss
return loss
def logits_to_preds(self, logits, initial_mask, **kwargs):
preds_token = logits.detach().cpu().numpy()
initial_mask = initial_mask.detach().cpu().numpy()
preds_word_all = []
for preds_token_one_sample, initial_mask_one_sample in zip(preds_token, initial_mask):
# Get labels and predictions for just the word initial tokens
preds_word_id = self.initial_token_only(preds_token_one_sample, initial_mask=initial_mask_one_sample)
# Rescaling predictions to actual label distributions
preds_word = [x[0] * self.label_list[1] + self.label_list[0] for x in preds_word_id]
preds_word_all.append(preds_word)
return preds_word_all
def prepare_labels(self, initial_mask, **kwargs):
label_ids = kwargs.get(self.label_tensor_name)
label_ids = label_ids.cpu().numpy()
initial_mask = initial_mask.detach().cpu().numpy()
labels_all = []
for label_ids_one_sample, initial_mask_one_sample in zip(label_ids, initial_mask):
label_ids = self.initial_token_only(label_ids_one_sample, initial_mask=initial_mask_one_sample)
labels = [x * self.label_list[1] + self.label_list[0] for x in label_ids]
labels_all.append(labels)
return labels_all
@staticmethod
def initial_token_only(seq, initial_mask):
ret = []
for init, s in zip(initial_mask, seq):
if init:
ret.append(s)
return ret
def formatted_preds(self, logits, initial_mask, samples, **kwargs):
preds = self.logits_to_preds(logits, initial_mask)
# align back with original input by getting the original word spans
spans = []
for sample, _ in zip(samples, preds):
word_spans = []
span = None
for token, offset, start_of_word in zip(
sample.tokenized["tokens"], sample.tokenized["offsets"], sample.tokenized["start_of_word"],
):
if start_of_word:
# previous word has ended unless it's the very first word
if span is not None:
word_spans.append(span)
span = {"start": offset, "end": offset + len(token)}
else:
# expand the span to include the subword-token
span["end"] = offset + len(token.replace("##", ""))
word_spans.append(span)
spans.append(word_spans)
assert len(preds) == len(spans)
res = {"task": self.task_name, "predictions": []}
for preds_seq, sample, spans_seq in zip(preds, samples, spans):
seq_res = []
for score, span in zip(preds_seq, spans_seq):
context = sample.clear_text["text"][span["start"] : span["end"]]
seq_res.append(
{
"start": span["start"],
"end": span["end"],
"context": f"{context}",
f"{self.task_name}_score": f"{score}",
}
)
res["predictions"].append(seq_res)
return res
| 46.391813 | 154 | 0.607715 | 1,019 | 7,933 | 4.525025 | 0.232581 | 0.047712 | 0.022555 | 0.03188 | 0.235741 | 0.200174 | 0.163522 | 0.147257 | 0.115159 | 0.096075 | 0 | 0.010009 | 0.294718 | 7,933 | 170 | 155 | 46.664706 | 0.81412 | 0.212656 | 0 | 0.107438 | 0 | 0.008264 | 0.076092 | 0.010473 | 0 | 0 | 0 | 0 | 0.008264 | 1 | 0.066116 | false | 0 | 0.057851 | 0 | 0.190083 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
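The spillover branch of forward() widens each token's feature vector by concatenating the hidden states of the preceding spillover tokens along the feature axis, which is why the feed-forward input size is layer_dims[0] * (spillover + 1). The toy sketch below reproduces that windowing with torch.roll; the project's own roll() from lingcomp.farm.utils is assumed to shift with padding rather than wrapping, so this is only an approximation of the real behavior:

import torch

batch, seq_len, hidden, spillover = 2, 5, 4, 2
m = torch.randn(batch, seq_len, hidden)
# torch.roll wraps around at the sequence boundary; the custom roll() presumably pads.
windows = [torch.roll(m, shifts=shift, dims=1) for shift in range(spillover, -1, -1)]
out = torch.cat(windows, dim=2)
print(out.shape)  # torch.Size([2, 5, 12]) == hidden * (spillover + 1)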
2c7013fe7de804ea6c1f8cce79d5e2ec488e9041 | 381 | py | Python | 6.00.2x/week1/lecture1/more_on_plotting.py | NicholasAsimov/courses | d60981f25816445578eb9e89bbbeef2d38eaf014 | [
"MIT"
] | null | null | null | 6.00.2x/week1/lecture1/more_on_plotting.py | NicholasAsimov/courses | d60981f25816445578eb9e89bbbeef2d38eaf014 | [
"MIT"
] | null | null | null | 6.00.2x/week1/lecture1/more_on_plotting.py | NicholasAsimov/courses | d60981f25816445578eb9e89bbbeef2d38eaf014 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as pylab
principal = 10000
interestRate = 0.05
years = 20
values = []
for i in range(years + 1):
values.append(principal)
principal += principal * interestRate
pylab.plot(range(years + 1), values, linewidth=3)
pylab.title('5% Growth, Compounded Annually')
pylab.xlabel('Years of Compounding')
pylab.ylabel('Value of Principal ($)')
pylab.show()
| 22.411765 | 49 | 0.721785 | 51 | 381 | 5.392157 | 0.666667 | 0.072727 | 0.08 | 0.123636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043077 | 0.146982 | 381 | 16 | 50 | 23.8125 | 0.803077 | 0 | 0 | 0 | 0 | 0 | 0.188976 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.076923 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
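The loop above is the iterative form of compound interest; the closed form P_n = P_0 * (1 + r)**n gives the same series and is a handy cross-check of the plotted values:

p0, r = 10000, 0.05
closed_form = [p0 * (1 + r) ** n for n in range(21)]
# closed_form[0] == 10000.0 and closed_form[20] ~= 26532.98, matching values[]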
2c7496e73af3dc1a12f6aa6a4517eb8851e79e00 | 972 | py | Python | activitypub/lib.py | wakin-/simple_ap | f13013fdc79207cfb07f3944caeeef45fe31bbf7 | [
"MIT"
] | 10 | 2018-06-18T09:17:59.000Z | 2020-04-22T11:46:12.000Z | activitypub/lib.py | wakin-/simple_ap | f13013fdc79207cfb07f3944caeeef45fe31bbf7 | [
"MIT"
] | 4 | 2020-06-05T18:24:12.000Z | 2021-06-10T20:29:49.000Z | activitypub/lib.py | wakin-/simple_ap | f13013fdc79207cfb07f3944caeeef45fe31bbf7 | [
"MIT"
] | null | null | null | import requests
import json
from urllib.parse import urlparse
from httpsig import HeaderSigner
from datetime import datetime
def sign_headers(account, method, path):
    signer = HeaderSigner(account.ap_id(), account.private_key, algorithm='rsa-sha256',
                          headers=['(request-target)', 'date'])
    sign = signer.sign({'Date': datetime.now().isoformat()}, method=method, path=path)
auth = sign.pop('authorization')
sign['Signature'] = auth[len('Signature '):] if auth.startswith('Signature ') else ''
return sign
def post_accept(account, target, activity):
to = target.inbox
jsn = {
'@context': 'https://www.w3.org/ns/activitystreams',
'type': 'Accept',
'actor': account.ap_id(),
'object': activity,
}
headers = sign_headers(account, 'POST', urlparse(to).path)
response = requests.post(to, json=jsn, headers=headers)
    if 400 <= response.status_code < 600:
raise Exception('accept post error')
| 38.88 | 192 | 0.68107 | 119 | 972 | 5.495798 | 0.521008 | 0.033639 | 0.055046 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012422 | 0.171811 | 972 | 24 | 193 | 40.5 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0.167695 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.227273 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
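sign_headers() only needs an object exposing ap_id() and a private_key PEM, so it can be exercised without the full actor model. The stub below is hypothetical (field names are illustrative, and a real RSA key in PEM form is still required to actually sign):

class StubAccount:
    def __init__(self, actor_url, private_key_pem):
        self._actor_url = actor_url
        self.private_key = private_key_pem

    def ap_id(self):
        return self._actor_url

# With a valid PEM key, the call below returns a dict of the form
# {'Date': '...isoformat...', 'Signature': 'keyId="...",algorithm="rsa-sha256",...'}
# headers = sign_headers(StubAccount('https://example.com/actor', pem), 'POST', '/inbox')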
2c74d6e02e3c3e03fa94c6d04730c15d838799a4 | 4,230 | py | Python | account/views.py | josephdubon/boilerplate_image_share_app | e079715577ca112e4de234c8a35dde73639c3366 | [
"Unlicense"
] | null | null | null | account/views.py | josephdubon/boilerplate_image_share_app | e079715577ca112e4de234c8a35dde73639c3366 | [
"Unlicense"
] | 3 | 2021-09-22T18:45:21.000Z | 2022-03-12T00:58:12.000Z | account/views.py | josephdubon/boilerplate_image_share_app | e079715577ca112e4de234c8a35dde73639c3366 | [
"Unlicense"
] | null | null | null | from django.http import HttpResponse
from django.shortcuts import render
from django.contrib import messages
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from .forms import (
LoginForm,
UserRegistrationForm,
UserEditForm,
ProfileEditForm
)
from .models import Profile
# Login view
def user_login(request):
if request.method == "POST":
        # Instantiate the form with the submitted data.
form = LoginForm(request.POST)
        # Check whether the form is valid. If it is not, the form errors are
        # displayed in the template (e.g. if the user left a field empty).
if form.is_valid():
cd = form.cleaned_data
# Authenticate the user against the database using the authenticate() method.
user = authenticate(request,
username=cd['username'],
password=cd['password']
)
if user is not None:
                # If the user is registered and active, log them in
if user.is_active:
login(request, user)
return HttpResponse('Authenticated successfully')
else:
# If user account is disabled
return HttpResponse('Disabled account')
else:
                # If the user account does not exist
return HttpResponse('Invalid login')
else:
# Return clean form
form = LoginForm()
return render(request, 'account/login.html', {
'form': form
})
# Dashboard view
# Check if current user is authenticated
@login_required
def dashboard(request):
return render(request,
'account/dashboard.html',
{
'section': 'dashboard'
})
# User registration view
def register(request):
if request.method == 'POST':
user_form = UserRegistrationForm(request.POST)
if user_form.is_valid():
# Create a new user obj but don't save yet
new_user = user_form.save(commit=False)
# Set the chosen password
            # For security reasons, instead of saving the raw password entered by
            # the user, use the set_password() method of the user model, which handles hashing.
new_user.set_password(
user_form.cleaned_data['password'])
# Save the user obj
new_user.save()
            # Create an empty profile for the user
Profile.objects.create(user=new_user)
return render(request,
'account/register_done.html',
{
'new_user': new_user
})
else:
user_form = UserRegistrationForm()
return render(request,
'account/register.html',
{
'user_form': user_form
})
# Edit user and profile view
@login_required
def edit(request):
if request.method == 'POST':
user_form = UserEditForm(instance=request.user,
data=request.POST)
profile_form = ProfileEditForm(
instance=request.user.profile,
data=request.POST,
files=request.FILES)
if user_form.is_valid() and profile_form.is_valid():
user_form.save()
profile_form.save()
# Send message to user on success
messages.success(request,
'Profile updated successfully')
else:
# Send message to user on fail
messages.error(request,
'Error updating your profile')
else:
user_form = UserEditForm(instance=request.user)
profile_form = ProfileEditForm(
instance=request.user.profile)
return render(request,
'account/edit.html',
{'user_form': user_form,
'profile_form': profile_form})
| 35.25 | 105 | 0.56052 | 438 | 4,230 | 5.321918 | 0.280822 | 0.044616 | 0.028314 | 0.05577 | 0.192192 | 0.103818 | 0.073788 | 0 | 0 | 0 | 0 | 0 | 0.367139 | 4,230 | 119 | 106 | 35.546218 | 0.870751 | 0.215839 | 0 | 0.229885 | 0 | 0 | 0.093475 | 0.020941 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045977 | false | 0.034483 | 0.08046 | 0.011494 | 0.218391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
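For these views to be reachable they still need URLconf entries; a minimal, hypothetical account/urls.py wiring (route names are illustrative, not taken from the project):

from django.urls import path
from . import views

urlpatterns = [
    path('login/', views.user_login, name='login'),
    path('register/', views.register, name='register'),
    path('edit/', views.edit, name='edit'),
    path('', views.dashboard, name='dashboard'),
]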
2c7775424a57f17a890bdf4b7a7ff0cf9abb892f | 932 | py | Python | admin_tasks.py | draem0507/rietveld | 70bda77edf3a642ef51ecc2d73c165345af5fdee | [
"Apache-2.0"
] | 583 | 2015-03-28T23:49:34.000Z | 2022-03-25T10:58:07.000Z | admin_tasks.py | draem0507/rietveld | 70bda77edf3a642ef51ecc2d73c165345af5fdee | [
"Apache-2.0"
] | 61 | 2015-04-02T01:08:34.000Z | 2021-05-27T16:19:35.000Z | admin_tasks.py | draem0507/rietveld | 70bda77edf3a642ef51ecc2d73c165345af5fdee | [
"Apache-2.0"
] | 175 | 2015-03-29T13:06:36.000Z | 2022-03-31T07:02:20.000Z | """Collection of mapreduce jobs."""
import logging
from mapreduce import operation as op
from codereview.models import Account, Issue
def delete_unused_accounts(account):
"""Delete accounts for uses that don't participate in any reviews."""
email = account.user.email()
if Issue.query(Issue.owner_email == email).get():
return
if Issue.query(Issue.cc == email).get():
return
if Issue.query(Issue.reviewers == email).get():
return
  logging.warning('Deleting %s' % email)
yield op.db.Delete(account)
def update_account_schema(account):
"""Update schema for all Accounts by saving them back to the datastore."""
# Make sure we don't alter the modified time of any accounts. Because of how
# mapreduce is designed, we just set this to False on every function
# invocation (since there's no convenient once-per-instance place to do it).
Account.modified.auto_now = False
yield op.db.Put(account)
| 31.066667 | 78 | 0.730687 | 140 | 932 | 4.821429 | 0.585714 | 0.031111 | 0.053333 | 0.075556 | 0.091852 | 0.091852 | 0.091852 | 0 | 0 | 0 | 0 | 0 | 0.167382 | 932 | 29 | 79 | 32.137931 | 0.869845 | 0.407725 | 0 | 0.1875 | 0 | 0 | 0.020599 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.1875 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c778b23ac35cdce2fabe050d19757ad54f60c5c | 3,426 | py | Python | core/pwc_tiny.py | hologerry/RAFT | a80209c442ea2e2a8860af3c9ca96e62498533ca | [
"BSD-3-Clause"
] | null | null | null | core/pwc_tiny.py | hologerry/RAFT | a80209c442ea2e2a8860af3c9ca96e62498533ca | [
"BSD-3-Clause"
] | null | null | null | core/pwc_tiny.py | hologerry/RAFT | a80209c442ea2e2a8860af3c9ca96e62498533ca | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from core.corr import AlternateCorrBlock, CorrBlock
from core.extractor import BasicEncoder, SmallEncoder
from core.mobilenetv3 import MobileNetV3
from core.pwc_decoder import Decoder
from core.pwc_refiner import Refiner
from core.update import BasicUpdateBlock, SmallUpdateBlock, TinyUpdateBlock
from core.utils.utils import bilinear_sampler, coords_grid, upflow8, upflow4
try:
autocast = torch.cuda.amp.autocast
except AttributeError:
# dummy autocast for PyTorch < 1.6
class autocast:
def __init__(self, enabled):
pass
def __enter__(self):
pass
def __exit__(self, *args):
pass
class PWCTiny(nn.Module):
def __init__(self, args):
super(PWCTiny, self).__init__()
self.args = args
self.extractor = MobileNetV3('mobilenet_v3_small', last_stage=5, norm_type='instance')
self.decoder_2 = Decoder(level=2)
self.decoder_3 = Decoder(level=3)
self.decoder_4 = Decoder(level=4)
self.decoder_5 = Decoder(level=5)
self.refiner = Refiner()
def is_training(self):
return self.args.mode == 'train'
def freeze_bn(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
def forward(self, image1, image2, iters=12, flow_init=None, upsample=True, test_mode=False):
""" Estimate optical flow between pair of frames """
# image1, image2 = torch.chunk(x, 2, dim=1)
image1 = 2 * (image1 / 255.0) - 1.0
image2 = 2 * (image2 / 255.0) - 1.0
image1 = image1.contiguous()
image2 = image2.contiguous()
# run the feature network
feats1 = self.extractor(image1)
feats2 = self.extractor(image2)
estimate_out = self.decoder_5(feats1['C5'], feats2['C5'], None)
flow_5 = estimate_out['flow']
estimate_out = self.decoder_4(feats1['C4'], feats2['C4'], estimate_out)
flow_4 = estimate_out['flow']
estimate_out = self.decoder_3(feats1['C3'], feats2['C3'], estimate_out)
flow_3 = estimate_out['flow']
estimate_out = self.decoder_2(feats1['C2'], feats2['C2'], estimate_out)
flow_2 = estimate_out['flow'] + self.refiner(estimate_out['feat'])
flow_out = upflow4(flow_2)
if not test_mode:
return flow_out, flow_2, flow_3, flow_4, flow_5
else:
return flow_out * 20.0, flow_out * 20.0
if __name__ == '__main__':
import argparse
from ptflops import get_model_complexity_info
parser = argparse.ArgumentParser()
parser.add_argument('--name', default='pwc_tiny', help="name your experiment")
parser.add_argument('--mode', default='train', help="")
parser.add_argument('--small', action='store_true', help='use small model')
parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
args = parser.parse_args()
net = PWCTiny(args).cuda()
with torch.cuda.device(0):
macs, params = get_model_complexity_info(net, (6, 160, 96), as_strings=True, print_per_layer_stat=True, verbose=True)
print('{:<30} {:<8}'.format('Computational complexity: ', macs))
print('{:<30} {:<8}'.format('Number of parameters: ', params))
    data = torch.randn((2, 6, 224, 224)).cuda()
    image1, image2 = torch.chunk(data, 2, dim=1)  # forward() expects two 3-channel frames
    out = net(image1, image2)
| 34.606061 | 125 | 0.646235 | 447 | 3,426 | 4.744966 | 0.364653 | 0.062235 | 0.049505 | 0.04149 | 0.073079 | 0.052334 | 0.052334 | 0 | 0 | 0 | 0 | 0.041272 | 0.22913 | 3,426 | 98 | 126 | 34.959184 | 0.761833 | 0.042323 | 0 | 0.041096 | 0 | 0 | 0.08318 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09589 | false | 0.041096 | 0.178082 | 0.013699 | 0.342466 | 0.041096 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c784ec9ca5bd9cab47e3e48eed02b1261eb6710 | 1,631 | py | Python | dependencies_analysis/src/utils.py | afdaniele/director | 845ba027f9009803fcf77f44874f2ab9d7ab72e3 | [
"BSD-3-Clause"
] | null | null | null | dependencies_analysis/src/utils.py | afdaniele/director | 845ba027f9009803fcf77f44874f2ab9d7ab72e3 | [
"BSD-3-Clause"
] | null | null | null | dependencies_analysis/src/utils.py | afdaniele/director | 845ba027f9009803fcf77f44874f2ab9d7ab72e3 | [
"BSD-3-Clause"
] | null | null | null | '''
Created by:
@author: Andrea F Daniele - TTIC - Toyota Technological Institute at Chicago
Feb 6, 2019 - Mountain View, CA
'''
import sys
import numpy as np
from os.path import isfile
class ProgressBar(object):
def __init__(self, maxVal=100, precision=5, doneMessage=True ):
self.maxVal = float( max(1.0, maxVal) )
self.doneMessage = doneMessage
self.precision = precision
self.currentLength = -1
self.currentVal = 0.0
self.barParts = [ '[0%' ]
for i in range(10,101,10): self.barParts.extend( ['.'] * self.precision + ['%d%%' % i] )
self.barParts[-1] += ']'
if doneMessage: self.barParts[-1] += ' Done!'
self.barLength = len(self.barParts)
self.step = float(self.barLength-1) / self.maxVal
def next(self):
newLength = int(np.floor( (self.currentVal + 1.0) * self.step ))
if newLength > self.currentLength and newLength <= self.barLength:
for i in range(self.currentLength+1, newLength+1):
sys.stdout.write(self.barParts[i]); sys.stdout.flush()
            if newLength == self.barLength-1: print()  # newline once the bar completes (bare Python 2 'print' fixed)
self.currentLength = newLength
self.currentVal += 1
def setMessage(self, message):
self.barParts[-1] = '100%%] :: %s\n' % message
class FileReader(object):
def __init__(self, file_path):
if not isfile(file_path):
raise ValueError("The file '%s' does not exist" % file_path)
self._file_path = file_path
# open file
self._lines = None
def lines(self):
# read lines
if not self._lines:
with open(self._file_path, "r") as fo:
self._lines = fo.readlines()
# return lines
return self._lines
| 28.614035 | 92 | 0.650521 | 221 | 1,631 | 4.710407 | 0.402715 | 0.080692 | 0.037464 | 0.032661 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027259 | 0.212753 | 1,631 | 56 | 93 | 29.125 | 0.783489 | 0.095647 | 0 | 0 | 0 | 0 | 0.039617 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135135 | false | 0 | 0.081081 | 0 | 0.297297 | 0.027027 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
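Typical use of the ProgressBar above: size it to the number of work items and call next() once per item (sketch; the work loop is illustrative):

items = range(200)
pbar = ProgressBar(maxVal=len(items), precision=5)
for _ in items:
    pbar.next()  # emits '[0%.....10%...' incrementally and 'Done!' at the end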
2c78b387f4bdb0a5aec5ed308c83c4da0844e7b4 | 1,673 | py | Python | attendees/persons/serializers/folk_serializer.py | xjlin0/-attendees30 | 48a2f2cbec11ec471d7a40d24903b48890feebf9 | [
"MIT"
] | null | null | null | attendees/persons/serializers/folk_serializer.py | xjlin0/-attendees30 | 48a2f2cbec11ec471d7a40d24903b48890feebf9 | [
"MIT"
] | null | null | null | attendees/persons/serializers/folk_serializer.py | xjlin0/-attendees30 | 48a2f2cbec11ec471d7a40d24903b48890feebf9 | [
"MIT"
] | null | null | null | from attendees.persons.models import Folk, FolkAttendee, Relation, Utility, Attendee
from attendees.whereabouts.serializers import PlaceSerializer
from rest_framework import serializers
class FolkSerializer(serializers.ModelSerializer):
places = PlaceSerializer(many=True, read_only=True)
class Meta:
model = Folk
fields = '__all__'
def create(self, validated_data):
"""
Create or update `Family` instance, given the validated data.
"""
raw_data = self._kwargs.get('data', {})
family_id = raw_data.get('id')
folk, folk_created = Folk.objects.update_or_create(
id=family_id,
defaults=validated_data,
)
if folk_created:
for attendee_id in raw_data.get('attendees', []):
                unspecified_role = Relation.objects.filter(title='unspecified').first()  # first() must be called to run the query
attendee = Attendee.objects.get(pk=attendee_id)
FolkAttendee.objects.update_or_create(
attendee=attendee,
folk=folk,
defaults={
'attendee': attendee,
'folk': folk,
'role': unspecified_role,
'start': Utility.now_with_timezone()
},
)
return folk
def update(self, instance, validated_data):
"""
Update and return an existing `Family` instance, given the validated data.
"""
obj, created = Folk.objects.update_or_create(
id=instance.id,
defaults=validated_data,
)
return obj
| 30.418182 | 85 | 0.569038 | 160 | 1,673 | 5.7625 | 0.39375 | 0.084599 | 0.048807 | 0.06833 | 0.149675 | 0.149675 | 0.073753 | 0 | 0 | 0 | 0 | 0 | 0.344889 | 1,673 | 54 | 86 | 30.981481 | 0.841241 | 0.081291 | 0 | 0.055556 | 0 | 0 | 0.036266 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.083333 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c7a53fa7046ae0ca4fe288507356e009d06bfd4 | 6,826 | py | Python | V4 - HybridTTS/Utils/functions.py | riju-stone/chatbot | cfbd1a162f777440b138e2824d07f70cca8c4a48 | [
"MIT"
] | null | null | null | V4 - HybridTTS/Utils/functions.py | riju-stone/chatbot | cfbd1a162f777440b138e2824d07f70cca8c4a48 | [
"MIT"
] | 1 | 2021-10-06T15:56:14.000Z | 2021-10-07T04:54:15.000Z | V4 - HybridTTS/Utils/functions.py | riju-stone/chatbot | cfbd1a162f777440b138e2824d07f70cca8c4a48 | [
"MIT"
] | null | null | null | import string
import random
from nltk.tokenize import TweetTokenizer
import re
import time
import sys
import numpy as np
# source: https://gist.github.com/nealrs/96342d8231b75cf4bb82
cList = {
"ain't": "am not",
"aren't": "are not",
"can't": "cannot",
"can't've": "cannot have",
"'cause": "because",
"could've": "could have",
"couldn't": "could not",
"couldn't've": "could not have",
"didn't": "did not",
"doesn't": "does not",
"don't": "do not",
"hadn't": "had not",
"hadn't've": "had not have",
"hasn't": "has not",
"haven't": "have not",
"he'd": "he would",
"he'd've": "he would have",
"he'll": "he will",
"he'll've": "he will have",
"he's": "he is",
"how'd": "how did",
"how're": "how are",
"how'd'y": "how do you",
"how'll": "how will",
"how's": "how is",
"i'd": "I would",
"i'd've": "I would have",
"i'll": "I will",
"i'll've": "I will have",
"i'm": "I am",
"i've": "i have",
"isn't": "is not",
"it'd": "it had",
"it'd've": "it would have",
"it'll": "it will",
"it'll've": "it will have",
"it's": "it is",
"let's": "let us",
"ma'am": "madam",
"mayn't": "may not",
"might've": "might have",
"mightn't": "might not",
"mightn't've": "might not have",
"must've": "must have",
"mustn't": "must not",
"mustn't've": "must not have",
"needn't": "need not",
"needn't've": "need not have",
"o'clock": "of the clock",
"oughtn't": "ought not",
"oughtn't've": "ought not have",
"shan't": "shall not",
"sha'n't": "shall not",
"shan't've": "shall not have",
"she'd": "she would",
"she'd've": "she would have",
"she'll": "she will",
"she'll've": "she will have",
"she's": "she is",
"should've": "should have",
"shouldn't": "should not",
"shouldn't've": "should not have",
"so've": "so have",
"so's": "so is",
"that'd": "that would",
"that'd've": "that would have",
"that's": "that is",
"there'd": "there had",
"there'd've": "there would have",
"there's": "there is",
"they'd": "they would",
"they'd've": "they would have",
"they'll": "they will",
"they'll've": "they will have",
"they're": "they are",
"they've": "they have",
"to've": "to have",
"wasn't": "was not",
"we'd": "we had",
"we'd've": "we would have",
"we'll": "we will",
"we'll've": "we will have",
"we're": "we are",
"we've": "we have",
"weren't": "were not",
"what'll": "what will",
"what'll've": "what will have",
"what're": "what are",
"what's": "what is",
"what've": "what have",
"when's": "when is",
"when've": "when have",
"where'd": "where did",
"where's": "where is",
"where've": "where have",
"who'll": "who will",
"who'll've": "who will have",
"who's": "who is",
"who've": "who have",
"why's": "why is",
"why've": "why have",
"will've": "will have",
"won't": "will not",
"won't've": "will not have",
"would've": "would have",
"wouldn't": "would not",
"wouldn't've": "would not have",
"y'all": "you all",
"y'alls": "you alls",
"y'all'd": "you all would",
"y'all'd've": "you all would have",
"y'all're": "you all are",
"y'all've": "you all have",
"you'd": "you had",
"you'd've": "you would have",
"you'll": "you will",
"you'll've": "you will have",
"you're": "you are",
"you've": "you have"
}
c_re = re.compile('(%s)' % '|'.join(cList.keys()))
def expandContractions(text):
global cList
global c_re
def replace(match):
return cList[match.group(0)]
return c_re.sub(replace, text)
def replace(text, regex, replacement):
def replace_fn(match):
return replacement
return regex.sub(replace_fn, text)
def clean(text):
filter_list_1 = ['’']
replacement_1 = "'"
regex_1 = re.compile('(%s)' % '|'.join(filter_list_1))
text = replace(text, regex_1, replacement_1)
filter_list_2 = ['\[wp\]', 'eli5\:', 'cmv\:',
'\[d\]', '\[r\]', '\[n\]', '\>\;', '/r/', 'r/']
replacement_2 = ''
regex_2 = re.compile('(%s)' % '|'.join(filter_list_2))
text = replace(text, regex_2, replacement_2)
filter_list_3 = ['@[a-z0-9]+', '\/u\/[0-9a-z]+',
'\[[0-9a-z]+\]\(\/u\/[0-9a-z]+\)']
replacement_3 = 'someone'
regex_3 = re.compile('(%s)' % '|'.join(filter_list_3))
text = replace(text, regex_3, replacement_3)
filter_list_4 = ['\[', '\]']
replacement_4 = ' '
regex_4 = re.compile('(%s)' % '|'.join(filter_list_4))
text = replace(text, regex_4, replacement_4)
return text
def simple_preprocess(text, return_tokenized=False, for_speech=False):
tw = TweetTokenizer()
text = text.lower()
if not for_speech:
text = clean(text)
else:
text = text.replace("*", "")
text = expandContractions(text)
# sometimes two iterations are needed for double contractions
text = expandContractions(text)
if for_speech:
text = re.sub(
r"[' '\(]*https\:[^ ]*|[' '\(]*http\:[^ ]*|[' ']*www\..[^ ]*", ' ', text)
else:
text = re.sub(
r"[' '\(]*https\:[^ ]*|[' '\(]*http\:[^ ]*|[' ']*www\..[^ ]*", ' (url) ', text)
if return_tokenized:
tokenized_text = tw.tokenize(text)
return tokenized_text
else:
return text
# "but hey aren ’ t snobby . the wayne ’ s are well known for their philanthropy ."
#print(simple_preprocess("[Zaflis000](/u/Zaflis000) @fodor000, > [can] http://plato.stanford.edu/entries/other-minds/ you're she'll do this for me? http://plato.stanford.edu/entries/other-minds/"))
# Adapted from: https://stackoverflow.com/questions/9246076/how-to-print-one-character-at-a-time-on-one-line
def delay_print(s, t=0.05):
for c in s:
sys.stdout.write(c)
sys.stdout.flush()
time.sleep(t)
def cosine_similarity_nd(embd1, embd2):
numerator = np.multiply(embd1, embd2)
numerator = np.sum(numerator, axis=1)
eucli_norm_1 = np.sqrt(np.sum(np.power(embd1, 2), axis=1))
eucli_norm_2 = np.sqrt(np.sum(np.power(embd2, 2), axis=1))
denominator = np.multiply(eucli_norm_1, eucli_norm_2)
    denominator = denominator + 1e-10 # avoid division by zero
cosine_similarity = np.divide(numerator, denominator)
return cosine_similarity.reshape((-1))
def normalize(values):
# shift and normalize - create probability distribution
minimum_val = np.amin(values)
values = values - minimum_val
norm_denom = np.sum(values)
if norm_denom == 0:
size = values.shape[-1]
return np.asarray([1/size for _ in range(size)], np.float32)
else:
return values/norm_denom
| 28.206612 | 200 | 0.547612 | 1,005 | 6,826 | 3.654726 | 0.228856 | 0.008984 | 0.013613 | 0.019058 | 0.068064 | 0.068064 | 0.032126 | 0 | 0 | 0 | 0 | 0.017456 | 0.236302 | 6,826 | 241 | 201 | 28.323651 | 0.687128 | 0.08409 | 0 | 0.049505 | 0 | 0 | 0.347534 | 0.004965 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044554 | false | 0 | 0.034653 | 0.009901 | 0.128713 | 0.004951 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
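Both numeric helpers above operate row-wise on 2-D arrays; a quick standalone check:

import numpy as np

a = np.array([[1.0, 0.0], [1.0, 1.0]])
b = np.array([[1.0, 0.0], [1.0, 0.0]])
print(cosine_similarity_nd(a, b))            # ~[1.0, 0.7071], one cosine per row
print(normalize(np.array([2.0, 1.0, 1.0])))  # min shifted to 0 -> [1.0, 0.0, 0.0]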
2c7b3b7be42370503ba918b7f804028312f44fe9 | 896 | py | Python | reverse_integer.py | alexhla/programming-problems-in-python | 2db759f6196c026f43c6f2d6c9104d04a6850829 | [
"MIT"
] | null | null | null | reverse_integer.py | alexhla/programming-problems-in-python | 2db759f6196c026f43c6f2d6c9104d04a6850829 | [
"MIT"
] | null | null | null | reverse_integer.py | alexhla/programming-problems-in-python | 2db759f6196c026f43c6f2d6c9104d04a6850829 | [
"MIT"
] | null | null | null | class Solution:
def reverse(self, x):
sign = -1 if x<0 else 1
x = abs(x) # floor division ALWAYS rounds down (-1//10 = -1)
n = 0 # thus save sign and take absolute value
while x != 0:
n *= 10 # left shift digits one place
n += x%10 # push least significant digit
x //= 10 # pop processed digit
		if sign * n < -2**31 or sign * n > (2**31)-1: # check for 32-bit overflow in either direction
			return 0
		else:
			return sign * n # recombine with sign and return
def reverse2(self, x):
sign = -1 if x<0 else 1
s = str(abs(x)) # convert absolute value of int to string to avoid dash
n = sign*int(s[::-1]) # reverse string and convert back to int adding sign last
if (n > (2**31)-1) or (n < -2**31): # check for overflow
return 0
else:
return n
obj = Solution()
num = 123456789
print("Reversing {}" .format(num))
print("Answer-1: {}" .format(obj.reverse(num)))
print("Answer-2: {}" .format(obj.reverse2(num))) | 28.903226 | 81 | 0.626116 | 153 | 896 | 3.666667 | 0.431373 | 0.035651 | 0.02139 | 0.035651 | 0.210339 | 0.185383 | 0.185383 | 0.067736 | 0.067736 | 0 | 0 | 0.065407 | 0.232143 | 896 | 31 | 82 | 28.903226 | 0.75 | 0.382813 | 0 | 0.230769 | 0 | 0 | 0.066298 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0 | 0 | 0.269231 | 0.115385 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c827b375cd33ffc35313851f36e3892e6b7ddad | 17,537 | py | Python | revisiting_rainbow/Agents/quantile_agent_new.py | jiawei415/revisiting_rainbow | 7cd2bc6f64d08ebc2233d93210063cc64d2598a7 | [
"Apache-2.0"
] | 72 | 2020-11-24T22:12:59.000Z | 2022-03-21T21:18:21.000Z | revisiting_rainbow/Agents/quantile_agent_new.py | jiawei415/revisiting_rainbow | 7cd2bc6f64d08ebc2233d93210063cc64d2598a7 | [
"Apache-2.0"
] | 2 | 2021-06-02T08:01:10.000Z | 2021-07-03T03:11:54.000Z | revisiting_rainbow/Agents/quantile_agent_new.py | jiawei415/revisiting_rainbow | 7cd2bc6f64d08ebc2233d93210063cc64d2598a7 | [
"Apache-2.0"
] | 6 | 2021-01-13T22:15:17.000Z | 2021-11-04T04:00:05.000Z | """An extension of Rainbow to perform quantile regression.
This loss is computed as in "Distributional Reinforcement Learning with Quantile
Regression" - Dabney et. al, 2017"
Specifically, we implement the following components:
* n-step updates
* prioritized replay
* double_dqn
* noisy
* dueling
"""
import copy
import time
import functools
from dopamine.jax import networks
from dopamine.jax.agents.dqn import dqn_agent
from dopamine.replay_memory import prioritized_replay_buffer
import gin
import jax
import jax.numpy as jnp
import numpy as onp
import tensorflow as tf
@functools.partial(jax.vmap, in_axes=(None, None, 0, 0, 0, None))
def target_distributionDouble(model, target_network, next_states, rewards, terminals,
                              cumulative_gamma):
  """Builds the Quantile target distribution (Double DQN action selection) as per Dabney et al. (2017).
  Args:
    model: Jax Module used for the online network, which selects the next action.
    target_network: Jax Module used for the target network.
next_states: numpy array of batched next states.
rewards: numpy array of batched rewards.
terminals: numpy array of batched terminals.
cumulative_gamma: float, cumulative gamma to use (static_argnum).
Returns:
The target distribution from the replay.
"""
is_terminal_multiplier = 1. - terminals.astype(jnp.float32)
# Incorporate terminal state to discount factor.
gamma_with_terminal = cumulative_gamma * is_terminal_multiplier
next_state_target_outputs = model(next_states)
q_values = jnp.squeeze(next_state_target_outputs.q_values)
next_qt_argmax = jnp.argmax(q_values)
next_dist = target_network(next_states)
logits = jnp.squeeze(next_dist.logits)
next_logits = logits[next_qt_argmax]
return jax.lax.stop_gradient(rewards + gamma_with_terminal * next_logits)
@functools.partial(jax.vmap, in_axes=(None, 0, 0, 0, None))
def target_distribution(target_network, next_states, rewards, terminals,
cumulative_gamma):
"""Builds the Quantile target distribution as per Dabney et al. (2017).
Args:
target_network: Jax Module used for the target network.
next_states: numpy array of batched next states.
rewards: numpy array of batched rewards.
terminals: numpy array of batched terminals.
cumulative_gamma: float, cumulative gamma to use (static_argnum).
Returns:
The target distribution from the replay.
"""
is_terminal_multiplier = 1. - terminals.astype(jnp.float32)
# Incorporate terminal state to discount factor.
gamma_with_terminal = cumulative_gamma * is_terminal_multiplier
next_state_target_outputs = target_network(next_states)
q_values = jnp.squeeze(next_state_target_outputs.q_values)
next_qt_argmax = jnp.argmax(q_values)
logits = jnp.squeeze(next_state_target_outputs.logits)
next_logits = logits[next_qt_argmax]
return jax.lax.stop_gradient(rewards + gamma_with_terminal * next_logits)
@functools.partial(jax.jit, static_argnums=(0, 9, 10, 11, 12))
def train(network_def, target_params, optimizer, states, actions, next_states, rewards,
terminals, loss_weights, kappa, num_atoms, cumulative_gamma, double_dqn, rng):
"""Run a training step."""
online_params = optimizer.target
def loss_fn(params,rng_input, target, loss_multipliers):
def q_online(state):
return network_def.apply(params, state, rng=rng_input)
logits = jax.vmap(q_online)(states).logits
logits = jnp.squeeze(logits)
# Fetch the logits for its selected action. We use vmap to perform this
# indexing across the batch.
chosen_action_logits = jax.vmap(lambda x, y: x[y])(logits, actions)
bellman_errors = (target[:, None, :] -
chosen_action_logits[:, :, None]) # Input `u' of Eq. 9.
# Eq. 9 of paper.
huber_loss = (
(jnp.abs(bellman_errors) <= kappa).astype(jnp.float32) *
0.5 * bellman_errors ** 2 +
(jnp.abs(bellman_errors) > kappa).astype(jnp.float32) *
kappa * (jnp.abs(bellman_errors) - 0.5 * kappa))
tau_hat = ((jnp.arange(num_atoms, dtype=jnp.float32) + 0.5) /
num_atoms) # Quantile midpoints. See Lemma 2 of paper.
# Eq. 10 of paper.
tau_bellman_diff = jnp.abs(
tau_hat[None, :, None] - (bellman_errors < 0).astype(jnp.float32))
quantile_huber_loss = tau_bellman_diff * huber_loss
# Sum over tau dimension, average over target value dimension.
loss = jnp.sum(jnp.mean(quantile_huber_loss, 2), 1)
mean_loss = jnp.mean(loss_multipliers * loss)
return mean_loss, loss
rng, rng2, rng3, rng4 = jax.random.split(rng, 4)
def q_target(state):
return network_def.apply(target_params, state, rng=rng2)
def q_target_online(state):
return network_def.apply(online_params, state, rng=rng4)
if double_dqn:
target = target_distributionDouble(q_target_online, q_target, next_states, rewards, terminals, cumulative_gamma)
else:
target = target_distribution(q_target, next_states, rewards, terminals, cumulative_gamma)
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(mean_loss, loss), grad = grad_fn(online_params, rng3, target, loss_weights)
optimizer = optimizer.apply_gradient(grad)
return optimizer, loss, mean_loss
@functools.partial(jax.jit, static_argnums=(0, 4, 5, 6, 7, 8, 10, 11))
def select_action(network_def, params, state, rng, num_actions, eval_mode,
epsilon_eval, epsilon_train, epsilon_decay_period,
training_steps, min_replay_history, epsilon_fn):
epsilon = jnp.where(eval_mode,
epsilon_eval,
epsilon_fn(epsilon_decay_period,
training_steps,
min_replay_history,
epsilon_train))
rng, rng1, rng2, rng3 = jax.random.split(rng, num=4)
selected_action = jnp.argmax(network_def.apply(params, state, rng=rng3).q_values)
p = jax.random.uniform(rng1)
return rng, jnp.where(p <= epsilon,
jax.random.randint(rng2, (), 0, num_actions),
selected_action)
@gin.configurable
class JaxQuantileAgentNew(dqn_agent.JaxDQNAgent):
"""An implementation of Quantile regression DQN agent."""
def __init__(self,
num_actions,
kappa=1.0,
num_atoms=200,
noisy = False,
dueling = False,
initzer = 'variance_scaling',
net_conf = None,
env = "CartPole",
normalize_obs = True,
hidden_layer=2,
neurons=512,
double_dqn=False,
replay_scheme='prioritized',
optimizer='adam',
network=networks.QuantileNetwork,
epsilon_fn=dqn_agent.linearly_decaying_epsilon,
seed=None):
"""Initializes the agent and constructs the Graph.
Args:
num_actions: Int, number of actions the agent can take at any state.
observation_shape: tuple of ints or an int. If single int, the observation
is assumed to be a 2D square.
observation_dtype: DType, specifies the type of the observations. Note
that if your inputs are continuous, you should set this to jnp.float32.
stack_size: int, number of frames to use in state stack.
network: tf.Keras.Model, expects 3 parameters: num_actions, num_atoms,
network_type. A call to this object will return an instantiation of the
network provided. The network returned can be run with different inputs
to create different outputs. See
dopamine.discrete_domains.jax.networks.QuantileNetwork as an example.
kappa: Float, Huber loss cutoff.
num_atoms: Int, the number of buckets for the value function distribution.
gamma: Float, exponential decay factor as commonly used in the RL
literature.
update_horizon: Int, horizon at which updates are performed, the 'n' in
n-step update.
min_replay_history: Int, number of stored transitions for training to
start.
update_period: Int, period between DQN updates.
target_update_period: Int, ppdate period for the target network.
epsilon_fn: Function expecting 4 parameters: (decay_period, step,
warmup_steps, epsilon), and which returns the epsilon value used for
exploration during training.
epsilon_train: Float, final epsilon for training.
epsilon_eval: Float, epsilon during evaluation.
epsilon_decay_period: Int, number of steps for epsilon to decay.
replay_scheme: String, replay memory scheme to be used. Choices are:
uniform - Standard (DQN) replay buffer (Mnih et al., 2015)
prioritized - Prioritized replay buffer (Schaul et al., 2015)
optimizer: str, name of optimizer to use.
summary_writer: SummaryWriter object for outputting training statistics.
Summary writing disabled if set to None.
summary_writing_frequency: int, frequency with which summaries will be
written. Lower values will result in slower training.
allow_partial_reload: bool, whether we allow reloading a partial agent
(for instance, only the network parameters).
"""
seed = int(time.time() * 1e6) if seed is None else seed
self._num_atoms = num_atoms
self._kappa = kappa
self._replay_scheme = replay_scheme
self._double_dqn = double_dqn
self._net_conf = net_conf
self._env = env
self._normalize_obs = normalize_obs
self._hidden_layer= hidden_layer
self._neurons=neurons
self._noisy = noisy
self._dueling = dueling
self._initzer = initzer
self._rng = jax.random.PRNGKey(seed)
super(JaxQuantileAgentNew, self).__init__(
num_actions=num_actions,
optimizer=optimizer,
        epsilon_fn = dqn_agent.identity_epsilon if self._noisy else epsilon_fn,
network=functools.partial(network, num_atoms=self._num_atoms , net_conf=self._net_conf,
env=self._env,
normalize_obs=self._normalize_obs,
hidden_layer=self._hidden_layer,
neurons=self._neurons,
noisy=self._noisy,
dueling=self._dueling,
initzer=self._initzer))
def _build_networks_and_optimizer(self):
self._rng, rng = jax.random.split(self._rng)
online_network_params = self.network_def.init(rng, x=self.state, rng=self._rng)
optimizer_def = dqn_agent.create_optimizer(self._optimizer_name)
self.optimizer = optimizer_def.create(online_network_params)
self.target_network_params = copy.deepcopy(online_network_params)
def _build_replay_buffer(self):
"""Creates the replay buffer used by the agent."""
if self._replay_scheme not in ['uniform', 'prioritized']:
raise ValueError('Invalid replay scheme: {}'.format(self._replay_scheme))
# Both replay schemes use the same data structure, but the 'uniform' scheme
# sets all priorities to the same value (which yields uniform sampling).
return prioritized_replay_buffer.OutOfGraphPrioritizedReplayBuffer(
observation_shape=self.observation_shape,
stack_size=self.stack_size,
update_horizon=self.update_horizon,
gamma=self.gamma,
observation_dtype=self.observation_dtype)
def begin_episode(self, observation):
self._reset_state()
self._record_observation(observation)
if not self.eval_mode:
self._train_step()
self._rng, self.action = select_action(self.network_def,
self.online_params,
self.state,
self._rng,
self.num_actions,
self.eval_mode,
self.epsilon_eval,
self.epsilon_train,
self.epsilon_decay_period,
self.training_steps,
self.min_replay_history,
self.epsilon_fn)
self.action = onp.asarray(self.action)
return self.action
def step(self, reward, observation):
self._last_observation = self._observation
self._record_observation(observation)
if not self.eval_mode:
self._store_transition(self._last_observation, self.action, reward, False)
self._train_step()
self._rng, self.action = select_action(self.network_def,
self.online_params,
self.state,
self._rng,
self.num_actions,
self.eval_mode,
self.epsilon_eval,
self.epsilon_train,
self.epsilon_decay_period,
self.training_steps,
self.min_replay_history,
self.epsilon_fn)
self.action = onp.asarray(self.action)
return self.action
def _train_step(self):
"""Runs a single training step.
Runs training if both:
(1) A minimum number of frames have been added to the replay buffer.
(2) `training_steps` is a multiple of `update_period`.
Also, syncs weights from online_network to target_network if training steps
is a multiple of target update period.
"""
if self._replay.add_count > self.min_replay_history:
if self.training_steps % self.update_period == 0:
self._sample_from_replay_buffer()
if self._replay_scheme == 'prioritized':
# The original prioritized experience replay uses a linear exponent
# schedule 0.4 -> 1.0. Comparing the schedule to a fixed exponent of
# 0.5 on 5 games (Asterix, Pong, Q*Bert, Seaquest, Space Invaders)
# suggested a fixed exponent actually performs better, except on Pong.
probs = self.replay_elements['sampling_probabilities']
loss_weights = 1.0 / jnp.sqrt(probs + 1e-10)
loss_weights /= jnp.max(loss_weights)
else:
loss_weights = jnp.ones(self.replay_elements['state'].shape[0])
self.optimizer, loss, mean_loss = train(
self.network_def,
self.target_network_params,
self.optimizer,
self.replay_elements['state'],
self.replay_elements['action'],
self.replay_elements['next_state'],
self.replay_elements['reward'],
self.replay_elements['terminal'],
loss_weights,
self._kappa,
self._num_atoms,
self.cumulative_gamma,
self._double_dqn,
self._rng)
if self._replay_scheme == 'prioritized':
# Rainbow and prioritized replay are parametrized by an exponent
# alpha, but in both cases it is set to 0.5 - for simplicity's sake we
# leave it as is here, using the more direct sqrt(). Taking the square
# root "makes sense", as we are dealing with a squared loss. Add a
# small nonzero value to the loss to avoid 0 priority items. While
# technically this may be okay, setting all items to 0 priority will
# cause troubles, and also result in 1.0 / 0.0 = NaN correction terms.
self._replay.set_priority(self.replay_elements['indices'],
jnp.sqrt(loss + 1e-10))
if self.summary_writer is not None:
summary = tf.compat.v1.Summary(value=[
tf.compat.v1.Summary.Value(tag='QuantileLoss',
simple_value=mean_loss)])
self.summary_writer.add_summary(summary, self.training_steps)
if self.training_steps % self.target_update_period == 0:
self._sync_weights()
self.training_steps += 1
def _store_transition(self,
last_observation,
action,
reward,
is_terminal,
priority=None):
"""Stores a transition when in training mode.
Stores the following tuple in the replay buffer (last_observation, action,
reward, is_terminal, priority).
Args:
last_observation: Last observation, type determined via observation_type
parameter in the replay_memory constructor.
action: An integer, the action taken.
reward: A float, the reward.
is_terminal: Boolean indicating if the current state is a terminal state.
priority: Float. Priority of sampling the transition. If None, the default
priority will be used. If replay scheme is uniform, the default priority
is 1. If the replay scheme is prioritized, the default priority is the
maximum ever seen [Schaul et al., 2015].
"""
if priority is None:
if self._replay_scheme == 'uniform':
priority = 1.
else:
priority = self._replay.sum_tree.max_recorded_priority
if not self.eval_mode:
self._replay.add(last_observation, action, reward, is_terminal, priority) | 43.301235 | 117 | 0.642584 | 2,144 | 17,537 | 5.046642 | 0.210354 | 0.016636 | 0.013309 | 0.012754 | 0.287246 | 0.26146 | 0.241867 | 0.216821 | 0.200555 | 0.190573 | 0 | 0.011204 | 0.282374 | 17,537 | 405 | 118 | 43.301235 | 0.84855 | 0.32383 | 0 | 0.227848 | 0 | 0 | 0.016672 | 0.00191 | 0 | 0 | 0 | 0 | 0 | 1 | 0.063291 | false | 0 | 0.046414 | 0.012658 | 0.160338 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
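The inner loss_fn in train() implements the quantile Huber loss of Dabney et al. (Eqs. 9-10 referenced in the comments): a Huber loss on each (quantile, target) error, weighted asymmetrically by |tau_hat - 1{u < 0}|. A standalone toy version for a single state with num_atoms=3 and kappa=1, mirroring the vectorized code:

import jax.numpy as jnp

kappa = 1.0
theta = jnp.array([0.0, 0.5, 1.0])     # predicted quantiles for one action
target = jnp.array([0.2, 0.8])         # target distribution samples
u = target[None, :] - theta[:, None]   # bellman errors, shape (3, 2)
huber = jnp.where(jnp.abs(u) <= kappa, 0.5 * u ** 2,
                  kappa * (jnp.abs(u) - 0.5 * kappa))
tau_hat = (jnp.arange(3) + 0.5) / 3    # quantile midpoints
loss = jnp.abs(tau_hat[:, None] - (u < 0)) * huber
print(jnp.sum(jnp.mean(loss, axis=1))) # sum over tau dim, mean over target dim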
2c846fbf1fb31138dd5ef08bbb8178074cf8b762 | 4,395 | py | Python | utils/visualize_utils.py | ljrprocc/Motif-Removal | 8979ca91398212248a2be61345c99bdec53ae37e | [
"MIT"
] | null | null | null | utils/visualize_utils.py | ljrprocc/Motif-Removal | 8979ca91398212248a2be61345c99bdec53ae37e | [
"MIT"
] | null | null | null | utils/visualize_utils.py | ljrprocc/Motif-Removal | 8979ca91398212248a2be61345c99bdec53ae37e | [
"MIT"
] | null | null | null | import torch
import os.path
from utils.train_utils import load_globals, init_folders, init_nets
from loaders.motif_dataset import MotifDS
from PIL import Image
import numpy as np
# network names
root_path = '..'
train_tag = 'vm_demo_text_remover'
load_tag = ''
device = torch.device('cuda:0')
net_path = '%s/checkpoints/%s' % (root_path, train_tag)
resources_root = 'test images folder'
target_root = '%s/data/tmp' % root_path
def load_image(image_path, _device, include_tensor=False):
numpy_image = None
tensor_image = None
if os.path.isfile(image_path):
to_save = False
row_image = Image.open(image_path)
w, h = row_image.size
if h > 512:
to_save = True
h = int((512. * h) / w)
row_image = row_image.resize((512, h), Image.BICUBIC)
w, h = row_image.size
if w % 16 != 0 or h % 16 != 0:
to_save = True
row_image = row_image.crop((0, 0, (w // 16) * 16, (h // 16) * 16))
if to_save:
row_image.save(image_path)
numpy_image = np.array(row_image)
if len(numpy_image.shape) != 3:
numpy_image = np.repeat(np.expand_dims(numpy_image, 2), 3, axis=2)
if numpy_image.shape[2] != 3:
numpy_image = numpy_image[:, :, :3]
if include_tensor:
tensor_image = MotifDS.trans(MotifDS.flip(numpy_image)[0])[0]
tensor_image = torch.unsqueeze(torch.from_numpy(tensor_image), 0).to(_device)
numpy_image = np.expand_dims(numpy_image / 255, 0)
return numpy_image, tensor_image
def transform_to_numpy_image(tensor_image):
image = tensor_image.cpu().detach().numpy()
image = np.transpose(image, (0, 2, 3, 1))
if image.shape[3] != 3:
image = np.repeat(image, 3, axis=3)
else:
image = (image / 2 + 0.5)
return image
def collect_synthesized(_source):
paths = []
for root, _, files in os.walk(_source):
for file in files:
file_name, file_extension = os.path.splitext(file)
if (file_extension == '.png' or file_extension == '.jpg' or file_extension == '.jpeg') and \
('real' not in file_name and 'reconstructed' not in file_name and 'grid' not in file_name):
paths.append(os.path.join(root, file))
return paths
def save_numpy_image(images, suffix, _target_root, _resources_root, prefix='', start_count=0):
images = (images * 255).astype(np.uint8) # unnormalize
for image_index in range(images.shape[0]):
if prefix == '':
image_path = '%s/%d_%s.png' % (_resources_root, image_index + start_count, suffix)
else:
image_path = '%s/%s_%s.png' % (_target_root, prefix, suffix)
image = Image.fromarray(images[image_index])
image.save(image_path)
def run_net(opt, _device, _net_path, _source, _target, _train_tag, _tag=''):
net = init_nets(opt, _net_path, _device, _tag).eval()
synthesized_paths = collect_synthesized(_source)
image_suffixes = ['reconstructed_image', 'reconstructed_motif']
for path in synthesized_paths:
prefix, _ = os.path.splitext(os.path.split(path)[-1])
prefix = prefix.split('_')[0]
sy_np, sy_ts = load_image(path, _device, True)
results = list(net(sy_ts))
for idx, result in enumerate(results):
results[idx] = transform_to_numpy_image(result)
reconstructed_mask = results[1]
reconstructed_motif = None
if len(results) == 3:
reconstructed_raw_motif = results[2]
reconstructed_motif = (reconstructed_raw_motif - 1) * reconstructed_mask + 1
reconstructed_image = reconstructed_mask * results[0] + (1 - reconstructed_mask) * sy_np
for idx, image in enumerate([reconstructed_image, reconstructed_motif]):
if image is not None and idx < len(image_suffixes):
save_numpy_image(image, '%s_%s' % (image_suffixes[idx], _train_tag), _target, _source,
prefix=prefix)
print('done')
if __name__ == '__main__':
_opt = load_globals(net_path, {}, override=False)
init_folders(target_root)
run_net(_opt, device, net_path, resources_root, target_root, train_tag, load_tag)
| 40.321101 | 111 | 0.61661 | 581 | 4,395 | 4.371773 | 0.2358 | 0.066929 | 0.018898 | 0.015354 | 0.059843 | 0.029921 | 0 | 0 | 0 | 0 | 0 | 0.020516 | 0.268032 | 4,395 | 108 | 112 | 40.694444 | 0.769039 | 0.005688 | 0 | 0.065217 | 0 | 0 | 0.044142 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054348 | false | 0 | 0.065217 | 0 | 0.152174 | 0.01087 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c8726dfb28106d95bb06c1763a01f4331da1d8a | 651 | py | Python | python/Data Structures and Algorithms in Python Book/stacks/match_delimiter.py | gauravssnl/Data-Structures-and-Algorithms | 1c335c72ce514d4f95090241bbd6edf01a1141a8 | [
"MIT"
] | 7 | 2020-05-10T09:57:23.000Z | 2021-03-27T11:55:07.000Z | python/Data Structures and Algorithms in Python Book/stacks/match_delimiter.py | gauravssnl/Data-Structures-and-Algorithms | 1c335c72ce514d4f95090241bbd6edf01a1141a8 | [
"MIT"
] | null | null | null | python/Data Structures and Algorithms in Python Book/stacks/match_delimiter.py | gauravssnl/Data-Structures-and-Algorithms | 1c335c72ce514d4f95090241bbd6edf01a1141a8 | [
"MIT"
] | 3 | 2021-03-27T03:42:57.000Z | 2021-08-09T12:03:41.000Z | from arraystack import ArrayStack
def is_matched(expr):
"""Return True if all delimiters properly match (or closed); False otherwise"""
left_delimiters = "({["
right_delimiter = ")}]"
S = ArrayStack()
for c in expr:
if c in left_delimiters:
S.push(c)
elif c in right_delimiter:
if S.is_empty():
return False
if right_delimiter.index(c) != left_delimiters.index(S.pop()):
return False
return S.is_empty()
if __name__ == "__main__":
expr = "[(5+x)-(y+z)]"
print(is_matched(expr))
expr = "[(5+x)-y+z)]"
print(is_matched(expr)) | 28.304348 | 84 | 0.572965 | 85 | 651 | 4.164706 | 0.435294 | 0.076271 | 0.110169 | 0.039548 | 0.146893 | 0.146893 | 0.146893 | 0.146893 | 0.146893 | 0 | 0 | 0.004338 | 0.291859 | 651 | 23 | 85 | 28.304348 | 0.763557 | 0.113671 | 0 | 0.210526 | 0 | 0 | 0.068182 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.052632 | 0 | 0.263158 | 0.105263 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c889835bd8ac35d8cbc3f5d416195de17ced7a8 | 24,522 | py | Python | cogs/werewolf.py | vluk/baymaxBot | fb7e7dd4b2d99a6987a8e8c09afc7e43af844ffc | [
"MIT"
] | null | null | null | cogs/werewolf.py | vluk/baymaxBot | fb7e7dd4b2d99a6987a8e8c09afc7e43af844ffc | [
"MIT"
] | 4 | 2021-04-24T06:38:49.000Z | 2021-04-24T06:44:21.000Z | cogs/werewolf.py | vluk/baymaxBot | fb7e7dd4b2d99a6987a8e8c09afc7e43af844ffc | [
"MIT"
] | 1 | 2021-05-02T08:04:08.000Z | 2021-05-02T08:04:08.000Z | import discord
from discord.ext import commands
import random
import asyncio
cards = ["villager", "werewolf", "minion", "mason", "seer", "robber", "troublemaker", "drunk", "insomniac", "tanner", "hunter"]
aesthetics = {
"werewolf" : {
"color" : 0x25d0ff,
"thumbnail" : "https://cdn.discordapp.com/attachments/323535193073778689/716453639782137876/unknown.png"
}
}
class Game:
def __init__(self, host, join_message):
self.state = "preparing"
self.players = [1, 2, 3]
self.host = host
self.join_message = join_message
self.initial_roles = []
self.current_roles = []
self.votes = {}
def fetch_player(self, arg):
try:
for player in self.players:
if not isinstance(player, int):
if player.id == int(arg):
return self.players.index(player)
except ValueError:
pass
arg = str(arg)
for player in self.players:
if not isinstance(player, int):
if player.name.lower() == arg.lower():
return self.players.index(player)
for player in self.players:
if not isinstance(player, int):
if player.nick != None and player.nick.lower() == arg.lower():
return self.players.index(player)
return -1
def get_refreshed_embed(self):
embed = self.join_message.embeds[0]
embed.clear_fields()
players = ", ".join(self.get_player_list()) if len(self.players) > 3 else "None"
embed.add_field(name="Players:", value = players)
roles = ", ".join([cards[i] for i in self.initial_roles]) if len(self.initial_roles) > 0 else "None"
embed.add_field(name="Roles:", value = roles)
return embed
def get_player_list(self):
return [i.display_name for i in self.players if not isinstance(i, int)]
def get_debrief(self):
return [(self.players[i].display_name, cards[self.current_roles[i]]) for i in range(len(self.current_roles)) if not isinstance(self.players[i], int)]
def simulate(self, instructions):
for i in instructions:
for j in i:
swap = self.current_roles[j[0]]
self.current_roles[j[0]] = self.current_roles[j[1]]
self.current_roles[j[1]] = swap
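    # e.g. game.simulate([[(0, 4)], [(1, 2)]]) swaps the cards of players 0 and 4
    # (say, a robber steal) and then players 1 and 2 (a troublemaker swap), in the
    # order the night actions resolved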
class Werewolf(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.games = {}
    async def do_villager(self, user, game):
        # villagers have no night action, so there is nothing to do here
        pass
async def do_werewolf(self, user, game):
werewolves = []
for i in range(len(game.players)):
if not isinstance(game.players[i], int) and game.initial_roles[i] == 1:
werewolves.append(game.players[i])
if len(werewolves) == 1:
embed = discord.Embed(
title = "You are a werewolf!",
description = " ".join([
"You are a werewolf, the very embodiment of evil itself.",
"As a werewolf, your goal is to stay alive by deceiving the other players.",
"If all of the werewolves manage to stay alive, then their team wins.",
"Since it looks like you're the only werewolf, you get to look at a card from the center.",
"Click on one of the reactions on this message to reveal a card."
]),
color = aesthetics["werewolf"]["color"]
)
embed.set_thumbnail(url=aesthetics["werewolf"]["thumbnail"])
embed.add_field(
name="Werewolves",
value = "Just you!"
)
message = await user.send(embed=embed)
key = {"1️⃣" : 1, "2️⃣" : 2, "3️⃣" : 3}
for i in key:
await message.add_reaction(i)
def check(r, u):
return u.id == user.id and r.message.id == message.id and str(r.emoji) in key
reaction, user = await self.bot.wait_for("reaction_add", check=check)
selection = key[str(reaction.emoji)]
revealed = cards[game.initial_roles[game.players.index(selection)]].capitalize()
embed.add_field(
name="Revealed Card",
value=revealed,
)
await message.edit(embed=embed)
elif len(werewolves) > 1:
embed = discord.Embed(
title = "You are a werewolf!",
description = " ".join([
"As a werewolf, your goal is to stay alive by deceiving the other players.",
"If all of the werewolves manage to stay alive, then their team wins."
]),
color = aesthetics["werewolf"]["color"]
)
embed.set_thumbnail(url=aesthetics["werewolf"]["thumbnail"])
embed.add_field(
name="Werewolves:",
value = ", ".join([werewolf.display_name for werewolf in werewolves])
)
await user.send(embed=embed)
return []
async def do_minion(self, user, game):
werewolves = []
for i in range(len(game.players)):
if not isinstance(game.players[i], int) and game.initial_roles[i] == 1:
werewolves.append(game.players[i])
if len(werewolves) == 0:
embed = discord.Embed(
title = "You are a minion!",
description = " ".join([
"You are a dastardly minion, only barely tolerated by the werewolves.",
"Try to draw the fire of the other players, or divert suspicion towards one of the villagers.",
"If all of the werewolves manage to stay alive, then you win.",
])
)
embed.add_field(
name="Werewolves:",
value = "None"
)
await user.send(embed=embed)
else:
embed = discord.Embed(
title = "You are a minion!",
description = " ".join([
"You are a minion, dashing but with a heart of coal.",
"Try to draw the fire of the other players, or divert suspicion towards one of the villagers.",
"If all of the werewolves manage to stay alive, then you win.",
])
)
embed.add_field(
name="Werewolves:",
value = ", ".join([werewolf.display_name for werewolf in werewolves])
)
await user.send(embed=embed)
return []
async def do_mason(self, user, game):
masons = []
for i in range(len(game.players)):
if not isinstance(game.players[i], int) and game.initial_roles[i] == 3:
masons.append(game.players[i])
embed = discord.Embed(
title = "You are a mason!",
description = " ".join([
"Your sublime bond with your partner is unbreakable.",
"Leverage your maybe-platonic love to narrow down the suspects.",
"If you manage to kill a werewolf, then you win.",
])
)
embed.add_field(
name="Masons",
value = ", ".join([mason.display_name for mason in masons])
)
message = await user.send(embed=embed)
return []
async def do_seer(self, user, game):
embed = discord.Embed(
title = "You are a seer!",
description = " ".join([
"You are one with the very fabric of reality itself.",
"Use your eldritch knowledge to gain insights into the game.",
"If you manage to kill a werewolf, then you win.",
"You can either look at either another player's card or two cards in the center. "
"React with either 🇵 or 🇨 to choose."
])
)
message = await user.send(embed=embed)
key = {"🇵" : "player", "🇨" : "center"}
for i in key:
await message.add_reaction(i)
def check(r, u):
return u.id == user.id and r.message.id == message.id and str(r.emoji) in key
reaction, user = await self.bot.wait_for("reaction_add", check=check)
selection = key[str(reaction.emoji)]
if selection == "player":
await user.send("Choose which player, using either their full username or nickname.")
def user_check(m):
if m.author.id != self.bot.user.id:
if m.channel.id == user.dm_channel.id:
if game.fetch_player(m.content) != -1:
self.bot.loop.create_task(m.add_reaction("✅"))
return True
else:
self.bot.loop.create_task(m.add_reaction("❌"))
return False
player = game.fetch_player((await self.bot.wait_for("message", check=user_check)).content)
await user.send(cards[game.initial_roles[player]])
elif selection == "center":
await user.send("Choose which two using two numbers (1, 2, 3) seperated with a space.")
def card_check(m):
if m.channel.id != user.dm_channel.id:
return False
split = m.content.split()
if len(split) != 2:
self.bot.loop.create_task(m.add_reaction("❌"))
return False
try:
valid = int(split[0]) in [1, 2, 3] and int(split[1]) in [1, 2, 3] and split[0] != split[1]
if valid:
self.bot.loop.create_task(m.add_reaction("✅"))
return True
self.bot.loop.create_task(m.add_reaction("❌"))
return False
except ValueError:
self.bot.loop.create_task(m.add_reaction("❌"))
return False
centers = [int(i) for i in (await self.bot.wait_for("message", check=card_check)).content.split()]
await user.send(cards[game.initial_roles[game.players.index(centers[0])]])
await user.send(cards[game.initial_roles[game.players.index(centers[1])]])
await user.send("You're good to go!")
return []
async def do_robber(self, user, game):
embed = discord.Embed(
title = "You are a robber!",
description = " ".join([
"Your morals are flexible, and so is your identity.",
"Choose another player to swap your card with.",
"Whoever ends up with your card will be on the villager team.",
"(Send a message containing their full username or nickname.)"
])
)
message = await user.send(embed=embed)
initial = game.fetch_player(user.id)
def check(m):
if m.channel.id == user.dm_channel.id:
if (game.fetch_player(m.content) != -1
and game.fetch_player(m.content) != initial):
self.bot.loop.create_task(m.add_reaction("✅"))
return True
else:
self.bot.loop.create_task(m.add_reaction("❌"))
return False
target = game.fetch_player((await self.bot.wait_for("message", check=check)).content)
await user.send("you are now the " + cards[game.initial_roles[target]])
await user.send("You're good to go!")
return [(initial, target)]
async def do_troublemaker(self, user, game):
await user.send("choose two players to swap (seperate messages)")
initial = game.fetch_player(user.id)
first = None
def check(m):
if m.channel.id == user.dm_channel.id:
if (game.fetch_player(m.content) != -1
and game.fetch_player(m.content) != first
and game.fetch_player(m.content) != initial):
self.bot.loop.create_task(m.add_reaction("✅"))
return True
else:
self.bot.loop.create_task(m.add_reaction("❌"))
return False
first_message = await self.bot.wait_for("message", check=check)
first = game.fetch_player(first_message.content)
second_message = await self.bot.wait_for("message", check=check)
second = game.fetch_player(second_message.content)
await user.send("You're good to go!")
return [(first, second)]
async def do_drunk(self, user, game):
embed = discord.Embed(
title = "You are a drunk!",
description = " ".join([
"You like the happy juice a biiiit more than is probably healthy.",
"Choose a card in the center to swap with."
])
)
message = await user.send(embed=embed)
key = {"1️⃣" : 1, "2️⃣" : 2, "3️⃣" : 3}
for i in key:
await message.add_reaction(i)
def check(r, u):
return u.id == user.id and r.message.id == message.id and str(r.emoji) in key
reaction, user = await self.bot.wait_for("reaction_add", check=check)
selection = key[str(reaction.emoji)]
current = game.fetch_player(user.id)
middle = game.players.index(key[str(reaction.emoji)])
await user.send("You're good to go!")
return [(current, middle)]
async def do_insomniac(self, user, game):
current = game.fetch_player(user.id)
await user.send(cards[game.current_roles[current]])
@commands.group(aliases=["ww"], invoke_without_command=True)
async def werewolf(self, ctx):
host = ctx.message.author
embed = discord.Embed(
title = "Werewolf",
description = " ".join([
"A classic social deduction game where two sides face off against each other: the **Villagers** and **Werewolves**.",
"Uncover who the werewolves are, or use deception to stay hidden until the end.",
"But be careful: if you kill the Tanner, then both teams lose.",
"\n\n**Instructions:**\n",
"**React to this message with 🐺** to join the game, "
"then **add cards** using `%ww add` followed by a list of the roles you want to add, seperated by spaces.",
"For example, you might do something like this to add multiple roles:",
"`%ww add werewolf minion seer tanner troublemaker mason mason`.",
"Additionally, you can get the order of the roles using %ww roleOrder."
]),
color = 0x7289da
)
embed.set_footer(text=f"{host.display_name} is the host", icon_url=host.avatar_url)
embed.add_field(name="Players", value="None")
embed.add_field(name="Roles", value="None")
if not ctx.channel.id in self.games:
message = await ctx.send(embed=embed)
await message.add_reaction("🐺")
self.games[ctx.channel.id] = Game(ctx.message.author, message)
else:
await ctx.send("There's already a game running here!")
@werewolf.command()
async def join(self, ctx):
        if not ctx.channel.id in self.games:
            await ctx.send("theres no game here dummy")
            return
        game = self.games[ctx.channel.id]
if game.state == "preparing":
if game.fetch_player(ctx.message.author.id) == -1:
game.players.append(ctx.message.author)
await ctx.send("yeah sure")
else:
await ctx.send("already in the game")
else:
await ctx.send("nah you cant join in the middle of a round")
@werewolf.command(aliases=["addcard"])
async def add(self, ctx, *, names):
if not ctx.channel.id in self.games:
await ctx.send("theres no game here dummy")
return
game = self.games[ctx.channel.id]
if game.host.id != ctx.message.author.id:
await ctx.send("Only the host can add roles.")
return
if all([name.lower() in cards for name in names.split()]) and game.state == "preparing":
for name in names.split():
game.initial_roles.append(cards.index(name.lower()))
await game.join_message.edit(embed=game.get_refreshed_embed())
@werewolf.command()
async def set(self, ctx, *, names):
if not ctx.channel.id in self.games:
await ctx.send("theres no game here dummy")
return
game = self.games[ctx.channel.id]
if game.host.id != ctx.message.author.id:
await ctx.send("Only the host can add roles.")
return
game.initial_roles = []
if all([name.lower() in cards for name in names.split()]) and game.state == "preparing":
for name in names.split():
game.initial_roles.append(cards.index(name.lower()))
await game.join_message.edit(embed=game.get_refreshed_embed())
@werewolf.command(aliase=["removecard"])
async def remove(self, ctx, name):
if not ctx.channel.id in self.games:
await ctx.send("theres no game here dummy")
return
game = self.games[ctx.channel.id]
        if game.host.id != ctx.message.author.id:
            await ctx.send("Only the host can remove roles.")
            return
if game.state == "preparing" and cards.index(name.lower()) in game.initial_roles:
game.initial_roles.remove(cards.index(name.lower()))
await game.join_message.edit(embed=game.get_refreshed_embed())
@werewolf.command()
async def vote(self, ctx, *, accused : str):
if not ctx.channel.id in self.games:
await ctx.send("theres no game here dummy")
return
game = self.games[ctx.channel.id]
if game.state != "voting":
await ctx.send("cant vote yet")
return
author = ctx.message.author
if game.fetch_player(author.id) != -1:
if game.fetch_player(accused) != -1:
game.votes[author.id] = game.players[game.fetch_player(accused)].id
if len(game.votes) == len(game.players) - 3:
tally = {}
for i in game.votes:
if not game.votes[i] in tally:
tally[game.votes[i]] = 0
tally[game.votes[i]] += 1
top = sorted([(tally[i], i) for i in tally])
if len(top) > 1 and top[-1][0] == top[-2][0]:
await ctx.send("no decisive winner")
else:
killed_id = top[-1][1]
index = game.fetch_player(killed_id)
killed = game.players[index]
await ctx.send("killing " + killed.mention)
if cards[game.current_roles[index]] == "hunter":
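                    # the hunter names a player to take down with them; the
                    # choice is validated with a reaction but not otherwise
                    # acted on in this version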
def user_check(m):
if m.channel.id == ctx.message.channel.id:
if game.fetch_player(m.content) != -1:
self.bot.loop.create_task(m.add_reaction("✅"))
return True
else:
self.bot.loop.create_task(m.add_reaction("❌"))
return False
player = game.fetch_player((await self.bot.wait_for("message", check=user_check)).content)
await ctx.send(killed.display_name + " was " + cards[game.current_roles[index]])
paired_roles = [" was ".join(i) for i in game.get_debrief()]
await ctx.send(", ".join(paired_roles))
del self.games[ctx.channel.id]
else:
await ctx.send("vote registered")
else:
await ctx.send("cant find")
else:
await ctx.send("you're not playing")
@werewolf.command()
async def roleOrder(self, ctx):
await ctx.send(", ".join(cards))
@werewolf.command()
async def start(self, ctx):
if not ctx.channel.id in self.games:
await ctx.send("theres no game here dummy")
return
game = self.games[ctx.channel.id]
if game.host.id != ctx.message.author.id:
await ctx.send("Only the host can start the game.")
return
if len(game.players) > len(game.initial_roles):
await ctx.send("You need more roles to play!")
return
if len(game.players) < len(game.initial_roles):
await ctx.send("You need less roles to play!")
return
game.state = "running"
game.initial_roles = sorted(game.initial_roles)
random.shuffle(game.players)
game.current_roles = [i for i in game.initial_roles]
await ctx.send("game starting")
for i in range(len(game.players)):
if not isinstance(game.players[i], int):
await game.players[i].send("you're the " + cards[game.initial_roles[i]])
tasks = []
for i in range(len(game.initial_roles)):
if not isinstance(game.players[i], int):
if game.initial_roles[i] == 1:
tasks.append(self.do_werewolf(game.players[i], game))
if game.initial_roles[i] == 2:
tasks.append(self.do_minion(game.players[i], game))
if game.initial_roles[i] == 3:
tasks.append(self.do_mason(game.players[i], game))
if game.initial_roles[i] == 4:
tasks.append(self.do_seer(game.players[i], game))
if game.initial_roles[i] == 5:
tasks.append(self.do_robber(game.players[i], game))
if game.initial_roles[i] == 6:
tasks.append(self.do_troublemaker(game.players[i], game))
if game.initial_roles[i] == 7:
tasks.append(self.do_drunk(game.players[i], game))
instructions = await asyncio.gather(*tasks)
game.simulate(instructions)
for i in range(len(game.players)):
if not isinstance(game.players[i], int):
if game.initial_roles[i] == 8:
await self.do_insomniac(game.players[i], game)
await ctx.send("the night's now over, do ur stuff then do %ww vote")
game.state = "voting"
@commands.Cog.listener()
async def on_reaction_add(self, reaction, user):
if user.id == self.bot.user.id:
return
message = reaction.message
if message.channel.id in self.games and self.games[message.channel.id].state == "preparing":
game = self.games[message.channel.id]
if game.join_message.id == message.id:
if game.fetch_player(user.id) == -1:
game.players.append(user)
await message.edit(embed=game.get_refreshed_embed())
@commands.Cog.listener()
async def on_reaction_remove(self, reaction, user):
message = reaction.message
if message.channel.id in self.games and self.games[message.channel.id].state == "preparing":
game = self.games[message.channel.id]
if game.join_message.id == message.id:
if game.fetch_player(user.id) != -1:
for player in range(len(game.players)):
if not isinstance(game.players[player], int) and game.players[player].id == user.id:
del game.players[player]
await message.edit(embed=game.get_refreshed_embed())
@werewolf.command()
async def cancel(self, ctx):
if ctx.message.channel.id in self.games:
game = self.games[ctx.channel.id]
if game.host.id == ctx.message.author.id:
del self.games[ctx.channel.id]
await ctx.send("game cancelled")
def setup(bot):
    bot.add_cog(Werewolf(bot))
| 38.496075 | 157 | 0.540046 | 3,006 | 24,522 | 4.346973 | 0.119095 | 0.032831 | 0.034285 | 0.016071 | 0.594398 | 0.560496 | 0.522461 | 0.498584 | 0.488329 | 0.44356 | 0 | 0.007294 | 0.345853 | 24,522 | 636 | 158 | 38.556604 | 0.805436 | 0 | 0 | 0.486 | 0 | 0.006 | 0.160958 | 0.000938 | 0 | 0 | 0.000652 | 0 | 0 | 1 | 0.032 | false | 0.004 | 0.008 | 0.01 | 0.128 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c8b5792d3174dff39013842e2317ce417ed4a8a | 650 | py | Python | main.py | KShah707/TweetDownloader | ff6770e2f86273919c279466337a4e5ab2cd63e0 | [
"MIT"
] | null | null | null | main.py | KShah707/TweetDownloader | ff6770e2f86273919c279466337a4e5ab2cd63e0 | [
"MIT"
] | null | null | null | main.py | KShah707/TweetDownloader | ff6770e2f86273919c279466337a4e5ab2cd63e0 | [
"MIT"
] | null | null | null | ###################################################
# Uses the tweepy client to query a user's tweets #
###################################################
import csv
import tweepy
# Load secrets from separate file
from user_secrets import *
# Set up API client
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
# Fetch all tweets
with open('downloaded_tweets.csv', 'w') as outfile:
writer = csv.writer(outfile)
my_tweets = [[tweet.full_text] for tweet in api.user_timeline(tweet_mode='extended')]
print(my_tweets)
writer.writerows(my_tweets) | 30.952381 | 89 | 0.641538 | 83 | 650 | 4.843373 | 0.566265 | 0.08209 | 0.084577 | 0.109453 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.124615 | 650 | 21 | 90 | 30.952381 | 0.706503 | 0.176923 | 0 | 0 | 0 | 0 | 0.070093 | 0.049065 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.272727 | 0 | 0.272727 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c8c64b7f637476a4c3e62cdc4f47747851211f2 | 1,664 | py | Python | anime_downloader/sites/nyaa.py | Alsira/anime-downloader | d82b4cfd5c7c6c358d0d8ffd36ce2d5c4a285595 | [
"Unlicense"
] | 1,077 | 2020-10-17T15:43:17.000Z | 2022-03-31T15:24:29.000Z | anime_downloader/sites/nyaa.py | Alsira/anime-downloader | d82b4cfd5c7c6c358d0d8ffd36ce2d5c4a285595 | [
"Unlicense"
] | 509 | 2018-06-01T13:07:56.000Z | 2020-10-17T13:34:39.000Z | anime_downloader/sites/nyaa.py | Alsira/anime-downloader | d82b4cfd5c7c6c358d0d8ffd36ce2d5c4a285595 | [
"Unlicense"
] | 255 | 2018-05-27T03:52:11.000Z | 2020-10-12T17:27:38.000Z | from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.sites import helpers
class Nyaa(Anime, sitename='nyaa'):
"""
Site: https://nyaa.si
Config
~~~~~~
    filter: Choose filter method in search. One of ['No filter', 'No remakes', 'Trusted only']
category: Choose categories to search. One of ['Anime Music Video', 'English-translated', 'Non-English-translated']
"""
sitename = 'nyaa'
url = f'https://{sitename}.si'
@classmethod
def search(cls, query):
filters = {"No filter": 0, "No remakes": 1, "Trusted only": 2}
categories = {"Anime Music Video": "1_1", "English-translated": "1_2", "Non-English-translated": "1_3"}
self = cls()
parameters = {"f": filters[self.config["filter"]], "c": categories[self.config["category"]], "q": query, "s": "size", "o": "desc"}
search_results = helpers.soupify(helpers.get(f"https://nyaa.si/", params=parameters))
search_results = [
SearchResult(
title=i.select("a:not(.comments)")[1].get("title"),
url=i.select_one('a[href*="magnet"]')['href'],
meta={'peers': i.find_all('td', class_='text-center')[3].text + ' peers', 'size':i.find_all('td', class_='text-center')[1].text})
for i in search_results.select("tr.default, tr.success")
]
return search_results
def _scrape_episodes(self):
# the magnet has all episodes making this redundant
return [self.url]
class NyaaEpisode(AnimeEpisode, sitename='nyaa'):
def _get_sources(self):
return [('no_extractor', self.url)]
| 35.404255 | 145 | 0.612981 | 206 | 1,664 | 4.859223 | 0.42233 | 0.067932 | 0.037962 | 0.047952 | 0.04995 | 0.04995 | 0.04995 | 0 | 0 | 0 | 0 | 0.009224 | 0.218149 | 1,664 | 46 | 146 | 36.173913 | 0.760184 | 0.176683 | 0 | 0 | 0 | 0 | 0.216741 | 0.016442 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.08 | 0.08 | 0.48 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c8f674fb4bb528a31df28c9e6f31e31a0334ee4 | 810 | py | Python | src/hgg_coffea/tools/xgb_loader.py | holzman/hgg-coffea | 5aabdb127edaa15b0f22d54f0e3ccdc74c5c10de | [
"BSD-3-Clause"
] | 3 | 2021-07-22T07:02:03.000Z | 2021-09-22T07:01:59.000Z | src/hgg_coffea/tools/xgb_loader.py | holzman/hgg-coffea | 5aabdb127edaa15b0f22d54f0e3ccdc74c5c10de | [
"BSD-3-Clause"
] | 2 | 2021-08-16T16:08:09.000Z | 2021-11-12T00:41:50.000Z | src/hgg_coffea/tools/xgb_loader.py | holzman/hgg-coffea | 5aabdb127edaa15b0f22d54f0e3ccdc74c5c10de | [
"BSD-3-Clause"
] | 8 | 2021-07-22T07:49:19.000Z | 2022-01-26T22:58:03.000Z | import gzip
import lzma
import warnings
from typing import Optional
import xgboost
def _get_gzip(fname: str) -> bytearray:
return bytearray(gzip.open(fname, "rb").read())
def _get_lzma(fname: str) -> bytearray:
return bytearray(lzma.open(fname, "rb").read())
# map a file's first two magic bytes to the matching decompressor;
# unrecognised files fall through to the identity opener below
_magics = {
    b"\x1f\x8b": _get_gzip,  # gzip
    b"\xfd7": _get_lzma,     # xz / lzma
}
def load_bdt(fname: str) -> Optional[xgboost.Booster]:
try:
bdt = xgboost.Booster()
with open(fname, "rb") as f:
magic = f.read(2)
opener = _magics.get(magic, lambda x: x)
bdt.load_model(opener(fname))
except xgboost.core.XGBoostError as xgberr:
warnings.warn(repr(xgberr))
bdt = None
except FileNotFoundError as fnferr:
warnings.warn(repr(fnferr))
bdt = None
return bdt
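# Example usage (hypothetical path and features, for illustration): the
# magic-byte sniffing above lets the same call transparently load a gzip- or
# xz-compressed model, or fall back to handing xgboost the raw path:
#   bdt = load_bdt("models/diphoton_bdt.json.gz")
#   if bdt is not None:
#       scores = bdt.predict(xgboost.DMatrix(features))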
| 21.891892 | 54 | 0.632099 | 106 | 810 | 4.716981 | 0.415094 | 0.048 | 0.066 | 0.092 | 0.128 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006547 | 0.245679 | 810 | 36 | 55 | 22.5 | 0.811784 | 0 | 0 | 0.074074 | 0 | 0 | 0.023457 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.185185 | 0.074074 | 0.407407 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c8faf8eecad5b6ac0b19aefead4deb655a3a6fb | 16,629 | py | Python | latent_rationale/snli/util.py | bastings/interpretable_neural_predictions | fef61833bd22205dc2d4f77e2c0ed3f40cbe8ea6 | [
"MIT"
] | 100 | 2019-05-21T21:26:19.000Z | 2022-03-27T18:22:27.000Z | latent_rationale/snli/util.py | bastings/interpretable_neural_predictions | fef61833bd22205dc2d4f77e2c0ed3f40cbe8ea6 | [
"MIT"
] | 6 | 2019-07-30T03:08:44.000Z | 2021-05-07T16:49:55.000Z | latent_rationale/snli/util.py | bastings/interpretable_neural_predictions | fef61833bd22205dc2d4f77e2c0ed3f40cbe8ea6 | [
"MIT"
] | 20 | 2019-06-19T18:36:41.000Z | 2022-01-08T12:59:39.000Z | import os
from argparse import ArgumentParser
import torch
from hashlib import md5
import numpy as np
import glob
import re
from torch.nn import functional as F
from latent_rationale.snli.constants import UNK_TOKEN, PAD_TOKEN, INIT_TOKEN
from latent_rationale.snli.plotting import plot_heatmap
from latent_rationale.snli.text import data
BIN_REGEX = re.compile(r"\( | \)")
NON_BIN_REGEX = re.compile(r"\([A-Z.,:$]+|\)")
def masked_softmax(t, mask, dim=-1):
t = torch.where(mask, t, t.new_full([1], float('-inf')))
return F.softmax(t, dim=dim)
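# e.g. masked_softmax(torch.tensor([1., 2., 3.]),
#                     torch.tensor([True, True, False]))
# -> tensor([0.2689, 0.7311, 0.0000]); masked positions receive zero probability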
def get_data_fields(glove_words, lowercase=False,
init_token=INIT_TOKEN):
not_in_glove = set()
def _tokens_from_binary_parse(s):
tokens = re.sub(BIN_REGEX, "", s).split()
tokens = unk_unknown_tokens(
tokens, glove_words=glove_words, not_in_glove=not_in_glove)
return tokens
def _tokens_from_non_binary_parse(s):
tokens = re.sub(NON_BIN_REGEX, "", s).split()
tokens = unk_unknown_tokens(
tokens, glove_words=glove_words, not_in_glove=not_in_glove)
return tokens
input_field = data.Field(
lower=lowercase, tokenize=_tokens_from_binary_parse,
batch_first=True, include_lengths=True,
init_token=init_token, pad_token=PAD_TOKEN, unk_token=UNK_TOKEN)
label_field = data.Field(
sequential=False, batch_first=True, unk_token=None)
return input_field, label_field, not_in_glove
def unk_unknown_tokens(tokens, n=100, lowercase=False,
glove_words=None, not_in_glove=None):
"""
Hash unknown words into N different UNK-classes
:param tokens:
:param n: hash tokens into this many classes
:param lowercase:
:param glove_words: a set with all valid glove words
:param not_in_glove: an empty set where we store words that were not in glove
:return:
"""
new_tokens = []
for token in tokens:
if lowercase:
token = token.lower()
if token not in glove_words:
not_in_glove.add(token)
hash_idx = hash_token(token, n=n)
token = "<unk_{:02d}>".format(hash_idx)
new_tokens.append(token)
return new_tokens
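# Minimal sketch of the hashed-UNK behaviour (toy vocabulary, for illustration
# only): out-of-vocabulary tokens map deterministically to one of n pseudo-UNK
# classes, so repeated occurrences of the same unknown word share an embedding:
#   oov = set()
#   unk_unknown_tokens(["the", "quorble"], glove_words={"the"}, not_in_glove=oov)
#   # -> ["the", "<unk_??>"] (the index depends on md5); oov is now {"quorble"}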
def get_n_correct(batch, answer):
"""get number of correct predictions (float)"""
return (torch.max(answer, 1)[1].view(
batch.label.size()) == batch.label).float().sum().item()
def find_ckpt_in_directory(path):
for f in os.listdir(os.path.join(path, "")):
if f.startswith('best_ckpt'):
return os.path.join(path, f)
def save_checkpoint(ckpt, save_path, iterations, prefix="ckpt",
dev_acc=None, test_acc=None, delete_old=False):
ckpt_prefix = os.path.join(save_path, prefix)
ckpt_path = ckpt_prefix + "_iter_{:08d}".format(iterations)
if dev_acc is not None:
ckpt_path += "_devacc_{:4.2f}".format(dev_acc)
if test_acc is not None:
ckpt_path += "_testacc_{:4.2f}".format(test_acc)
ckpt_path += ".pt"
try:
torch.save(ckpt, ckpt_path)
except IOError:
print("Error while saving checkpoint (iteration %d)" % iterations)
if delete_old:
try:
for f in glob.glob(ckpt_prefix + '*'):
if f != ckpt_path:
os.remove(f)
except IOError:
print("Error while deleting old checkpoint")
def load_glove_words(word_vectors):
print("Loading Glove dictionary: {}".format(word_vectors))
words = set()
path = os.path.join("data/snli", word_vectors + ".words.txt")
with open(path, mode="r", encoding="utf-8") as f:
for line in f:
word = line.rstrip()
words.add(word)
print("Loaded:", len(words), "words")
return words
def get_z_counts(att, prem_mask, hypo_mask):
"""
    Compute z counts (number of 0, continuous, and 1 elements).
:param att: similarity matrix [B, prem, hypo]
:param prem_mask:
:param hypo_mask:
:return: z0, zc, z1
"""
# mask out all invalid positions with -1
att = torch.where(hypo_mask.unsqueeze(1), att, att.new_full([1], -1.))
att = torch.where(prem_mask.unsqueeze(2), att, att.new_full([1], -1.))
z0 = (att == 0.).sum().item()
zc = ((0 < att) & (att < 1)).sum().item()
z1 = (att == 1.).sum().item()
assert (att > -1).sum().item() == z0 + zc + z1, "mismatch"
return z0, zc, z1
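# Quick check with a toy 1x2x2 attention matrix and all-True masks:
#   att = torch.tensor([[[0.0, 0.5], [1.0, 1.0]]])
#   m = torch.ones(1, 2, dtype=torch.bool)
#   get_z_counts(att, m, m)  # -> (1, 1, 2)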
def get_device():
if torch.cuda.is_available():
return torch.device('cuda')
else:
return torch.device('cpu')
def print_config(config):
for k, v in vars(config).items():
print("%22s : %16s" % (k, str(v)))
print()
def print_parameters(model):
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
n_params = sum([np.prod(p.size()) for p in model_parameters])
print("Total params: %d" % n_params)
for name, p in model.named_parameters():
if p.requires_grad:
print("%30s : %12s" % (name, list(p.size())))
else:
print("%30s : %12s (no-grad)" % (name, list(p.size())))
print()
def hash_token(token, n=100):
return int(md5(token.encode()).hexdigest(), 16) % n
def makedirs(name):
"""helper function for python 2 and 3 to call os.makedirs()
avoiding an error if the directory to be created already exists"""
import os, errno
try:
os.makedirs(name)
except OSError as ex:
if ex.errno == errno.EEXIST and os.path.isdir(name):
# ignore existing directory
pass
else:
# a different error happened
raise
def remove_padding(text, pad_token):
try:
cut = text.index(PAD_TOKEN)
text = text[:cut]
except ValueError: # no padding present
pass
return text
def extract_attention(model, data_iter, input_vocab, answer_vocab):
"""
:param model:
:param data_iter:
:param input_vocab:
:param answer_vocab:
:return:
"""
if not hasattr(model, "prem2hypo_att"):
return
data_iter.init_epoch()
model.eval()
p2h_att = []
h2p_att = []
prems = []
hypos = []
predictions = []
targets = []
with torch.no_grad():
for i, batch in enumerate(data_iter, 1):
result = model(batch)
for j in range(batch.batch_size):
prem = [input_vocab.itos[x] for x in batch.premise[0][j]]
hypo = [input_vocab.itos[x] for x in batch.hypothesis[0][j]]
prem = remove_padding(prem, PAD_TOKEN)
hypo = remove_padding(hypo, PAD_TOKEN)
label = answer_vocab.itos[batch.label[j]]
answer = answer_vocab.itos[result.argmax(dim=-1)[j]]
prem2hypo_att = model.prem2hypo_att[j].cpu().numpy()
hypo2prem_att = model.hypo2prem_att[j].cpu().numpy()
prem2hypo_att = prem2hypo_att[:len(prem), :len(hypo)]
hypo2prem_att = hypo2prem_att[:len(hypo), :len(prem)]
targets.append(label)
predictions.append(answer)
p2h_att.append(prem2hypo_att)
h2p_att.append(hypo2prem_att)
prems.append(prem)
hypos.append(hypo)
return p2h_att, h2p_att, prems, hypos, predictions, targets
def print_examples(model, data_iter, input_vocab, answer_vocab, save_path,
iterations, n=3, writer=None, skip_null=True):
"""
:param model:
:param data_iter:
:param input_vocab:
:param answer_vocab:
:param save_path:
:param iterations:
:param n:
:param writer: Tensorboard writer to write attention images to Tensorboard
:param skip_null: do not show NULL (first) symbol in plot
:return:
"""
data_iter.init_epoch()
model.eval()
n_printed = 0
with torch.no_grad():
for i, batch in enumerate(data_iter, 1):
result = model(batch)
for j in range(batch.batch_size):
prem = [input_vocab.itos[x] for x in batch.premise[0][j]]
hypo = [input_vocab.itos[x] for x in batch.hypothesis[0][j]]
try:
cut = prem.index(PAD_TOKEN)
prem = prem[:cut]
except ValueError:
pass
try:
cut = hypo.index(PAD_TOKEN)
hypo = hypo[:cut]
except ValueError:
pass
label = answer_vocab.itos[batch.label[j]]
answer = answer_vocab.itos[result.argmax(dim=-1)[j]]
# extract attention matrices
if hasattr(model, "prem2hypo_att"):
prem2hypo_att = model.prem2hypo_att[j].cpu().numpy()
hypo2prem_att = model.hypo2prem_att[j].cpu().numpy()
if skip_null:
prem2hypo_att = prem2hypo_att[1:, 1:]
hypo2prem_att = hypo2prem_att[1:, 1:]
prem = prem[1:]
hypo = hypo[1:]
# attention is normalized by last dimension, so columns here
name = "ex{:02d}_prem2hypo_att".format(n_printed)
if writer is not None:
writer.add_image("data/" + name,
prem2hypo_att[None, :, :],
iterations)
path = os.path.join(save_path, name + ".pdf")
plot_heatmap(prem2hypo_att, row_labels=prem,
column_labels=hypo, output_path=path)
# attention is normalized by last dimension, so columns here
name = "ex{:02d}_hypo2prem_att".format(n_printed)
if writer is not None:
writer.add_image("data/" + name,
hypo2prem_att[None, :, :],
iterations)
path = os.path.join(save_path, name + ".pdf")
plot_heatmap(hypo2prem_att, row_labels=prem,
column_labels=hypo, output_path=path)
# extract multi-head self-attention matrices
if hasattr(model, "prem_self_att_samples"):
for k, a in enumerate(model.prem_self_att_samples):
prem_self_att = a[j].cpu().numpy()
prem_self_att = prem_self_att[:len(prem), :len(prem)]
name = "ex{:02d}_prem_sa{}".format(n_printed, k)
if writer is not None:
writer.add_image("data/" + name,
prem_self_att[None, :, :],
iterations)
name = name + ".pdf"
plot_heatmap(prem_self_att,
row_labels=prem, column_labels=prem,
output_path=os.path.join(save_path, name))
if hasattr(model, "hypo_self_att_samples"):
for k, a in enumerate(model.hypo_self_att_samples):
hypo_self_att = a[j].cpu().numpy()
hypo_self_att = hypo_self_att[:len(hypo), :len(hypo)]
name = "ex{:02d}_hypo_sa{}".format(n_printed, k)
if writer is not None:
writer.add_image("data/" + name,
hypo_self_att[None, :, :],
iterations)
name = name + ".pdf"
plot_heatmap(hypo_self_att,
                                     row_labels=hypo, column_labels=hypo,
output_path=os.path.join(save_path, name))
# extract self-attention matrices
if hasattr(model, "prem_self_att") and \
model.prem_self_att is not None:
prem_self_att = model.prem_self_att[j].cpu().numpy()
hypo_self_att = model.hypo_self_att[j].cpu().numpy()
plot_heatmap(prem_self_att,
row_labels=prem, column_labels=prem,
output_path=os.path.join(
save_path,
"ex%02d_prem_self_att.pdf" % n_printed))
plot_heatmap(hypo_self_att,
row_labels=hypo, column_labels=hypo,
output_path=os.path.join(
save_path,
"ex%02d_hypo_self_att.pdf" % n_printed))
print("Example {}".format(n_printed))
print("{:11} : {}".format("Premise:", " ".join(prem)))
print("{:11} : {}".format("Hypothesis:", " ".join(hypo)))
print("{:11} : {}".format("Label:", label))
print("{:11} : {}".format("Prediction:", answer))
print()
n_printed += 1
if n_printed == n:
return
def get_predict_args():
parser = ArgumentParser(description='PyTorch/torchtext SNLI example')
parser.add_argument('--ckpt', type=str, default="path_to_checkpoint")
args = parser.parse_args()
return args
def get_args():
parser = ArgumentParser(description='SNLI')
parser.add_argument('--save_path', type=str, default='results/snli/default')
parser.add_argument('--resume_snapshot', type=str, default='')
parser.add_argument('--epochs', type=int, default=200)
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--embed_size', type=int, default=300)
parser.add_argument('--proj_size', type=int, default=200)
parser.add_argument('--hidden_size', type=int, default=200)
parser.add_argument('--n_layers', type=int, default=1)
parser.add_argument('--print_every', type=int, default=100)
parser.add_argument('--eval_every', type=int, default=1000)
parser.add_argument('--save_every', type=int, default=1000)
parser.add_argument('--dropout', type=float, default=0.2)
parser.add_argument('--lr', type=float, default=0.0002)
parser.add_argument('--min_lr', type=float, default=5e-5)
parser.add_argument('--lr_decay', type=float, default=0.999)
parser.add_argument('--weight_decay', type=float, default=1e-6)
parser.add_argument('--patience', type=int, default=10000)
parser.add_argument('--max_grad_norm', type=float, default=5.)
parser.add_argument('--stop_lr_threshold', type=float, default=1e-5)
parser.add_argument('--max_relative_distance', type=int, default=11)
parser.add_argument('--model',
choices=["recurrent", "decomposable"],
default="decomposable")
parser.add_argument('--dist', choices=["", "hardkuma"],
default="")
parser.add_argument('--self-attention', action='store_true',
help="intra-sentence attention (Decomposable model)")
parser.add_argument('--no-bidirectional', action='store_false', dest='birnn')
# control Hard Kuma sparsity
parser.add_argument('--selection', type=float, default=1.0)
# lagrange settings
parser.add_argument('--lagrange_lr', type=float, default=0.01,
help="learning rate for lagrange")
parser.add_argument('--lagrange_alpha', type=float, default=0.99,
help="alpha for computing the running average")
parser.add_argument('--lambda_init', type=float, default=1e-5,
help="initial value for lambda")
# misc
parser.add_argument('--no-projection', action='store_false',
dest='projection')
parser.add_argument('--mask-diagonal', action='store_true')
parser.add_argument('--overwrite', action='store_true',
help="erase save_path if it exists")
parser.add_argument('--no-emb-normalization', action='store_false',
dest='normalize_embeddings')
parser.add_argument('--train_embed', action='store_false', dest='fix_emb')
parser.add_argument('--word_vectors', type=str,
default='glove.840B.300d')
args = parser.parse_args()
return args
| 35.76129 | 81 | 0.561609 | 1,993 | 16,629 | 4.484195 | 0.193176 | 0.035247 | 0.066577 | 0.010966 | 0.335124 | 0.295177 | 0.268435 | 0.245944 | 0.219537 | 0.189549 | 0 | 0.017206 | 0.31848 | 16,629 | 464 | 82 | 35.838362 | 0.771376 | 0.078958 | 0 | 0.282051 | 0 | 0 | 0.098718 | 0.011828 | 0 | 0 | 0 | 0 | 0.003205 | 1 | 0.064103 | false | 0.012821 | 0.038462 | 0.003205 | 0.160256 | 0.092949 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c9220dbfde66eef7cdc24410685f09d9949bf06 | 5,357 | py | Python | lc_functions.py | mrawls/kepler-makelc | 72a929b04d1c71bb5e854b96a9901544f681ed86 | [
"MIT"
] | 1 | 2018-09-10T01:35:08.000Z | 2018-09-10T01:35:08.000Z | lc_functions.py | mrawls/kepler-makelc | 72a929b04d1c71bb5e854b96a9901544f681ed86 | [
"MIT"
] | null | null | null | lc_functions.py | mrawls/kepler-makelc | 72a929b04d1c71bb5e854b96a9901544f681ed86 | [
"MIT"
] | null | null | null | import numpy as np
from pyraf import iraf
from pyraf.iraf import kepler
'''
Useful functions for Kepler light curve processing
Use this with the program 'makelc.py'
Originally by Jean McKeever
Edited and improved by Meredith Rawls
'''
# calculate orbital phase
# times must be a list of observation times in the same units as BJD0
# it returns 'phases': orbital phases from 0 to 1
# (cycle numbers are tallied internally but not returned)
def phasecalc(times, period=100, BJD0=2454833):
phases = []
cycles = []
for i in range(0, len(times)):
fracP = (times[i] - BJD0) / period
if fracP < 0:
phases.append(fracP % 1)
cycles.append(int(fracP))
else:
phases.append(fracP % 1)
cycles.append(int(fracP) + 1)
#print(fracP, phases[i])
return phases
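# Worked example: with period=100 d and BJD0=2454833, an observation at
# BJD 2454983 sits 1.5 cycles past BJD0, so only the fractional part survives:
#   phasecalc([2454983.0], period=100, BJD0=2454833)  # -> [0.5]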
# remove long-term trends
# uses a simple 3rd-order polynomial by default
# operates on one array at a time (e.g., after all quarters have been combined)
def long_detrend(t, flux, order=3):
model = np.polyfit(t, flux, order)
fit = np.zeros(len(t))
# apply the model coefficients to create the fit
for i in range(0, order+1):
fit += model[i]*np.power(t, (order-i))
#flux = flux/fit*1e6 - 1e6 # put it in ppm >:(
flux = flux/fit*np.median(flux) # don't put it in ppm, because ppm is annoying
return t, flux
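# Sketch with synthetic data (assumed inputs, illustration only): a cubic trend
# riding on a flat light curve is divided out, leaving the flux roughly
# constant at its median level:
#   t = np.linspace(0., 90., 500)
#   f = 1000. + 0.01 * (t - 45.)**3
#   t2, f2 = long_detrend(t, f)  # f2 hovers around np.median(f)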
# Delete any observation that has one or more NaN values.
# Assumes there are six parallel arrays... use dummy arrays if you don't have 6
# columns of interest to operate on (sorry).
# Operates on one quarter at a time
def nan_delete(time, flux, ferr, other1, other2, other3):
a = []
a = [time, flux, ferr, other1, other2, other3]
atrans = np.transpose(a)
newatrans = []
newa = []
for row in atrans:
# only save rows that DON'T contain a NaN value
if np.isnan(row).any() != True:
newatrans.append(row)
newa = np.transpose(newatrans)
newtime = newa[0]
newflux = newa[1]
newferr = newa[2]
newother1 = newa[3]
newother2 = newa[4]
newother3 = newa[5]
return newtime, newflux, newferr, newother1, newother2, newother3
# Put data from different quarters on the same AVERAGE level
# operates on a list of arrays (multiple quarters) all at once
# DON'T USE THIS ONE
# def normalize_qtr_avg(flux):
# sumflux = 0
# npts = 0
# for arr in flux:
# sumflux += np.nansum(arr)
# npts += len(arr[arr>0])
# avgflux = sumflux/npts # overall average for all quarters
# for arr in flux:
# avg_arr = np.mean(arr[arr>0]) # average for an individual quarter
# arr += avgflux - avg_arr
# return flux
# Put data from different quarters on the same MEDIAN level
# operates on a list of arrays (multiple quarters) all at once
def normalize_qtr_med(flux):
sumflux = 0
npts = 0
for arr in flux:
sumflux += np.nansum(arr)
npts += len(arr)
avgflux = sumflux/npts # overall average for all quarters
for arr in flux:
med_arr = np.median(arr) # median for an individual quarter
arr += avgflux - med_arr
return flux
# Line up the gaps within each quarter
# operates on a list of arrays (multiple quarters) all at once
def lineup_qtr_gaps(time, flux, maskstart, maskend):
diffs = np.zeros(len(time) - 1)
for i in range(0,len(time) - 1): # loop through quarters
# calculate differences between flux points at quarter start/end
start = 0
end = -1
for idx, mask in enumerate(maskstart):
while (time[i][end] > maskstart[idx] and time[i][end] < maskend[idx]):
#print('end', end, time[i][end], maskstart[idx], maskend[idx])
end -= 1
while (time[i+1][start] > maskstart[idx] and time[i+1][start] < maskend[idx]):
#print('start', start, time[i+1][start], maskstart[idx], maskend[idx])
start += 1
diffs[i] = (flux[i][end] - flux[i+1][start])
# maxi will find the point with the largest change in flux
maxi = lambda z: np.where(max(abs(z)) == abs(z))[0][0]
cntr = 0 # counter
max_val = max(abs(diffs))
while max_val > 100: #original value here was 100
# this is the index of the largest change in flux, so it needs adjusting
ind = maxi(diffs)
# this is the actual change in flux associated with that index
diff = diffs[ind]
# adjust the flux at this spot and its neighbor so they meet
flux[ind] = flux[ind] - diff/2.0
flux[ind+1] = flux[ind+1] + diff/2.0
diffs = np.zeros(len(time) - 1)
for i in range(0, len(time) - 1):
# calculate differences between flux points at quarter start/end, again
start = 0
end = -1
for idx, mask in enumerate(maskstart):
while time[i][end] > maskstart[idx] and time[i][end] < maskend[idx]:
#print('end', end, time[i][end], maskstart[idx], maskend[idx])
end -= 1
while time[i+1][start] > maskstart[idx] and time[i+1][start] < maskend[idx]:
#print('start', start, time[i+1][start], maskstart[idx], maskend[idx])
start += 1
diffs[i] = (flux[i][end] - flux[i+1][start])
cntr += 1 # count how many times this while-loop happens
max_val = max(abs(diffs))
# print(max_val, cntr)
return time, flux
# performs detrending with cotrending basis vectors (cbvs)
# lcin and lcout must both be FITS filenames
def kepcotrend(lcin, lcout, cbvfile, maskfile=''):
iraf.kepcotrend(infile=lcin, outfile=lcout, cbvfile=cbvfile,
vectors='1 2', method='simplex', fitpower=1, iterate='yes', sigmaclip=2.0,
maskfile=maskfile, scinterp='None', plot='no', clobber='yes', verbose='no')
return | 36.691781 | 82 | 0.682472 | 872 | 5,357 | 4.174312 | 0.302752 | 0.016484 | 0.015385 | 0.018132 | 0.406868 | 0.382143 | 0.343956 | 0.343956 | 0.302747 | 0.273077 | 0 | 0.02268 | 0.193392 | 5,357 | 146 | 83 | 36.691781 | 0.819718 | 0.445399 | 0 | 0.252874 | 0 | 0 | 0.008718 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.034483 | 0 | 0.172414 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c93c3807680923e216b02227b5a6105e6428b73 | 17,649 | py | Python | src/beacon_api/utils/polyvalent_functions.py | jrambla/beacon-2.x | f6c8bbecd183471d62c01e040d6e0b3c9ef8f448 | [
"Apache-2.0"
] | null | null | null | src/beacon_api/utils/polyvalent_functions.py | jrambla/beacon-2.x | f6c8bbecd183471d62c01e040d6e0b3c9ef8f448 | [
"Apache-2.0"
] | null | null | null | src/beacon_api/utils/polyvalent_functions.py | jrambla/beacon-2.x | f6c8bbecd183471d62c01e040d6e0b3c9ef8f448 | [
"Apache-2.0"
] | null | null | null | """
Functions used by different endpoints.
- To do basic operations
- To parse the filters request
- To manage access resolution
"""
import ast
import logging
import yaml
import requests
from pathlib import Path
from ..api.exceptions import BeaconBadRequest, BeaconServerError, BeaconForbidden, BeaconUnauthorised
from .. import __apiVersion__
from ..conf.config import DB_SCHEMA
LOG = logging.getLogger(__name__)
# ----------------------------------------------------------------------------------------------------------------------
# BASIC FUNCTIONS
# ----------------------------------------------------------------------------------------------------------------------
def create_prepstmt_variables(value):
"""Takes a value of how many prepared variables you want to pass a query
and creates a string to put it in it"""
dollars = []
for element in range(value):
element += 1
variable = "$" + str(element)
dollars.append(variable)
return ", ".join(dollars)
def filter_exists(include_dataset, datasets):
"""Return those datasets responses that the `includeDatasetResponses` parameter decides.
    Look at the exist parameter in each returned dataset to establish HIT or MISS.
"""
if include_dataset == 'ALL':
return datasets
elif include_dataset == 'NONE':
return []
elif include_dataset == 'HIT':
return [d for d in datasets if d['exists'] is True]
elif include_dataset == 'MISS':
return [d for d in datasets if d['exists'] is False]
def datasetHandover(dataset_name):
"""Return the datasetHandover with the correct name of the dataset."""
datasetHandover = [ { "handoverType" : {
"id" : "CUSTOM",
"label" : "Dataset info"
},
"note" : "Dataset information and DAC contact details in EGA Website",
"url" : f"https://ega-archive.org/datasets/{dataset_name}"
} ]
return datasetHandover
# ----------------------------------------------------------------------------------------------------------------------
# YAML LOADER
# ----------------------------------------------------------------------------------------------------------------------
def find_yml_and_load(input_file):
"""Try to load the access levels yaml and return it as a dict."""
file = Path(input_file)
if not file.exists():
LOG.error(f"The file '{file}' does not exist", file=sys.stderr)
return
if file.suffix in ('.yaml', '.yml'):
with open(file, 'r') as stream:
file_dict = yaml.safe_load(stream)
return file_dict
# Otherwise, fail
LOG.error(f"Unsupported format for {file}", file=sys.stderr)
# ----------------------------------------------------------------------------------------------------------------------
# FILTERING TERMS MANAGEMENT
# ----------------------------------------------------------------------------------------------------------------------
def parse_filters_request(filters_request_list):
"""Create a list of the filters passed in the query, where each filter
is another list in the main list with the following elements: ontology, term, operator, value.
"""
list_filters = []
for unprocessed_filter in filters_request_list:
filter_elements = unprocessed_filter.split(":")
ontology = filter_elements[0]
operator_switch = False
for operator in [">=", "<=", "=", ">", "<"]: # TO DO: raise an error if "=<" or "=>" are given
if operator in filter_elements[1]:
operator = operator
term = filter_elements[1].split(operator)[0]
value = filter_elements[1].split(operator)[1]
operator_switch = True
break
if operator_switch:
final_elements = [ontology, term, operator, value]
operator_switch = False
else:
final_elements = [ontology, filter_elements[1]]
list_filters.append(final_elements)
return list_filters
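# Example with hypothetical filter values: plain terms keep two elements, while
# comparisons gain an operator and a value:
#   parse_filters_request(["EFO:0009655", "PATO:age>=18"])
#   # -> [['EFO', '0009655'], ['PATO', 'age', '>=', '18']]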
async def prepare_filter_parameter(db_pool, filters_request):
"""Parse the filters parameters given in the query to create the string that needs to be passed
to the SQL query.
e.g. '(technology)::jsonb ?& array[''Illumina Genome Analyzer II'', ''Illumina HiSeq 2000''] AND
(other)::jsonb ?& array[''example1'', ''example2'']
"""
# First we want to parse the filters request
if isinstance(filters_request, list):
list_filters = parse_filters_request(filters_request)
else:
list_filters = parse_filters_request(ast.literal_eval(filters_request))
combinations_list = "','".join([":".join([filter_elements[0],filter_elements[1]]) for filter_elements in list_filters])
combinations_list = "'" + combinations_list + "'"
# Then we connect to the DB and retrieve the parameters that will be passed to the main query
async with db_pool.acquire(timeout=180) as connection:
try:
query = f"""SELECT target_table, column_name, column_value
FROM ontology_term_column_correspondance
WHERE concat_ws(':', ontology, term) IN ({combinations_list})"""
LOG.debug(f"QUERY filters info: {query}")
statement = await connection.prepare(query)
db_response = await statement.fetch()
filter_dict = {}
for record in list(db_response):
if record["target_table"] not in filter_dict.keys():
filter_dict[record["target_table"]] = {}
filter_dict[record["target_table"]][record["column_name"]] = []
filter_dict[record["target_table"]][record["column_name"]].append(record["column_value"])
elif record["column_name"] not in filter_dict[record["target_table"]].keys():
filter_dict[record["target_table"]][record["column_name"]] = []
filter_dict[record["target_table"]][record["column_name"]].append(record["column_value"])
else:
filter_dict[record["target_table"]][record["column_name"]].append(record["column_value"])
# After we have retrieved the values in a dict with the target_table as keys and as value another dict with column_name as keys, we need to create the final string
strings_list = []
final_string = ""
for target_table, column_name_dict in filter_dict.items():
if target_table == "public.beacon_dataset_table":
for column_name, values in column_name_dict.items():
string_values = ", ".join("'" + str(value) + "'" for value in values)
string = f'({column_name})::jsonb ?& array[{string_values}]'
strings_list.append(string)
# Once we have the response, we parse it to create the final string needed as input
if not strings_list:
final_string = 'null'
else:
final_string = " AND ".join(strings_list)
return str(final_string), filter_dict
except Exception as e:
raise BeaconServerError(f'Query filters DB error: {e}')
# ----------------------------------------------------------------------------------------------------------------------
# ACCESS RELATED FUNCTIONS AND DICT
# ----------------------------------------------------------------------------------------------------------------------
def access_resolution(request, token, host, public_data, registered_data, controlled_data):
"""Determine the access level for a user.
Depends on user bona_fide_status, and by default it should be PUBLIC.
"""
permissions = []
# all should have access to PUBLIC datasets
# unless the request is for specific datasets
if public_data:
permissions.append("PUBLIC")
access = set(public_data) # empty if no datasets are given
# for now we are expecting that the permissions are a list of datasets
if registered_data and token["bona_fide_status"] is True:
permissions.append("REGISTERED")
access = access.union(set(registered_data))
# if user requests public datasets do not throw an error
# if both registered and controlled datasets are request this will be shown first
elif registered_data and not public_data:
if token["authenticated"] is False:
# token is not provided (user not authed)
raise BeaconUnauthorised(request, host, "missing_token", 'Unauthorized access to dataset(s), missing token.')
# token is present, but is missing perms (user authed but no access)
raise BeaconForbidden(request, host, 'Access to dataset(s) is forbidden.')
if controlled_data and 'permissions' in token and token['permissions']:
# The idea is to return only accessible datasets
# Default event, when user doesn't specify dataset ids
# Contains only dataset ids from token that are present at beacon
controlled_access = set(controlled_data).intersection(set(token['permissions']))
access = access.union(controlled_access)
if controlled_access:
permissions.append("CONTROLLED")
# if user requests public datasets do not throw an error
# By default permissions cannot be None, at worst empty set, thus this might never be reached
elif controlled_data and not (public_data or registered_data):
if token["authenticated"] is False:
# token is not provided (user not authed)
raise BeaconUnauthorised(request, host, "missing_token", 'Unauthorized access to dataset(s), missing token.')
# token is present, but is missing perms (user authed but no access)
raise BeaconForbidden(request, host, 'Access to dataset(s) is forbidden.')
LOG.info(f"Accesible datasets are: {list(access)}.")
return permissions, list(access)
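# Illustrative outcome (hypothetical ids): with public_data=[1],
# registered_data=[2], controlled_data=None and a token
# {"bona_fide_status": True, "authenticated": True}, access_resolution()
# returns the permissions ["PUBLIC", "REGISTERED"] and the accessible ids {1, 2};
# an unauthenticated request for registered-only datasets raises BeaconUnauthorised instead.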
async def fetch_datasets_access(db_pool, datasets):
"""Retrieve 3 list of the available datasets depending on the access type"""
LOG.info('Retrieving info about the available datasets (id and access type).')
public = []
registered = []
controlled = []
async with db_pool.acquire(timeout=180) as connection:
async with connection.transaction():
datasets_query = None if datasets == "null" or not datasets else ast.literal_eval(datasets)
try:
query = f"""SELECT access_type, id, stable_id FROM {DB_SCHEMA}.beacon_dataset
WHERE coalesce(stable_id = any($1), true);
"""
LOG.debug(f"QUERY datasets access: {query}")
statement = await connection.prepare(query)
db_response = await statement.fetch(datasets_query)
for record in list(db_response):
if record['access_type'] == 'PUBLIC':
public.append(record['id'])
if record['access_type'] == 'REGISTERED':
registered.append(record['id'])
if record['access_type'] == 'CONTROLLED':
controlled.append(record['id'])
return public, registered, controlled
except Exception as e:
raise BeaconServerError(f'Query available datasets DB error: {e}')
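# Note on the query above: coalesce(stable_id = any($1), true) matches every
# row when $1 is NULL (i.e. datasets_query is None), because x = any(NULL)
# evaluates to NULL and coalesce() then falls back to true; with a concrete
# array it acts as an IN-style filter on stable_id.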
# ----------------------------------------------------------------------------------------------------------------------
# FILTER RESPONSE BASED ON ACCESS LEVELS
# ----------------------------------------------------------------------------------------------------------------------
def filter_response(response, access_levels_dict, accessible_datasets, user_levels, field2access, parent_key=None):
"""
Recursive function that parses the response of the beacon to filter out those fields that are
not accessible for the user (based on the access level).
:response: beacon response
:access_levels_dict: access levels dictionary created out of the yml file in /utils
:accessible_datasets: list of datasets accessible by the user (taking into account its privileges)
:user_levels: list of levels that the user has, e.g. ['PUBLIC', 'REGISTERED']
:field2access: dictionary that maps the child_field name to its corresponding parent_field name in the access levels dict (e.g. 'datasets' inside the parent 'beacon' maps to its parent name 'beaconDataset')
:parent_key: used inside the recursion to store the parent key of the dict we are in
"""
final_dict = {}
if isinstance(response, dict):
for key, val in response.items():
translated_key = field2access[key] if key in field2access.keys() else key
specific_access_levels_dict = access_levels_dict[parent_key] if parent_key else access_levels_dict
if translated_key not in access_levels_dict.keys() and translated_key not in specific_access_levels_dict.keys():
final_dict[key] = val
else:
# if (isinstance(val, dict) or isinstance(val, list)) and key != "info":
if (isinstance(val, dict) or isinstance(val, list)) and translated_key in access_levels_dict.keys():
parent_permission = True
self_permission = True if access_levels_dict[translated_key]["accessLevelSummary"] in user_levels else False
if parent_key:
parent_permission = True if access_levels_dict[parent_key][key] in user_levels else False
if self_permission and parent_permission:
final_dict[key] = filter_response(val, access_levels_dict, accessible_datasets, user_levels, field2access, translated_key)
else:
valid_level = access_levels_dict[parent_key][translated_key] if parent_key else access_levels_dict[translated_key]
if valid_level in user_levels:
final_dict[key] = val
elif isinstance(response, list):
filtered = []
for element in response:
if isinstance(element, dict):
datasetId = element.get("internalId")
if not datasetId or datasetId in accessible_datasets: # controlling specific access permission to show a dataset response
filtered.append(filter_response(element, access_levels_dict, accessible_datasets, user_levels, field2access, parent_key))
return filtered
return final_dict
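# Behaviour sketch: a key is copied into final_dict only when its access
# level (resolved through access_levels_dict, after translating the key via
# field2access) is present in user_levels -- e.g. a field marked REGISTERED
# is dropped for a user whose levels are only ['PUBLIC']; list elements are
# likewise dropped when their "internalId" is not in accessible_datasets.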
# ----------------------------------------------------------------------------------------------------------------------
# VARIANT HANDOVER and extra ANNOTATION
# ----------------------------------------------------------------------------------------------------------------------
def snp_resultsHandover(variantId):
"""Create the resultsHanover dict by inserting the variantId into the template."""
resultsHandover = [ {
"handoverType" : {
"id" : "data:1106",
"label" : "dbSNP ID"
},
"note" : "Link to dbSNP database",
"url" : f"https://www.ncbi.nlm.nih.gov/snp/?term={variantId}"
}, {
"handoverType" : {
"id" : "data:1106",
"label" : "dbSNP ID"
},
"note" : "Link to dbSNP API",
"url" : f"https://api.ncbi.nlm.nih.gov/variation/v0/beta/refsnp/{variantId[2:]}"
} ]
return resultsHandover
async def fetch_variantAnnotations(variant_details):
"""
Create part of the variantsAnnotation response by fetching the cellBase API and the dbSNP API.
The variant_id has to be in the following format: chrom:start:ref:alt.
If in the variantDetails the alt is null, it has to be changed to a '-'.
"""
# cellBase
chrom = variant_details.get("chromosome") if variant_details.get("chromosome") else variant_details.get("referenceName")
start = variant_details.get("start")
ref = variant_details.get("referenceBases")
alt = variant_details.get("alternateBases") if variant_details.get("alternateBases") else '-'
variant_id = ":".join([str(chrom), str(start + 1), ref, alt])
url = f"http://cellbase.clinbioinfosspa.es/cb/webservices/rest/v4/hsapiens/genomic/variant/{variant_id}/annotation"
r = requests.get(url)
cellBase_dict = r.json() if r else ''
try:
cellBase_rsID = cellBase_dict["response"][0]["result"][0]["id"]
except (KeyError, IndexError, TypeError):
cellBase_rsID = None
# dbSNP
rsID = variant_details.get("variantId") if (variant_details.get("variantId") and variant_details.get("variantId") != ".") else cellBase_rsID
if rsID:
url = f"https://api.ncbi.nlm.nih.gov/variation/v0/beta/refsnp/{rsID[2:]}"
r = requests.get(url)
dbSNP_dict = r.json() if r else ''
else:
dbSNP_dict = ''
return rsID, cellBase_dict, dbSNP_dict
| 49.025 | 209 | 0.570854 | 1,914 | 17,649 | 5.120167 | 0.202194 | 0.022041 | 0.02449 | 0.015714 | 0.271531 | 0.215 | 0.196939 | 0.190408 | 0.161633 | 0.144286 | 0 | 0.003547 | 0.265227 | 17,649 | 359 | 210 | 49.16156 | 0.752159 | 0.27622 | 0 | 0.195455 | 0 | 0.013636 | 0.172437 | 0.012764 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036364 | false | 0 | 0.036364 | 0 | 0.145455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c94bd3786b5a0a64f466b5fdfd28ee49d03d138 | 990 | py | Python | 02_Double/double_aws-samples_1_v1.py | Machine-Learning-Labs/DeepRacerRewardFunctionsCollection | f6addf4654de90f9d1669fd5de67331add93ab2f | [
"MIT"
] | 17 | 2020-01-14T06:25:10.000Z | 2022-01-25T18:02:37.000Z | 02_Double/double_aws-samples_1_v1.py | Machine-Learning-Labs/DeepRacerRewardFunctionsCollection | f6addf4654de90f9d1669fd5de67331add93ab2f | [
"MIT"
] | null | null | null | 02_Double/double_aws-samples_1_v1.py | Machine-Learning-Labs/DeepRacerRewardFunctionsCollection | f6addf4654de90f9d1669fd5de67331add93ab2f | [
"MIT"
] | 5 | 2020-05-30T18:49:18.000Z | 2021-09-03T19:38:39.000Z | '''
@author: AWS Samples
@Link: https://docs.aws.amazon.com/deepracer/latest/developerguide/what-is-deepracer.html
@License: Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
'''
def reward_function(params):
'''
Example of rewarding the agent to follow the track center line
'''
# Read input parameters
track_width = params['track_width']
distance_from_center = abs(params['distance_from_center'])
# Calculate 3 markers that are farther and farther away from the center line
marker_1 = 0.1 * track_width
marker_2 = 0.25 * track_width
marker_3 = 0.5 * track_width
# Give higher reward if the car is closer to center line and vice versa
if distance_from_center <= marker_1:
reward = 1
elif distance_from_center <= marker_2:
reward = 0.5
elif distance_from_center <= marker_3:
reward = 0.1
else:
reward = 1e-3 # likely crashed/close to off-track
return reward
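# Illustrative check (hypothetical input): with track_width=1.0 the markers
# sit at 0.1, 0.25 and 0.5, so e.g.
# reward_function({'track_width': 1.0, 'distance_from_center': 0.2})
# returns 0.5, while a car 0.6 away from the center gets the 1e-3 penalty.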
| 30.9375 | 93 | 0.680808 | 142 | 990 | 4.591549 | 0.528169 | 0.076687 | 0.138037 | 0.110429 | 0.08589 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033025 | 0.235354 | 990 | 31 | 94 | 31.935484 | 0.828269 | 0.458586 | 0 | 0 | 0 | 0 | 0.062249 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c95e40e7220fe3c7db4c0c8a471b2078f613932 | 14,498 | py | Python | parsons/github/github.py | cmc333333/parsons | 50804a3627117797570f1e9233c9bbad583f7831 | [
"Apache-2.0"
] | 3 | 2019-09-05T16:57:15.000Z | 2019-10-01T19:56:58.000Z | parsons/github/github.py | cmc333333/parsons | 50804a3627117797570f1e9233c9bbad583f7831 | [
"Apache-2.0"
] | 22 | 2019-09-03T13:23:37.000Z | 2019-10-03T20:32:48.000Z | parsons/github/github.py | cmc333333/parsons | 50804a3627117797570f1e9233c9bbad583f7831 | [
"Apache-2.0"
] | 2 | 2019-09-01T18:30:10.000Z | 2019-10-03T20:07:46.000Z | import logging
from functools import partial, wraps
import petl
import requests
from github import Github as PyGithub
from github.GithubException import UnknownObjectException
from parsons.etl.table import Table
from parsons.utilities import check_env, files
logger = logging.getLogger(__name__)
def _wrap_method(decorator, method):
def _wrapper(self, *args, **kwargs):
bound_method = partial(method.__get__(self, type(self)))
return decorator(bound_method)(*args, **kwargs)
return _wrapper
def decorate_methods(decorator):
# Based on Django's django.utils.decorators.method_decorator
def decorate(cls):
for method in dir(cls):
# Don't decorate dunder methods
if method.startswith("__"):
continue
cls_method = getattr(cls, method)
if callable(cls_method):
setattr(cls, method, _wrap_method(decorator, cls_method))
return cls
return decorate
def wrap_github_404(func):
@wraps(func)
def _wrapped_func(*args, **kwargs):
try:
return (func)(*args, **kwargs)
except UnknownObjectException:
raise ParsonsGitHubError(
"Couldn't find the object you referenced, maybe you need to log in?"
)
return _wrapped_func
class ParsonsGitHubError(Exception):
pass
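# Behaviour sketch: decorate_methods(wrap_github_404) below wraps every
# non-dunder method of GitHub, so e.g. GitHub().get_user("no-such-user")
# surfaces PyGithub's UnknownObjectException as a ParsonsGitHubError with a
# friendlier message instead of a raw 404.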
@decorate_methods(wrap_github_404)
class GitHub(object):
"""Creates a GitHub class for accessing the GitHub API.
Uses ``parsons.utilities.check_env`` to load credentials from environment variables if not
supplied. Supports either a username and password or an access token for authentication. The
client also supports unauthenticated access.
Args:
username: Optional[str]
Username of account to use for credentials. Can be set with ``GITHUB_USERNAME``
environment variable.
password: Optional[str]
Password of account to use for credentials. Can be set with ``GITHUB_PASSWORD``
environment variable.
access_token: Optional[str]
Access token to use for credentials. Can be set with ``GITHUB_ACCESS_TOKEN`` environment
variable.
"""
def __init__(self, username=None, password=None, access_token=None):
self.username = check_env.check('GITHUB_USERNAME', username, optional=True)
self.password = check_env.check('GITHUB_PASSWORD', password, optional=True)
self.access_token = check_env.check('GITHUB_ACCESS_TOKEN', access_token, optional=True)
if self.username and self.password:
self.client = PyGithub(self.username, self.password)
elif self.access_token:
self.client = PyGithub(self.access_token)
else:
self.client = PyGithub()
def _as_table(self, paginated_list, page=None, page_size=100):
"""Converts a paginated list into a Parsons ``Table``. Uses the ``_rawData`` property of
each item instead of calling ``raw_data`` to avoid making a separate request for each item
in a page for types that PyGithub doesn't consider complete.
Args:
paginated_list: ``pygithub.PaginatedList.PaginatedList``
PyGithub paginated list
page: Optional[int]
Page number to load. Defaults to None. If not specified, all results are returned.
page_size: int
Page size. Defaults to 100. Ignored if ``page`` is not set.
Returns:
``Table``
Table object created from the raw data of the list
"""
if page is not None:
page_start = (page - 1) * page_size
page_end = page_start + page_size
list_pages = paginated_list[page_start:page_end]
else:
list_pages = paginated_list
return Table([list_item._rawData for list_item in list_pages])
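# Illustrative slice: with page=2 and page_size=100 the code above selects
# paginated_list[100:200], i.e. the second hundred items; with page=None the
# whole paginated list is materialized into the Table.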
def get_user(self, username):
"""Loads a GitHub user by username
Args:
username: str
Username of user to load
Returns:
dict
User information
"""
return self.client.get_user(username).raw_data
def get_organization(self, organization_name):
"""Loads a GitHub organization by name
Args:
organization_name: str
Name of organization to load
Returns:
dict
Organization information
"""
return self.client.get_organization(organization_name).raw_data
def get_repo(self, repo_name):
"""Loads a GitHub repo by name
Args:
repo_name: str
Full repo name (account/name)
Returns:
dict
Repo information
"""
return self.client.get_repo(repo_name).raw_data
def list_user_repos(self, username, page=None, page_size=100):
"""List user repos with pagination, returning a ``Table``
Args:
username: str
GitHub username
page: Optional[int]
Page number. All results are returned if not set.
page_size: int
Page size. Defaults to 100.
Returns:
``Table``
Table with page of user repos
"""
logger.info(f'Listing page {page} of repos for user {username}')
return self._as_table(
self.client.get_user(username).get_repos(), page=page, page_size=page_size
)
def list_organization_repos(self, organization_name, page=None, page_size=100):
"""List organization repos with pagination, returning a ``Table``
Args:
organization_name: str
GitHub organization name
page: Optional[int]
Page number. All results are returned if not set.
page_size: int
Page size. Defaults to 100.
Returns:
``Table``
Table with page of organization repos
"""
logger.info(f'Listing page {page} of repos for organization {organization_name}')
return self._as_table(
self.client.get_organization(organization_name).get_repos(),
page=page,
page_size=page_size,
)
def get_issue(self, repo_name, issue_number):
"""Loads a GitHub issue
Args:
repo_name: str
Full repo name (account/name)
issue_number: int
Number of issue to load
Returns:
dict
Issue information
"""
return self.client.get_repo(repo_name).get_issue(number=issue_number).raw_data
def list_repo_issues(self, repo_name, state="open", assignee=None, creator=None, mentioned=None,
labels=[], sort="created", direction="desc", since=None, page=None,
page_size=100):
"""List issues for a given repo
Args:
repo_name: str
Full repo name (account/name)
state: str
State of issues to return. One of "open", "closed", "all". Defaults to "open".
assignee: Optional[str]
Name of assigned user, "none", or "*".
creator: Optional[str]
Name of user that created the issue.
mentioned: Optional[str]
Name of user mentioned in the issue.
labels: list[str]
List of label names. Defaults to []
sort: str
What to sort results by. One of "created", "updated", "comments". Defaults to
"created".
direction: str
Direction to sort. One of "asc", "desc". Defaults to "desc".
since: Optional[Union[datetime.datetime, datetime.date]]
Timestamp to pull issues since. Defaults to None.
page: Optional[int]
Page number. All results are returned if not set.
page_size: int
Page size. Defaults to 100.
Returns:
``Table``
Table with page of repo issues
"""
logger.info(f'Listing page {page} of issues for repo {repo_name}')
kwargs_dict = {"state": state, "sort": sort, "direction": direction}
if assignee:
kwargs_dict["assignee"] = assignee
if creator:
kwargs_dict["creator"] = creator
if mentioned:
kwargs_dict["mentioned"] = mentioned
if len(labels) > 0:
kwargs_dict["labels"] = ",".join(labels)
if since:
kwargs_dict["since"] = f'{since.isoformat()[:19]}Z'
return self._as_table(
self.client.get_repo(repo_name).get_issues(**kwargs_dict),
page=page,
page_size=page_size,
)
def get_pull_request(self, repo_name, pull_request_number):
"""Loads a GitHub pull request
Args:
repo_name: str
Full repo name (account/name)
pull_request_number: int
Pull request number
Returns:
dict
Pull request information
"""
return self.client.get_repo(repo_name).get_pull(pull_request_number).raw_data
def list_repo_pull_requests(self, repo_name, state="open", base=None, sort="created",
direction="desc", page=None, page_size=100):
"""Lists pull requests for a given repo
Args:
repo_name: str
Full repo name (account/name)
state: str
One of "open, "closed", "all". Defaults to "open".
base: Optional[str]
Base branch to filter pull requests by.
sort: str
How to sort pull requests. One of "created", "updated", "popularity". Defaults to
"created".
direction: str
Direction to sort by. Defaults to "desc".
page: Optional[int]
Page number. All results are returned if not set.
page_size: int
Page size. Defaults to 100.
Returns:
``Table``
Table with page of repo pull requests
"""
logger.info(f'Listing page {page} of pull requests for repo {repo_name}')
kwargs_dict = {"state": state, "sort": sort, "direction": direction}
if base:
kwargs_dict["base"] = base
return self._as_table(
self.client.get_repo(repo_name).get_pulls(**kwargs_dict), page=page, page_size=page_size
)
def list_repo_contributors(self, repo_name, page=None, page_size=100):
"""Lists contributors for a given repo
Args:
repo_name: str
Full repo name (account/name)
page: Optional[int]
Page number. All results are returned if not set.
page_size: int
Page size. Defaults to 100.
Returns:
``Table``
Table with page of repo contributors
"""
logger.info(f'Listing page {page} of contributors for repo {repo_name}')
return self._as_table(
self.client.get_repo(repo_name).get_contributors(), page=page, page_size=page_size
)
def download_file(self, repo_name, path, branch=None, local_path=None):
"""Download a file from a repo by path and branch. Defaults to the repo's default branch if
branch is not supplied.
Uses the download_url directly rather than the API because the API only supports contents up
to 1MB from a repo directly, and the process for downloading larger files through the API is
much more involved.
Because download_url does not go through the API, it does not support username / password
authentication, and requires a token to authenticate.
Args:
repo_name: str
Full repo name (account/name)
path: str
Path from the repo base directory
branch: Optional[str]
Branch to download file from. Defaults to repo default branch
local_path: Optional[str]
Local file path to download file to. Will create a temp file if not supplied.
Returns:
str
File path of downloaded file
"""
if not local_path:
local_path = files.create_temp_file_for_path(path)
repo = self.client.get_repo(repo_name)
if branch is None:
branch = repo.default_branch
logger.info(f'Downloading {path} from {repo_name}, branch {branch} to {local_path}')
headers = None
if self.access_token:
headers = {
'Authorization': f'token {self.access_token}',
}
res = requests.get(f'https://raw.githubusercontent.com/{repo_name}/{branch}/{path}',
headers=headers)
if res.status_code == 404:
raise UnknownObjectException(status=404, data=res.content)
elif res.status_code != 200:
raise ParsonsGitHubError(
f'Error downloading {path} from repo {repo_name}: {res.content}')
with open(local_path, 'wb') as f:
f.write(res.content)
logger.info(f'Downloaded {path} to {local_path}')
return local_path
def download_table(self, repo_name, path, branch=None, local_path=None, delimiter=','):
"""Download a CSV file from a repo by path and branch as a Parsons Table.
Args:
repo_name: str
Full repo name (account/name)
path: str
Path from the repo base directory
branch: Optional[str]
Branch to download file from. Defaults to repo default branch
local_path: Optional[str]
Local file path to download file to. Will create a temp file if not supplied.
delimiter: Optional[str]
The CSV delimiter to use to parse the data. Defaults to ','
Returns:
Parsons Table
See :ref:`parsons-table` for output options.
"""
downloaded_file = self.download_file(repo_name, path, branch, local_path)
return Table(petl.fromcsv(downloaded_file, delimiter=delimiter))
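# Illustrative usage (hypothetical repo name and token; network access is
# required, and download_file needs a token for private repos):
# github = GitHub(access_token="<token>")
# issues = github.list_repo_issues("octocat/hello-world", state="all", page=1)
# local = github.download_file("octocat/hello-world", "README.md")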
| 34.193396 | 100 | 0.588702 | 1,707 | 14,498 | 4.864675 | 0.154657 | 0.036609 | 0.017221 | 0.014451 | 0.373675 | 0.349711 | 0.317558 | 0.291426 | 0.251204 | 0.195448 | 0 | 0.005792 | 0.333149 | 14,498 | 423 | 101 | 34.274232 | 0.853124 | 0.433715 | 0 | 0.101449 | 0 | 0 | 0.117053 | 0.003714 | 0 | 0 | 0 | 0 | 0 | 1 | 0.144928 | false | 0.036232 | 0.057971 | 0 | 0.347826 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c9cfa04dec4051e0276862ca37caf6daae18af1 | 3,716 | py | Python | thirdparty/gd2c/gd2c/loader.py | ppiecuch/godot | ff2098b324b814a0d1bd9d5722aa871fc5214fab | [
"MIT",
"Apache-2.0",
"CC-BY-4.0",
"Unlicense"
] | null | null | null | thirdparty/gd2c/gd2c/loader.py | ppiecuch/godot | ff2098b324b814a0d1bd9d5722aa871fc5214fab | [
"MIT",
"Apache-2.0",
"CC-BY-4.0",
"Unlicense"
] | null | null | null | thirdparty/gd2c/gd2c/loader.py | ppiecuch/godot | ff2098b324b814a0d1bd9d5722aa871fc5214fab | [
"MIT",
"Apache-2.0",
"CC-BY-4.0",
"Unlicense"
] | null | null | null | from __future__ import annotations
from pathlib import Path
from gd2c.gdscriptclass import GDScriptClass, GDScriptClassConstant, GDScriptFunctionConstant, GDScriptFunction, GDScriptGlobal, GDScriptMember, GDScriptFunctionParameter
from gd2c.variant import VariantType
from gd2c.bytecode import extract
from typing import List, Iterable, TYPE_CHECKING
import json
def to_camel_case(snake_str: str, capitalize_first: bool = True):
components = snake_str.split('_')
# We capitalize the first letter of each component
if capitalize_first:
return ''.join(x.title() for x in components)
else:
return components[0] + ''.join(x.title() for x in components[1:])
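# Illustrative examples:
# to_camel_case("my_script_name") -> "MyScriptName"
# to_camel_case("my_script_name", capitalize_first=False) -> "myScriptName"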
if TYPE_CHECKING:
from gd2c.project import Project
class JsonGDScriptLoader:
def __init__(self, project: Project):
self._project = project
def load_classes(self, physical_path: Path) -> Iterable[GDScriptClass]:
with physical_path.open() as f:
data = json.load(f)
yield self._build_class(physical_path, data)
def _build_class(self, physical_path: Path, data) -> GDScriptClass:
cls = GDScriptClass(
self._project.to_resource_path(str(physical_path)),
data.get("name", None) or self._project.generate_unique_class_name(to_camel_case(physical_path.with_suffix('').stem)),
self._project.generate_unique_class_type_id())
cls.base_resource_path = data["base_type"]
cls.built_in_type = data["type"]
for index, entry in enumerate(data["global_constants"]):
glob = GDScriptGlobal(index, entry["name"], entry["original_name"], entry["type_code"], entry["kind_code"], entry["value"], entry["source"])
cls.globals[glob.index] = glob
for signal in data["signals"]:
cls.add_signal(signal)
for entry in data["members"]:
member = GDScriptMember(entry["name"], int(entry["index"]), entry["type"])
cls.add_member(member)
for index, entry in enumerate(data["constants"]):
cconst = GDScriptClassConstant(entry["name"], int(entry["type"]), bytes(list(entry["data"])), entry["declaration"])
cls.add_constant(cconst)
for index, entry in enumerate(data["methods"]):
func = GDScriptFunction(entry["name"], GDScriptFunction.TYPE_METHOD)
func.stack_size = int(entry["stack_size"])
func.default_arguments_jump_table = list(map(lambda x: int(x), entry["default_arguments"]))
func.return_vtype = VariantType.get(int(entry["return_type"]["type"]))
func.global_names = entry["global_names"]
num_parameters = len(entry["parameters"])
len_jump_table = len(func.default_arguments_jump_table)
for pindex, pentry in enumerate(entry["parameters"]):
param = GDScriptFunctionParameter(
pentry["name"],
VariantType.get(pentry["type"]),
pindex)
param.is_optional = pindex >= num_parameters - len_jump_table
func.add_parameter(param)
for centry in entry["constants"]:
mconst = GDScriptFunctionConstant(
int(centry["index"]),
centry["type"],
bytes(list(map(lambda x: int(x), centry["data"]))),
centry["declaration"])
func.add_constant(mconst)
ip = 0
while ip < len(entry["bytecode"]):
op = extract(func, entry["bytecode"], ip)
func.add_op(ip, op)
ip += op.stride
cls.add_function(func)
return cls
| 43.209302 | 170 | 0.626749 | 417 | 3,716 | 5.390887 | 0.314149 | 0.032028 | 0.017349 | 0.020018 | 0.129004 | 0.076512 | 0.023132 | 0 | 0 | 0 | 0 | 0.002901 | 0.257804 | 3,716 | 85 | 171 | 43.717647 | 0.812183 | 0.012917 | 0 | 0 | 0 | 0 | 0.07665 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057971 | false | 0 | 0.115942 | 0 | 0.231884 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c9e2ce825149494c4555d540152aae13598ea62 | 1,301 | py | Python | deepppl/tests/target_py/kmeans.py | sfantao/deepppl | 3091c342814744d622eda6f7a185085d420a152b | [
"Apache-2.0"
] | 18 | 2018-06-07T17:41:30.000Z | 2021-03-19T23:31:14.000Z | deepppl/tests/target_py/kmeans.py | sfantao/deepppl | 3091c342814744d622eda6f7a185085d420a152b | [
"Apache-2.0"
] | 19 | 2018-06-11T17:42:03.000Z | 2020-10-13T17:19:11.000Z | deepppl/tests/target_py/kmeans.py | sfantao/deepppl | 3091c342814744d622eda6f7a185085d420a152b | [
"Apache-2.0"
] | 7 | 2018-06-08T20:50:24.000Z | 2020-10-12T22:00:09.000Z | import torch
from torch import tensor, rand
import pyro
import torch.distributions.constraints as constraints
import pyro.distributions as dist
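# NOTE: helper names such as log, zeros, sample, ImproperUniform, dot_self
# and log_sum_exp are not imported here; this generated test target
# presumably relies on the deepppl runtime injecting them into the module
# namespace before execution.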
def transformed_data(D=None, K=None, N=None, y=None):
neg_log_K = -log(K)
return {'neg_log_K': neg_log_K}
def model(D=None, K=None, N=None, y=None, transformed_data=None):
neg_log_K = transformed_data['neg_log_K']
mu = sample('mu', ImproperUniform(shape=(K, D)))
soft_z = zeros((N, K))
for n in range(1, N + 1):
for k in range(1, K + 1):
soft_z[n - 1, k - 1] = neg_log_K - 0.5 * dot_self(mu[k - 1] - y
[n - 1])
for k in range(1, K + 1):
sample('mu' + '__{}'.format(k - 1) + '__1', dist.Normal(zeros(D), 1
), obs=mu[k - 1])
for n in range(1, N + 1):
sample('expr' + '__{}'.format(n) + '__2', dist.Exponential(1.0),
obs=-log_sum_exp(soft_z[n - 1]))
def generated_quantities(D=None, K=None, N=None, y=None, transformed_data=
None, parameters=None):
neg_log_K = transformed_data['neg_log_K']
mu = parameters['mu']
soft_z = zeros((N, K))
for n in range(1, N + 1):
for k in range(1, K + 1):
soft_z[n - 1, k - 1] = neg_log_K - 0.5 * dot_self(mu[k - 1] - y
[n - 1])
return {'soft_z': soft_z} | 34.236842 | 75 | 0.571868 | 225 | 1,301 | 3.115556 | 0.213333 | 0.057061 | 0.089872 | 0.042796 | 0.479315 | 0.479315 | 0.479315 | 0.459344 | 0.430813 | 0.410842 | 0 | 0.033368 | 0.262875 | 1,301 | 38 | 76 | 34.236842 | 0.697602 | 0 | 0 | 0.4375 | 0 | 0 | 0.043779 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09375 | false | 0 | 0.15625 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c9ffef9867b37cc6ff8be2016f65cefeb7bd057 | 5,885 | py | Python | Siberia/plug/plugin_adminka.py | spouk/Siberia | 0b7dfe2271b285eb038a09ef954ed8c605cbd2d2 | [
"MIT"
] | null | null | null | Siberia/plug/plugin_adminka.py | spouk/Siberia | 0b7dfe2271b285eb038a09ef954ed8c605cbd2d2 | [
"MIT"
] | null | null | null | Siberia/plug/plugin_adminka.py | spouk/Siberia | 0b7dfe2271b285eb038a09ef954ed8c605cbd2d2 | [
"MIT"
] | null | null | null | #!/usr/local/bin/python
__author__ = 'spouk'
__all__ = ('AdminkaPlugin',)
__version__ = 0.1
__name__ = 'AdminkaPlugin for Siberia'
__middleware__ = True
#---------------------------------------------------------------------------
# global imports
#---------------------------------------------------------------------------
from jinja2 import Environment, FileSystemLoader, TemplateError
from ..plugins import SiberiaPlugin
from ..data import ProxyStack
from aiohttp import web, request
from asyncio import coroutine
from sqlalchemy import func
import os
#---------------------------------------------------------------------------
# implement jinja2 plugin for Siberia
#---------------------------------------------------------------------------
class AdminkaPlugin(SiberiaPlugin):
# stack assert messages
assert_msg = ProxyStack(
route = " не найден путь для добавления роутера для админки",
middle = " не найден стак в основном приложении для middlewares",
jinja2 = " не найден плагин Jinja2Plugin, установите его, он нужен для работы админки",
)
# plugin definitions variables
plugin_stack = ProxyStack(
api = 0.1,
name = "Adminka",
version = 0.1,
middleware = True,
)
# config for session
config = ProxyStack(
adminroute = '/adminka',
template_path = 'adminka/',
static_path = 'static/',
mark = '[ADMINKAPLUGIN] {}',
)
def __init__(self, app, template_path=None, routeadminka=None):
self.app = app
self.routeadminka = routeadminka or self.config.adminroute
self.template_adminka = os.path.join(os.path.dirname(os.path.realpath(__file__)), self.config.template_path)
# adminka handlers containers
self.hand = AdminkaHandlers(app=self.app, adminka=self)
# result functions some
self.online = None
def assignrouteadmin(self):
# assert self.routeadminka, (self.config.mark.format(self.config.assert_msg.get('route')))
# adding template
self.app.fn.get(self.routeadminka, name="adminka", handler=self.hand.adminkalogin)
# adding static paths
self.staticpath = os.path.join(self.template_adminka, self.config.static_path)
print("Adding static path: ", self.staticpath)
self.app.router.add_static(prefix='/adminka/static/css',
path=self.staticpath + '/css',
name='css_admin')
self.app.router.add_static(prefix='/adminka/static/font',
path=self.staticpath + '/font',
name='font_admin')
self.app.router.add_static(prefix='/adminka/static/img',
path=self.staticpath + '/img',
name='img_admin')
self.app.router.add_static(prefix='/adminka/static/js',
path=self.staticpath + '/js',
name='js_admin')
print("Added static adminka path")
# self.app.fn.maproute()
def setup(self):
# check the variables and plugins this plugin needs in the main application
# the adminka requires the jinja plugin, since everything in it is adapted to use `jinja` as the renderer
assert hasattr(self.app, 'middlewares'), (self.config.mark.format(self.config.assert_msg.get('middle')))
assert 'jinja2' in self.app.plugins, (self.config.mark.format(self.config.assert_msg.get('jinja2')))
# checks passed, add to the middlewares
if hasattr(self.app, 'middlewares'):
self.app.middlewares.append(self.adminka_middleware(app=self.app))
# point the jinja loader at the adminka template directory
jinja2 = self.app.plugins.get('jinja2')
jinja2.addtemplate(self.template_adminka)
print("------NEW JINJA TEMPLATES: ", jinja2.config.template_path)
# add the router
print(self.config.mark.format(" adding the adminka router"))
self.assignrouteadmin()
print(self.config.mark.format(" adminka router added, I think"))
def adminka_middleware(self, app):
@coroutine
def adminka(app, handler):
@coroutine
def middleware(request):
if request.path == self.routeadminka:
url = request.app.router['adminka'].url()
print("ADMINKA PATH FOUND:", url)
# return web.HTTPFound(url)
# get users online
r = yield from self.hand.users_online(request)
self.online = r
response = yield from handler(request)
return response
return middleware
return adminka
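# Note: this follows the old-style aiohttp middleware-factory shape -- the
# outer coroutine receives (app, handler) and returns a per-request
# `middleware` coroutine, so the online-users lookup above runs only for
# requests whose path equals self.routeadminka, before control is handed to
# the wrapped handler.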
class AdminkaHandlers(ProxyStack):
def __init__(self, app, adminka):
self.adminka = adminka # self adminka
self.app = app
self.db = self.app.db
self.render = self.app.render
# /`adminkaroute`
async def adminkalogin(self, request):
# return web.Response(body='/usr/home/spouk/PycharmProjects/Siberia/plug/adminka/adminka.html'.encode())
return await self.app.render('adminindex.html', online=self.adminka.online)
@coroutine
def url_user_online(self, request):
res = yield from self.app.render('usersonline.html', online=self.adminka.online)
return res
# users online
@coroutine
def users_online(self, request):
with (yield from self.db) as conn:
dbs = self.db.cook
# q = dbs.select().where([func.count(dbs.c.status)])
q = dbs.select().where(dbs.c.status == 1)
resp = yield from conn.execute(q)
found = yield from resp.fetchall()
return found
| 38.464052 | 121 | 0.584367 | 616 | 5,885 | 5.48539 | 0.310065 | 0.045576 | 0.020716 | 0.029595 | 0.149453 | 0.11749 | 0.11749 | 0.11749 | 0.07813 | 0 | 0 | 0.003935 | 0.26593 | 5,885 | 152 | 122 | 38.717105 | 0.778241 | 0.202209 | 0 | 0.061224 | 0 | 0 | 0.137887 | 0 | 0 | 0 | 0 | 0 | 0.030612 | 1 | 0.091837 | false | 0 | 0.071429 | 0 | 0.27551 | 0.061224 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ca1f66514af862a2395c2a72cc563dfe7628eb9 | 2,576 | py | Python | services/jobs/tests/api/base.py | Open-EO/openeo-openshift-driver | 822dbd3ccee25180cc48efd2f891504b6b5edc14 | [
"Apache-2.0"
] | 6 | 2018-10-02T15:04:11.000Z | 2019-12-13T11:36:49.000Z | services/jobs/tests/api/base.py | Open-EO/openeo-openshift-driver | 822dbd3ccee25180cc48efd2f891504b6b5edc14 | [
"Apache-2.0"
] | 54 | 2019-01-09T17:14:29.000Z | 2020-10-02T11:39:38.000Z | services/jobs/tests/api/base.py | Open-EO/openeo-openshift-driver | 822dbd3ccee25180cc48efd2f891504b6b5edc14 | [
"Apache-2.0"
] | 6 | 2018-07-03T15:58:05.000Z | 2019-07-03T07:20:46.000Z | """Provide base test class."""
from typing import Any, Callable
from nameko_sqlalchemy.database_session import Session
from jobs.dependencies.dag_handler import DagHandler
from jobs.service import JobService
from .exceptions import get_missing_resource_service_exception, get_not_authorized_service_exception
from ..utils import add_job, get_configured_job_service, get_random_job_id, get_random_user
class BaseCase:
"""Base test class to be inherited from by other test classes."""
dag_handler = DagHandler()
def get_method(self, service: JobService, method: str) -> Callable:
"""Return service method corresponding to a given string."""
mapper = {
"get": service.get,
"modify": service.modify,
"delete": service.delete,
"process": service.process,
"get_results": service.get_results,
}
if method not in mapper:
raise NotImplementedError(f"The method {method} is currently not supported")
return mapper[method]
def test_not_existing_job(self, db_session: Session, method: str, **kwargs: Any) -> None:
"""Check trying to access a non existing job throws the expected error.
Args:
db_session: Database session.
method: Which method to call with a non-existing job identifier.
kwargs: Additional keyword arguments which need to be supplied to the given method.
"""
job_service = get_configured_job_service(db_session)
user = get_random_user()
job_id = get_random_job_id()
result = self.get_method(job_service, method)(user=user, job_id=job_id, **kwargs)
assert result == get_missing_resource_service_exception(user_id=user["id"], job_id=job_id)
def test_not_authorized_for_job(self, db_session: Session, method: str, **kwargs: Any) -> None:
"""Check trying to access a job of another user throws the expected error.
Args:
db_session: Database session.
method: Which method to call as not authorized user.
kwargs: Additional keyword arguments which need to be supplied to the given method.
"""
job_service = get_configured_job_service(db_session)
user = get_random_user()
job_id = add_job(job_service, user=user)
other_user = get_random_user()
result = self.get_method(job_service, method)(user=other_user, job_id=job_id, **kwargs)
assert result == get_not_authorized_service_exception(user_id=other_user["id"], job_id=job_id)
| 42.933333 | 102 | 0.689829 | 339 | 2,576 | 4.99705 | 0.268437 | 0.035419 | 0.024793 | 0.023613 | 0.50059 | 0.42621 | 0.40732 | 0.40732 | 0.361275 | 0.319953 | 0 | 0 | 0.229037 | 2,576 | 59 | 103 | 43.661017 | 0.852971 | 0.257764 | 0 | 0.125 | 0 | 0 | 0.046009 | 0 | 0 | 0 | 0 | 0 | 0.0625 | 1 | 0.09375 | false | 0 | 0.1875 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ca3388b5b4f8f447dfd365008e2dd96e0c723ba | 1,919 | py | Python | egi/models.py | gtaylor/evennia-game-directory | f7a4d0731503de540026dc0fe6409a7c24ad676e | [
"BSD-3-Clause"
] | 1 | 2019-02-06T10:58:11.000Z | 2019-02-06T10:58:11.000Z | egi/models.py | gtaylor/evennia-game-index | f7a4d0731503de540026dc0fe6409a7c24ad676e | [
"BSD-3-Clause"
] | 4 | 2017-10-24T21:47:53.000Z | 2019-09-22T13:12:57.000Z | egi/models.py | gtaylor/evennia-game-directory | f7a4d0731503de540026dc0fe6409a7c24ad676e | [
"BSD-3-Clause"
] | 2 | 2017-02-09T16:25:27.000Z | 2017-10-24T21:40:42.000Z | import datetime
from google.appengine.ext import ndb
class GameListing(ndb.Model):
# Game listing stuff
game_name = ndb.StringProperty(required=True)
game_status = ndb.StringProperty(required=True)
game_website = ndb.StringProperty()
listing_contact = ndb.StringProperty(required=True)
short_description = ndb.StringProperty()
long_description = ndb.TextProperty()
# How to play
telnet_hostname = ndb.StringProperty()
telnet_port = ndb.IntegerProperty()
web_client_url = ndb.StringProperty()
# Game stats
connected_account_count = ndb.IntegerProperty()
total_account_count = ndb.IntegerProperty()
# System info
evennia_version = ndb.StringProperty(required=True)
python_version = ndb.StringProperty()
django_version = ndb.StringProperty()
server_platform = ndb.StringProperty()
created_time = ndb.DateTimeProperty(auto_now_add=True)
checkin_time = ndb.DateTimeProperty(auto_now=True)
@classmethod
def get_all_fresh_games_list(cls):
games = cls.query()
# Getting around a weird Google Cloud Datastore limitation crappily
# until I can figure out a better way.
filtered_games = [g for g in games if g.is_fresh()]
# Saves us from having to create an index, which is apparently slightly
# more expensive (monetarily).
# we sort first so that games having a telnet/webclient link ends up on top,
# then by number of connected players and finally alphabetically by game name
return sorted(filtered_games, key=lambda game: (
(0 if ((game.telnet_hostname and game.telnet_port) or game.web_client_url) else 1),
(-1 * (game.connected_account_count or 0)),
game.game_name))
def is_fresh(self):
cutoff_time = datetime.datetime.now() - datetime.timedelta(hours=2)
return self.checkin_time > cutoff_time
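# Illustrative sort keys (hypothetical games): a connectable game with 12
# players sorts as (0, -12, "Avalon"), an unconnectable empty one as
# (1, 0, "Zoo"), so playable, busier games bubble to the top and ties fall
# back to alphabetical game name.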
| 37.627451 | 95 | 0.706097 | 241 | 1,919 | 5.452282 | 0.53112 | 0.142314 | 0.076104 | 0.08828 | 0.09589 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003318 | 0.214695 | 1,919 | 50 | 96 | 38.38 | 0.868613 | 0.211569 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0.064516 | 0 | 0.774194 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ca4292d50c5517e99b0982bf8f944c2f58adef3 | 2,267 | py | Python | stylelens/dataset/df/generate_category_classifier_dataset.py | williamcameron/bl-magi | a35809489c15df25efc9c322166afaee7df3e192 | [
"Apache-2.0"
] | null | null | null | stylelens/dataset/df/generate_category_classifier_dataset.py | williamcameron/bl-magi | a35809489c15df25efc9c322166afaee7df3e192 | [
"Apache-2.0"
] | null | null | null | stylelens/dataset/df/generate_category_classifier_dataset.py | williamcameron/bl-magi | a35809489c15df25efc9c322166afaee7df3e192 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
from stylelens_dataset.categories import Categories
from stylelens_dataset.objects import Objects
from pprint import pprint
import os
import urllib.request as urllib
# create an instance of the API class
category_api = Categories()
object_api = Objects()
def download_image_from_url(url, filename):
try:
urllib.urlretrieve(url, filename)
except urllib.HTTPError:
pass
def get_objects_with_category_name(category_name):
try:
offset = 0
limit = 100
i = 0
while True:
res = object_api.get_objects_by_category_name(category_name, offset=offset, limit=limit)
for object in res:
download_image_from_url(object['url'], str(object['_id']) + '.jpg')
i += 1
if limit > len(res):
break
else:
offset = offset + limit
pprint(category_name + ' : ' + str(i))
except Exception as e:
print("Exception when calling get_objects_by_category_name: %s\n" % e)
def get_category_classes():
try:
offset = 0
limit = 10
categories = []
while True:
res = category_api.get_categories(offset=offset, limit=limit)
if limit > len(res):
break
else:
offset = offset + limit
for cate in res:
categories.append(cate)
return categories
except Exception as e:
print("Exception when calling add_category: %s\n" % e)
return None
def make_category_dataset():
dataset_path = '/Users/daesubkim/Desktop/Python/py-example'
categories = get_category_classes()
if categories:
for category in categories:
category_name = category["name"]
os.chdir(dataset_path)
try:
os.mkdir(category_name)
except FileExistsError:
pass
os.chdir(category_name)
get_objects_with_category_name(category_name)
def start():
try:
make_category_dataset()
except Exception as e:
pprint(e)
# log.error(str(e))
if __name__ == '__main__':
try:
start()
except Exception as e:
pprint(e)
| 25.47191 | 100 | 0.603 | 264 | 2,267 | 4.94697 | 0.314394 | 0.11026 | 0.061256 | 0.073507 | 0.258806 | 0.222052 | 0.183767 | 0.125574 | 0.059724 | 0 | 0 | 0.005818 | 0.3176 | 2,267 | 88 | 101 | 25.761364 | 0.838397 | 0.023379 | 0 | 0.371429 | 0 | 0 | 0.074627 | 0.032112 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0.028571 | 0.085714 | 0 | 0.185714 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ca44cd0b909b867cabf4f36b32c482801cc16f1 | 8,429 | py | Python | keras/dtensor/lazy_variable.py | englert-m/keras | 7007cd0fd548032f1bb2c23b1defa4812628baec | [
"Apache-2.0"
] | null | null | null | keras/dtensor/lazy_variable.py | englert-m/keras | 7007cd0fd548032f1bb2c23b1defa4812628baec | [
"Apache-2.0"
] | null | null | null | keras/dtensor/lazy_variable.py | englert-m/keras | 7007cd0fd548032f1bb2c23b1defa4812628baec | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Lazily initialized variables, useful for creating a symbolic Keras model."""
# pylint: disable=g-direct-tensorflow-import
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import compat
from tensorflow.python.util import tf_contextlib
def _infer_shape_dtype_and_create_handle(initial_value, shape, dtype, name):
"""Infer shape and dtype from initial_value and create a variable handle."""
with ops.name_scope(name, "Variable", skip_on_eager=False) as name:
handle_name = ops.name_from_scope_name(name)
unique_id = "%s_%d" % (handle_name, ops.uid())
# Use attr_scope and device(None) to simulate the behavior of
# colocate_with when the variable we want to colocate with doesn't
# yet exist.
device_context_manager = ops.NullContextmanager
attr = attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(
s=[compat.as_bytes("loc:@%s" % handle_name)]))
with ops.get_default_graph()._attr_scope({"_class": attr}): # pylint: disable=protected-access
with ops.name_scope("Initializer"), device_context_manager(None):
if not callable(initial_value):
if isinstance(initial_value, trackable.CheckpointInitialValue):
raise NotImplementedError(
"CheckpointInitialValue is not supported to be the initial "
"value of a lazy variable.")
initial_value = ops.convert_to_tensor(
initial_value, name="initial_value", dtype=dtype)
assert not callable(initial_value)
assert initial_value.shape.is_compatible_with(shape)
dtype = dtype or initial_value.dtype.base_dtype
shape = shape or initial_value.shape
assert dtype
assert shape
handle = resource_variable_ops._variable_handle_from_shape_and_dtype( # pylint: disable=protected-access
shape=shape,
dtype=dtype,
shared_name=None, # Never shared
name=name,
graph_mode=False,
initial_value=None)
# initial_value=initial_value if not callable(initial_value) else None)
return initial_value, shape, dtype, handle, handle_name, unique_id
class LazyInitVariable(resource_variable_ops.BaseResourceVariable):
"""Lazily initialized variables.
The major use case for this class is to serve as a memory efficient
alternative for tf.Variable. The resource handle of this class is point to
nothing, which mean it will raise error when its value is fetched in a eager
context. Having said that, it will perform like a normal tf.Variable when
using with graph tensor, like KerasTensor produced from tf.keras.Input.
"""
def __init__(
self,
initial_value=None,
trainable=None,
collections=None,
validate_shape=True, # pylint: disable=unused-argument
caching_device=None,
name=None,
dtype=None,
variable_def=None,
import_scope=None,
constraint=None,
distribute_strategy=None,
synchronization=None,
aggregation=None,
shape=None,
**kwargs):
assert context.executing_eagerly() # To simplify the logic
assert variable_def is None # Not supported yet.
assert caching_device is None # Not supported yet
if initial_value is None:
raise ValueError("The `initial_value` arg to `tf.Variable` must "
"be specified except when you are not providing a "
"`variable_def`. You provided neither.")
if isinstance(initial_value, ops.Tensor) and hasattr(
initial_value, "graph") and initial_value.graph.building_function:
raise ValueError(f"Argument `initial_value` ({initial_value}) could not "
"be lifted out of a `tf.function`. "
"(Tried to create variable with name='{name}'). "
"To avoid this error, when constructing `tf.Variable`s "
"inside of `tf.function` you can create the "
"`initial_value` tensor in a "
"`tf.init_scope` or pass a callable `initial_value` "
"(e.g., `tf.Variable(lambda : "
"tf.truncated_normal([10, 40]))`). "
"Please file a feature request if this "
"restriction inconveniences you.")
if constraint is not None and not callable(constraint):
raise ValueError(f"Argument `constraint` must be None or a callable. "
f"a callable. Got a {type(constraint)}: {constraint}")
self._name = name
(initial_value, shape, dtype, handle, handle_name,
unique_id) = _infer_shape_dtype_and_create_handle(initial_value, shape,
dtype, name)
super(LazyInitVariable, self).__init__(
distribute_strategy=distribute_strategy,
initial_value=initial_value,
shape=shape,
dtype=dtype,
name=name,
unique_id=unique_id,
handle_name=handle_name,
constraint=constraint,
handle=handle,
graph_element=None,
trainable=trainable,
synchronization=synchronization,
aggregation=aggregation,
in_graph_mode=False)
# TODO(scottzhu): This method and create_and_initialize might be removed if
# we decide to just use the tf.Variable to replace this class.
def initialize(self):
with ops.name_scope(self._name, "Variable", skip_on_eager=False) as name:
with ops.colocate_with(self._handle), ops.name_scope("Initializer"):
if callable(self._initial_value):
initial_value = self._initial_value()
else:
initial_value = self._initial_value
if not initial_value.shape.is_compatible_with(self._shape):
raise ValueError(
f"In this `tf.Variable` creation, the initial value's shape "
f"({initial_value.shape}) is not compatible with "
f"the explicitly supplied `shape` argument ({self._shape}).")
assert self._dtype is initial_value.dtype.base_dtype
gen_resource_variable_ops.assign_variable_op(self._handle, initial_value)
def create_and_initialize(self):
if callable(self._initial_value):
initial_value = self._initial_value()
with ops.device(initial_value.device):
(initial_value, shape, dtype, handle, handle_name,
unique_id) = _infer_shape_dtype_and_create_handle(
initial_value, self._shape, self._dtype, self._name)
self.initialize()
super(LazyInitVariable, self).__init__(
trainable=self._trainable,
shape=shape,
dtype=dtype,
handle=handle,
synchronization=self._synchronization,
constraint=self._constraint,
aggregation=self._aggregation,
distribute_strategy=self._distribute_strategy,
name=self._name,
unique_id=unique_id,
handle_name=handle_name,
graph_element=None,
initial_value=initial_value,
initializer_op=None,
is_initialized_op=None,
cached_value=None,
caching_device=None)
def _lazy_init_variable_creator(next_creator, **kwargs):
del next_creator
return LazyInitVariable(**kwargs)
@tf_contextlib.contextmanager
def lazy_init_scope():
with variable_scope.variable_creator_scope(_lazy_init_variable_creator):
yield
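# A minimal usage sketch (assumes `import tensorflow as tf` and an eager
# context, matching the asserts in LazyInitVariable.__init__):
#
# with lazy_init_scope():
# v = tf.Variable(tf.zeros([2, 2]), name="w") # becomes a LazyInitVariable
# v.create_and_initialize() # only now is a real resource handle allocated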
| 41.522167 | 111 | 0.678965 | 1,038 | 8,429 | 5.297688 | 0.251445 | 0.106929 | 0.030915 | 0.026187 | 0.187307 | 0.134934 | 0.117112 | 0.117112 | 0.087289 | 0.064375 | 0 | 0.002325 | 0.234429 | 8,429 | 202 | 112 | 41.727723 | 0.849837 | 0.208447 | 0 | 0.176871 | 0 | 0 | 0.150265 | 0.010431 | 0 | 0 | 0 | 0.004951 | 0.054422 | 1 | 0.040816 | false | 0.006803 | 0.068027 | 0 | 0.129252 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ca5cbc4f5357898528acdf248026bc993f249fb | 2,493 | py | Python | txaws/tests/test_util.py | vargas/txaws | b75d00e042c6e7e1609c05e01ee54e1c72b1eaf6 | [
"MIT"
] | 1 | 2021-12-17T00:03:24.000Z | 2021-12-17T00:03:24.000Z | txaws/tests/test_util.py | vargas/txaws | b75d00e042c6e7e1609c05e01ee54e1c72b1eaf6 | [
"MIT"
] | null | null | null | txaws/tests/test_util.py | vargas/txaws | b75d00e042c6e7e1609c05e01ee54e1c72b1eaf6 | [
"MIT"
] | 1 | 2021-12-17T00:06:41.000Z | 2021-12-17T00:06:41.000Z | from urllib.parse import urlparse
import binascii
from twisted.trial.unittest import TestCase
from txaws.util import hmac_sha1, iso8601time, parse
class MiscellaneousTestCase(TestCase):
def test_hmac_sha1(self):
cases = [
(binascii.unhexlify(b"0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b"),
"Hi There", "thcxhlUFcmTii8C2+zeMjvFGvgA="),
("Jefe", "what do ya want for nothing?",
"7/zfauXrL6LSdBbV8YTfnCWafHk="),
(binascii.unhexlify(b"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
"\xdd" * 50, "El1zQrmsEc2Ro5r0iqF7T2PxddM="),
]
for key, data, expected in cases:
self.assertEqual(hmac_sha1(key, data), expected)
def test_iso8601time(self):
self.assertEqual("2006-07-07T15:04:56Z",
iso8601time((2006, 7, 7, 15, 4, 56, 0, 0, 0)))
class ParseUrlTestCase(TestCase):
"""
Test URL parsing facility and defaults values.
"""
def test_parse(self):
"""
L{parse} correctly parses a URL into its various components.
"""
# The default port for HTTP is 80.
self.assertEqual(
parse("http://127.0.0.1/"),
("http", "127.0.0.1", 80, "/"))
# The default port for HTTPS is 443.
self.assertEqual(
parse("https://127.0.0.1/"),
("https", "127.0.0.1", 443, "/"))
# Specifying a port.
self.assertEqual(
parse("http://spam:12345/"),
("http", "spam", 12345, "/"))
# Weird (but commonly accepted) structure uses default port.
self.assertEqual(
parse("http://spam:/"),
("http", "spam", 80, "/"))
# Spaces in the hostname are trimmed, the default path is /.
self.assertEqual(
parse("http://foo "),
("http", "foo", 80, "/"))
def test_externalUnicodeInterference(self):
"""
L{parse} should return C{str} for the scheme, host, and path
elements of its return tuple, even when passed an URL which has
previously been passed to L{urlparse} as a C{unicode} string.
"""
badInput = u"http://example1.com/path"
goodInput = badInput.encode("ascii")
urlparse(badInput)
scheme, host, port, path = parse(goodInput)
self.assertTrue(isinstance(scheme, str))
self.assertTrue(isinstance(host, str))
self.assertTrue(isinstance(path, str))
| 33.24 | 77 | 0.579222 | 272 | 2,493 | 5.283088 | 0.463235 | 0.073069 | 0.069589 | 0.066806 | 0.073765 | 0.044537 | 0 | 0 | 0 | 0 | 0 | 0.070582 | 0.289611 | 2,493 | 74 | 78 | 33.689189 | 0.740824 | 0.200562 | 0 | 0.113636 | 0 | 0 | 0.203558 | 0.085819 | 0 | 0 | 0 | 0 | 0.227273 | 1 | 0.090909 | false | 0 | 0.090909 | 0 | 0.227273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ca6030084049893d40df687b5267b7ac8dd9230 | 7,674 | py | Python | vgame/theater.py | cilame/vgame | 9b7076256500137fe5c95426798007734dd613a2 | [
"MIT"
] | null | null | null | vgame/theater.py | cilame/vgame | 9b7076256500137fe5c95426798007734dd613a2 | [
"MIT"
] | null | null | null | vgame/theater.py | cilame/vgame | 9b7076256500137fe5c95426798007734dd613a2 | [
"MIT"
] | null | null | null | import pygame
from pygame.locals import *
from .actor import Actor
from .actor import Player, Wall, Enemy, Bullet, NPC, Anime, Map
from .actor import Menu, Background, Button
from .actor import Delayer
import vgame
class Camera:
'''
Mainly responsible for camera handling; we'll see how much functionality can be built on top of this later.
'''
DEBUG = False
def __init__(self, width, height):
self.w = width
self.h = height
self.camera = pygame.Rect(0, 0, self.w, self.h)
self.camera_xy = (0., 0.)
self.theater = None
self.follow = None # single-actor follow
self.fspeed = 1
self.offsets = (0, 0)
self.padding = pygame.Vector2(0, 0)
self.debug_area = None
# Multi-actor camera zooming is currently all but unsolvable; this interface will see no progress for a long time, since a general-purpose game framework has little real need for it.
self.follows = None # interface still under development; will eventually handle following multiple actors
self.paddings = None # interface still under development; will eventually handle following multiple actors
# self.margin = pygame.Vector2(*((100, 100) if vgame.DEBUG else (0, 0))) # used while debugging, to make the borders visible
self.margin = pygame.Vector2((0,0)) # used while debugging, to make the borders visible
self.delayer = Delayer()
def _get_fspeed(self): return self._fspeed
def _set_fspeed(self, value):
if value <= 0 or value > 1:
raise ValueError('fspeed:{} limit in "0 < fspeed <= 1".'.format(value))
self._fspeed = value
fspeed = property(_get_fspeed, _set_fspeed)
def apply(self, entity):
x, y = self.camera.topleft
x += self.offsets[0]
y += self.offsets[1]
return entity.rect.move((x, y))
def update(self, ticks):
if self.follow:
_x, _y = self.follow.rect.center
x = -_x + int(self.w/2)
y = -_y + int(self.h/2)
x = min(self.margin.x, x) # top
y = min(self.margin.y, y) # left
tx = max(x, -(self.theater.size[0] - self.w + self.margin.x)) # right
ty = max(y, -(self.theater.size[1] - self.h + self.margin.y)) # bottom
cx, cy = self.camera_xy
if self.delayer.update(ticks):
_tx = cx + (tx - cx) * self.fspeed
_ty = cy + (ty - cy) * self.fspeed
ox, oy = self.camera[:2]
jx, jy = abs(self.w/2-(_x+ox)), abs(self.h/2-(_y+oy))
if jx < self.padding.x/2: _tx = ox
if jy < self.padding.y/2: _ty = oy
self.camera = pygame.Rect(_tx, _ty, self.w, self.h)
self.camera_xy = _tx, _ty
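# Illustrative numbers: with fspeed = 0.1 the camera closes 10% of the
# remaining gap to the follow target on each delayed tick
# (new = old + (target - old) * fspeed), easing toward the player
# exponentially instead of snapping.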
def debug_padding(self):
if vgame.DEBUG and Camera.DEBUG:
if not self.debug_area:
showsize = (int(self.padding.x), int(self.padding.y))
showpoint = (self.w/2-self.padding.x/2, self.h/2-self.padding.y/2)
self.debug_area = Actor((0,0,0,30), showsize=showsize, showpoint=showpoint)
self.debug_area.imager._delay_bind_debug()
self.theater.screen.blit(self.debug_area.image, self.debug_area.rect)
# self.theater.screen.blit(self.debug_area.image, self.apply(self.debug_area))
class Theater:
'''
The stage object, mainly responsible for scene setup (map data mostly lives here)
and for loading the scene's resources (loaded into a global store, leaving a
reference structure behind), so resources that are already loaded are never
loaded into memory again, and any loaded resource can be reached simply
through the bindings on the instance itself.
'''
Camera = Camera # used to quickly locate and tweak certain config parameters: vgame.Theater.Camera.DEBUG
_theater_numb = 0
_theater_format = 'theater:{}'
def __init__(self,
background = None, # background image; accepts many data types, see the Image constructor parameters for details
size = None, # game background size; falls back to the screen size if unset
camera_size = None, # camera size; by default the camera matches the game background size
):
game_screen = pygame.display.get_surface() # the display size of the game screen (camera)
if game_screen is None or not vgame.Artist.ARTIST:
raise RuntimeError('Please use vgame.Initer() to init the game first.')
self.artist = vgame.Artist.ARTIST
self.screen = game_screen
self.screen_size = self.screen.get_size()
self.theater_name = self._mk_theater_name()
self.size = size if size else self.screen_size
self.showsize = self.size
self.group_grid = pygame.sprite.Group()
self.group = pygame.sprite.Group()
self.background = None
self.camera = self.regist_camera(Camera(*self.screen_size))
        # Initialize the per-scene Actor lists used for physics checks.
Actor .RIGID_BODY[self.theater_name] = []
Actor .SHOW_BODY [self.theater_name] = []
Player.RIGID_BODY[self.theater_name] = []
Player.SHOW_BODY [self.theater_name] = []
Wall .RIGID_BODY[self.theater_name] = []
Wall .SHOW_BODY [self.theater_name] = []
Enemy .RIGID_BODY[self.theater_name] = []
Enemy .SHOW_BODY [self.theater_name] = []
Bullet.RIGID_BODY[self.theater_name] = []
Bullet.SHOW_BODY [self.theater_name] = []
NPC .RIGID_BODY[self.theater_name] = []
NPC .SHOW_BODY [self.theater_name] = []
Anime .RIGID_BODY[self.theater_name] = []
Anime .SHOW_BODY [self.theater_name] = []
Menu .RIGID_BODY[self.theater_name] = []
Menu .SHOW_BODY [self.theater_name] = []
Button.RIGID_BODY[self.theater_name] = []
Button.SHOW_BODY [self.theater_name] = []
        # *Unused for now: enter/leave animations should be considered later,
        # otherwise scene switches feel very stiff (at least a config interface
        # should be provided).
        # *Configurable fade-in/fade-out effects could be implemented afterwards.
self.enter = None
self.leave = None
        # At init time an image can be passed in as the background, or it can be
        # left empty: a transparent region that bounds the game world and leaves
        # room for more configuration. Mainly used to bound the camera-follow range.
self._add_background(background if background else (0,0,0,0))
self.artist.regist(self)
def regist(self,*actors):
for actor in actors:
actor.theater = self
actor._regist = self.regist
if isinstance(actor, Actor) and not self.group.has(actor):
self.group.add(actor)
def regist_camera(self, camera):
camera.theater = self
return camera
def regist_grid(self, grid):
grid.theater = self
grid._regist = self.regist_grid
if isinstance(grid, Actor) and not self.group_grid.has(grid):
self.group_grid.add(grid)
return grid
def _add_background(self, background):
self.background = Background(background, showsize=self.size)
self.background.theater = self
if self.background.image:
self.group.add(self.background)
def _mk_theater_name(self):
Theater._theater_numb += 1
return Theater._theater_format.format(Theater._theater_numb)
@property
def name(self):
return self.theater_name
def change_theater(self, name_or_class):
self.artist.change_theater(name_or_class)
def follow(self, actor, speed, offsets, padding):
self.camera.follow = actor
self.camera.fspeed = speed
self.camera.offsets = offsets
self.camera.padding[:2] = padding
@property
def Actor(self): return Actor.SHOW_BODY[self.name].copy()
@property
def Player(self): return Player.SHOW_BODY[self.name].copy()
@property
def Wall(self): return Wall.SHOW_BODY[self.name].copy()
@property
def Enemy(self): return Enemy.SHOW_BODY[self.name].copy()
@property
def Bullet(self): return Bullet.SHOW_BODY[self.name].copy()
@property
def NPC(self): return NPC.SHOW_BODY[self.name].copy()
@property
def Anime(self): return Anime.SHOW_BODY[self.name].copy()
@property
def Menu(self): return Menu.SHOW_BODY[self.name].copy()
@property
def Button(self): return Button.SHOW_BODY[self.name].copy()
@property
def rect(self): return self.background.rect
@property
def draw(self):
return vgame.draw(self.background) | 36.717703 | 100 | 0.600078 | 954 | 7,674 | 4.68239 | 0.179245 | 0.048355 | 0.067159 | 0.076561 | 0.22073 | 0.09156 | 0.09156 | 0.019252 | 0.019252 | 0 | 0 | 0.009643 | 0.283815 | 7,674 | 209 | 101 | 36.717703 | 0.80313 | 0.097081 | 0 | 0.074074 | 0 | 0 | 0.012962 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.160494 | false | 0 | 0.04321 | 0.080247 | 0.283951 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ca906d948f8bdc4ff1f5129d6cb152946ac17af | 60,654 | py | Python | third_party/chromite/lib/auto_updater.py | zipated/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | third_party/chromite/lib/auto_updater.py | cangulcan/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | third_party/chromite/lib/auto_updater.py | cangulcan/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | # -*- coding: utf-8 -*-
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Library containing functions to execute auto-update on a remote device.
TODO(xixuan): Make this lib support other update logics, including:
auto-update CrOS images for DUT
beaglebones for servo
stage images to servo usb
install custom CrOS images for chaos lab
install firmware images with FAFT
install android/brillo
TODO(xixuan): crbugs.com/631837, re-consider the structure of this file,
like merging check functions into one class.
Currently, this lib supports ChromiumOSFlashUpdater and ChromiumOSUpdater.
  ---------------
  | BaseUpdater | : Updater
  ---------------
         |
         |
  -------------------------
  | ChromiumOSFlashUpdater | : Chromium OS Updater by cros flash
  -------------------------
         |
         |
  ---------------------
  | ChromiumOSUpdater | : Chromium OS Updater by cros flash
  ---------------------   with more checks
ChromiumOSFlashUpdater includes:
----Precheck---
* Pre-check payload's existence before auto-update.
* Pre-check if the device can run its devserver.
----Transfer----
* Transfer devserver package at first.
* Transfer rootfs update files if rootfs update is required.
* Transfer stateful update files if stateful update is required.
----Auto-Update---
* Do rootfs partition update if it's required.
* Do stateful partition update if it's required.
* Do reboot for device if it's required.
----Verify----
* Do verification if it's required.
* Disable rootfs verification in device if it's required.
ChromiumOSUpdater adds:
----Check-----
* Check functions, including kernel/version/cgpt check.
----Precheck---
* Pre-check for stateful/rootfs update/whole update.
----Transfer----
* Add @retry to all transfer functions.
----Verify----
* Post-check stateful/rootfs update/whole update.
"""
from __future__ import print_function
import cStringIO
import json
import os
import re
import shutil
import tempfile
import time
from chromite.cli import command
from chromite.lib import auto_update_util
from chromite.lib import constants
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import dev_server_wrapper as ds_wrapper
from chromite.lib import operation
from chromite.lib import osutils
from chromite.lib import path_util
from chromite.lib import remote_access
from chromite.lib import retry_util
from chromite.lib import timeout_util
# Naming conventions for global variables:
# File on remote host without slash: REMOTE_XXX_FILENAME
# File on remote host with slash: REMOTE_XXX_FILE_PATH
# Path on remote host with slash: REMOTE_XXX_PATH
# File on local server without slash: LOCAL_XXX_FILENAME
# File on local server with slash: LOCAL_XXX_FILE_PATH
# Path on local server: LOCAL_XXX_PATH
# Update Status for remote device.
UPDATE_STATUS_IDLE = 'UPDATE_STATUS_IDLE'
UPDATE_STATUS_DOWNLOADING = 'UPDATE_STATUS_DOWNLOADING'
UPDATE_STATUS_FINALIZING = 'UPDATE_STATUS_FINALIZING'
UPDATE_STATUS_UPDATED_NEED_REBOOT = 'UPDATE_STATUS_UPDATED_NEED_REBOOT'
# Error msg in loading shared libraries when running python command.
ERROR_MSG_IN_LOADING_LIB = 'python: error while loading shared libraries'
# Max number of times to retry:
# 1. for transfer functions to be retried.
# 2. for some retriable commands to be retried.
MAX_RETRY = 5
# Number of times to retry update_engine_client --status. See crbug.com/744212.
UPDATE_ENGINE_STATUS_RETRY = 30
# The delay between retriable tasks.
DELAY_SEC_FOR_RETRY = 5
# Third-party package directory on devserver
THIRD_PARTY_PKG_DIR = '/usr/lib/python2.7/dist-packages/'
# Third-party package list
THIRD_PARTY_PKG_LIST = ['cherrypy', 'google/protobuf']
# update_payload path from update_engine.
UPDATE_PAYLOAD_DIR = os.path.join(
constants.UPDATE_ENGINE_SCRIPTS_PATH, 'update_payload')
# Number of seconds to wait for the post check version to settle.
POST_CHECK_SETTLE_SECONDS = 15
# Number of seconds to delay between post check retries.
POST_CHECK_RETRY_SECONDS = 5
class ChromiumOSUpdateError(Exception):
"""Thrown when there is a general ChromiumOS-specific update error."""
class PreSetupUpdateError(ChromiumOSUpdateError):
"""Raised for the rootfs/stateful update pre-setup failures."""
class RootfsUpdateError(ChromiumOSUpdateError):
"""Raised for the Rootfs partition update failures."""
class StatefulUpdateError(ChromiumOSUpdateError):
"""Raised for the stateful partition update failures."""
class AutoUpdateVerifyError(ChromiumOSUpdateError):
"""Raised for verification failures after auto-update."""
class DevserverCannotStartError(ChromiumOSUpdateError):
"""Raised when devserver cannot restart after stateful update."""
class RebootVerificationError(ChromiumOSUpdateError):
"""Raised for failing to reboot errors."""
class BaseUpdater(object):
"""The base updater class."""
def __init__(self, device, payload_dir):
self.device = device
self.payload_dir = payload_dir
class ChromiumOSFlashUpdater(BaseUpdater):
"""Used to update DUT with image."""
# stateful update files
LOCAL_STATEFUL_UPDATE_FILENAME = 'stateful_update'
LOCAL_CHROOT_STATEFUL_UPDATE_PATH = '/usr/bin/stateful_update'
REMOTE_STATEFUL_UPDATE_PATH = '/usr/local/bin/stateful_update'
# devserver files
LOCAL_DEVSERVER_LOG_FILENAME = 'target_devserver.log'
REMOTE_DEVSERVER_FILENAME = 'devserver.py'
# rootfs update files
REMOTE_UPDATE_ENGINE_BIN_FILENAME = 'update_engine_client'
REMOTE_UPDATE_ENGINE_LOGFILE_PATH = '/var/log/update_engine.log'
REMOTE_PROVISION_FAILED_FILE_PATH = '/var/tmp/provision_failed'
REMOTE_HOSTLOG_FILE_PATH = '/var/log/devserver_hostlog'
REMOTE_QUICK_PROVISION_LOGFILE_PATH = '/var/log/quick-provision.log'
UPDATE_CHECK_INTERVAL_PROGRESSBAR = 0.5
UPDATE_CHECK_INTERVAL_NORMAL = 10
# Update engine perf files
REMOTE_UPDATE_ENGINE_PERF_SCRIPT_PATH = \
'/mnt/stateful_partition/unencrypted/preserve/' \
'update_engine_performance_monitor.py'
REMOTE_UPDATE_ENGINE_PERF_RESULTS_PATH = '/var/log/perf_data_results.json'
# `mode` parameter when copying payload files to the DUT.
PAYLOAD_MODE_PARALLEL = 'parallel'
PAYLOAD_MODE_SCP = 'scp'
# Related to crbug.com/276094: Restore to 5 mins once the 'host did not
# return from reboot' bug is solved.
REBOOT_TIMEOUT = 480
def __init__(self, device, payload_dir, dev_dir='', tempdir=None,
original_payload_dir=None, do_rootfs_update=True,
do_stateful_update=True, reboot=True, disable_verification=False,
clobber_stateful=False, yes=False, payload_filename=None,
send_payload_in_parallel=False):
"""Initialize a ChromiumOSFlashUpdater for auto-update a chromium OS device.
Args:
device: the ChromiumOSDevice to be updated.
payload_dir: the directory of payload(s).
dev_dir: the directory of the devserver that runs the CrOS auto-update.
tempdir: the temp directory in caller, not in the device. For example,
the tempdir for cros flash is /tmp/cros-flash****/, used to
temporarily keep files when transferring devserver package, and
reserve devserver and update engine logs.
original_payload_dir: The directory containing payloads whose version is
the same as current host's rootfs partition. If it's None, will first
try installing the matched stateful.tgz with the host's rootfs
Partition when restoring stateful. Otherwise, install the target
stateful.tgz.
do_rootfs_update: whether to do rootfs partition update. The default is
True.
do_stateful_update: whether to do stateful partition update. The default
is True.
reboot: whether to reboot device after update. The default is True.
disable_verification: whether to disabling rootfs verification on the
device. The default is False.
clobber_stateful: whether to do a clean stateful update. The default is
False.
yes: Assume "yes" (True) for any prompt. The default is False. However,
it should be set as True if we want to disable all the prompts for
auto-update.
payload_filename: Filename of exact payload file to use for
update instead of the default: update.gz. Defaults to None. Use
only if you staged a payload by filename (i.e not artifact) first.
send_payload_in_parallel: whether to transfer payload in chunks
in parallel. The default is False.
"""
super(ChromiumOSFlashUpdater, self).__init__(device, payload_dir)
if tempdir is not None:
self.tempdir = tempdir
else:
self.tempdir = tempfile.mkdtemp(prefix='cros-update')
self.dev_dir = dev_dir
self.original_payload_dir = original_payload_dir
# Update setting
self._cmd_kwargs = {}
self._cmd_kwargs_omit_error = {'error_code_ok': True}
self._do_stateful_update = do_stateful_update
self._do_rootfs_update = do_rootfs_update
self._disable_verification = disable_verification
self._clobber_stateful = clobber_stateful
self._reboot = reboot
self._yes = yes
# Device's directories
self.device_dev_dir = os.path.join(self.device.work_dir, 'src')
self.device_static_dir = os.path.join(self.device.work_dir, 'static')
self.device_restore_dir = os.path.join(self.device.work_dir, 'old')
self.stateful_update_bin = None
# autoupdate_EndToEndTest uses exact payload filename for update
self.payload_filename = payload_filename
if send_payload_in_parallel:
self.payload_mode = self.PAYLOAD_MODE_PARALLEL
else:
self.payload_mode = self.PAYLOAD_MODE_SCP
self.perf_id = None
@property
def is_au_endtoendtest(self):
return self.payload_filename is not None
def CheckPayloads(self):
"""Verify that all required payloads are in |self.payload_dir|."""
logging.debug('Checking if payloads have been stored in directory %s...',
self.payload_dir)
filenames = []
payload_name = self._GetRootFsPayloadFileName()
filenames += [payload_name] if self._do_rootfs_update else []
if self._do_stateful_update:
filenames += [ds_wrapper.STATEFUL_FILENAME]
for fname in filenames:
payload = os.path.join(self.payload_dir, fname)
if not os.path.exists(payload):
raise ChromiumOSUpdateError('Payload %s does not exist!' % payload)
def CheckRestoreStateful(self):
"""Check whether to restore stateful."""
logging.debug('Checking whether to restore stateful...')
restore_stateful = False
try:
self._CheckDevserverCanRun()
return restore_stateful
except DevserverCannotStartError as e:
if self._do_rootfs_update:
msg = ('Cannot start devserver! The stateful partition may be '
'corrupted: %s' % e)
prompt = 'Attempt to restore the stateful partition?'
restore_stateful = self._yes or cros_build_lib.BooleanPrompt(
prompt=prompt, default=False, prolog=msg)
if not restore_stateful:
raise ChromiumOSUpdateError(
'Cannot continue to perform rootfs update!')
logging.debug('Restore stateful partition is%s required.',
('' if restore_stateful else ' not'))
return restore_stateful
def _CheckDevserverCanRun(self):
"""We can run devserver on |device|.
If the stateful partition is corrupted, Python or other packages
(e.g. cherrypy) needed for rootfs update may be missing on |device|.
This will also use `ldconfig` to update library paths on the target
device if it looks like that's causing problems, which is necessary
for base images.
Raise DevserverCannotStartError if devserver cannot start.
"""
# Try to capture the output from the command so we can dump it in the case
# of errors. Note that this will not work if we were requested to redirect
# logs to a |log_file|.
cmd_kwargs = dict(self._cmd_kwargs)
cmd_kwargs['capture_output'] = True
cmd_kwargs['combine_stdout_stderr'] = False
logging.info('Checking if we can run devserver on the device...')
devserver_bin = os.path.join(self.device_dev_dir,
self.REMOTE_DEVSERVER_FILENAME)
devserver_check_command = ['python', devserver_bin, '--help']
try:
self.device.RunCommand(devserver_check_command, **cmd_kwargs)
except cros_build_lib.RunCommandError as e:
logging.warning('Cannot start devserver:')
logging.warning(e.result.error)
if ERROR_MSG_IN_LOADING_LIB in str(e):
logging.info('Attempting to correct device library paths...')
try:
self.device.RunCommand(['ldconfig', '-r', '/'], **cmd_kwargs)
self.device.RunCommand(devserver_check_command,
**cmd_kwargs)
logging.info('Library path correction successful.')
return
except cros_build_lib.RunCommandError as e2:
logging.warning('Library path correction failed:')
logging.warning(e2.result.error)
error_msg = e.result.error.splitlines()[-1]
raise DevserverCannotStartError(error_msg)
# pylint: disable=unbalanced-tuple-unpacking
@classmethod
def GetUpdateStatus(cls, device, keys=None):
"""Returns the status of the update engine on the |device|.
Retrieves the status from update engine and confirms all keys are
in the status.
Args:
device: A ChromiumOSDevice object.
keys: the keys to look for in the status result (defaults to
['CURRENT_OP']).
Returns:
A list of values in the order of |keys|.
"""
keys = keys or ['CURRENT_OP']
result = device.RunCommand([cls.REMOTE_UPDATE_ENGINE_BIN_FILENAME,
'--status'],
capture_output=True, log_output=True)
if not result.output:
raise Exception('Cannot get update status')
try:
status = cros_build_lib.LoadKeyValueFile(
cStringIO.StringIO(result.output))
except ValueError:
raise ValueError('Cannot parse update status')
values = []
for key in keys:
if key not in status:
        raise ValueError('Missing %s in the update engine status' % key)
values.append(status.get(key))
return values
@classmethod
def GetRootDev(cls, device):
"""Get the current root device on |device|.
Args:
device: a ChromiumOSDevice object, defines whose root device we
want to fetch.
"""
rootdev = device.RunCommand(
['rootdev', '-s'], capture_output=True).output.strip()
logging.debug('Current root device is %s', rootdev)
return rootdev
def _GetStatefulUpdateScript(self):
"""Returns the path to the stateful_update_bin on the target.
Returns:
<need_transfer, path>:
need_transfer is True if stateful_update_bin is found in local path,
False if we directly use stateful_update_bin on the host.
path: If need_transfer is True, it represents the local path of
stateful_update_bin, and is used for further transferring. Otherwise,
it refers to the host path.
"""
# We attempt to load the local stateful update path in 2 different
# ways. If this doesn't exist, we attempt to use the Chromium OS
# Chroot path to the installed script. If all else fails, we use the
# stateful update script on the host.
stateful_update_path = path_util.FromChrootPath(
self.LOCAL_CHROOT_STATEFUL_UPDATE_PATH)
if not os.path.exists(stateful_update_path):
logging.warning('Could not find chroot stateful_update script in %s, '
'falling back to the client copy.', stateful_update_path)
stateful_update_path = os.path.join(self.dev_dir,
self.LOCAL_STATEFUL_UPDATE_FILENAME)
if os.path.exists(stateful_update_path):
logging.debug('Use stateful_update script in devserver path: %s',
stateful_update_path)
return True, stateful_update_path
logging.debug('Cannot find stateful_update script, will use the script '
'on the host')
return False, self.REMOTE_STATEFUL_UPDATE_PATH
else:
return True, stateful_update_path
def _StartUpdateEngineIfNotRunning(self, device):
"""Starts update-engine service if it is not running.
Args:
device: a ChromiumOSDevice object, defines the target root device.
"""
try:
result = device.RunCommand(['start', 'update-engine'],
capture_output=True, log_output=True).output
if 'start/running' in result:
logging.info('update engine was not running, so we started it.')
except cros_build_lib.RunCommandError as e:
if e.result.returncode != 1 or 'is already running' not in e.result.error:
raise e
def SetupRootfsUpdate(self):
"""Makes sure |device| is ready for rootfs update."""
logging.info('Checking if update engine is idle...')
self._StartUpdateEngineIfNotRunning(self.device)
status, = self.GetUpdateStatus(self.device)
if status == UPDATE_STATUS_UPDATED_NEED_REBOOT:
logging.info('Device needs to reboot before updating...')
self._Reboot('setup of Rootfs Update')
status, = self.GetUpdateStatus(self.device)
if status != UPDATE_STATUS_IDLE:
raise RootfsUpdateError('Update engine is not idle. Status: %s' % status)
def _GetDevicePythonSysPath(self):
"""Get python sys.path of the given |device|."""
sys_path = self.device.RunCommand(
['python', '-c', '"import json, sys; json.dump(sys.path, sys.stdout)"'],
capture_output=True, log_output=True).output
return json.loads(sys_path)
def _FindDevicePythonPackagesDir(self):
"""Find the python packages directory for the given |device|."""
third_party_host_dir = ''
sys_path = self._GetDevicePythonSysPath()
for p in sys_path:
if p.endswith('site-packages') or p.endswith('dist-packages'):
third_party_host_dir = p
break
if not third_party_host_dir:
raise ChromiumOSUpdateError(
'Cannot find proper site-packages/dist-packages directory from '
'sys.path for storing packages: %s' % sys_path)
return third_party_host_dir
def _CopyPythonFilesToTemp(self, source_python_dir, dest_temp_dir,
extra_ignore_patterns=None):
"""Copy filtered python files to tempdir.
    Args:
      source_python_dir: The source python directory to copy from.
      dest_temp_dir: The destination temp directory to copy to.
extra_ignore_patterns: A list of extra ignore patterns in addition to
default patterns.
"""
logging.debug('Copy from %s to %s', source_python_dir, dest_temp_dir)
default_ignore_patterns = ['*.pyc', 'tmp*', '.*', 'static', '*~']
if extra_ignore_patterns:
default_ignore_patterns.extend(extra_ignore_patterns)
shutil.copytree(
source_python_dir, dest_temp_dir,
ignore=shutil.ignore_patterns(*default_ignore_patterns),
symlinks=True)
def _TransferRequiredPackage(self):
"""Transfer third-party packages related to devserver package."""
logging.info('Copying third-party packages to device...')
try:
# Copy third-party packages to pythonX.X/site(dist)-packages
third_party_host_dir = self._FindDevicePythonPackagesDir()
package_dir = os.path.join(self.tempdir, 'third_party')
osutils.RmDir(package_dir, ignore_missing=True)
for package in THIRD_PARTY_PKG_LIST:
# Filter python files from (binary) garbage.
self._CopyPythonFilesToTemp(
os.path.join(THIRD_PARTY_PKG_DIR, package),
os.path.join(package_dir, package))
# Python packages are plain text files so we chose rsync --compress.
self.device.CopyToDevice(
os.path.join(package_dir, os.path.split(package)[0]),
third_party_host_dir, mode='rsync', log_output=True,
**self._cmd_kwargs)
except cros_build_lib.RunCommandError as e:
# There's a chance that the DUT doesn't have any basic lib before
# provisioning, like python. These commands will fail first, but succeed
# after stateful partition is restored. So we choose not to raise error
# here.
logging.debug(
'Cannot transfer third-party packages to host due to: %s', e)
def _EnsureDeviceDirectory(self, directory):
"""Mkdir the directory no matther whether this directory exists on host.
Args:
directory: the directory to be made on the device.
"""
self.device.RunCommand(['mkdir', '-p', directory], **self._cmd_kwargs)
def _GetRootFsPayloadFileName(self):
"""Get the correct RootFs payload filename.
Returns:
The payload filename. (update.gz or a custom payload filename).
"""
if self.is_au_endtoendtest:
return self.payload_filename
else:
return ds_wrapper.ROOTFS_FILENAME
def TransferDevServerPackage(self):
"""Transfer devserver package to work directory of the remote device."""
logging.info('Copying devserver package to device...')
src_dir = os.path.join(self.tempdir, 'src')
osutils.RmDir(src_dir, ignore_missing=True)
# Filter python files from (binary) garbage.
# Also filter out directories including symlink to chromite.
self._CopyPythonFilesToTemp(ds_wrapper.DEVSERVER_PKG_DIR, src_dir,
extra_ignore_patterns=['venv', 'gs_cache'])
# Copy update_payload from update_engine repository.
update_payload_dir = os.path.join(src_dir, 'update_payload')
self._CopyPythonFilesToTemp(UPDATE_PAYLOAD_DIR, update_payload_dir)
# Make sure the device.work_dir exist after any installation and reboot.
self._EnsureDeviceDirectory(self.device.work_dir)
# Python packages are plain text files so we chose rsync --compress.
self.device.CopyToWorkDir(src_dir, mode='rsync', log_output=True,
**self._cmd_kwargs)
if self.original_payload_dir:
self._TransferRequiredPackage()
def TransferRootfsUpdate(self):
"""Transfer files for rootfs update.
Copy the update payload to the remote device for rootfs update.
"""
device_payload_dir = os.path.join(self.device_static_dir, 'pregenerated')
self._EnsureDeviceDirectory(device_payload_dir)
logging.info('Copying rootfs payload to device...')
payload_name = self._GetRootFsPayloadFileName()
payload = os.path.join(self.payload_dir, payload_name)
self.device.CopyToDevice(payload, device_payload_dir,
mode=self.payload_mode,
log_output=True, **self._cmd_kwargs)
if self.is_au_endtoendtest:
self.RenameRootfsPayloadForAUTest(device_payload_dir, payload_name)
def RenameRootfsPayloadForAUTest(self, payload_dir, payload_name):
"""Rename the payload supplied by autoupdate_EndToEndTest on the DUT.
The au test takes in a payload that we want to update to. In order not
to break the devservers update handling we rename this payload to
update.gz after we copy it to the DUT.
"""
expected_path = os.path.join(payload_dir, ds_wrapper.ROOTFS_FILENAME)
# Strip any partial paths from the filename e.g payloads/payload.bin
payload_name = payload_name.rpartition('/')[2]
current_path = os.path.join(payload_dir, payload_name)
# Rename the payload on the DUT so we don't break the current
# devserver staging. Rename to update.gz so DUTs devserver can respond.
self.device.RunCommand(['mv', current_path, expected_path])
def TransferStatefulUpdate(self):
"""Transfer files for stateful update.
The stateful update bin and the corresponding payloads are copied to the
target remote device for stateful update.
"""
logging.debug('Checking whether file stateful_update_bin needs to be '
'transferred to device...')
need_transfer, stateful_update_bin = self._GetStatefulUpdateScript()
if need_transfer:
logging.info('Copying stateful_update_bin to device...')
# stateful_update is a tiny uncompressed text file, so use rsync.
self.device.CopyToWorkDir(stateful_update_bin, mode='rsync',
log_output=True, **self._cmd_kwargs)
self.stateful_update_bin = os.path.join(
self.device.work_dir, os.path.basename(
self.LOCAL_CHROOT_STATEFUL_UPDATE_PATH))
else:
self.stateful_update_bin = stateful_update_bin
if self.original_payload_dir:
logging.info('Copying original stateful payload to device...')
original_payload = os.path.join(
self.original_payload_dir, ds_wrapper.STATEFUL_FILENAME)
self._EnsureDeviceDirectory(self.device_restore_dir)
self.device.CopyToDevice(original_payload, self.device_restore_dir,
mode=self.payload_mode, log_output=True,
**self._cmd_kwargs)
logging.info('Copying target stateful payload to device...')
payload = os.path.join(self.payload_dir, ds_wrapper.STATEFUL_FILENAME)
self.device.CopyToWorkDir(payload, mode=self.payload_mode,
log_output=True, **self._cmd_kwargs)
def RestoreStateful(self):
"""Restore stateful partition for device."""
logging.warning('Restoring the stateful partition')
self.RunUpdateStateful()
self._Reboot('stateful partition restoration')
try:
self._CheckDevserverCanRun()
logging.info('Stateful partition restored.')
except DevserverCannotStartError as e:
raise ChromiumOSUpdateError(
          'Unable to restore stateful partition: %s' % e)
def ResetStatefulPartition(self):
"""Clear any pending stateful update request."""
logging.debug('Resetting stateful partition...')
try:
self.device.RunCommand(['sh', self.stateful_update_bin,
'--stateful_change=reset'],
**self._cmd_kwargs)
except cros_build_lib.RunCommandError as e:
if self.is_au_endtoendtest and not self.device.HasRsync():
# If we have updated backwards from a build with ext4 crytpo to a
# build without ext4 crypto the DUT gets powerwashed. So the stateful
# bin, payloads, and devserver files are no longer accessible.
# See crbug.com/689105. Rsync will no longer be available either so we
# will need to use scp for the rest of the update.
logging.warning('Exception while resetting stateful: %s', e)
if self.CheckRestoreStateful():
logging.info('Stateful files and devserver code now back on '
'the device. Trying to reset stateful again.')
self.device.RunCommand(['sh', self.stateful_update_bin,
'--stateful_change=reset'],
**self._cmd_kwargs)
else:
raise
def RevertBootPartition(self):
"""Revert the boot partition."""
part = self.GetRootDev(self.device)
logging.warning('Reverting update; Boot partition will be %s', part)
try:
self.device.RunCommand(['/postinst', part], **self._cmd_kwargs)
except cros_build_lib.RunCommandError as e:
logging.warning('Reverting the boot partition failed: %s', e)
def UpdateRootfs(self):
"""Update the rootfs partition of the device."""
logging.info('Updating rootfs partition')
devserver_bin = os.path.join(self.device_dev_dir,
self.REMOTE_DEVSERVER_FILENAME)
ds = ds_wrapper.RemoteDevServerWrapper(
self.device, devserver_bin, self.is_au_endtoendtest,
static_dir=self.device_static_dir,
log_dir=self.device.work_dir)
try:
ds.Start()
logging.debug('Successfully started devserver on the device on port '
'%d.', ds.port)
# Use the localhost IP address to ensure that update engine
# client can connect to the devserver.
omaha_url = ds.GetDevServerURL(
ip='127.0.0.1', port=ds.port, sub_dir='update/pregenerated')
cmd = [self.REMOTE_UPDATE_ENGINE_BIN_FILENAME, '-check_for_update',
'-omaha_url=%s' % omaha_url]
self._StartPerformanceMonitoringForAUTest()
self.device.RunCommand(cmd, **self._cmd_kwargs)
# If we are using a progress bar, update it every 0.5s instead of 10s.
if command.UseProgressBar():
update_check_interval = self.UPDATE_CHECK_INTERVAL_PROGRESSBAR
oper = operation.ProgressBarOperation()
else:
update_check_interval = self.UPDATE_CHECK_INTERVAL_NORMAL
oper = None
end_message_not_printed = True
# Loop until update is complete.
while True:
#TODO(dhaddock): Remove retry when M61 is stable. See crbug.com/744212.
op, progress = retry_util.RetryException(cros_build_lib.RunCommandError,
UPDATE_ENGINE_STATUS_RETRY,
self.GetUpdateStatus,
self.device,
['CURRENT_OP', 'PROGRESS'],
delay_sec=DELAY_SEC_FOR_RETRY)
logging.info('Waiting for update...status: %s at progress %s',
op, progress)
if op == UPDATE_STATUS_UPDATED_NEED_REBOOT:
logging.notice('Update completed.')
break
if op == UPDATE_STATUS_IDLE:
raise RootfsUpdateError(
'Update failed with unexpected update status: %s' % op)
if oper is not None:
if op == UPDATE_STATUS_DOWNLOADING:
oper.ProgressBar(float(progress))
elif end_message_not_printed and op == UPDATE_STATUS_FINALIZING:
oper.Cleanup()
logging.notice('Finalizing image.')
end_message_not_printed = False
time.sleep(update_check_interval)
# Write the hostlog to a file before shutting off devserver.
self._CollectDevServerHostLog(ds)
ds.Stop()
except Exception as e:
logging.error('Rootfs update failed.')
self.RevertBootPartition()
logging.warning(ds.TailLog() or 'No devserver log is available.')
error_msg = 'Failed to perform rootfs update: %r'
raise RootfsUpdateError(error_msg % e)
finally:
if ds.is_alive():
self._CollectDevServerHostLog(ds)
ds.Stop()
self.device.CopyFromDevice(
ds.log_file,
os.path.join(self.tempdir, self.LOCAL_DEVSERVER_LOG_FILENAME),
**self._cmd_kwargs_omit_error)
self.device.CopyFromDevice(
self.REMOTE_UPDATE_ENGINE_LOGFILE_PATH,
os.path.join(self.tempdir, os.path.basename(
self.REMOTE_UPDATE_ENGINE_LOGFILE_PATH)),
follow_symlinks=True,
**self._cmd_kwargs_omit_error)
self.device.CopyFromDevice(
self.REMOTE_QUICK_PROVISION_LOGFILE_PATH,
os.path.join(self.tempdir, os.path.basename(
self.REMOTE_QUICK_PROVISION_LOGFILE_PATH)),
follow_symlinks=True,
ignore_failures=True,
**self._cmd_kwargs_omit_error)
self._CopyHostLogFromDevice('rootfs')
self._StopPerformanceMonitoringForAUTest()
def UpdateStateful(self, use_original_build=False):
"""Update the stateful partition of the device.
Args:
use_original_build: True if we use stateful.tgz of original build for
stateful update, otherwise, as default, False.
"""
msg = 'Updating stateful partition'
if self.original_payload_dir and use_original_build:
payload_dir = self.device_restore_dir
else:
payload_dir = self.device.work_dir
cmd = ['sh',
self.stateful_update_bin,
os.path.join(payload_dir, ds_wrapper.STATEFUL_FILENAME)]
if self._clobber_stateful:
cmd.append('--stateful_change=clean')
msg += ' with clobber enabled'
logging.info('%s...', msg)
try:
self.device.RunCommand(cmd, **self._cmd_kwargs)
except cros_build_lib.RunCommandError as e:
logging.error('Stateful update failed.')
self.ResetStatefulPartition()
error_msg = 'Failed to perform stateful partition update: %s'
raise StatefulUpdateError(error_msg % e)
def RunUpdateRootfs(self):
"""Run all processes needed by updating rootfs.
1. Check device's status to make sure it can be updated.
2. Copy files to remote device needed for rootfs update.
3. Do root updating.
TODO(ihf): Change this to:
2. Unpack rootfs here on server.
3. rsync from server rootfs to device rootfs to perform update
(do not use --compress).
"""
self.SetupRootfsUpdate()
# Copy payload for rootfs update.
self.TransferRootfsUpdate()
self.UpdateRootfs()
def RunUpdateStateful(self):
"""Run all processes needed by updating stateful.
1. Copy files to remote device needed by stateful update.
2. Do stateful update.
TODO(ihf): Change this to:
1. Unpack stateful here on server.
2. rsync from server stateful to device stateful to update (do not
use --compress).
"""
self.TransferStatefulUpdate()
self.UpdateStateful()
def RebootAndVerify(self):
"""Reboot and verify the remote device.
1. Reboot the remote device. If _clobber_stateful (--clobber-stateful)
is executed, the stateful partition is wiped, and the working directory
on the remote device no longer exists. So, recreate the working directory
for this remote device.
2. Verify the remote device, by checking that whether the root device
changed after reboot.
"""
logging.notice('rebooting device...')
# Record the current root device. This must be done after SetupRootfsUpdate
# and before reboot, since SetupRootfsUpdate may reboot the device if there
# is a pending update, which changes the root device, and reboot will
# definitely change the root device if update successfully finishes.
old_root_dev = self.GetRootDev(self.device)
self.device.Reboot()
if self._clobber_stateful:
self.device.BaseRunCommand(['mkdir', '-p', self.device.work_dir])
if self._do_rootfs_update:
logging.notice('Verifying that the device has been updated...')
new_root_dev = self.GetRootDev(self.device)
if old_root_dev is None:
raise AutoUpdateVerifyError(
'Failed to locate root device before update.')
if new_root_dev is None:
raise AutoUpdateVerifyError(
'Failed to locate root device after update.')
if new_root_dev == old_root_dev:
raise AutoUpdateVerifyError(
'Failed to boot into the new version. Possibly there was a '
'signing problem, or an automated rollback occurred because '
'your new image failed to boot.')
def RunUpdate(self):
"""Update the device with image of specific version."""
self.TransferDevServerPackage()
restore_stateful = self.CheckRestoreStateful()
if restore_stateful:
self.RestoreStateful()
# Perform device updates.
if self._do_rootfs_update:
self.RunUpdateRootfs()
logging.info('Rootfs update completed.')
if self._do_stateful_update and not restore_stateful:
self.RunUpdateStateful()
logging.info('Stateful update completed.')
if self._reboot:
self.RebootAndVerify()
if self._disable_verification:
logging.info('Disabling rootfs verification on the device...')
self.device.DisableRootfsVerification()
def _CollectDevServerHostLog(self, devserver):
"""Write the host_log events from the remote DUTs devserver to a file.
The hostlog is needed for analysis by autoupdate_EndToEndTest only.
We retry several times as some DUTs are slow immediately after
starting up a devserver and return no hostlog on the first call(s).
Args:
devserver: The remote devserver wrapper for the running devserver.
"""
if not self.is_au_endtoendtest:
return
for _ in range(0, MAX_RETRY):
try:
host_log_url = devserver.GetDevServerHostLogURL(ip='127.0.0.1',
port=devserver.port,
host='127.0.0.1')
# Save the hostlog.
self.device.RunCommand(['curl', host_log_url, '-o',
self.REMOTE_HOSTLOG_FILE_PATH],
**self._cmd_kwargs)
# Copy it back.
tmphostlog = os.path.join(self.tempdir, 'hostlog')
self.device.CopyFromDevice(self.REMOTE_HOSTLOG_FILE_PATH, tmphostlog,
**self._cmd_kwargs_omit_error)
# Check that it is not empty.
with open(tmphostlog, 'r') as out_log:
hostlog_data = json.loads(out_log.read())
if not hostlog_data:
logging.info('Hostlog empty. Trying again...')
time.sleep(DELAY_SEC_FOR_RETRY)
else:
break
except cros_build_lib.RunCommandError as e:
logging.debug('Exception raised while trying to write the hostlog: '
'%s', e)
def _StartPerformanceMonitoringForAUTest(self):
"""Start update_engine performance monitoring script in rootfs update.
This script is used by autoupdate_EndToEndTest.
"""
if self._clobber_stateful or not self.is_au_endtoendtest:
return None
cmd = ['python', self.REMOTE_UPDATE_ENGINE_PERF_SCRIPT_PATH, '--start-bg']
try:
perf_id = self.device.RunCommand(cmd).output.strip()
logging.info('update_engine_performance_monitors pid is %s.', perf_id)
self.perf_id = perf_id
except cros_build_lib.RunCommandError as e:
logging.debug('Could not start performance monitoring script: %s', e)
def _StopPerformanceMonitoringForAUTest(self):
"""Stop the performance monitoring script and save results to file."""
if self.perf_id is None:
return
cmd = ['python', self.REMOTE_UPDATE_ENGINE_PERF_SCRIPT_PATH, '--stop-bg',
self.perf_id]
try:
perf_json_data = self.device.RunCommand(cmd).output.strip()
self.device.RunCommand(['echo', json.dumps(perf_json_data), '>',
self.REMOTE_UPDATE_ENGINE_PERF_RESULTS_PATH])
except cros_build_lib.RunCommandError as e:
logging.debug('Could not stop performance monitoring process: %s', e)
def _CopyHostLogFromDevice(self, partial_filename):
"""Copy the hostlog file generated by the devserver from the device."""
if self.is_au_endtoendtest:
self.device.CopyFromDevice(
self.REMOTE_HOSTLOG_FILE_PATH,
os.path.join(self.tempdir, '_'.join([os.path.basename(
self.REMOTE_HOSTLOG_FILE_PATH), partial_filename])),
**self._cmd_kwargs_omit_error)
def _Reboot(self, error_stage):
try:
self.device.Reboot(timeout_sec=self.REBOOT_TIMEOUT)
except cros_build_lib.DieSystemExit:
raise ChromiumOSUpdateError('%s cannot recover from reboot at %s' % (
self.device.hostname, error_stage))
except remote_access.SSHConnectionError:
raise ChromiumOSUpdateError('Failed to connect to %s at %s' % (
self.device.hostname, error_stage))
class ChromiumOSUpdater(ChromiumOSFlashUpdater):
"""Used to auto-update Cros DUT with image.
Different from ChromiumOSFlashUpdater, which only contains cros-flash
related auto-update methods, ChromiumOSUpdater includes pre-setup and
post-check methods for both rootfs and stateful update. It also contains
various single check functions, like CheckVersion() and _ResetUpdateEngine().
Furthermore, this class adds retry to package transfer-related functions.
"""
REMOTE_STATEFUL_PATH_TO_CHECK = ['/var', '/home', '/mnt/stateful_partition']
REMOTE_STATEFUL_TEST_FILENAME = '.test_file_to_be_deleted'
REMOTE_UPDATED_MARKERFILE_PATH = '/run/update_engine_autoupdate_completed'
REMOTE_LAB_MACHINE_FILE_PATH = '/mnt/stateful_partition/.labmachine'
KERNEL_A = {'name': 'KERN-A', 'kernel': 2, 'root': 3}
KERNEL_B = {'name': 'KERN-B', 'kernel': 4, 'root': 5}
KERNEL_UPDATE_TIMEOUT = 180
def __init__(self, device, build_name, payload_dir, dev_dir='',
log_file=None, tempdir=None, original_payload_dir=None,
clobber_stateful=True, local_devserver=False, yes=False,
payload_filename=None):
"""Initialize a ChromiumOSUpdater for auto-update a chromium OS device.
Args:
device: the ChromiumOSDevice to be updated.
build_name: the target update version for the device.
payload_dir: the directory of payload(s).
dev_dir: the directory of the devserver that runs the CrOS auto-update.
log_file: The file to save running logs.
tempdir: the temp directory in caller, not in the device. For example,
the tempdir for cros flash is /tmp/cros-flash****/, used to
temporarily keep files when transferring devserver package, and
reserve devserver and update engine logs.
original_payload_dir: The directory containing payloads whose version is
the same as current host's rootfs partition. If it's None, will first
try installing the matched stateful.tgz with the host's rootfs
Partition when restoring stateful. Otherwise, install the target
stateful.tgz.
clobber_stateful: whether to do a clean stateful update. The default is
True for CrOS update.
local_devserver: Indicate whether users use their local devserver.
Default: False.
yes: Assume "yes" (True) for any prompt. The default is False. However,
it should be set as True if we want to disable all the prompts for
auto-update.
payload_filename: Filename of exact payload file to use for
update instead of the default: update.gz.
"""
super(ChromiumOSUpdater, self).__init__(
device, payload_dir, dev_dir=dev_dir, tempdir=tempdir,
original_payload_dir=original_payload_dir,
clobber_stateful=clobber_stateful, yes=yes,
payload_filename=payload_filename)
if log_file:
self._cmd_kwargs['log_stdout_to_file'] = log_file
self._cmd_kwargs['append_to_file'] = True
self._cmd_kwargs['combine_stdout_stderr'] = True
self._cmd_kwargs_omit_error['log_stdout_to_file'] = log_file
self._cmd_kwargs_omit_error['append_to_file'] = True
self._cmd_kwargs_omit_error['combine_stdout_stderr'] = True
self.inactive_kernel = None
if local_devserver:
self.update_version = None
else:
self.update_version = build_name
def _cgpt(self, flag, kernel, dev='$(rootdev -s -d)'):
"""Return numeric cgpt value for the specified flag, kernel, device."""
cmd = ['cgpt', 'show', '-n', '-i', '%d' % kernel['kernel'], flag, dev]
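    # e.g. for KERNEL_A ({'kernel': 2}) and flag '-P' this assembles:
    #   cgpt show -n -i 2 -P $(rootdev -s -d)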
return int(self._RetryCommand(
cmd, capture_output=True, log_output=True).output.strip())
def _GetKernelPriority(self, kernel):
"""Return numeric priority for the specified kernel.
Args:
kernel: information of the given kernel, KERNEL_A or KERNEL_B.
"""
return self._cgpt('-P', kernel)
def _GetKernelSuccess(self, kernel):
"""Return boolean success flag for the specified kernel.
Args:
kernel: information of the given kernel, KERNEL_A or KERNEL_B.
"""
return self._cgpt('-S', kernel) != 0
def _GetKernelTries(self, kernel):
"""Return tries count for the specified kernel.
Args:
kernel: information of the given kernel, KERNEL_A or KERNEL_B.
"""
return self._cgpt('-T', kernel)
def _GetKernelState(self):
"""Returns the (<active>, <inactive>) kernel state as a pair."""
active_root = int(re.findall(r'(\d+\Z)', self.GetRootDev(self.device))[0])
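    # e.g. a root device of '/dev/mmcblk0p3' yields active_root == 3,
    # i.e. KERNEL_A is active and KERNEL_B is inactive.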
if active_root == self.KERNEL_A['root']:
return self.KERNEL_A, self.KERNEL_B
elif active_root == self.KERNEL_B['root']:
return self.KERNEL_B, self.KERNEL_A
else:
raise ChromiumOSUpdateError('Encountered unknown root partition: %s' %
active_root)
def _GetReleaseVersion(self):
"""Get release version of the device."""
lsb_release_content = self._RetryCommand(
['cat', '/etc/lsb-release'],
capture_output=True, log_output=True).output.strip()
regex = r'^CHROMEOS_RELEASE_VERSION=(.+)$'
return auto_update_util.GetChromeosBuildInfo(
lsb_release_content=lsb_release_content, regex=regex)
def _GetReleaseBuilderPath(self):
"""Get release version of the device."""
lsb_release_content = self._RetryCommand(
['cat', '/etc/lsb-release'],
capture_output=True, log_output=True).output.strip()
regex = r'^CHROMEOS_RELEASE_BUILDER_PATH=(.+)$'
return auto_update_util.GetChromeosBuildInfo(
lsb_release_content=lsb_release_content, regex=regex)
def CheckVersion(self):
"""Check the image running in DUT has the expected version.
Returns:
True if the DUT's image version matches the version that the
ChromiumOSUpdater tries to update to.
"""
if not self.update_version:
return False
# Use CHROMEOS_RELEASE_BUILDER_PATH to match the build version if it exists
# in lsb-release, otherwise, continue using CHROMEOS_RELEASE_VERSION.
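    # For example (values illustrative): a builder path like
    # 'eve-release/R69-10895.0.0' is compared exactly, while a bare release
    # version like '10895.0.0' is matched with endswith() below.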
release_builder_path = self._GetReleaseBuilderPath()
if release_builder_path:
return self.update_version == release_builder_path
return self.update_version.endswith(self._GetReleaseVersion())
def _ResetUpdateEngine(self):
"""Resets the host to prepare for a clean update regardless of state."""
self._RetryCommand(['rm', '-f', self.REMOTE_UPDATED_MARKERFILE_PATH],
**self._cmd_kwargs)
self._RetryCommand(['stop', 'ui'], **self._cmd_kwargs_omit_error)
self._RetryCommand(['stop', 'update-engine'],
**self._cmd_kwargs_omit_error)
self._RetryCommand(['start', 'update-engine'], **self._cmd_kwargs)
status = retry_util.RetryException(
Exception,
MAX_RETRY,
self.GetUpdateStatus, self.device,
delay_sec=DELAY_SEC_FOR_RETRY)
if status[0] != UPDATE_STATUS_IDLE:
raise PreSetupUpdateError('%s is not in an installable state' %
self.device.hostname)
def _VerifyBootExpectations(self, expected_kernel_state, rollback_message):
"""Verify that we fully booted given expected kernel state.
It verifies that we booted using the correct kernel state, and that the
OS has marked the kernel as good.
Args:
expected_kernel_state: kernel state that we're verifying with i.e. I
expect to be booted onto partition 4 etc. See output of _GetKernelState.
rollback_message: string to raise as a RootfsUpdateError if we booted
with the wrong partition.
"""
logging.debug('Start verifying boot expectations...')
# Figure out the newly active kernel
active_kernel_state = self._GetKernelState()[0]
# Rollback
if (expected_kernel_state and
active_kernel_state != expected_kernel_state):
logging.debug('Dumping partition table.')
self.device.RunCommand(['cgpt', 'show', '$(rootdev -s -d)'],
**self._cmd_kwargs)
logging.debug('Dumping crossystem for firmware debugging.')
self.device.RunCommand(['crossystem', '--all'], **self._cmd_kwargs)
raise RootfsUpdateError(rollback_message)
# Make sure chromeos-setgoodkernel runs
try:
timeout_util.WaitForReturnTrue(
lambda: (self._GetKernelTries(active_kernel_state) == 0
and self._GetKernelSuccess(active_kernel_state)),
self.KERNEL_UPDATE_TIMEOUT,
period=5)
except timeout_util.TimeoutError:
services_status = self.device.RunCommand(
['status', 'system-services'], capture_output=True,
log_output=True).output
logging.debug('System services_status: %r' % services_status)
if services_status != 'system-services start/running\n':
event = ('Chrome failed to reach login screen')
else:
event = ('update-engine failed to call '
'chromeos-setgoodkernel')
raise RootfsUpdateError(
'After update and reboot, %s '
'within %d seconds' % (event, self.KERNEL_UPDATE_TIMEOUT))
def _CheckVersionToConfirmInstall(self):
# In the local_devserver case, we can't know the expected
# build, so just pass.
logging.debug('Checking whether the new build is successfully installed...')
if not self.update_version:
      logging.debug('No update_version is provided if the test is executed '
                    'with a local devserver.')
return True
# Always try the default check_version method first, this prevents
# any backward compatibility issue.
if self.CheckVersion():
return True
return auto_update_util.VersionMatch(
self.update_version, self._GetReleaseVersion())
def _RetryCommand(self, cmd, **kwargs):
"""Retry commands if SSHConnectionError happens.
Args:
cmd: the command to be run by device.
kwargs: the parameters for device to run the command.
Returns:
the output of running the command.
"""
return retry_util.RetryException(
remote_access.SSHConnectionError,
MAX_RETRY,
self.device.RunCommand,
cmd, delay_sec=DELAY_SEC_FOR_RETRY, **kwargs)
def TransferDevServerPackage(self):
"""Transfer devserver package to work directory of the remote device."""
retry_util.RetryException(
cros_build_lib.RunCommandError,
MAX_RETRY,
super(ChromiumOSUpdater, self).TransferDevServerPackage,
delay_sec=DELAY_SEC_FOR_RETRY)
def TransferRootfsUpdate(self):
"""Transfer files for rootfs update.
The corresponding payload are copied to the remote device for rootfs
update.
"""
retry_util.RetryException(
cros_build_lib.RunCommandError,
MAX_RETRY,
super(ChromiumOSUpdater, self).TransferRootfsUpdate,
delay_sec=DELAY_SEC_FOR_RETRY)
def TransferStatefulUpdate(self):
"""Transfer files for stateful update.
The stateful update bin and the corresponding payloads are copied to the
target remote device for stateful update.
"""
retry_util.RetryException(
cros_build_lib.RunCommandError,
MAX_RETRY,
super(ChromiumOSUpdater, self).TransferStatefulUpdate,
delay_sec=DELAY_SEC_FOR_RETRY)
def PreSetupCrOSUpdate(self):
"""Pre-setup for whole auto-update process for cros_host.
It includes:
1. Create a file to indicate if provision fails for cros_host.
The file will be removed by stateful update or full install.
"""
logging.debug('Start pre-setup for the whole CrOS update process...')
if not self.is_au_endtoendtest:
self._RetryCommand(['touch', self.REMOTE_PROVISION_FAILED_FILE_PATH],
**self._cmd_kwargs)
# Related to crbug.com/360944.
release_pattern = r'^.*-release/R[0-9]+-[0-9]+\.[0-9]+\.0$'
if not re.match(release_pattern, self.update_version):
        logging.debug('The update version does not match the release pattern')
return False
if not self.CheckVersion():
        logging.debug('The update version does not match the current version')
return False
return True
def PreSetupStatefulUpdate(self):
"""Pre-setup for stateful update for CrOS host."""
logging.debug('Start pre-setup for stateful update...')
self._RetryCommand(['sudo', 'stop', 'ap-update-manager'],
**self._cmd_kwargs_omit_error)
for folder in self.REMOTE_STATEFUL_PATH_TO_CHECK:
touch_path = os.path.join(folder, self.REMOTE_STATEFUL_TEST_FILENAME)
self._RetryCommand(['touch', touch_path], **self._cmd_kwargs)
self._ResetUpdateEngine()
self.ResetStatefulPartition()
def PostCheckStatefulUpdate(self):
"""Post-check for stateful update for CrOS host."""
logging.debug('Start post check for stateful update...')
self._Reboot('post check of stateful update')
if self._clobber_stateful:
for folder in self.REMOTE_STATEFUL_PATH_TO_CHECK:
test_file_path = os.path.join(folder,
self.REMOTE_STATEFUL_TEST_FILENAME)
# If stateful update succeeds, these test files should not exist.
if self.device.IfFileExists(test_file_path,
**self._cmd_kwargs_omit_error):
raise StatefulUpdateError('failed to post-check stateful update.')
def PreSetupRootfsUpdate(self):
"""Pre-setup for rootfs update for CrOS host."""
logging.debug('Start pre-setup for rootfs update...')
self._Reboot('pre-setup of rootfs update')
self._RetryCommand(['sudo', 'stop', 'ap-update-manager'],
**self._cmd_kwargs_omit_error)
self._ResetUpdateEngine()
def _IfDevserverPackageInstalled(self):
"""Check whether devserver package is well installed.
There's a chance that devserver package is removed in the middle of
auto-update process. This function double check it and transfer it if it's
removed.
"""
logging.info('Checking whether devserver files are still on the device...')
try:
devserver_bin = os.path.join(self.device_dev_dir,
self.REMOTE_DEVSERVER_FILENAME)
if not self.device.IfFileExists(
devserver_bin, **self._cmd_kwargs_omit_error):
logging.info('Devserver files not found on device. Resending them...')
self.TransferDevServerPackage()
self.TransferStatefulUpdate()
return True
except cros_build_lib.RunCommandError as e:
logging.warning('Failed to verify whether packages still exist: %s', e)
return False
def _CheckDevserverCanRun(self):
"""Check if devserver can successfully run for ChromiumOSUpdater."""
self._IfDevserverPackageInstalled()
super(ChromiumOSUpdater, self)._CheckDevserverCanRun()
def CheckDevserverRun(self):
"""Check whether devserver can start."""
self._CheckDevserverCanRun()
    logging.info('Devserver started successfully.')
def RestoreStateful(self):
"""Restore stateful partition for device."""
logging.warning('Restoring the stateful partition')
self.PreSetupStatefulUpdate()
use_original_build = bool(self.original_payload_dir)
self.UpdateStateful(use_original_build=use_original_build)
self.PostCheckStatefulUpdate()
self.CheckDevserverRun()
def PostCheckRootfsUpdate(self):
"""Post-check for rootfs update for CrOS host."""
logging.debug('Start post check for rootfs update...')
active_kernel, inactive_kernel = self._GetKernelState()
logging.debug('active_kernel= %s, inactive_kernel=%s',
active_kernel, inactive_kernel)
if (self._GetKernelPriority(inactive_kernel) <
self._GetKernelPriority(active_kernel)):
raise RootfsUpdateError('Update failed. The priority of the inactive '
'kernel partition is less than that of the '
'active kernel partition.')
self.inactive_kernel = inactive_kernel
if not self.is_au_endtoendtest:
# The issue is that certain AU tests leave the TPM in a bad state which
# most commonly shows up in provisioning. Executing this 'crossystem'
# command before rebooting clears the problem state during the reboot.
# It's also worth mentioning that this isn't a complete fix: The bad
# TPM state in theory might happen some time other than during
# provisioning. Also, the bad TPM state isn't supposed to happen at
# all; this change is just papering over the real bug.
self._RetryCommand('crossystem clear_tpm_owner_request=1',
**self._cmd_kwargs_omit_error)
self._Reboot('post check of rootfs update')
def PostCheckCrOSUpdate(self):
"""Post check for the whole auto-update process."""
logging.debug('Post check for the whole CrOS update...')
start_time = time.time()
    # Don't use 'sh' here since the current device.RunCommand cannot recognize
    # the content of $FILE.
autoreboot_cmd = ('FILE="%s" ; [ -f "$FILE" ] || '
'( touch "$FILE" ; start autoreboot )')
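    # i.e. if the lab-machine marker file does not exist yet, create it and
    # start the `autoreboot` job.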
self._RetryCommand(autoreboot_cmd % self.REMOTE_LAB_MACHINE_FILE_PATH,
**self._cmd_kwargs)
# Loop in case the initial check happens before the reboot.
while True:
try:
start_verify_time = time.time()
self._VerifyBootExpectations(
self.inactive_kernel, rollback_message=
'Build %s failed to boot on %s; system rolled back to previous '
'build' % (self.update_version, self.device.hostname))
# Check that we've got the build we meant to install.
if not self._CheckVersionToConfirmInstall():
raise ChromiumOSUpdateError(
'Failed to update %s to build %s; found build '
'%s instead' % (self.device.hostname,
self.update_version,
self._GetReleaseVersion()))
except RebootVerificationError as e:
# If a minimum amount of time since starting the check has not
# occurred, wait and retry. Use the start of the verification
# time in case an SSH call takes a long time to return/fail.
if start_verify_time - start_time < POST_CHECK_SETTLE_SECONDS:
logging.warning('Delaying for re-check of %s to update to %s (%s)' %
(self.device.hostname, self.update_version, e))
time.sleep(POST_CHECK_RETRY_SECONDS)
continue
raise
break
# For autoupdate_EndToEndTest only, we have one extra step to verify.
if self.is_au_endtoendtest and not self._clobber_stateful:
self.PostRebootUpdateCheckForAUTest()
def PostRebootUpdateCheckForAUTest(self):
"""Do another update check after reboot to get the post update hostlog.
This is only done with autoupdate_EndToEndTest.
"""
logging.debug('Doing one final update check to get post update hostlog.')
devserver_bin = os.path.join(self.device_dev_dir,
self.REMOTE_DEVSERVER_FILENAME)
ds = ds_wrapper.RemoteDevServerWrapper(
self.device, devserver_bin, self.is_au_endtoendtest,
static_dir=self.device_static_dir,
log_dir=self.device.work_dir)
try:
ds.Start()
logging.debug('Successfully started devserver on the device on port '
'%d.', ds.port)
omaha_url = ds.GetDevServerURL(ip='127.0.0.1', port=ds.port,
sub_dir='update')
cmd = [self.REMOTE_UPDATE_ENGINE_BIN_FILENAME, '-check_for_update',
'-omaha_url=%s' % omaha_url]
self.device.RunCommand(cmd, **self._cmd_kwargs)
op = self.GetUpdateStatus(self.device)
logging.info('Post update check status: %s' % op)
self._CollectDevServerHostLog(ds)
ds.Stop()
except Exception:
logging.error('Post reboot update check failed.')
logging.warning(ds.TailLog() or 'No devserver log is available.')
finally:
if ds.is_alive():
self._CollectDevServerHostLog(ds)
ds.Stop()
self._CopyHostLogFromDevice('reboot')
def AwaitReboot(self, old_boot_id):
"""Await a reboot, ensuring that it is no longer running old_boot_id.
Args:
old_boot_id: The boot_id that must be transitioned away from for success.
Returns:
True if the device has successfully rebooted.
Raises:
RebootVerificationError if a successful reboot has not occurred.
"""
logging.debug('Awaiting reboot from %s...', old_boot_id)
if not self.device.AwaitReboot(old_boot_id):
raise RebootVerificationError('Device has not rebooted from %s' %
old_boot_id)
return True
| 40.328457 | 80 | 0.685099 | 7,537 | 60,654 | 5.334218 | 0.121534 | 0.020893 | 0.013904 | 0.007313 | 0.339941 | 0.270819 | 0.21764 | 0.180231 | 0.165829 | 0.150035 | 0 | 0.002868 | 0.229663 | 60,654 | 1,503 | 81 | 40.355289 | 0.857589 | 0.308125 | 0 | 0.274971 | 0 | 0 | 0.16369 | 0.020934 | 0 | 0 | 0 | 0.003327 | 0 | 1 | 0.074031 | false | 0 | 0.024677 | 0.001175 | 0.182139 | 0.0047 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2caaeb7931baabb2f2a4e505483d42162334c020 | 1,368 | py | Python | d2lbook/common.py | dltech-xyz/d2l-book | 5f5279708f029c9724f4b258a1a26b6db2317532 | [
"Apache-2.0"
] | 1 | 2021-01-02T03:34:04.000Z | 2021-01-02T03:34:04.000Z | d2lbook/common.py | szha/d2l-book | 5f5279708f029c9724f4b258a1a26b6db2317532 | [
"Apache-2.0"
] | null | null | null | d2lbook/common.py | szha/d2l-book | 5f5279708f029c9724f4b258a1a26b6db2317532 | [
"Apache-2.0"
] | 1 | 2020-09-15T05:57:12.000Z | 2020-09-15T05:57:12.000Z | import re
from typing import Optional, List, Any, Callable, Tuple
# Our special mark in markdown, e.g. :label:`chapter_intro`
md_mark_pattern = re.compile(r':([-\/\\._\w\d]+):(`[\ \*-\/\\\._\w\d]+`)?')
# Same as md_mark_pattern, but for rst files
rst_mark_pattern = re.compile(r':([-\/\\._\w\d]+):(``[\ \*-\/\\\._\w\d]+``)?')
# The source code tab mark
source_tab_pattern = re.compile(r'# *@tab +([\w\,\ ]+)')
# Markdown code fence
md_code_fence = re.compile(r'(```+) *(.*)')
def group_list(list_obj: List[Any], status_fn: Callable[[Any, Any], Any]
) -> List[Tuple[Any, List[Any]]]:
"""Cut a list into multiple parts when fn returns True"""
prev_status = None
prev_pos = 0
ret = []
for i, item in enumerate(list_obj):
cur_status = status_fn(item, prev_status)
if prev_status is not None and cur_status != prev_status:
ret.append((prev_status, list_obj[prev_pos:i]))
prev_pos = i
prev_status = cur_status
ret.append((cur_status, list_obj[prev_pos:]))
return ret
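# Hypothetical usage sketch (not in the original file): status_fn receives
# the current item and the previous status, and the list is cut wherever
# the returned status changes.
#   group_list([1, 1, 2, 2, 3], lambda item, prev: item)
#   -> [(1, [1, 1]), (2, [2, 2]), (3, [3])]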
def flatten(x):
"""flatten a list of lists into a list."""
return [item for sublist in x for item in sublist]
def print_list(x):
print(f'len: {len(x)}')
for i, y in enumerate(x):
print(f'{i}\t{y}')
def print_dict(x):
print(f'len: {len(x)}')
for k in x:
print(f'{k}\t{x[k]}') | 33.365854 | 77 | 0.597953 | 213 | 1,368 | 3.661972 | 0.347418 | 0.076923 | 0.035897 | 0.051282 | 0.15641 | 0.105128 | 0.105128 | 0.061538 | 0 | 0 | 0 | 0.000926 | 0.210526 | 1,368 | 41 | 78 | 33.365854 | 0.721296 | 0.172515 | 0 | 0.068966 | 0 | 0 | 0.145666 | 0.040214 | 0 | 0 | 0 | 0 | 0 | 1 | 0.137931 | false | 0 | 0.068966 | 0 | 0.275862 | 0.206897 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2cab7ae8cc2711116a7008627d7fc69c47ab979f | 4,999 | py | Python | emulator/demoEmulator.py | ojotoxy/Processor | 8e6e4e8b33b2d3b51b05104089e801f187bb4617 | [
"Apache-2.0"
] | 1 | 2021-07-21T03:46:43.000Z | 2021-07-21T03:46:43.000Z | emulator/demoEmulator.py | ojotoxy/Processor | 8e6e4e8b33b2d3b51b05104089e801f187bb4617 | [
"Apache-2.0"
] | null | null | null | emulator/demoEmulator.py | ojotoxy/Processor | 8e6e4e8b33b2d3b51b05104089e801f187bb4617 | [
"Apache-2.0"
] | 1 | 2021-01-22T07:46:38.000Z | 2021-01-22T07:46:38.000Z | #!/usr/bin/env python3
from enum import Enum
# This computer will have these memory mapped registers:
# 0 = PC
# 1 = SP
# but I didn't implement them. Only the program counter exists, and it's not memory mapped
class Memory(object):
def __init__(self):
self.storage = {}
def __setitem__(self, addr, value):
self.storage[addr] = value
def __getitem__(self, addr):
return self.storage.get(addr, 0)
Instruction = Enum('Instruction', 'mov literal add sub mul div mod inc dec jmp je jne jg jge print')
# argument size in words: what the op does
# 2: mov A -> B
# 2: literal VAL -> A
# 3: add A + B -> C
# 3: sub A - B -> C
# 3: mul A * B -> C
# 3: div A / B -> C
# 3: mod A % B -> C
# 1: inc A + 1 -> A
# 1: dec A - 1 -> A
# 1: jmp POSITION
# 3: je if A == B goto POSITION
# 3: jne if A != B goto POSITION
# 3: jg if A > B goto POSITION
# 3: jge if A >= B goto POSITION
# 1: print A
# writepointer mov A -> memory[B]
# readpointer memory[A] -> B
# Need to ensure you don't overwrite the code memory with variables!
# This is the task that assemblers do for you by creating code and data sections.
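# Illustrative encoding (values hypothetical): the three words
#   Instruction.literal, 7, VAR_COUNTER
# stored at memory[0..2] mean "store the literal 7 into address VAR_COUNTER",
# matching the "literal VAL -> A" entry above.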
VAR_COUNTER = 100
VAR_COUNTER_INNER = 101
VAR_MODULO_RESULT = 102
VAR_ZERO = 103
PROGRAM_COUNT_UPWARDS = [
Instruction.literal, 1, VAR_COUNTER, # 0
Instruction.inc, VAR_COUNTER, # 3
Instruction.print, VAR_COUNTER, # 5
Instruction.jmp, 3 # 7
]
PROGRAM_LIST_PRIME_NUMBERS = [
Instruction.literal, 1, VAR_COUNTER, # 0
Instruction.literal, 0, VAR_ZERO, # 3
Instruction.inc, VAR_COUNTER, # 6
Instruction.literal, 2, VAR_COUNTER_INNER, # 8
Instruction.mod, VAR_COUNTER, VAR_COUNTER_INNER, VAR_MODULO_RESULT, # 11
Instruction.je, VAR_MODULO_RESULT, VAR_ZERO, 6, # 15, jump to next candidate prime
Instruction.inc, VAR_COUNTER_INNER, # 19, increment factor
Instruction.jge, VAR_COUNTER_INNER, VAR_COUNTER, 27, # 21, This number is prime, go to print it
Instruction.jmp, 11, # 25, jump to next modulo test.
Instruction.print, VAR_COUNTER, # 27
Instruction.jmp, 3 # 29, jump to next candidate prime
]
program = PROGRAM_LIST_PRIME_NUMBERS
memory = Memory()
# load the program into memory
for i in range(len(program)):
memory[i] = program[i]
program_counter = 0
def readword():
global program_counter
val = memory[program_counter]
program_counter += 1
return val
while True:
instruction_code = readword()
instruction = Instruction(instruction_code)
# print('PC = {}, Got Instruction {}'.format(program_counter-1, instruction))
if instruction == Instruction.mov:
A = readword()
B = readword()
memory[B] = memory[A]
elif instruction == Instruction.literal:
VAL = readword()
A = readword()
memory[A] = VAL
elif instruction == Instruction.add:
A = readword()
B = readword()
C = readword()
memory[C] = memory[A] + memory[B] # TODO modulo
elif instruction == Instruction.sub:
A = readword()
B = readword()
C = readword()
memory[C] = memory[A] - memory[B] # TODO modulo
elif instruction == Instruction.mul:
A = readword()
B = readword()
C = readword()
memory[C] = memory[A] * memory[B] # TODO modulo
elif instruction == Instruction.div:
A = readword()
B = readword()
C = readword()
memory[C] = int(memory[A] / memory[B])
elif instruction == Instruction.mod:
A = readword()
B = readword()
C = readword()
memory[C] = int(memory[A] % memory[B])
elif instruction == Instruction.inc:
A = readword()
memory[A] = memory[A] + 1 # TODO modulo
elif instruction == Instruction.dec:
A = readword()
memory[A] = memory[A] - 1 # TODO modulo
elif instruction == Instruction.jmp:
POSITION = readword()
program_counter = POSITION
elif instruction == Instruction.je:
A = readword()
B = readword()
POSITION = readword()
A_val = memory[A]
B_val = memory[B]
if A_val == B_val:
program_counter = POSITION
elif instruction == Instruction.jne:
A = readword()
B = readword()
POSITION = readword()
A_val = memory[A]
B_val = memory[B]
if A_val != B_val:
program_counter = POSITION
elif instruction == Instruction.jg:
A = readword()
B = readword()
POSITION = readword()
A_val = memory[A]
B_val = memory[B]
if A_val > B_val:
program_counter = POSITION
elif instruction == Instruction.jge:
A = readword()
B = readword()
POSITION = readword()
A_val = memory[A]
B_val = memory[B]
if A_val >= B_val:
program_counter = POSITION
elif instruction == Instruction.print:
A = readword()
value = memory[A]
print(value)
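# Note: both sample programs loop forever. A minimal way to stop cleanly
# would be a hypothetical halt instruction (not part of the ISA above):
#     elif instruction == Instruction.halt:
#         break
# which would also need 'halt' added to the Instruction enum.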
| 29.063953 | 100 | 0.605721 | 657 | 4,999 | 4.493151 | 0.2207 | 0.040312 | 0.123306 | 0.060976 | 0.41565 | 0.39397 | 0.360434 | 0.332656 | 0.332656 | 0.332656 | 0 | 0.019899 | 0.286257 | 4,999 | 171 | 101 | 29.233918 | 0.807455 | 0.215643 | 0 | 0.447154 | 0 | 0 | 0.019102 | 0 | 0 | 0 | 0 | 0.005848 | 0 | 1 | 0.03252 | false | 0 | 0.00813 | 0.00813 | 0.065041 | 0.04065 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2cace54cc66cc33d32eb380eadf379d0098d853a | 8,842 | py | Python | ggshield/config.py | boblefrag/gg-shield | 8eef8e02596ca05b9250482d9ea5cafd4435cfa0 | [
"MIT"
] | null | null | null | ggshield/config.py | boblefrag/gg-shield | 8eef8e02596ca05b9250482d9ea5cafd4435cfa0 | [
"MIT"
] | null | null | null | ggshield/config.py | boblefrag/gg-shield | 8eef8e02596ca05b9250482d9ea5cafd4435cfa0 | [
"MIT"
] | null | null | null | import copy
import json
import os
from typing import Any, Dict, List, NamedTuple
import click
import yaml
from dotenv import load_dotenv
from pygitguardian.models import PolicyBreak
from .git_shell import get_git_root, is_git_dir
from .text_utils import display_error
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
# max file size to accept
MAX_FILE_SIZE = 1048576
CPU_COUNT = os.cpu_count() or 1
class Attribute(NamedTuple):
name: str
default: Any
def replace_in_keys(data: Dict, old_char: str, new_char: str) -> None:
""" Replace old_char with new_char in data keys. """
for key in list(data):
if old_char in key:
new_key = key.replace(old_char, new_char)
data[new_key] = data.pop(key)
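# Hypothetical usage (illustrative): keys are renamed in place, e.g.
#   d = {'paths-ignore': []}
#   replace_in_keys(d, '-', '_')  # d is now {'paths_ignore': []}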
class Config:
all_policies: bool
api_url: str
exit_zero: bool
matches_ignore: set
paths_ignore: set
show_secrets: bool
verbose: bool
CONFIG_LOCAL = ["./.gitguardian", "./.gitguardian.yml", "./.gitguardian.yaml"]
CONFIG_GLOBAL = [
os.path.join(os.path.expanduser("~"), ".gitguardian"),
os.path.join(os.path.expanduser("~"), ".gitguardian.yml"),
os.path.join(os.path.expanduser("~"), ".gitguardian.yaml"),
]
DEFAULT_CONFIG_LOCAL = "./.gitguardian.yaml"
attributes: List[Attribute] = [
Attribute("all_policies", False),
Attribute("api_url", "https://api.gitguardian.com"),
Attribute("exit_zero", False),
Attribute("matches_ignore", set()),
Attribute("paths_ignore", set()),
Attribute("show_secrets", False),
Attribute("verbose", False),
]
def __init__(self) -> None:
for attr in self.attributes:
setattr(self, attr.name, attr.default)
self.load_configs(self.CONFIG_GLOBAL)
self.load_configs(self.CONFIG_LOCAL)
def __getattr__(self, name: str) -> Any:
# Required for dynamic types on mypy
return object.__getattribute__(self, name)
def get_attributes_keys(self) -> List:
return list(
list(zip(*self.attributes))[0]
) # get list of first elements in tuple
def update_config(self, **kwargs: Any) -> None:
for key, item in kwargs.items():
if key in self.get_attributes_keys():
if isinstance(item, list):
getattr(self, key).update(item)
else:
setattr(self, key, item)
else:
click.echo("Unrecognized key in config: {}".format(key))
def load_config(self, filename: str) -> bool:
if not os.path.isfile(filename):
return False
with open(filename, "r") as f:
try:
_config = yaml.safe_load(f) or {}
replace_in_keys(_config, "-", "_")
self.update_config(**_config)
except Exception as e:
raise click.ClickException(
f"Parsing error while reading {filename}:\n{str(e)}"
)
return True
def load_configs(self, filenames: List[str]) -> None:
""" Loads config files until one succeeds. """
for filename in filenames:
try:
if self.load_config(filename):
return
except Exception as exc:
click.echo(str(exc))
def to_dict(self) -> Dict[str, Any]:
_config = {key: getattr(self, key) for key in self.get_attributes_keys()}
# Convert all sets into more human readable lists
for key in self.get_attributes_keys():
value = _config[key]
if type(value) is set:
_config[key] = list(value)
replace_in_keys(_config, "_", "-")
return _config
def save(self) -> bool:
"""
Save config in the first CONFIG_LOCAL file.
If no local config file, creates a local .gitguardian.yaml
"""
config_file = self.DEFAULT_CONFIG_LOCAL
for filename in self.CONFIG_LOCAL:
if os.path.isfile(filename):
config_file = filename
break
with open(config_file, "w") as f:
try:
stream = yaml.dump(self.to_dict(), default_flow_style=False)
f.write(stream.replace("- ", " - "))
except Exception as e:
raise click.ClickException(
f"Error while saving config in {config_file}:\n{str(e)}"
)
return True
def add_ignored_match(self, secret_hash: str) -> None:
""" Add secret to matches_ignore. """
current_ignored = self.matches_ignore
current_ignored.add(secret_hash)
def load_dot_env() -> None:
"""Loads .env file into sys.environ."""
dont_load_env = os.getenv("GITGUARDIAN_DONT_LOAD_ENV", False)
dotenv_path = os.getenv("GITGUARDIAN_DOTENV_PATH", None)
cwd_env = os.path.join(".", ".env")
if not dont_load_env:
if dotenv_path and os.path.isfile(dotenv_path):
load_dotenv(dotenv_path, override=True)
return
elif dotenv_path:
display_error(
"GITGUARDIAN_DOTENV_LOCATION does not point to a valid .env file"
)
if os.path.isfile(cwd_env):
load_dotenv(cwd_env, override=True)
return
if is_git_dir() and os.path.isfile(os.path.join(get_git_root(), ".env")):
load_dotenv(os.path.join(get_git_root(), ".env"), override=True)
return
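# Resolution order above: an explicit GITGUARDIAN_DOTENV_PATH wins, then a
# ./.env in the current working directory, then a .env at the git repo root;
# setting GITGUARDIAN_DONT_LOAD_ENV disables loading entirely.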
class Cache:
last_found_secrets: set
CACHE_FILENAME = "./.cache_ggshield"
attributes: List[Attribute] = [
Attribute("last_found_secrets", set()),
]
def __init__(self) -> None:
self.purge()
self.load_cache()
def __getattr__(self, name: str) -> Any:
# Required for dynamic types on mypy
return object.__getattribute__(self, name)
def get_attributes_keys(self) -> List:
return list(
list(zip(*self.attributes))[0]
) # get list of first elements in tuple
def create_empty_cache(self) -> None:
# Creates a new file
with open(self.CACHE_FILENAME, "w"):
pass
def load_cache(self) -> bool:
if not os.path.isfile(self.CACHE_FILENAME):
self.create_empty_cache()
return True
_cache: dict = {}
if os.stat(self.CACHE_FILENAME).st_size != 0:
with open(self.CACHE_FILENAME, "r") as f:
try:
_cache = json.load(f)
# Convert back all sets that were serialized as lists
for attr in self.attributes:
if type(attr.default) is set and attr.name in _cache:
_cache[attr.name] = set(_cache[attr.name]) or set()
except Exception as e:
raise click.ClickException(
f"Parsing error while reading {self.CACHE_FILENAME}:\n{str(e)}"
)
self.update_cache(**_cache)
return True
def update_cache(self, **kwargs: Any) -> None:
for key, item in kwargs.items():
if key in self.get_attributes_keys():
if isinstance(item, list):
getattr(self, key).update(item)
else:
setattr(self, key, item)
else:
click.echo("Unrecognized key in cache: {}".format(key))
def to_dict(self) -> Dict[str, Any]:
_cache = {key: getattr(self, key) for key in self.get_attributes_keys()}
# Convert all sets into list so they can be json serialized
for key in self.get_attributes_keys():
value = _cache[key]
if type(value) is set:
_cache[key] = list(value)
return _cache
def save(self) -> bool:
if not os.path.isfile(self.CACHE_FILENAME):
return False
with open(self.CACHE_FILENAME, "w") as f:
try:
json.dump(self.to_dict(), f)
except Exception as e:
raise click.ClickException(
f"Error while saving cache in {self.CACHE_FILENAME}:\n{str(e)}"
)
return True
def purge(self) -> None:
for attr in self.attributes:
# Deep copy to avoid mutating the default value
default = copy.copy(attr.default)
setattr(self, attr.name, default)
def add_found_secret(self, hash: str) -> None:
self.last_found_secrets.add(hash)
def add_found_policy_break(self, policy_break: PolicyBreak) -> None:
if policy_break.policy.lower() == "secrets detection":
for match in policy_break.matches:
self.add_found_secret(match.match)
| 33.240602 | 87 | 0.577358 | 1,075 | 8,842 | 4.55907 | 0.194419 | 0.019588 | 0.027749 | 0.014691 | 0.37727 | 0.345032 | 0.312385 | 0.247705 | 0.23383 | 0.23383 | 0 | 0.001821 | 0.316784 | 8,842 | 265 | 88 | 33.366038 | 0.809469 | 0.072721 | 0 | 0.321782 | 0 | 0 | 0.085802 | 0.022618 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108911 | false | 0.004951 | 0.049505 | 0.019802 | 0.336634 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2cad9f866ea7aa26c12ef287f5ee514f9321fee4 | 1,444 | py | Python | pynetest/lib/pyne_test_blocks.py | Avvir/pyne | 864885a8fb632b72c00af164f150b1daa38a346f | [
"MIT"
] | 4 | 2018-08-10T20:05:10.000Z | 2019-07-24T15:29:32.000Z | pynetest/lib/pyne_test_blocks.py | Avvir/pyne | 864885a8fb632b72c00af164f150b1daa38a346f | [
"MIT"
] | 6 | 2018-09-25T20:15:51.000Z | 2021-12-22T17:09:52.000Z | pynetest/lib/pyne_test_blocks.py | Avvir/pyne | 864885a8fb632b72c00af164f150b1daa38a346f | [
"MIT"
] | null | null | null | class BehaviorBlock:
def __init__(self, parent, method, description):
self.method = method
self.parent = parent
self.description = description
class Context(object):
def __init__(self, parent):
if parent is not None:
for attr in dir(parent.context):
if not hasattr(self, attr):
setattr(self, attr, getattr(parent.context, attr))
class DescribeBlock(BehaviorBlock):
def __init__(self, parent, context_description, method, pending=False, focused=False, has_focused_descendants=False):
super().__init__(parent, method, context_description)
self.after_each_blocks = []
self.describe_blocks = []
self.before_each_blocks = []
self.it_blocks = []
self.context = Context(parent)
self.pending = pending
self.has_focused_descendants = has_focused_descendants
self.focused = focused
class ItBlock(BehaviorBlock):
def __init__(self, parent, description, method, pending=False, focused=False):
super().__init__(parent, method, description)
self.pending = pending
self.focused = focused
class BeforeEachBlock(BehaviorBlock):
def __init__(self, parent, method):
super().__init__(parent, method, "@before_each")
class AfterEachBlock(BehaviorBlock):
def __init__(self, parent, method):
super().__init__(parent, method, "@after_each")
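# Hypothetical wiring sketch (not part of the original module): a runner
# builds a tree of these blocks and walks it.
#   root = DescribeBlock(None, 'a calculator', lambda: None)
#   root.it_blocks.append(ItBlock(root, 'adds numbers', lambda: None))
#   root.before_each_blocks.append(BeforeEachBlock(root, lambda: None))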
| 32.088889 | 121 | 0.665512 | 154 | 1,444 | 5.876623 | 0.24026 | 0.077348 | 0.072928 | 0.112707 | 0.374586 | 0.256354 | 0.125967 | 0.125967 | 0.125967 | 0.125967 | 0 | 0 | 0.234072 | 1,444 | 44 | 122 | 32.818182 | 0.818264 | 0 | 0 | 0.181818 | 0 | 0 | 0.015928 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2cae6969a37d3bd9a688ae9ce4914b584b626699 | 1,166 | py | Python | code/Metricsource.py | t1191578/moniteredit | 48465d7cbee2452392720a26320d8753f8807bc9 | [
"Apache-2.0"
] | null | null | null | code/Metricsource.py | t1191578/moniteredit | 48465d7cbee2452392720a26320d8753f8807bc9 | [
"Apache-2.0"
] | null | null | null | code/Metricsource.py | t1191578/moniteredit | 48465d7cbee2452392720a26320d8753f8807bc9 | [
"Apache-2.0"
] | null | null | null | import requests
from multipledispatch import dispatch
class ApiError(Exception):
    # Raised when the metrics API returns a non-200 status code.
    pass
class Source:
def __init__(self,url,query="query?query="):
self.url = url
self.query= query
    def build_url(self, metricreq):
        # Form the full query URL for a metric request.
        return self.url + self.query + metricreq
@dispatch(str)
def inputoutput(self, metricreq):
Aurl = self.url+self.query+metricreq
print(Aurl)
resp = requests.get(Aurl)
if resp.status_code != 200:
# This means something went wrong.
raise ApiError('GET /tasks/ {}'.format(resp.status_code))
        val = resp.json()
        cluster = val['data']['result']
        print(cluster)
return cluster
@dispatch(str,str)
def inputoutput(self, name, metricreq):
Aurl = self.url + self.query + metricreq+'{job="'+name+'"}'
resp = requests.get(Aurl)
if resp.status_code != 200:
# This means something went wrong.
raise ApiError('GET /tasks/ {}'.format(resp.status_code))
val = resp.json()
cluster = val['data']['result']
return cluster
uri="http://35.154.106.147:9090/api/v1/"
query= "query?query="
val=Source(uri)
val.inputoutput( "go_info") | 29.897436 | 69 | 0.587479 | 140 | 1,166 | 4.828571 | 0.378571 | 0.073965 | 0.08284 | 0.06213 | 0.517751 | 0.517751 | 0.517751 | 0.405325 | 0.405325 | 0.405325 | 0 | 0.026128 | 0.277873 | 1,166 | 39 | 70 | 29.897436 | 0.776722 | 0.063465 | 0 | 0.375 | 0 | 0 | 0.111009 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0.03125 | 0.0625 | 0 | 0.28125 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2cb1bb53ea364d5eed2d8ae779c2ec9f50b57788 | 966 | py | Python | mygrations/formats/mysql/file_reader/parsers/index_primary.py | cmancone/mygrations | 30d1d568ca7d6c38dbc5211834dd2d04c0bcf078 | [
"MIT"
] | 10 | 2018-04-09T08:39:42.000Z | 2022-03-14T15:36:05.000Z | mygrations/formats/mysql/file_reader/parsers/index_primary.py | cmancone/mygrations | 30d1d568ca7d6c38dbc5211834dd2d04c0bcf078 | [
"MIT"
] | 14 | 2018-05-02T11:14:08.000Z | 2022-01-15T18:48:54.000Z | mygrations/formats/mysql/file_reader/parsers/index_primary.py | cmancone/mygrations | 30d1d568ca7d6c38dbc5211834dd2d04c0bcf078 | [
"MIT"
] | 5 | 2018-07-18T02:20:48.000Z | 2022-02-19T09:32:07.000Z | from mygrations.core.parse.parser import parser
from mygrations.formats.mysql.definitions.index import index
class index_primary(parser, index):
_index_type = 'primary'
has_comma = False
# PRIMARY KEY (`id`),
rules = [{
'type': 'literal',
'value': 'PRIMARY KEY'
}, {
'type': 'literal',
'value': '('
}, {
'type': 'delimited',
'name': 'columns',
'separator': ',',
'quote': '`'
}, {
'type': 'literal',
'value': ')'
}, {
'type': 'literal',
'value': ',',
'optional': True,
'name': 'ending_comma'
}]
    def __init__(self, rules=None):
        # A None default avoids sharing one mutable list across calls.
        super().__init__(rules if rules is not None else [])
self._errors = []
self._warnings = []
self._columns = []
def process(self):
self._name = ''
self._columns = self._values['columns']
self.has_comma = True if 'ending_comma' in self._values else False
| 22.465116 | 74 | 0.509317 | 90 | 966 | 5.222222 | 0.444444 | 0.093617 | 0.13617 | 0.085106 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.321946 | 966 | 42 | 75 | 23 | 0.717557 | 0.019669 | 0 | 0.235294 | 0 | 0 | 0.177778 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.058824 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2cb2f10583b45235d86386a62278891da1c81b82 | 445 | py | Python | cryptolens/exchanges/poloniex.py | hkilian/cryptolens | 22ad4257fb9316e57b2402a83dbbfd583493fb39 | [
"MIT"
] | null | null | null | cryptolens/exchanges/poloniex.py | hkilian/cryptolens | 22ad4257fb9316e57b2402a83dbbfd583493fb39 | [
"MIT"
] | 1 | 2021-06-01T21:50:08.000Z | 2021-06-01T21:50:08.000Z | cryptolens/exchanges/poloniex.py | hkilian/cryptolens | 22ad4257fb9316e57b2402a83dbbfd583493fb39 | [
"MIT"
] | 1 | 2017-11-02T05:08:57.000Z | 2017-11-02T05:08:57.000Z | import json
import requests
from .exchange import Exchange
class Poloniex(Exchange):
def __init__(self):
Exchange.__init__(self, "GDAX")
def PullData(self):
url = "https://poloniex.com/public?command=returnTicker¤cyPair=USDT_BTC"
response = requests.get(url)
ticker = response.json()['USDT_BTC']
data = {}
data['price'] = ticker['last']
data['percentChange'] = 0
data['volume'] = ticker['baseVolume']
return data
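# Example usage (hypothetical; needs network access):
#   data = Poloniex().PullData()
#   # -> {'price': ..., 'percentChange': 0, 'volume': ...}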
| 21.190476 | 80 | 0.705618 | 54 | 445 | 5.62963 | 0.592593 | 0.052632 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002632 | 0.146067 | 445 | 20 | 81 | 22.25 | 0.797368 | 0 | 0 | 0 | 0 | 0 | 0.269663 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.2 | 0 | 0.466667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2cb370dba58960e4d34825e77ae18854da4dd923 | 354 | py | Python | exercises/exc_04_02.py | rklymentiev/py-for-neuro | 6bb163347483642c79eac429e5a9289edff7ce09 | [
"MIT"
] | 7 | 2021-04-28T13:12:16.000Z | 2022-01-15T00:21:11.000Z | exercises/exc_04_02.py | rklymentiev/py-for-neuro | 6bb163347483642c79eac429e5a9289edff7ce09 | [
"MIT"
] | 2 | 2021-04-02T18:42:55.000Z | 2021-05-20T08:43:06.000Z | exercises/exc_04_02.py | rklymentiev/py-for-neuro | 6bb163347483642c79eac429e5a9289edff7ce09 | [
"MIT"
] | 2 | 2021-07-04T22:57:29.000Z | 2021-07-29T19:28:43.000Z | import pandas as ___
import seaborn as ___
import matplotlib.pyplot as ___
# read in the data
___
# select the columns with 'mean' in the name
selected_columns = list(___)
# find the correlations
corr_matrix = ___
# make a plot
plt.___(figsize=(8,7), facecolor='white')
sns.___(data=___, cmap="YlGnBu")
plt.___("Correlation Among Variables")
___.___()
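# One possible completion, for reference only (illustrative -- the file path
# and exact column names are assumptions, not part of the exercise):
#   df = pd.read_csv("data.csv")
#   selected_columns = list(df.columns[df.columns.str.contains("mean")])
#   corr_matrix = df[selected_columns].corr()
#   plt.figure(figsize=(8, 7), facecolor='white')
#   sns.heatmap(data=corr_matrix, cmap="YlGnBu")
#   plt.title("Correlation Among Variables")
#   plt.show()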
| 20.823529 | 44 | 0.748588 | 47 | 354 | 4.829787 | 0.765957 | 0.070485 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006601 | 0.144068 | 354 | 16 | 45 | 22.125 | 0.742574 | 0.262712 | 0 | 0 | 0 | 0 | 0.148438 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.3 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2cb530e8e30ba2311d864307233f32b3ff43ff4d | 3,178 | py | Python | tests/lexer_test.py | tykazzz/pseudo | 7414ab0f14142b9ed704d03af0223cf847bf8861 | [
"MIT"
] | null | null | null | tests/lexer_test.py | tykazzz/pseudo | 7414ab0f14142b9ed704d03af0223cf847bf8861 | [
"MIT"
] | null | null | null | tests/lexer_test.py | tykazzz/pseudo | 7414ab0f14142b9ed704d03af0223cf847bf8861 | [
"MIT"
] | null | null | null | """This module contains unit tests for lexer module."""
import pytest
import pseudo
from pseudo.pseudo_types import Operation, Operator, Int
__author__ = "Patryk Niedźwiedziński"
@pytest.fixture
def lexer():
"""Returns lexer object."""
lex = pseudo.lexer.Lexer("")
return lex
def test_is_keyword(lexer):
"""Check Lexer.is_keyword"""
if not (
lexer.is_keyword("pisz") is True
and lexer.is_keyword("oo") is False
and lexer.is_keyword("koniec") is True
):
raise AssertionError
def test_is_alphabet(lexer):
"""Check Lexer.is_alphabet"""
if not (
lexer.is_alphabet("a") is True
and lexer.is_alphabet("A") is True
and lexer.is_alphabet("1") is False
and lexer.is_alphabet("*") is False
and lexer.is_alphabet(1) is False
):
raise AssertionError
def test_is_digit(lexer):
"""Check Lexer.is_digit"""
if not (
lexer.is_digit("1") is True
and lexer.is_digit("a") is False
and lexer.is_digit('"') is False
):
raise AssertionError
def test_is_operator(lexer):
"""Checks Lexer.is_operator"""
if not (
lexer.is_operator("*") is True
and lexer.is_operator("div") is True
and lexer.is_operator(":=") is False
and lexer.is_operator("pisz") is False
):
raise AssertionError
def test_is_not_keyword_end(lexer):
"""Checks Lexer.is_not_keyword_end"""
if not (
lexer.is_not_keyword_end("a") is True
and lexer.is_not_keyword_end("+") is False
and lexer.is_not_keyword_end("!") is False
):
raise AssertionError
def test_update_args(lexer):
"""Checks Lexer.update_args"""
if not (
lexer.update_args([Int(2), Operator("+"), Int(2)], 1)
== [Operation(Operator("+"), Int(2), Int(2))]
):
raise AssertionError
def test_read_number(lexer):
"""Checks Lexer.read_number"""
lexer.i = pseudo.stream.Stream("123")
if 123 != lexer.read_number().value:
raise AssertionError
lexer.i = pseudo.stream.Stream("abc")
if lexer.read_number() is not None:
raise AssertionError
def test_read_string(lexer):
"""Checks Lexer.read_string"""
lexer.i = pseudo.stream.Stream('"abc"')
if "abc" != lexer.read_string().value:
raise AssertionError
def test_keyword(lexer):
"""Checks Lexer.read_keyword"""
lexer.i = pseudo.stream.Stream("pisz x")
if "pisz" != lexer.read_keyword():
raise AssertionError
def test_read_args(lexer):
"""Checks Lexer.read_args"""
lexer.i = pseudo.stream.Stream(" 12")
if 12 != lexer.read_args().value:
raise AssertionError
lexer.i = pseudo.stream.Stream("2+2*2")
if 6 != lexer.read_args().eval():
raise AssertionError
lexer.i = pseudo.stream.Stream("(2+2)*2")
if 8 != lexer.read_args().eval():
raise AssertionError
def test_read_expression(lexer):
"""Checks Lexer.read_expression"""
if (
lexer.read_expression(
[Int(2), Operator("+"), Int(2), Operator("*"), Int(2)]
).eval()
!= 6
):
raise AssertionError
| 25.023622 | 66 | 0.618943 | 417 | 3,178 | 4.539568 | 0.151079 | 0.08505 | 0.068674 | 0.137348 | 0.510829 | 0.316429 | 0.232435 | 0.144744 | 0.08505 | 0.08505 | 0 | 0.012469 | 0.24292 | 3,178 | 126 | 67 | 25.222222 | 0.774314 | 0.109818 | 0 | 0.317647 | 0 | 0 | 0.035119 | 0 | 0 | 0 | 0 | 0 | 0.164706 | 1 | 0.141176 | false | 0 | 0.035294 | 0 | 0.188235 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2cb5f4cd3fc9c93d71c637d25d6816c5fcb669d6 | 19,227 | py | Python | app/helpers.py | zhuding/javatools | 4b3a57b2ddfec36cae4af08ac5c3d71cfe879f63 | [
"Apache-2.0"
] | null | null | null | app/helpers.py | zhuding/javatools | 4b3a57b2ddfec36cae4af08ac5c3d71cfe879f63 | [
"Apache-2.0"
] | null | null | null | app/helpers.py | zhuding/javatools | 4b3a57b2ddfec36cae4af08ac5c3d71cfe879f63 | [
"Apache-2.0"
] | null | null | null | # coding=utf8
from app import db
def get_databases():
sql = "SHOW DATABASES"
result = db.engine.execute(sql)
dbs = []
for row in result:
dbs.append(row['Database'])
return dbs
def get_tables(dbname):
sql = "SELECT TABLE_NAME AS tableName FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA='" + dbname + "'"
result = db.engine.execute(sql)
tables = []
for row in result:
tables.append(row['tableName'])
return tables
def get_table_field(tablename):
sql = "SHOW FULL COLUMNS FROM " + tablename
results = db.engine.execute(sql)
fields = []
for result in results:
fields.append({'field': result['Field'], 'type': result['Type'], \
'isNull': result['Null'], 'key': result['Key'], \
'defaultValue': result['Default'], 'extra': result['Extra'], 'comment': result['Comment']})
return fields
def get_select_field(tableName, fields):
sql = "SELECT "
sqlField = ""
for field in fields:
if (sqlField == ''):
sqlField += field['field']
else:
sqlField += ',' + field['field']
sql += sqlField + ' FROM ' + tableName
return sql
def get_select_field_AS_Bean(tableName, fields):
sql = "SELECT "
sqlField = ""
for field in fields:
attr = convertField(field['field'])
if (sqlField == ''):
sqlField += field['field'] + ' AS ' + attr
else:
sqlField += ', ' + field['field'] + ' AS ' + attr
sql += sqlField + ' FROM ' + tableName
return sql
def get_insert_sql(objectName, tableName, fields):
sql = "INSERT INTO " + tableName + " "
sqlField = ''
sqlValue = ''
for field in fields:
if (field['extra'] != 'auto_increment'):
if (sqlField == ''):
sqlField += field['field']
else:
sqlField += ',' + field['field']
sql += '(' + sqlField + ') VALUES '
for field in fields:
if (field['extra'] != 'auto_increment'):
attr = convertField(field['field'])
if (sqlValue == ''):
sqlValue = '#{' + attr + '}'
else:
sqlValue += ',#{' + attr + '}'
sql += '(' + sqlValue + ') '
return sql
def get_update_sql(objectName, tableName, fields):
sql = "UPDATE " + tableName + " SET "
sqlField = ""
where = ""
for field in fields:
attr = convertField(field['field'])
if (field['extra'] != 'auto_increment'):
if (sqlField == ''):
sqlField += field['field'] + '=#{' + attr + '}'
else:
sqlField += ', ' + field['field'] + '=#{' + attr + '}'
if (field['extra'] == 'auto_increment'):
where = ' WHERE ' + field['field'] + '=#{' + attr + '}'
sql += sqlField + where
return sql
def get_update_sql_v2(tableName, fields):
sql = "UPDATE " + tableName + "<br/>"
sql += "\t\t<set><br/>"
for field in fields:
attr = convertField(field['field'])
if not (field['extra'] == 'auto_increment'):
sql += "\t\t\t<if test=\"" + attr + "!=null\"> " + field['field'] + "=#{" + attr + "} " \
"</if><br/>"
sql += "\t\t</set><br/>"
sql += "\t\tWHERE id=#{id}"
return sql
def get_delete_sql(objectName, tableName, fields):
sql = "DELETE FROM " + tableName
where = ""
for field in fields:
attr = convertField(field['field'])
if (field['extra'] == 'auto_increment'):
where = ' WHERE ' + field['field'] + '=#{' + attr + '}'
sql += where
return sql
def get_select_sql(tableName, fields):
sql = "SELECT "
sqlField = ""
where = ""
for field in fields:
attr = convertField(field['field'])
if (sqlField == ''):
sqlField += field['field'] + ' AS ' + attr
else:
sqlField += ',' + field['field'] + ' AS ' + attr
if (field['extra'] == 'auto_increment'):
where = ' WHERE ' + field['field'] + '=#{' + attr + '}'
sql += sqlField + ' FROM ' + tableName + where
return sql
def get_simple_select_sql(tableName, fields):
sql = "SELECT "
sqlField = ""
where = ""
for field in fields:
attr = convertField(field['field'])
if (sqlField == ''):
sqlField += field['field']
else:
sqlField += ',' + field['field']
if (field['extra'] == 'auto_increment'):
where = ' WHERE ' + field['field'] + '=#{' + attr + '}'
sql += sqlField + ' FROM ' + tableName + where
return sql
def get_java_code(objectName, fields):
code = "public class " + titleFirst(objectName) + " {<br/>"
for field in fields:
attr = convertField(field['field'])
code += "\t" + '// ' + field['comment'] + '' + "<br/>"
codeType = field['type']
if codeType.find('int')==0:
codeType = 'Integer'
elif codeType.find('tinyint')==0:
codeType = 'Integer'
elif codeType.find('bigint')==0:
codeType = 'Long'
elif codeType.find('timestamp')==0:
codeType = 'LocalDateTime'
elif codeType.find('datetime')==0:
codeType = 'LocalDateTime'
elif codeType.find('date')==0:
codeType = 'LocalDate'
elif codeType.find('char')==0:
codeType = 'String'
elif codeType.find('varchar')==0:
codeType = 'String'
elif codeType.find('text')>=0:
codeType = 'String'
elif codeType.find('blob')>=0:
codeType = 'String'
else:
codeType = 'String'
code += "\t" + 'private ' + codeType + ' ' + attr + ";<br/>"
code += '}'
return code
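# Illustrative mapping (hypothetical column): a MySQL field defined as
#   user_name varchar(50) COMMENT 'login name'
# comes out of get_java_code as
#   // login name
#   private String userName;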
def get_mybatis_xml(objectName, tableName, fields):
    select_fields = get_select_field(tableName, fields)
simple_select_sql = get_simple_select_sql(tableName, fields)
insert_sql = get_insert_sql(objectName, tableName, fields)
update_sql = get_update_sql(objectName, tableName, fields)
#update_sql_v2 = get_update_sql_v2(tableName, fields)
className = titleFirst(objectName)
code = ""
code += "<?xml version=\"1.0\" encoding=\"UTF-8\" ?><br/>"
code += "<!DOCTYPE mapper PUBLIC \"-//mybatis.org//DTD Mapper 3.0//EN\" " \
"\"http://mybatis.org/dtd/mybatis-3-mapper.dtd\"><br/>"
code += "<mapper namespace=\"" + className + "Mapper\"><br/>"
code += "\t<resultMap id=\"" + objectName + "Map\" type=\"" + className + "\"><br />"
for field in fields:
attr = convertField(field['field'])
code += "\t\t<id column=\"" + field['field'] + "\" property=\"" + attr + "\" /><br/>"
code += "\t</resultMap><br/><br/>"
# Select fields
code += "\t<sql id=\"selectFields\"><br/>"
code += "\t\t" + select_fileds + "<br/>"
code += "\t</sql><br/><br/>"
# Select by id SQL
code += "\t<select id=\"get" + className + "ById\" resultMap=\"" + objectName + "Map\"><br/>"
code += "\t\t<include refid=\"selectFields\" /><br/>"
code += "\t\tWHERE id=#{id}<br/>"
code += "\t</select><br/><br/>"
# Select all SQL
code += "\t<select id=\"get" + className + "List\" resultMap=\"" + objectName + "Map\"><br/>"
code += "\t\t<include refid=\"selectFields\" /><br/>"
code += "\t\tORDER BY id DESC<br/>"
code += "\t</select><br/><br/>"
# Create SQL
code += "\t<insert id=\"create" + className + "\" useGeneratedKeys=\"true\" keyProperty=\"id\" " \
"parameterType=\"" + className + "\"><br/>"
code += "\t\t" + insert_sql + "<br/>"
code += "\t</insert><br/><br/>"
# Update SQL
code += "\t<update id=\"update" + className + "\" parameterType=\"" + className + "\"><br/>"
code += "\t\t" + update_sql + "<br/>"
code += "\t</update><br/><br/>"
# Update SQL V2
#code += "\t<update id=\"update" + className + "\" parameterType=\"" + className + "\"><br/>"
#code += "\t\t" + update_sql_v2 + "<br/>"
#code += "\t</update><br/><br/>"
# Delete SQL
code += "\t<delete id=\"delete" + className + "\" parameterType=\"int\"><br/>"
code += "\t\tDELETE FROM " + tableName + " WHERE id=#{id}<br/>"
code += "\t</delete><br/>"
code += "</mapper><br/>"
return code
def getJavaMapper(objectName, tableName, fields):
className = titleFirst(objectName)
    select_fields = get_select_field_AS_Bean(tableName, fields)
simple_select_sql = get_simple_select_sql(tableName, fields)
insert_sql = get_insert_sql(objectName, tableName, fields)
update_sql = get_update_sql(objectName, tableName, fields)
code = "public interface " + className + "Mapper {<br/>"
code += "\t" + 'String selectFields = " ' + select_fileds + ' ";<br/>'
code += "<br>"
code += "\t" + '/**' + "<br/>"
code += "\t" + ' * <pre></pre>' + "<br/>"
code += "\t" + ' *' + "<br/>"
code += "\t" + ' * @param ' + objectName + "<br/>"
code += "\t" + ' * @return int' + "<br/>"
code += "\t" + ' */' + "<br/>"
code += "\t" + '@Options(useGeneratedKeys = true, keyProperty = "id")' + "<br/>"
code += "\t" + '@Insert("' + insert_sql + '")' + "<br/>"
code += "\t"+ 'int create' + className + '(' + className + ' '+objectName+');' + "<br/>"
code += "<br>"
code += "\t" + '/**' + "<br/>"
code += "\t" + ' * <pre></pre>' + "<br/>"
code += "\t" + ' *' + "<br/>"
code += "\t" + ' * @param ' + objectName + "<br/>"
code += "\t" + ' * @return int' + "<br/>"
code += "\t" + ' */' + "<br/>"
code += "\t" + '@Update("' + update_sql + '")' + "<br/>"
code += "\t"+ 'int update' + className + '(' + className + ' '+objectName+');' + "<br/>"
code += "<br>"
code += "\t" + '/**' + "<br/>"
code += "\t" + ' * <pre></pre>' + "<br/>"
code += "\t" + ' *' + "<br/>"
code += "\t" + ' * @param id' + "<br/>"
code += "\t" + ' * @return int' + "<br/>"
code += "\t" + ' */' + "<br/>"
code += "\t" + '@Delete("DELETE FROM ' + tableName + ' WHERE id=#{id}")' + "<br/>"
code += "\t"+ 'int delete' + className + '(@Param("id") int id);' + "<br/>"
code += "<br>"
code += "\t" + '/**' + "<br/>"
code += "\t" + ' * <pre></pre>' + "<br/>"
code += "\t" + ' *' + "<br/>"
code += "\t" + ' * @param id' + "<br/>"
code += "\t" + ' * @return int' + "<br/>"
code += "\t" + ' */' + "<br/>"
code += "\t" + '@Select(selectFields + " WHERE id=#{id} ")' + "<br/>"
code += "\t"+ '' + className + ' get' + className + 'ById(@Param("id") int id);' + "<br/>"
code += "<br>"
code += "\t" + '/**' + "<br/>"
code += "\t" + ' * <pre></pre>' + "<br/>"
code += "\t" + ' *' + "<br/>"
code += "\t" + ' * @return int' + "<br/>"
code += "\t" + ' */' + "<br/>"
code += "\t" + '@Select(selectFields + " ORDER BY id DESC ")' + "<br/>"
code += "\t"+ 'List<' + className + '> get' + className + 'List();' + "<br/>"
code += '}'
return code
def getJavaService(objectName):
className = titleFirst(objectName)
code = "public interface " + className + "Service {<br/>"
code += "\t" + '/**' + "<br/>"
code += "\t" + ' * <pre></pre>' + "<br/>"
code += "\t" + ' *' + "<br/>"
code += "\t" + ' * @param ' + objectName + "<br/>"
code += "\t" + ' * @return int' + "<br/>"
code += "\t" + ' */' + "<br/>"
code += "\t"+ 'int create' + className + '(' + className + ' '+objectName+');' + "<br/>"
code += "<br>"
code += "\t" + '/**' + "<br/>"
code += "\t" + ' * <pre></pre>' + "<br/>"
code += "\t" + ' *' + "<br/>"
code += "\t" + ' * @param ' + objectName + "<br/>"
code += "\t" + ' * @return int' + "<br/>"
code += "\t" + ' */' + "<br/>"
code += "\t"+ 'int update' + className + '(' + className + ' '+objectName+');' + "<br/>"
code += "<br>"
code += "\t" + '/**' + "<br/>"
code += "\t" + ' * <pre></pre>' + "<br/>"
code += "\t" + ' *' + "<br/>"
code += "\t" + ' * @param '+objectName+'Id' + "<br/>"
code += "\t" + ' * @return int' + "<br/>"
code += "\t" + ' */' + "<br/>"
code += "\t"+ 'int delete' + className + '(int '+objectName+'Id);' + "<br/>"
code += "<br>"
code += "\t" + '/**' + "<br/>"
code += "\t" + ' * <pre></pre>' + "<br/>"
code += "\t" + ' *' + "<br/>"
code += "\t" + ' * @param '+objectName+'Id' + "<br/>"
code += "\t" + ' * @return ' + className + "<br/>"
code += "\t" + ' */' + "<br/>"
code += "\t"+ '' + className + ' get' + className + 'ById(int '+objectName+'Id);' + "<br/>"
code += "<br>"
code += "\t" + '/**' + "<br/>"
code += "\t" + ' * <pre></pre>' + "<br/>"
code += "\t" + ' *' + "<br/>"
code += "\t" + ' * @return List<' + className + '>'+"<br/>"
code += "\t" + ' */' + "<br/>"
code += "\t"+ 'List<' + className + '> get' + className + 'List();' + "<br/>"
code += '}'
return code
def get_test_case(objectName):
className = titleFirst(objectName)
code = "public class " + className + "ServiceTest {<br/><br/>"
code += "\t" + 'private static final Logger logger = LoggerFactory.getLogger(' + className + 'ServiceTest.class);' + "<br/>"
code += "\t" + 'private static ' + className + ' ' + objectName + ';' + "<br/>"
code += "\t" + 'private static ' + className + 'ServiceImpl ' + objectName + 'ServiceImpl;' + "<br/>"
code += "\t" + 'private static ' + className + 'Mapper ' + objectName + 'Mapper;' + "<br/><br/>"
code += "\t" + '@SuppressWarnings("resource")' + "<br/>"
code += "\t" + '@BeforeClass' + "<br/>"
code += "\t" + 'public static void init() {' + "<br/>"
code += "\t\t" + 'ApplicationContext context = new ClassPathXmlApplicationContext("classpath*:applicationContext-test.xml");' + "<br/>"
code += "\t\t" + objectName + 'Mapper = (' + className + 'Mapper) context.getBean("' + objectName + 'Mapper");' + "<br/>"
code += "\t\t" + objectName + 'ServiceImpl = (' + className + 'ServiceImpl) context.getBean("' + objectName + 'ServiceImpl");' + "<br/><br/>"
code += "\t\t" + 'ReflectionTestUtils.setField(' + objectName + 'ServiceImpl, "' + objectName + 'Mapper", ' + objectName + 'Mapper);' + "<br/><br/>"
code += "\t\t" + objectName + ' = new ' + className + '();' + "<br/>"
code += "\t" + '}' + "<br/>"
code += "<br>"
code += '}'
return code
def get_java_codeStr(objectName, fields):
code = "@Data\r"
code += "public class " + titleFirst(objectName) + " {\r"
for field in fields:
attr = convertField(field['field'])
code += " " + '// ' + field['comment'] + '' + "\r"
codeType = field['type']
if codeType.find('int')==0:
codeType = 'Integer'
elif codeType.find('tinyint')==0:
codeType = 'Integer'
elif codeType.find('bigint')==0:
codeType = 'Long'
elif codeType.find('timestamp')==0:
codeType = 'LocalDateTime'
elif codeType.find('datetime')==0:
codeType = 'LocalDateTime'
elif codeType.find('date')==0:
codeType = 'LocalDate'
elif codeType.find('char')==0:
codeType = 'String'
elif codeType.find('varchar')==0:
codeType = 'String'
elif codeType.find('text')>=0:
codeType = 'String'
elif codeType.find('blob')>=0:
codeType = 'String'
else:
codeType = 'String'
code += " " + 'private ' + codeType + ' ' + attr + ";\r"
code += '}'
return code
def getJavaMapperStr(objectName, tableName, fields):
className = titleFirst(objectName)
    select_fields = get_select_field_AS_Bean(tableName, fields)
simple_select_sql = get_simple_select_sql(tableName, fields)
insert_sql = get_insert_sql(objectName, tableName, fields)
update_sql = get_update_sql(objectName, tableName, fields)
code = "public interface " + className + "Mapper {\r"
code += " " + 'String selectFields = " ' + select_fileds + ' ";\r'
code += "\r"
code += " " + '/**' + "\r"
code += " " + ' * <pre></pre>' + "\r"
code += " " + ' *' + "\r"
code += " " + ' * @param ' + objectName + "\r"
code += " " + ' * @return int' + "\r"
code += " " + ' */' + "\r"
code += " " + '@Options(useGeneratedKeys = true, keyProperty = "id")' + "\r"
code += " " + '@Insert("' + insert_sql + '")' + "\r"
code += " " + 'int create' + className + '(' + className + ' ' + objectName + ');' + "\r"
code += "\r"
code += " " + '/**' + "\r"
code += " " + ' * <pre></pre>' + "\r"
code += " " + ' *' + "\r"
code += " " + ' * @param ' + objectName + "\r"
code += " " + ' * @return int' + "\r"
code += " " + ' */' + "\r"
code += " " + '@Update("' + update_sql + '")' + "\r"
code += " " + 'int update' + className + '(' + className + ' ' + objectName + ');' + "\r"
code += "\r"
code += " " + '/**' + "\r"
code += " " + ' * <pre></pre>' + "\r"
code += " " + ' *' + "\r"
code += " " + ' * @param id' + "\r"
code += " " + ' * @return int' + "\r"
code += " " + ' */' + "\r"
code += " " + '@Delete("DELETE FROM ' + tableName + ' WHERE id=#{id}")' + "\r"
code += " " + 'int delete' + className + '(@Param("id") int id);' + "\r"
code += "\r"
code += " " + '/**' + "\r"
code += " " + ' * <pre></pre>' + "\r"
code += " " + ' *' + "\r"
code += " " + ' * @param id' + "\r"
code += " " + ' * @return int' + "\r"
code += " " + ' */' + "\r"
code += " " + '@Select(selectFields + " WHERE id=#{id} ")' + "\r"
code += " " + '' + className + ' get' + className + 'ById(@Param("id") int id);' + "\r"
code += "\r"
code += " " + '/**' + "\r"
code += " " + ' * <pre></pre>' + "\r"
code += " " + ' *' + "\r"
code += " " + ' * @return int' + "\r"
code += " " + ' */' + "\r"
code += " " + '@Select(selectFields + " ORDER BY id DESC ")' + "\r"
code += " " + 'List<' + className + '> get' + className + 'List();' + "\r"
code += '}'
return code
def getJavaServiceStr(objectName):
className = titleFirst(objectName)
code = "public interface " + className + "Service {\r"
code += " " + '/**' + "\r"
code += " " + ' * <pre></pre>' + "\r"
code += " " + ' *' + "\r"
code += " " + ' * @param ' + objectName + "\r"
code += " " + ' * @return int' + "\r"
code += " " + ' */' + "\r"
code += " "+ 'int create' + className + '(' + className + ' '+objectName+');' + "\r"
code += "\r"
code += " " + '/**' + "\r"
code += " " + ' * <pre></pre>' + "\r"
code += " " + ' *' + "\r"
code += " " + ' * @param ' + objectName + "\r"
code += " " + ' * @return int' + "\r"
code += " " + ' */' + "\r"
code += " "+ 'int update' + className + '(' + className + ' '+objectName+');' + "\r"
code += "\r"
code += " " + '/**' + "\r"
code += " " + ' * <pre></pre>' + "\r"
code += " " + ' *' + "\r"
code += " " + ' * @param '+objectName+'Id' + "\r"
code += " " + ' * @return int' + "\r"
code += " " + ' */' + "\r"
code += " "+ 'int delete' + className + '(int '+objectName+'Id);' + "\r"
code += "\r"
code += " " + '/**' + "\r"
code += " " + ' * <pre></pre>' + "\r"
code += " " + ' *' + "\r"
code += " " + ' * @param '+objectName+'Id' + "\r"
code += " " + ' * @return ' + className + "\r"
code += " " + ' */' + "\r"
code += " "+ '' + className + ' get' + className + 'ById(int '+objectName+'Id);' + "\r"
code += "\r"
code += " " + '/**' + "\r"
code += " " + ' * <pre></pre>' + "\r"
code += " " + ' *' + "\r"
code += " " + ' * @return List<' + className + '>'+"\r"
code += " " + ' */' + "\r"
code += " "+ 'List<' + className + '> get' + className + 'List();' + "\r"
code += '}'
return code
def convertField(field):
fieldList = field.split("_")
attr = ''
for f in fieldList:
if (attr == ''):
attr = f
else:
attr += f.title()
return attr
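# e.g. convertField('first_name') -> 'firstName'; convertField('id') -> 'id'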
def titleFirst(s):
    # Upper-case the first character; empty input passes through unchanged.
    if len(s) <= 0:
        return s
    return s[0:1].title() + s[1:]
def lowerFirst(s):
    # Lower-case the first character; empty input passes through unchanged.
    if len(s) <= 0:
        return s
    return s[0:1].lower() + s[1:]
def debug_print(msg):
print("\n##########################################################################################################")
print(msg)
print("\n##########################################################################################################")
| 34.211744 | 149 | 0.504031 | 2,135 | 19,227 | 4.488993 | 0.082904 | 0.077003 | 0.077421 | 0.040693 | 0.768573 | 0.698038 | 0.64399 | 0.616235 | 0.580029 | 0.551962 | 0 | 0.002649 | 0.214542 | 19,227 | 561 | 150 | 34.272727 | 0.631969 | 0.017215 | 0 | 0.680942 | 0 | 0 | 0.325955 | 0.046656 | 0.357602 | 0 | 0 | 0 | 0 | 1 | 0.049251 | false | 0 | 0.002141 | 0 | 0.098501 | 0.008565 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2cb6f59ec4572da89ea0442177be6d5a3a77df0f | 1,931 | py | Python | pythonforandroid/recipes/numpy/__init__.py | wo01/python-for-android | df0866d95c9c508299a6f948302454beb971e3ac | [
"MIT"
] | 2 | 2019-01-07T12:13:25.000Z | 2019-10-19T09:53:50.000Z | pythonforandroid/recipes/numpy/__init__.py | tangingw/python-for-android | 7c125ad96f71a950ed272a116a9446c6f60e87a9 | [
"MIT"
] | null | null | null | pythonforandroid/recipes/numpy/__init__.py | tangingw/python-for-android | 7c125ad96f71a950ed272a116a9446c6f60e87a9 | [
"MIT"
] | 3 | 2018-12-13T09:57:33.000Z | 2019-01-09T15:36:46.000Z | from pythonforandroid.recipe import CompiledComponentsPythonRecipe
from pythonforandroid.toolchain import warning
from os.path import join
class NumpyRecipe(CompiledComponentsPythonRecipe):
version = '1.15.1'
url = 'https://pypi.python.org/packages/source/n/numpy/numpy-{version}.zip'
site_packages_name = 'numpy'
depends = [('python2', 'python3crystax')]
patches = [
join('patches', 'fix-numpy.patch'),
join('patches', 'prevent_libs_check.patch'),
join('patches', 'ar.patch'),
join('patches', 'lib.patch'),
join('patches', 'python2-fixes.patch')
]
def get_recipe_env(self, arch):
env = super(NumpyRecipe, self).get_recipe_env(arch)
flags = " -L{} --sysroot={}".format(
join(self.ctx.ndk_platform, 'usr', 'lib'),
self.ctx.ndk_platform
)
if self.ctx.ndk == 'crystax':
py_ver = self.ctx.python_recipe.version[0:3]
src_dir = join(self.ctx.ndk_dir, 'sources')
py_inc_dir = join(src_dir, 'python', py_ver, 'include', 'python')
py_lib_dir = join(src_dir, 'python', py_ver, 'libs', arch.arch)
cry_inc_dir = join(src_dir, 'crystax', 'include')
cry_lib_dir = join(src_dir, 'crystax', 'libs', arch.arch)
flags += ' -I{}'.format(py_inc_dir)
flags += ' -L{} -lpython{}m'.format(py_lib_dir, py_ver)
flags += " -I{}".format(cry_inc_dir)
flags += " -L{}".format(cry_lib_dir)
if flags not in env['CC']:
env['CC'] += flags
if flags not in env['LD']:
env['LD'] += flags + ' -shared'
return env
def prebuild_arch(self, arch):
super(NumpyRecipe, self).prebuild_arch(arch)
warning('Numpy is built assuming the archiver name is '
'arm-linux-androideabi-ar, which may not always be true!')
recipe = NumpyRecipe()
| 34.482143 | 79 | 0.592439 | 239 | 1,931 | 4.623431 | 0.368201 | 0.049774 | 0.057919 | 0.047059 | 0.117647 | 0.043439 | 0.043439 | 0 | 0 | 0 | 0 | 0.006281 | 0.257897 | 1,931 | 55 | 80 | 35.109091 | 0.764829 | 0 | 0 | 0 | 0 | 0.02381 | 0.232522 | 0.025375 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.071429 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2cba683ad98eb61e03c5f63735dc9df320aadbb6 | 6,136 | py | Python | webdriverwrapper/exceptions.py | jayeshgupta91/python-webdriverwrapper | 9b2c32bbf06ada669feb62ef17fd23365a14ad14 | [
"MIT"
] | null | null | null | webdriverwrapper/exceptions.py | jayeshgupta91/python-webdriverwrapper | 9b2c32bbf06ada669feb62ef17fd23365a14ad14 | [
"MIT"
] | null | null | null | webdriverwrapper/exceptions.py | jayeshgupta91/python-webdriverwrapper | 9b2c32bbf06ada669feb62ef17fd23365a14ad14 | [
"MIT"
] | null | null | null | # pylint: disable=wildcard-import,unused-wildcard-import
from selenium.common.exceptions import *
try:
from Levenshtein import distance as levenshteinDistance
except ImportError:
levenshteinDistance = None
def _create_exception_msg(
id_=None, class_name=None, name=None, tag_name=None,
parent_id=None, parent_class_name=None, parent_name=None, parent_tag_name=None,
text=None, xpath=None, css_selector=None, url=None,
driver=None,
):
elm_text = _create_exception_msg_tag(
id_, class_name, name, tag_name,
parent_id, parent_class_name, parent_name, parent_tag_name,
text, xpath, css_selector,
)
msg = 'No element {} found'.format(elm_text)
if url:
msg += ' at {}'.format(url)
suggest = _get_suggestion(driver, id_, class_name, tag_name)
if suggest:
msg += ' {}'.format(suggest)
return msg
def _create_exception_msg_tag(
id_=None, class_name=None, name=None, tag_name=None,
parent_id=None, parent_class_name=None, parent_name=None, parent_tag_name=None,
text=None, xpath=None, css_selector=None,
):
elm_text = _create_exception_msg_tag_element(id_, class_name, name, tag_name, text, xpath, css_selector)
parent_text = _create_exception_msg_tag_element(parent_id, parent_class_name, parent_name, parent_tag_name)
if parent_text:
return '{} in parent element {}'.format(elm_text, parent_text)
return elm_text
def _create_exception_msg_tag_element(id_=None, class_name=None, name=None, tag_name=None, text=None, xpath=None, css_selector=None):
if text:
return '"{}"'.format(text)
if xpath:
return xpath
if css_selector:
return css_selector
if id_ or class_name or tag_name or name:
msg = '<{}'.format(tag_name or '*')
if id_:
msg += ' id={}'.format(id_)
if class_name:
msg += ' class={}'.format(class_name)
if name:
msg += ' name={}'.format(name)
msg += '>'
return msg
return ''
def _get_suggestion(driver, id_=None, class_name=None, name=None):
if not driver or not levenshteinDistance:
return ''
if id_:
suggest_by = 'id'
value = id_
elif class_name:
suggest_by = 'class'
value = class_name
elif name:
suggest_by = 'name'
value = name
else:
return ''
    # XPath can't be used here because it would return the elements, and
    # getting an attribute for every element is very slow. With JS it is
    # done in only one Selenium call.
items = driver.execute_script('return Array.prototype.map.call(document.querySelectorAll("[id]"), function(el) {return el.id})')
if not items:
return ''
suggestion = _find_best_suggestion(value, set(items))
if not suggestion:
return ''
return 'did you mean {}={}?'.format(suggest_by, suggestion)
def _find_best_suggestion(value, items):
if not levenshteinDistance:
return None
best = None
min_distance = len(value) + 10 # So it can find distance between btn and btn-default for example.
for item in items:
distance = levenshteinDistance(value, item)
if 0 < distance < min_distance:
min_distance = distance
best = item
return best
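# Illustrative (values hypothetical): _find_best_suggestion('btn', {'btn-default'})
# returns 'btn-default'; the distance of 8 falls under the len('btn') + 10 cutoff.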
class WebdriverWrapperException(Exception):
"""
Base exception of WebDriver Wrapper.
"""
def __init__(self, url, msg):
super().__init__()
self.url = url
self.msg = msg
def __str__(self):
return '{} [at {}]'.format(self.msg, self.url)
def __repr__(self):
return self.__str__()
class ErrorPageException(WebdriverWrapperException):
"""
    Exception raised when there is some unexpected error page, like a 404,
    500 and so on.
"""
def __init__(self, url, error_page, expected_error_page, allowed_error_pages, traceback=None):
if expected_error_page:
msg = 'Expected error page "{}", but found "{}" instead.'.format(expected_error_page, error_page)
else:
msg = 'Unexpected error page "{}".'.format(error_page)
if allowed_error_pages:
msg += ' Allowed error pages: "{}"'.format(allowed_error_pages)
if traceback:
msg += '\n\nTraceback:\n{}'.format(traceback)
super(ErrorPageException, self).__init__(url, msg)
class ErrorMessagesException(WebdriverWrapperException):
"""
    Exception raised when there is some unexpected error message, like "some
    field is mandatory", "wrong e-mail" and so on.
"""
def __init__(self, url, error_messages, expected_error_messages, allowed_error_messages):
if expected_error_messages:
msg = 'Expected error messages "{}", but found "{}" instead.'.format(expected_error_messages, error_messages)
else:
msg = 'Unexpected error messages "{}".'.format(error_messages)
if allowed_error_messages:
msg += ' Allowed error messages: "{}"'.format(allowed_error_messages)
super(ErrorMessagesException, self).__init__(url, msg)
class JSErrorsException(WebdriverWrapperException):
"""
Exception raised when there is some JS error.
See :py:meth:`get_js_errors <webdriverwrapper.errors.WebdriverWrapperErrorMixin.get_js_errors>`
for more information.
"""
def __init__(self, url, js_errors):
msg = 'Unexpected JavaScript errors "{}".'.format(js_errors)
super(JSErrorsException, self).__init__(url, msg)
class InfoMessagesException(WebdriverWrapperException):
"""
    Exception raised when some expected info message is missing, like
    "successfully saved" and so on.
"""
def __init__(self, url, info_messages, expected_info_messages, allowed_info_messages):
msg = 'Expected info messages "{}", but found "{}" instead.'.format(expected_info_messages, info_messages)
if allowed_info_messages:
msg += ' Allowed info messages: "{}"'.format(allowed_info_messages)
super(InfoMessagesException, self).__init__(url, msg)
| 33.167568 | 133 | 0.661506 | 750 | 6,136 | 5.125333 | 0.198667 | 0.03538 | 0.028096 | 0.027315 | 0.306972 | 0.28512 | 0.203174 | 0.16077 | 0.147242 | 0.110822 | 0 | 0.001917 | 0.234681 | 6,136 | 184 | 134 | 33.347826 | 0.816652 | 0.128748 | 0 | 0.147541 | 0 | 0.008197 | 0.107701 | 0.011247 | 0 | 0 | 0 | 0 | 0 | 1 | 0.098361 | false | 0 | 0.02459 | 0.016393 | 0.303279 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2cc08333be3174d0b142d7bc58887c3bfc8700a5 | 6,369 | py | Python | app/routes.py | MBkkt/TaskManager | ed4a5cb5bbb93dc3b10d459cf2e37a82be5b8a5e | [
"MIT"
] | null | null | null | app/routes.py | MBkkt/TaskManager | ed4a5cb5bbb93dc3b10d459cf2e37a82be5b8a5e | [
"MIT"
] | 1 | 2019-04-30T22:19:15.000Z | 2019-04-30T22:20:06.000Z | app/routes.py | MBkkt/Task_Manager | ed4a5cb5bbb93dc3b10d459cf2e37a82be5b8a5e | [
"MIT"
] | null | null | null | from functools import wraps
from flask import render_template, flash, redirect, url_for, request
from flask_login import login_user, logout_user, current_user, login_required
from app import app
from app.models import User, Task
from app.forms import (
LoginForm, RegistrationForm, AddTask,
EditTaskForPerformer, EditTaskForOwner, ProfileForm
)
def admin_required(func):
@wraps(func)
def func_new(*args, **kwargs):
if current_user.type == 0:
return redirect(url_for('index'))
return func(*args, **kwargs)
return func_new
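# Note: admin_required assumes an authenticated user (it reads
# current_user.type), so the routes below list it under @login_required,
# which therefore runs first at request time.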
@app.route('/')
@app.route('/index')
@login_required
def index():
stat = current_user.tasks_quantity()
return render_template('index.html', title='Main', stat=stat)
@app.route('/register', methods=('GET', 'POST'))
def register():
if current_user.is_authenticated and current_user.type == 0:
return redirect(url_for('index'))
form = RegistrationForm()
if form.validate_on_submit():
login_user(User.create(source={
'login': form.login.data,
'email': form.email.data,
'first_name': form.first_name.data,
'last_name': form.last_name.data,
'type': form.type.data,
'password': form.password.data,
}), remember=True)
next_page = request.args.get('next') or url_for('index')
return redirect(next_page)
return render_template('register.html', title='Register', form=form)
@app.route('/login', methods=('GET', 'POST'))
def login():
if current_user.is_authenticated:
return redirect(url_for('index'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(login=form.login.data).first()
next_page = url_for('login')
if user is None:
flash('Account does not exist', 'danger')
elif not user.check_password(form.password.data):
flash('Wrong password', 'danger')
else:
login_user(user, remember=form.remember_me.data)
next_page = request.args.get('next') or url_for('index')
return redirect(next_page)
return render_template('login.html', title='Log in', form=form)
@app.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('login'))
@app.route('/tasks')
@login_required
def tasks():
temp = current_user.tasks
tasks_len = temp.count() if temp else 0
return render_template(
'tasks.html', title='Tasks', current_user=current_user,
tasks=temp, tasks_len=tasks_len
)
@app.route('/assigned_tasks')
@login_required
@admin_required
def assigned_tasks():
    assigned = current_user.assign_tasks.all()  # query once instead of twice
    return render_template(
        'tasks.html', title='Assigned tasks', current_user=current_user,
        tasks=assigned, tasks_len=len(assigned)
    )
@app.route('/profile/<int:user_id>', methods=('GET', 'POST'))
@login_required
def profile(user_id):
if current_user.id != user_id:
return redirect(request.args.get('next') or url_for('index'))
user = User.query.filter_by(id=user_id).first()
form = ProfileForm()
if form.validate_on_submit():
User.edit(user, {
'delete': form.delete.data,
'login': form.login.data,
'email': form.email.data,
'first_name': form.first_name.data,
'last_name': form.last_name.data,
'type': form.type.data,
})
if form.delete.data:
flash('Profile is deleted', 'primary')
else:
flash('Profile is edited', 'primary')
return redirect(url_for('login'))
if request.method == 'GET':
form.login.data = user.login
form.email.data = user.email
form.first_name.data = user.first_name
form.last_name.data = user.last_name
form.type.data = user.type
return render_template(
'profile.html', title='Profile', form=form,
current_user=current_user
)
def task_for_owner(task_):
form = EditTaskForOwner(request.form)
form.users_id.choices = [
(user.id, user.login) for user in User.query.all()
]
if form.validate_on_submit():
task_.edit(task_, {
'delete': form.delete.data,
'title': form.title.data,
'description': form.description.data,
'status': form.status.data,
'users_id': form.users_id.data,
})
flash('Task is edited', 'primary')
return redirect(url_for('assigned_tasks'))
if request.method == 'GET':
form.title.data = task_.title
form.description.data = task_.description
form.status.data = task_.status
form.users_id.data = [user.id for user in task_.users]
else:
        flash('Task data is not valid', 'error')
return render_template(
'add_task.html', title='Edit task', task=task_,
current_user=current_user,
form=form
)
def task_for_performer(task_):
form = EditTaskForPerformer(request.form)
if request.method == 'POST' and form.validate_on_submit():
task_.edit_status(form.status.data)
flash('Task status is edited', 'primary')
return redirect(url_for('tasks'))
return render_template(
'add_task.html', title='Edit task', task=task_,
current_user=current_user,
form=form
)
@app.route('/task/<int:task_id>', methods=('GET', 'POST'))
@login_required
def task(task_id):
task_ = Task.query.filter_by(id=task_id).first()
if current_user.id == task_.author_id:
return task_for_owner(task_)
return task_for_performer(task_)
@app.route('/add_task', methods=('GET', 'POST'))
@login_required
@admin_required
def add_task():
form = AddTask()
form.users_id.choices = [
(user.id, user.login) for user in User.query.all()
]
task_ = {'author_id': current_user.id}
if form.validate_on_submit():
task_ = Task.create({
'title': form.title.data,
'description': form.description.data,
'author_id': current_user.id,
'users_id': form.users_id.data,
})
flash('Task successfully assigned', 'primary')
return render_template(
'add_task.html', title='Add task', form=form, task=task_,
current_user=current_user
)
| 31.374384 | 77 | 0.633851 | 807 | 6,369 | 4.802974 | 0.137546 | 0.070949 | 0.04644 | 0.03612 | 0.469298 | 0.372807 | 0.28741 | 0.23452 | 0.185759 | 0.164603 | 0 | 0.000614 | 0.23269 | 6,369 | 202 | 78 | 31.529703 | 0.792511 | 0 | 0 | 0.380682 | 0 | 0 | 0.119014 | 0.003454 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073864 | false | 0.017045 | 0.034091 | 0.005682 | 0.238636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2cc092fe85073efa8f38bdc1b3e3d9c07f22b3d8 | 4,136 | py | Python | project.py | Peter-Sanders/IE-512-Decison-Analysis-Final-Project | 868c6bb3843350d943027073cf4b4631d0188de0 | [
"Unlicense"
] | 1 | 2019-12-14T03:08:23.000Z | 2019-12-14T03:08:23.000Z | project.py | Peter-Sanders/IE-512-Decison-Analysis-Final-Project | 868c6bb3843350d943027073cf4b4631d0188de0 | [
"Unlicense"
] | null | null | null | project.py | Peter-Sanders/IE-512-Decison-Analysis-Final-Project | 868c6bb3843350d943027073cf4b4631d0188de0 | [
"Unlicense"
] | null | null | null |
# coding: utf-8
# # Stock Choice Decision Analysis
# Code written and commented by Peter Sanders
# ### Load Relevant Packages
# In[1]:
from pandas_datareader import data
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from datetime import datetime
import numpy as np
import math
from scipy.special import expit
sns.set(style='whitegrid', context='talk')
# ### Define the Softmax Function to be used later
# In[2]:
def softmax(x):
    """Compute softmax values for each set of scores in x."""
    e_x = np.exp(x - np.max(x))  # subtract the max for numerical stability
    return e_x / e_x.sum(axis=0)  # normalize so the values sum to 1
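# Quick sanity check (illustrative only): subtracting np.max(x) leaves the result
# unchanged while preventing overflow for large inputs, e.g.
#
#     >>> softmax(np.array([1.0, 2.0]))
#     array([0.26894142, 0.73105858])   # entries sum to 1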
# ### Insert stock tickers
# In[3]:
tickers = ["IFNNY", "INTC", "MCHP","ON","STM","SWKS","^VIX"]
# ### Set date range
# In[4]:
start_date = datetime(2008,12,5)
end_date = datetime(2019,12,5)
# ### Get the data
# In[68]:
df = data.DataReader(tickers, 'yahoo', start_date, end_date)
dates = []
for x in range(len(df)):
newdate = str(df.index[x])
newdate = newdate[0:10]
dates.append(newdate)
df['dates'] = dates
# ### Get only closing price data
# In[69]:
close = df['Close']
all_weekdays = pd.date_range(start=start_date, end=end_date, freq='B')
close = close.reindex(all_weekdays)
close = close.fillna(method='ffill')
close = close.rename(columns={"^VIX" : "VIX"})
# In[70]:
# create one price Series per ticker name (e.g. VIX, used below)
for i in close:
    vars()[i] = close.loc[:, '%s' % i]
# In[71]:
close = close.drop(columns="VIX")
# In[72]:
close.describe()
# ### Get Covariance Matrix
# In[73]:
cov =df['Close'].cov()
cov =cov.drop(columns="^VIX", index = "^VIX")
# In[74]:
cov
# ### Send Cov matrix to CSV
# In[75]:
cov.to_csv(r'\Users\Pete\OneDrive\School\Grad\IE 512\Project 2\Project\cov.csv')
# # Moving Average Time
# ### Exponential weighted averages of closing prices
# In[76]:
ema_short = close.ewm(span=30, adjust=False).mean()
ema_long = close.ewm(span=90, adjust=False).mean()
ema_loong = close.ewm(span=300, adjust=False).mean()
# ### Trading position
# In[77]:
trading_positions_raw = ema_short - ema_long  # EMA crossover: short minus long
trading_positions = trading_positions_raw.apply(np.sign)  # +1 when short EMA is above, -1 below
trading_positions_final = trading_positions.shift(1)  # lag one day so signals use only past data
# ### Build Epsilon
# In[82]:
q = abs(trading_positions_final.tail(30).mean())
r = abs(trading_positions_raw.tail(30).mean())
d = q*r
epsilon = expit(d)
print(epsilon)
# ### Lambda
# In[67]:
Lambda = expit(np.log10(cov.sum()/10))
Lambda
# ### Chi
# In[83]:
vix30=VIX.tail(30).mean()
pg = math.exp(1-abs(vix30-12)/12)
pb = math.exp(1-abs(vix30-30)/30)
Chi = softmax([pg,pb])
Chi
print(pb,pg)
# ### Plot EMA vs Price
# In[80]:
for i in close:
fig, ax = plt.subplots(figsize=(16,9))
ax.plot(close.loc[start_date:end_date, :].index, close.loc[start_date:end_date, i], label= 'Close')
ax.plot(ema_short.loc[start_date:end_date, :].index, ema_short.loc[start_date:end_date, i], label = 'Span 30-days EMA')
ax.plot(ema_long.loc[start_date:end_date, :].index, ema_long.loc[start_date:end_date, i], label = 'Span 90-days EMA')
#ax.plot(ema_loong.loc[start_date:end_date, :].index, ema_loong.loc[start_date:end_date, i], label = 'Span 180-days EMA')
ax.legend(loc='best')
ax.set_ylabel('Price in $')
ax.set_title(i)
# ### Plot trading position vs PRice and EMA
# In[81]:
for i in close:
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(16,9))
ax1.set_title(i)
ax1.plot(close.loc[start_date:end_date, :].index, close.loc[start_date:end_date, i], label= 'Close')
ax1.plot(ema_short.loc[start_date:end_date, :].index, ema_short.loc[start_date:end_date, i], label = 'Span 30-days EMA')
ax1.plot(ema_long.loc[start_date:end_date, :].index, ema_long.loc[start_date:end_date, i], label = 'Span 90-days EMA')
#ax1.plot(ema_loong.loc[start_date:end_date, :].index, ema_loong.loc[start_date:end_date, i], label = 'Span 300-days EMA')
ax1.legend(loc='best')
ax1.set_ylabel('Price in $')
ax2.set_title(i)
ax2.plot(trading_positions_final.loc[start_date:end_date, :].index, trading_positions_final.loc[start_date:end_date, i],
label='Trading position')
ax2.set_ylabel('Trading position')
| 19.237209 | 126 | 0.671905 | 676 | 4,136 | 3.97929 | 0.301775 | 0.07026 | 0.089219 | 0.113011 | 0.28513 | 0.259108 | 0.257249 | 0.255019 | 0.225279 | 0.225279 | 0 | 0.035817 | 0.15619 | 4,136 | 214 | 127 | 19.327103 | 0.734957 | 0.223646 | 0 | 0.040541 | 0 | 0.013514 | 0.091142 | 0.011232 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013514 | false | 0 | 0.108108 | 0 | 0.135135 | 0.027027 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2cc331db5c05974b7e5d6a6759775540b86d78ad | 40,276 | py | Python | slicem_gui.py | marcottelab/Slicem | 2dbf9f8faf89a12c983595871809445663bc5a26 | [
"MIT"
] | 5 | 2019-10-05T03:04:57.000Z | 2021-10-02T23:52:56.000Z | slicem_gui.py | marcottelab/Slicem | 2dbf9f8faf89a12c983595871809445663bc5a26 | [
"MIT"
] | 2 | 2019-11-24T09:58:13.000Z | 2020-11-09T23:57:28.000Z | slicem_gui.py | marcottelab/SLICEM | 2dbf9f8faf89a12c983595871809445663bc5a26 | [
"MIT"
] | null | null | null | import os
import mrcfile
import numpy as np
import pandas as pd
import networkx as nx
from igraph import Graph
from scipy import ndimage as ndi
from skimage import transform, measure
import tkinter as tk
from tkinter import ttk
import tkinter.filedialog
import matplotlib
from matplotlib import cm
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1 import ImageGrid
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
matplotlib.use('TkAgg')
class SLICEM_GUI(tk.Tk):
def __init__(self, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs)
tk.Tk.wm_title(self, "SLICEM_GUI")
tabControl = ttk.Notebook(self)
input_tab = ttk.Frame(tabControl)
network_tab = ttk.Frame(tabControl)
projection_tab = ttk.Frame(tabControl)
output_tab = ttk.Frame(tabControl)
tabControl.add(input_tab, text='Inputs')
tabControl.add(network_tab, text='Network Plot')
tabControl.add(projection_tab, text='Projection Plot')
tabControl.add(output_tab, text='Outputs')
tabControl.pack(expand=1, fill="both")
self.cwd = os.getcwd()
######################### INPUT TAB ##############################
mrc_label = ttk.Label(input_tab, text="path to 2D class averages (mrcs): ")
mrc_label.grid(row=0, column=0, sticky=tk.E, pady=10)
self.mrc_entry = ttk.Entry(input_tab, width=20)
self.mrc_entry.grid(row=0, column=1, sticky=tk.W, pady=10)
self.mrc_button = ttk.Button(
input_tab,
text="Browse",
command=lambda: self.set_text(
text=self.askfile(),
entry=self.mrc_entry
)
)
self.mrc_button.grid(row=0, column=2, sticky=tk.W, pady=2)
scores_label = ttk.Label(input_tab, text="path to SLICEM scores: ")
scores_label.grid(row=1, column=0, sticky=tk.E, pady=10)
self.score_entry = ttk.Entry(input_tab, width=20)
self.score_entry.grid(row=1, column=1, sticky=tk.W, pady=10)
self.score_button = ttk.Button(
input_tab,
text="Browse",
command=lambda: self.set_text(
text=self.askfile(),
entry=self.score_entry
)
)
self.score_button.grid(row=1, column=2, sticky=tk.W, pady=2)
scale_label = ttk.Label(input_tab, text="scale factor (if used): ")
scale_label.grid(row=2, column=0, sticky=tk.E, pady=10)
self.scale_entry = ttk.Entry(input_tab, width=5)
self.scale_entry.grid(row=2, column=1, sticky=tk.W, pady=10)
self.load_button = ttk.Button(
input_tab,
text='Load Inputs',
command=lambda: self.load_inputs(
self.mrc_entry.get(),
self.score_entry.get(),
self.scale_entry.get()
)
)
self.load_button.grid(row=3, column=1, pady=20)
############################################################################
######################### NETWORK TAB ##############################
network_tab.grid_rowconfigure(0, weight=1)
network_tab.grid_columnconfigure(0, weight=1)
#TOP FRAME
nettopFrame = tk.Frame(network_tab, bg='lightgrey', width=600, height=400)
nettopFrame.grid(row=0, column=0, sticky='nsew')
self.netcanvas = None
self.nettoolbar = None
#BOTTOM FRAME
netbottomFrame = ttk.Frame(network_tab, width=600, height=100)
netbottomFrame.grid(row=1, column=0, sticky='nsew')
netbottomFrame.grid_propagate(0)
self.detection = tk.StringVar(network_tab)
self.detection.set('walktrap')
comm_label = ttk.Label(netbottomFrame, text='community detection:')
comm_label.grid(row=0, column=0, sticky=tk.E)
self.community_wt = ttk.Radiobutton(
netbottomFrame,
text='walktrap',
variable=self.detection,
value='walktrap'
)
self.community_wt.grid(row=0, column=1, padx=5, sticky=tk.W)
n_clusters_label = ttk.Label(netbottomFrame, text='# of clusters (optional):')
n_clusters_label.grid(row=0, column=2, sticky=tk.E)
self.n_clust = ttk.Entry(netbottomFrame, width=6)
self.n_clust.grid(row=0, column=3, padx=5, sticky=tk.W)
self.wt_steps = ttk.Entry(netbottomFrame, width=6)
self.wt_steps.insert(0, 4)
# self.wt_steps.grid(row=0, column=2, padx=50, sticky=tk.W)
#EV: Errors w/ betweenness iGraph version, temporarily remove
#self.community_eb = ttk.Radiobutton(
# netbottomFrame,
# text='betweenness',
# variable=self.detection,
# value='betweenness'
#)
#self.community_eb.grid(row=0, column=2, padx=3, sticky=tk.W)
self.network = tk.StringVar(network_tab)
self.network.set('knn')
net_label = ttk.Label(netbottomFrame, text='construct network from:')
net_label.grid(row=1, column=0, sticky=tk.E)
self.net1 = ttk.Radiobutton(
netbottomFrame,
text='nearest neighbors',
variable=self.network,
value='knn'
)
self.net1.grid(row=1, column=1, padx=5, sticky=tk.W)
self.net2 = ttk.Radiobutton(
netbottomFrame,
text='top n scores',
variable=self.network,
value='top_n'
)
self.net2.grid(row=2, column=1, padx=5, sticky=tk.W)
knn_label = ttk.Label(netbottomFrame, text='# of k:')
knn_label.grid(row=1, column=2, sticky=tk.E)
self.knn_entry = ttk.Entry(netbottomFrame, width=6)
self.knn_entry.insert(0, 0)
self.knn_entry.grid(row=1, column=3, padx=5, sticky=tk.W)
topn_label = ttk.Label(netbottomFrame, text='# of n:')
topn_label.grid(row=2, column=2, sticky=tk.E)
self.topn_entry = ttk.Entry(netbottomFrame, width=6)
self.topn_entry.insert(0, 0)
self.topn_entry.grid(row=2, column=3, padx=5, sticky=tk.W)
self.cluster = ttk.Button(
netbottomFrame,
width=12,
text='cluster',
command=lambda: self.slicem_cluster(
self.detection.get(),
self.network.get(),
int(self.wt_steps.get()),
self.n_clust.get(),
int(self.knn_entry.get()),
int(self.topn_entry.get()),
self.drop_nodes.get()
)
)
self.cluster.grid(row=0, column=4, sticky=tk.W, padx=5, pady=2)
self.net_plot = ttk.Button(
netbottomFrame,
width=12,
text='plot network',
command=lambda: self.plot_slicem_network(
self.network.get(),
nettopFrame)
)
self.net_plot.grid(row=1, column=4, sticky=tk.W, padx=5, pady=2)
self.tiles = ttk.Button(
netbottomFrame,
width=12,
text='plot 2D classes',
command=lambda: self.plot_tiles()
)
self.tiles.grid(row=2, column=4, sticky=tk.W, padx=5, pady=2)
drop_label = ttk.Label(netbottomFrame, text='remove nodes')
drop_label.grid(row=0, column=5)
self.drop_nodes = ttk.Entry(netbottomFrame, width=15)
self.drop_nodes.grid(row=1, column=5, sticky=tk.W, padx=10)
############################################################################
######################### PROJECTION TAB ##########################
projection_tab.grid_rowconfigure(0, weight=1)
projection_tab.grid_columnconfigure(0, weight=1)
#TOP FRAME
projtopFrame = tk.Frame(projection_tab, bg='lightgrey', width=600, height=400)
projtopFrame.grid(row=0, column=0, sticky='nsew')
projtopFrame.grid_rowconfigure(0, weight=1)
projtopFrame.grid_columnconfigure(0, weight=1)
self.projcanvas = None
self.projtoolbar = None
#BOTTOM FRAME
projbottomFrame = ttk.Frame(projection_tab, width=600, height=50)
projbottomFrame.grid(row=1, column=0, sticky='nsew')
projbottomFrame.grid_propagate(0)
avg1_label = ttk.Label(projbottomFrame, text='class average 1: ')
avg1_label.grid(row=0, column=0, sticky=tk.E, padx=2)
self.avg1 = ttk.Entry(projbottomFrame, width=5)
self.avg1.grid(row=0, column=1, padx=2)
        avg2_label = ttk.Label(projbottomFrame, text='class average 2: ')
avg2_label.grid(row=0, column=2, sticky=tk.E, padx=2)
self.avg2 = ttk.Entry(projbottomFrame, width=5)
self.avg2.grid(row=0, column=3, padx=2)
self.proj_button = ttk.Button(
projbottomFrame,
text='plot projections',
command=lambda: self.plot_projections(
int(self.avg1.get()),
int(self.avg2.get()),
projtopFrame
)
)
self.proj_button.grid(row=0, column=4, padx=20)
self.overlay_button = ttk.Button(
projbottomFrame,
text='plot overlap',
command=lambda: self.overlay_lines(
int(self.avg1.get()),
int(self.avg2.get()),
self.ft_check_var.get(),
projtopFrame
)
)
self.overlay_button.grid(row=0, column=5, padx=12)
self.ft_check_var = tk.BooleanVar()
self.ft_check_var.set(0)
self.ft_check = ttk.Checkbutton(projbottomFrame, text='FT plot', variable=self.ft_check_var)
self.ft_check.grid(row=0, column=6, padx=12)
################################################################################
########################### OUTPUT TAB #################################
star_label = ttk.Label(output_tab, text='path to corresponding star file (star): ')
star_label.grid(row=0, column=0, sticky=tk.E, pady=10)
self.star_entry = ttk.Entry(output_tab, width=20)
self.star_entry.grid(row=0, column=1, stick=tk.W, pady=10)
self.star_button = ttk.Button(
output_tab,
text="Browse",
command=lambda: self.set_text(
text=self.askfile(),
entry=self.star_entry
)
)
self.star_button.grid(row=0, column=2, sticky=tk.W, pady=2)
outdir_label = ttk.Label(output_tab, text='directory to save files in: ')
outdir_label.grid(row=1, column=0, sticky=tk.E, pady=10)
self.out_entry = ttk.Entry(output_tab, width=20)
self.out_entry.grid(row=1, column=1, sticky=tk.W, pady=10)
self.out_button = ttk.Button(
output_tab,
text="Browse",
command=lambda: self.set_text(
text=self.askpath(),
entry=self.out_entry
)
)
self.out_button.grid(row=1, column=2, sticky=tk.W, pady=2)
self.write_button = ttk.Button(
output_tab,
text='Write Star Files',
command=lambda: self.write_star_files(
self.star_entry.get(),
self.out_entry.get()
)
)
self.write_button.grid(row=2, column=1, pady=20)
self.write_edges = ttk.Button(
output_tab,
text='Write Edge List',
command=lambda: self.write_edge_list(
self.network.get(),
self.out_entry.get()
)
)
self.write_edges.grid(row=3, column=1, pady=10)
################################################################################
############################### GUI METHODS ################################
def load_scores(self, score_file):
complete_scores = {}
with open(score_file, 'r') as f:
next(f)
for line in f:
l = line.rstrip('\n').split('\t')
complete_scores[(int(l[0]), int(l[2]))] = (int(l[1]), int(l[3]), float(l[4]))
return complete_scores
def load_class_avg(self, mrcs, factor):
"""read, scale and extract class averages"""
global shape
projection_2D = {}
extract_2D = {}
if len(factor) == 0: # Empty entry, set factor 1
factor = 1
        with mrcfile.open(mrcs) as mrc:
            for i, data in enumerate(mrc.data):
                projection_2D[i] = data  # the context manager closes the file
shape = transform.rotate(projection_2D[0].copy(), 45, resize=True).shape[0]
for k, avg in projection_2D.items():
if factor == 1:
extract_2D[k] = extract_class_avg(avg)
else:
scaled_img = transform.rescale(
avg,
scale=(1/float(factor)),
anti_aliasing=True,
                    multichannel=False, # Add to suppress warning
                    mode='constant' # Add to suppress warning
)
extract_2D[k] = extract_class_avg(scaled_img)
return projection_2D, extract_2D
def load_inputs(self, mrc_entry, score_entry, scale_entry):
global projection_2D, extract_2D, num_class_avg, complete_scores
projection_2D, extract_2D = self.load_class_avg(mrc_entry, scale_entry)
num_class_avg = len(projection_2D)
complete_scores = self.load_scores(score_entry)
print('Inputs Loaded!')
def askfile(self):
file = tk.filedialog.askopenfilename(initialdir=self.cwd)
return file
def askpath(self):
path = tk.filedialog.askdirectory(initialdir=self.cwd)
return path
def set_text(self, text, entry):
entry.delete(0, tk.END)
entry.insert(0, text)
def show_dif_class_msg(self):
tk.messagebox.showwarning(None, 'Select different class averages')
def show_cluster_fail(self):
tk.messagebox.showwarning(None, 'Clustering failed.\nTry adjusting # of clusters\n or # of edges')
def show_drop_list_msg(self):
tk.messagebox.showwarning(None, 'use comma separated list\nfor nodes to drop \ne.g. 1, 2, 3')
def slicem_cluster(self, community_detection, network_from, wt_steps, n_clust, neighbors, top, drop_nodes):
"""construct graph and get colors for plotting"""
        #TODO: change to prevent clustering when an exception occurs
global scores_update, drop, flat, clusters, G, colors
if len(n_clust) == 0:
n_clust = None # Cluster at optimum modularity
else:
n_clust = int(n_clust)
if len(drop_nodes) > 0:
try:
drop = [int(n) for n in drop_nodes.split(',')]
print('dropping nodes:', drop)
scores_update = {}
for pair, score in complete_scores.items():
if pair[0] in drop or pair[1] in drop:
                        continue  # skip pairs that involve dropped nodes
else:
scores_update[pair] = score
except:
self.show_drop_list_msg()
else:
drop = []
scores_update = complete_scores
flat, clusters, G = self.create_network(
community_detection=community_detection,
wt_steps=wt_steps,
n_clust=n_clust,
network_from=network_from,
neighbors=neighbors,
top=top
)
colors = get_plot_colors(clusters, G)
print('clusters computed!')
def create_network(self, community_detection, wt_steps, n_clust, network_from, neighbors, top):
"""get new clusters depending on input options"""
if network_from == 'top_n':
sort_by_scores = []
for pair, score in scores_update.items():
sort_by_scores.append([pair[0], pair[1], score[2]])
top_n = sorted(sort_by_scores, reverse=False, key=lambda x: x[2])[:top]
# Convert from distance to similarity for edge
for score in top_n:
c = 1/(1 + score[2])
score[2] = c
flat = [tuple(pair) for pair in top_n]
elif network_from == 'knn':
flat = []
projection_knn = nearest_neighbors(neighbors=neighbors)
for projection, knn in projection_knn.items():
for n in knn:
flat.append((projection, n[0], abs(n[3]))) # p1, p2, score
clusters = {}
g = Graph.TupleList(flat, weights=True)
if community_detection == 'walktrap':
try:
wt = Graph.community_walktrap(g, weights='weight', steps=wt_steps)
cluster_dendrogram = wt.as_clustering(n_clust)
except:
self.show_cluster_fail()
elif community_detection == 'betweenness':
try:
ebs = Graph.community_edge_betweenness(g, weights='weight', directed=True)
cluster_dendrogram = ebs.as_clustering(n_clust)
except:
self.show_cluster_fail()
for community, projection in enumerate(cluster_dendrogram.subgraphs()):
clusters[community] = projection.vs['name']
#convert node IDs back to ints
for cluster, nodes in clusters.items():
clusters[cluster] = sorted([int(node) for node in nodes])
remove_outliers(clusters)
clustered = []
for cluster, nodes in clusters.items():
for n in nodes:
clustered.append(n)
clusters['singles'] = [] # Add singles to clusters if not in top n scores
clusters['removed'] = []
for node in projection_2D:
if node not in clustered and node not in drop:
clusters['singles'].append(node)
elif node in drop:
clusters['removed'].append(node)
G = nx.Graph()
for pair in flat:
G.add_edge(int(pair[0]), int(pair[1]), weight=pair[2])
#if you want to see directionality in the networkx plot
#G = nx.MultiDiGraph(G)
#adds singles if not in top n scores
for node_key in projection_2D:
if node_key not in G.nodes:
G.add_node(node_key)
return flat, clusters, G
def plot_slicem_network(self, network_from, frame):
#TODO: adjust k, scale for clearer visualization
G_subset = G.copy()
color_dict = {i: color for i, color in enumerate(colors)}
node_dict = {node: i for i, node in enumerate(G.nodes)}
for d in drop:
G_subset.remove_node(d)
color_dict.pop(node_dict[d])
color_subset = [color for k, color in color_dict.items()]
if network_from == 'knn':
positions = nx.spring_layout(G_subset, weight='weight', k=0.3, scale=3.5)
else:
positions = nx.spring_layout(G_subset, weight='weight', k=0.18, scale=1.5)
f = Figure(figsize=(8,5))
a = f.add_subplot(111)
a.axis('off')
nx.draw_networkx_nodes(G_subset, positions, ax=a, edgecolors='black', linewidths=2,
node_size=300, alpha=0.65, node_color=color_subset)
nx.draw_networkx_edges(G_subset, positions, ax=a, width=1, edge_color='grey')
nx.draw_networkx_labels(G_subset, positions, ax=a, font_weight='bold', font_size=10)
if self.netcanvas:
self.netcanvas.get_tk_widget().destroy()
self.nettoolbar.destroy()
self.netcanvas = FigureCanvasTkAgg(f, frame)
self.netcanvas.draw()
self.netcanvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
self.netcanvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
self.nettoolbar = NavigationToolbar2Tk(self.netcanvas, frame)
self.nettoolbar.update()
def plot_tiles(self):
"""plot 2D class avgs sorted and colored by cluster"""
#TODO: adjust plot, border and text_box sizes
ordered_projections = []
flat_clusters = []
colors_2D = []
for cluster, nodes in clusters.items():
for n in nodes:
ordered_projections.append(projection_2D[n])
for n in nodes:
flat_clusters.append(n)
for i, n in enumerate(G.nodes):
if n in nodes:
colors_2D.append(colors[i])
grid_cols = int(np.ceil(np.sqrt(len(ordered_projections))))
if len(ordered_projections) <= (grid_cols**2 - grid_cols):
grid_rows = grid_cols - 1
else:
grid_rows = grid_cols
#assuming images are same size, get shape
l, w = ordered_projections[0].shape
#add blank images to pack in grid
while len(ordered_projections) < grid_rows*grid_cols:
ordered_projections.append(np.zeros((l, w)))
colors_2D.append((0., 0., 0.))
flat_clusters.append('')
f = Figure()
grid = ImageGrid(f, 111, #similar to subplot(111)
nrows_ncols=(grid_rows, grid_cols), #creates grid of axes
axes_pad=0.05) #pad between axes in inch
lw = 1.75
text_box_size = 5
props = dict(boxstyle='round', facecolor='white')
for i, (ax, im) in enumerate(zip(grid, ordered_projections)):
ax.imshow(im, cmap='gray')
for side, spine in ax.spines.items():
spine.set_color(colors_2D[i])
spine.set_linewidth(lw)
ax.get_yaxis().set_ticks([])
ax.get_xaxis().set_ticks([])
text = str(flat_clusters[i])
ax.text(1, 1, text, va='top', ha='left', bbox=props, size=text_box_size)
newWindow = tk.Toplevel()
newWindow.grid_rowconfigure(0, weight=1)
newWindow.grid_columnconfigure(0, weight=1)
#PLOT FRAME
plotFrame = tk.Frame(newWindow, bg='lightgrey', width=600, height=400)
plotFrame.grid(row=0, column=0, sticky='nsew')
canvas = FigureCanvasTkAgg(f, plotFrame)
canvas.draw()
canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
canvas.figure.tight_layout()
#TOOLBAR FRAME
toolbarFrame = ttk.Frame(newWindow, width=600, height=100)
toolbarFrame.grid(row=1, column=0, sticky='nsew')
toolbarFrame.grid_propagate(0)
toolbar = NavigationToolbar2Tk(canvas, toolbarFrame)
toolbar.update()
def plot_projections(self, p1, p2, frame):
if p1 == p2:
self.show_dif_class_msg()
else:
projection1 = extract_2D[p1]
projection2 = extract_2D[p2]
angle1 = complete_scores[p1, p2][0]
angle2 = complete_scores[p1, p2][1]
ref = transform.rotate(projection1, angle1, resize=True)
comp = transform.rotate(projection2, angle2, resize=True)
ref_square, comp_square = make_equal_square_images(ref, comp)
ref_intensity = ref_square.sum(axis=0)
comp_intensity = comp_square.sum(axis=0)
y_axis_max = max(np.amax(ref_intensity), np.amax(comp_intensity))
y_axis_min = min(np.amin(ref_intensity), np.amin(comp_intensity))
f = Figure(figsize=(4,4))
spec = gridspec.GridSpec(ncols=2, nrows=2, figure=f)
tl = f.add_subplot(spec[0, 0])
tr = f.add_subplot(spec[0, 1])
bl = f.add_subplot(spec[1, 0])
br = f.add_subplot(spec[1, 1])
# PROJECTION_1
#2D projection image
tl.imshow(ref_square, cmap=plt.get_cmap('gray'), aspect='equal')
tl.axis('off')
#1D line projection
bl.plot(ref_intensity, color='black')
bl.xaxis.set_visible(False)
bl.yaxis.set_visible(False)
bl.set_ylim([y_axis_min, (y_axis_max + 0.025*y_axis_max)])
bl.fill_between(range(len(ref_intensity)), ref_intensity, alpha=0.5, color='deepskyblue')
# PROJECTION_2
#2D projection image
tr.imshow(comp_square, cmap=plt.get_cmap('gray'), aspect='equal')
tr.axis('off')
            #1D line projection
br.plot(comp_intensity, color='black')
br.xaxis.set_visible(False)
br.yaxis.set_visible(False)
br.set_ylim([y_axis_min, (y_axis_max + 0.025*y_axis_max)])
br.fill_between(range(len(comp_intensity)), comp_intensity, alpha=0.5, color='yellow')
asp = np.diff(bl.get_xlim())[0] / np.diff(bl.get_ylim())[0]
bl.set_aspect(asp)
asp1 = np.diff(br.get_xlim())[0] / np.diff(br.get_ylim())[0]
            br.set_aspect(asp1)
f.tight_layout()
if self.projcanvas:
self.projcanvas.get_tk_widget().destroy()
self.projtoolbar.destroy()
self.projcanvas = FigureCanvasTkAgg(f, frame)
self.projcanvas.draw()
self.projcanvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
self.projcanvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
self.projtoolbar = NavigationToolbar2Tk(self.projcanvas, frame)
self.projtoolbar.update()
def overlay_lines(self, p1, p2, FT, frame):
"""overlays line projections at optimum angle between two class averages"""
if p1 == p2:
self.show_dif_class_msg()
else:
a1 = complete_scores[p1, p2][0]
a2 = complete_scores[p1, p2][1]
projection1 = make_1D(extract_2D[p1], a1)
projection2 = make_1D(extract_2D[p2], a2)
if FT:
pad_p1 = np.pad(projection1.vector, pad_width=(0, shape-projection1.size()))
pad_p2 = np.pad(projection2.vector, pad_width=(0, shape-projection2.size()))
A = abs(np.fft.rfft(pad_p1))
B = abs(np.fft.rfft(pad_p2))
f = Figure(figsize=(8,4))
ax = f.add_subplot(111)
ax.bar(range(len(A)), A, alpha=0.35, color='deepskyblue', ec='k', linewidth=1)
ax.bar(range(len(B)), B, alpha=0.35, color='yellow', ec='k', linewidth=1)
ax.get_xaxis().set_ticks([])
ax.set_xlabel('frequency component')
ax.set_ylabel('Amplitude')
else:
a2_flip = complete_scores[p1, p2][1] + 180
projection2_flip = make_1D(extract_2D[p2], a2_flip)
score_default, r, c = slide_score(projection1, projection2) # Score and location of optimum
score_flip, r_flip, c_flip = slide_score(projection1, projection2_flip) # Score of phase flipped
if score_default <= score_flip:
ref_intensity, comp_intensity = r, c
else:
ref_intensity, comp_intensity = r_flip, c_flip
f = Figure(figsize=(8,4))
ax = f.add_subplot(111)
x_axis_max = len(ref_intensity)
y_axis_max = max(np.amax(ref_intensity), np.amax(comp_intensity))
y_axis_min = min(np.amin(ref_intensity), np.amin(comp_intensity))
ax.plot(ref_intensity, color='black')
ax.plot(comp_intensity, color='black')
ax.fill_between(range(len(ref_intensity)), ref_intensity, alpha=0.35, color='deepskyblue')
ax.fill_between(range(len(comp_intensity)), comp_intensity, alpha=0.35, color='yellow')
ax.set_ylabel('Intensity')
ax.set_ylim([y_axis_min, (y_axis_max + 0.025*y_axis_max)])
ax.xaxis.set_visible(False)
f.tight_layout()
if self.projcanvas:
self.projcanvas.get_tk_widget().destroy()
self.projtoolbar.destroy()
self.projcanvas = FigureCanvasTkAgg(f, frame)
self.projcanvas.draw()
self.projcanvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
self.projcanvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
self.projtoolbar = NavigationToolbar2Tk(self.projcanvas, frame)
self.projtoolbar.update()
def write_star_files(self, star_input, outpath):
"""split star file into new star files based on clusters"""
with open(star_input, 'r') as f:
table = parse_star(f)
cluster_star = {}
for cluster, nodes in clusters.items():
if nodes:
#convert to str to match df
#add 1 to match RELION indexing
avgs = [str(node+1) for node in nodes]
subset = table[table['ClassNumber'].isin(avgs)]
cluster_star[cluster] = subset
for cluster, table in cluster_star.items():
with open(outpath+'/slicem_cluster_{0}.star'.format(cluster), 'w') as f:
#write the star file
print('data_', file=f)
print('loop_', file=f)
for i, name in enumerate(table.columns):
print('_rln' + name + ' #' + str(i+1), file=f)
table.to_csv(f, sep='\t', index=False, header=False)
with open(outpath+'/slicem_clusters.txt', 'w') as f:
for cluster, averages in clusters.items():
f.write(str(cluster) + '\t' + str(averages) + '\n')
print('star files written!')
def write_edge_list(self, network, outpath):
with open(outpath+'/slicem_edge_list.txt', 'w') as f:
f.write('projection_1'+'\t'+'projection_2'+'\t'+'score'+'\n')
for t in flat:
f.write(str(t[0])+'\t'+str(t[1])+'\t'+str(t[2])+'\n')
if network == 'top_n':
if clusters['singles']:
for single in clusters['singles']:
f.write(str(single)+'\n')
print('edge list written!')
#Utility functions from main script to make GUI standalone
def extract_class_avg(avg):
"""fit in minimal bounding box"""
image = avg.copy()
image[image < 0] = 0
struct = np.ones((2, 2), dtype=bool)
dilate = ndi.binary_dilation(image, struct)
labeled = measure.label(dilate, connectivity=2)
rprops = measure.regionprops(labeled, image, cache=False)
if len(rprops) == 1:
select_region = 0
else:
img_y, img_x = image.shape
if labeled[int(img_y/2), int(img_x/2)] != 0: # Check for central region
select_region = labeled[int(img_y/2), int(img_x/2)] - 1 # For index
else:
distances = [
(i, np.linalg.norm(np.array((img_y/2, img_x/2)) - np.array(r.weighted_centroid)))
for i, r in enumerate(rprops)
]
select_region = min(distances, key=lambda x: x[1])[0] # Pick first closest region
y_min, x_min, y_max, x_max = [p for p in rprops[select_region].bbox]
return image[y_min:y_max, x_min:x_max]
def nearest_neighbors(neighbors):
"""group k best scores for each class average to construct graph"""
projection_knn = {}
order_scores = {avg: [] for avg in range(num_class_avg)}
for d in drop:
order_scores.pop(d, None)
#projection_knn[projection_1] = [projection_2, angle_1, angle_2, score]
for pair, values in scores_update.items():
p1, p2 = [p for p in pair]
a1, a2, s = [v for v in values]
c = [p2, a1, a2, s]
order_scores[p1].append(c)
# Zscore per class avg for edge
for projection, scores in order_scores.items():
all_scores = [v[3] for v in scores]
u = np.mean(all_scores)
s = np.std(all_scores)
for v in scores:
zscore = (v[3] - u)/s
v[3] = zscore
for avg, scores in order_scores.items():
sort = sorted(scores, reverse=False, key=lambda x: x[3])[:neighbors]
projection_knn[avg] = sort
return projection_knn
def remove_outliers(clusters):
"""
Use median absolute deviation to remove outliers
Boris Iglewicz and David Hoaglin (1993)
"""
pixel_sums = {}
outliers = []
for cluster, nodes in clusters.items():
if len(nodes) > 1:
pixel_sums[cluster] = []
for node in nodes:
pixel_sums[cluster].append(sum(sum(extract_2D[node])))
for cluster, psums in pixel_sums.items():
med = np.median(psums)
m_psums = [abs(x - med) for x in psums]
mad = np.median(m_psums)
if mad == 0:
            continue  # MAD of zero means no spread; skip this cluster
else:
for i, proj in enumerate(psums):
z = 0.6745*(proj - med)/mad
if abs(z) > 3.5:
outliers.append((cluster, clusters[cluster][i]))
clusters["outliers"] = [o[1] for o in outliers]
for outlier in outliers:
cluster, node = outlier[0], outlier[1]
clusters[cluster].remove(node)
print('class_avg node {0} was removed from cluster {1} as an outlier'.format(node, cluster))
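# Worked example of the modified z-score used above (numbers are illustrative):
# psums = [10, 11, 12, 50] -> median = 11.5 and MAD = median(|x - 11.5|) = 1.0,
# so z(50) = 0.6745 * (50 - 11.5) / 1.0 ~ 26 > 3.5 and 50 is flagged as an outlier.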
def random_color():
return tuple(np.random.rand(1,3)[0])
def get_plot_colors(clusters, graph):
color_list = []
preset_colors = [color for colors in [cm.Set3.colors] for color in colors]
for i in range(len(clusters)):
if i < len(preset_colors):
color_list.append(preset_colors[i])
else:
color_list.append(random_color())
colors = []
for i, node in enumerate(graph.nodes):
for cluster, projections in clusters.items():
if cluster == 'singles':
if node in projections:
colors.append((0.85, 0.85, 0.85))
elif cluster == 'outliers':
if node in projections:
colors.append((0.35, 0.35, 0.35))
elif cluster == 'removed':
if node in projections:
colors.append((0.9, 0, 0))
elif node in projections:
colors.append((color_list[cluster]))
return colors
def make_equal_square_images(ref, comp):
ry, rx = np.shape(ref)
cy, cx = np.shape(comp)
max_dim = max(rx, ry, cx, cy) # Max dimension
ref = adjust_image_size(ref, max_dim)
comp = adjust_image_size(comp, max_dim)
return ref, comp
def adjust_image_size(img, max_dim):
y, x = np.shape(img)
y_pad = int((max_dim-y)/2)
if y % 2 == 0:
img = np.pad(img, pad_width=((y_pad,y_pad), (0,0)), mode='constant')
else:
img = np.pad(img, pad_width=((y_pad+1,y_pad), (0,0)), mode='constant')
x_pad = int((max_dim-x)/2)
if x % 2 == 0:
img = np.pad(img, pad_width=((0,0), (x_pad,x_pad)), mode='constant')
else:
img = np.pad(img, pad_width=((0,0), (x_pad+1,x_pad)), mode='constant')
return img
class Projection:
"""for 1D projection vectors"""
def __init__(self,
class_avg,
angle,
vector):
self.class_avg = class_avg
self.angle = angle
self.vector = vector
def size(self):
return len(self.vector)
def make_1D(projection, angle):
proj_1D = transform.rotate(projection, angle, resize=True).sum(axis=0)
trim_1D = np.trim_zeros(proj_1D, trim='fb')
p = Projection(class_avg=projection, angle=angle, vector=trim_1D)
return p
def slide_score(a, b):
"""
finds minimum pairwise score for translations of 1D projections
a, b are instances of the Projection class
modified from main for plotting
"""
scores = []
if a.size() > b.size():
l, s = a.vector, b.vector
else:
l, s = b.vector, a.vector
l_size, s_size = len(l), len(s)
pad_l = np.pad(l, pad_width=(s_size-1, s_size-1))
diff_of_len = abs(len(pad_l) - s_size)
for i in range(s_size+l_size-1):
shift_s = np.pad(s, pad_width=(i, diff_of_len-i))
scores.append(np.linalg.norm(pad_l - shift_s))
score = min(scores)
loc = np.argwhere(scores == np.amin(scores))
    loc = loc[0][0].astype('int')  # if multiple minima occur, pick the first
if a.size() > b.size():
ref_intensity = pad_l
comp_intensity = np.pad(s, pad_width=(loc, diff_of_len-loc))
else:
ref_intensity = np.pad(s, pad_width=(loc, diff_of_len-loc))
comp_intensity = pad_l
#Crop lines for plotting
if loc < s_size-1:
ref_intensity = ref_intensity[loc:s_size-1+l_size]
comp_intensity = comp_intensity[loc:s_size-1+l_size]
elif loc >= s_size-1 and loc+s_size < s_size-1+l_size:
ref_intensity = ref_intensity[s_size-1:s_size+l_size]
comp_intensity = comp_intensity[s_size-1:s_size+l_size]
elif loc >= s_size-1 and loc+s_size >= s_size-1+l_size:
ref_intensity = ref_intensity[s_size-1:loc+s_size]
comp_intensity = comp_intensity[s_size-1:loc+s_size]
return score, ref_intensity, comp_intensity
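# Illustrative use of the helpers above (the input names are assumptions):
#
#     p1 = make_1D(extract_2D[i], angle_i)  # 1D projection at the optimum angle
#     p2 = make_1D(extract_2D[j], angle_j)
#     score, ref_line, comp_line = slide_score(p1, p2)
#     # `score` is the minimum Euclidean distance over all relative shifts;
#     # `ref_line` and `comp_line` are the aligned intensity profiles for plotting.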
def parse_star(f):
"""
functions to parse star file adapted from Tristan Bepler
https://github.com/tbepler/topaz
https://www.nature.com/articles/s41592-019-0575-8
"""
return parse(f)
def parse(f):
lines = f.readlines()
for i in range(len(lines)):
line = lines[i]
if line.startswith('data_'):
return parse_star_body(lines[i+1:])
def parse_star_body(lines):
#data_images line has been read, next is loop
for i in range(len(lines)):
if lines[i].startswith('loop_'):
lines = lines[i+1:]
break
header,lines = parse_star_loop(lines)
#parse the body
content = []
for i in range(len(lines)):
line = lines[i].strip()
if line.startswith('data'): # done with image block
break
if line.startswith('#') or line.startswith(';'): # comment lines
continue
if line != '':
tokens = line.split()
content.append(tokens)
table = pd.DataFrame(content, columns=header)
return table
def parse_star_loop(lines):
columns = []
for i in range(len(lines)):
line = lines[i].strip()
if not line.startswith('_'):
break
name = line[1:]
#strip trailing comments from name
loc = name.find('#')
if loc >= 0:
name = name[:loc]
#strip 'rln' prefix
if name.startswith('rln'):
name = name[3:]
name = name.strip()
columns.append(name)
return columns, lines[i:]
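# Minimal usage sketch for the star-file parser above (columns and values are
# illustrative only):
#
#     import io
#     star_text = ("data_\n"
#                  "loop_\n"
#                  "_rlnImageName #1\n"
#                  "_rlnClassNumber #2\n"
#                  "img_001.mrc 1\n")
#     table = parse_star(io.StringIO(star_text))
#     # -> DataFrame with columns ['ImageName', 'ClassNumber']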
app = SLICEM_GUI()
app.geometry('900x750')
app.mainloop() | 34.931483 | 112 | 0.553282 | 5,018 | 40,276 | 4.290753 | 0.127541 | 0.015605 | 0.008917 | 0.015605 | 0.343458 | 0.270169 | 0.210394 | 0.173517 | 0.143978 | 0.123496 | 0 | 0.02384 | 0.318875 | 40,276 | 1,153 | 113 | 34.931483 | 0.761018 | 0.064828 | 0 | 0.159601 | 0 | 0 | 0.038482 | 0.00122 | 0 | 0 | 0 | 0.000867 | 0 | 1 | 0.041147 | false | 0 | 0.023691 | 0.002494 | 0.089776 | 0.011222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2cc537735a2d67e707923633b8c5359a8e975af1 | 650 | py | Python | src/openprocurement/tender/openeu/procedure/state/bid.py | ProzorroUKR/openprocurement.api | 2855a99aa8738fb832ee0dbad4e9590bd3643511 | [
"Apache-2.0"
] | 10 | 2020-02-18T01:56:21.000Z | 2022-03-28T00:32:57.000Z | src/openprocurement/tender/openeu/procedure/state/bid.py | quintagroup/openprocurement.api | 2855a99aa8738fb832ee0dbad4e9590bd3643511 | [
"Apache-2.0"
] | 26 | 2018-07-16T09:30:44.000Z | 2021-02-02T17:51:30.000Z | src/openprocurement/tender/openeu/procedure/state/bid.py | ProzorroUKR/openprocurement.api | 2855a99aa8738fb832ee0dbad4e9590bd3643511 | [
"Apache-2.0"
] | 15 | 2019-08-08T10:50:47.000Z | 2022-02-05T14:13:36.000Z | from openprocurement.api.utils import error_handler
from openprocurement.tender.core.procedure.state.bid import BidState as BaseBidState
class BidState(BaseBidState):
def status_up(self, before, after, data):
assert before != after, "Statuses must be different"
# this logic moved here from validate_update_bid_status validator
# if request.authenticated_role != "Administrator":
if after not in ("pending", "active"):
self.request.errors.add("body", "bid", "Can't update bid to ({}) status".format(after))
self.request.errors.status = 403
raise error_handler(self.request)
| 40.625 | 99 | 0.695385 | 80 | 650 | 5.5625 | 0.6625 | 0.074157 | 0.076404 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005803 | 0.204615 | 650 | 15 | 100 | 43.333333 | 0.854932 | 0.173846 | 0 | 0 | 0 | 0 | 0.144465 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2cc54c1983960e2959f4569eed5ba54d8ffadc15 | 4,903 | py | Python | api/serializers.py | apigram/jade-api | 1aece29c3109db68897fdf854be431554e7f2863 | [
"Apache-2.0"
] | null | null | null | api/serializers.py | apigram/jade-api | 1aece29c3109db68897fdf854be431554e7f2863 | [
"Apache-2.0"
] | null | null | null | api/serializers.py | apigram/jade-api | 1aece29c3109db68897fdf854be431554e7f2863 | [
"Apache-2.0"
] | null | null | null | from rest_framework_nested.serializers import NestedHyperlinkedModelSerializer
from rest_framework_nested.relations import NestedHyperlinkedRelatedField
from rest_framework import serializers
from api.models import User, Company, CompanyContact, Order, Item, Contact, OrderItem
import copy
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('url', 'username', 'contact', 'email')
class CompanyContactsSerializer(NestedHyperlinkedModelSerializer):
parent_lookup_kwargs = {
'company_pk': 'company__pk'
}
contact = serializers.HyperlinkedRelatedField(
view_name='contact-detail',
many=False,
read_only=True
)
class Meta:
model = CompanyContact
fields = ('contact',)
class CompanySerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Company
fields = ('url', 'name', 'business_number', 'type', 'contacts')
contacts = CompanyContactsSerializer(many=True, read_only=False)
def create(self, validated_data):
client_data = copy.deepcopy(validated_data)
del client_data['contacts']
contact_list_data = validated_data['contacts']
company = Company(**client_data)
company.save()
for contact_data in contact_list_data:
contact = Contact(**contact_data)
contact.save()
companycontact = CompanyContact()
companycontact.company = company
companycontact.contact = contact
companycontact.save()
return company
class ClientSerializer(CompanySerializer):
url = serializers.HyperlinkedIdentityField(
view_name='client-detail',
lookup_field='pk'
)
class SupplierSerializer(CompanySerializer):
url = serializers.HyperlinkedIdentityField(
view_name='supplier-detail',
lookup_field='pk'
)
class OrderItemSerializers(NestedHyperlinkedModelSerializer):
parent_lookup_kwargs = {
'order_pk': 'order__pk'
}
order = serializers.HyperlinkedRelatedField(
view_name='order-detail',
many=False,
read_only=True
)
item = serializers.HyperlinkedRelatedField(
view_name='item-detail',
many=False,
read_only=True
)
class Meta:
model = OrderItem
fields = ('order', 'item', 'quantity', 'unit_price', 'comments')
class ItemOrderSerializers(NestedHyperlinkedModelSerializer):
parent_lookup_kwargs = {
'item_pk': 'item__pk'
}
order = serializers.HyperlinkedRelatedField(
view_name='order-detail',
many=False,
read_only=True
)
item = serializers.HyperlinkedRelatedField(
view_name='item-detail',
many=False,
read_only=True
)
class Meta:
model = OrderItem
        fields = ('order', 'item', 'quantity', 'unit_price', 'comments')
class OrderCompanySerializer(NestedHyperlinkedModelSerializer):
parent_lookup_kwargs = {
'order_pk': 'order__pk'
}
class Meta:
model = Company
fields = ('url', 'name', 'contacts', 'business_number', 'type')
contacts = CompanyContactsSerializer(many=True, read_only=True)
class OrderSerializer(serializers.HyperlinkedModelSerializer):
client = serializers.HyperlinkedRelatedField(
view_name="client-detail",
many=False,
read_only=False,
queryset=Company.objects.filter(type='CLIENT')
)
supplier = serializers.HyperlinkedRelatedField(
view_name="supplier-detail",
many=False,
read_only=False,
queryset=Company.objects.filter(type='SUPPLIER')
)
class Meta:
model = Order
fields = (
'url',
'items',
'client',
'supplier',
'received_date',
'scheduled_deliver_date',
'delivered_date',
'status',
'comments'
)
items = OrderItemSerializers(many=True, read_only=False)
def create(self, validated_data):
order_data = copy.deepcopy(validated_data)
del order_data['items']
item_list_data = validated_data['items']
order = Order(**order_data)
order.save()
for item_data in item_list_data:
order_item = OrderItem(**item_data)
order_item.order = order
order_item.save()
return order
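    # Illustrative request payload for the nested create() above; the URLs and
    # field values are hypothetical:
    #
    #     {"client": "/clients/1/", "supplier": "/suppliers/2/",
    #      "received_date": "2020-01-01", "status": "NEW", "comments": "",
    #      "items": [{"quantity": 3, "unit_price": "9.99", "comments": ""}]}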
class ItemSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Item
fields = ('url', 'orders', 'label', 'quantity', 'unit_price')
orders = OrderItemSerializers(many=True, read_only=True)
class ContactSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Contact
fields = ('first_name', 'last_name', 'role', 'phone', 'email', 'address')
| 27.544944 | 85 | 0.646543 | 442 | 4,903 | 6.9819 | 0.208145 | 0.028516 | 0.04083 | 0.095269 | 0.466948 | 0.376863 | 0.277706 | 0.255671 | 0.214841 | 0.201555 | 0 | 0 | 0.255354 | 4,903 | 177 | 86 | 27.700565 | 0.845248 | 0 | 0 | 0.33813 | 0 | 0 | 0.112176 | 0.004487 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014388 | false | 0 | 0.035971 | 0 | 0.330935 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2cc5625c16ac62cef1a16f88ef8ee506d837539a | 1,703 | py | Python | kanga/management/commands/add-origins.py | deptofdefense/kanga | 9c8d926a4828e2fca528915ddf35759d1c328c85 | [
"MIT"
] | 1 | 2022-03-05T01:17:59.000Z | 2022-03-05T01:17:59.000Z | kanga/management/commands/add-origins.py | deptofdefense/kanga | 9c8d926a4828e2fca528915ddf35759d1c328c85 | [
"MIT"
] | null | null | null | kanga/management/commands/add-origins.py | deptofdefense/kanga | 9c8d926a4828e2fca528915ddf35759d1c328c85 | [
"MIT"
] | null | null | null | # =================================================================
#
# Work of the U.S. Department of Defense, Defense Digital Service.
# Released as open source under the MIT License. See LICENSE file.
#
# =================================================================
import json
import sys
from django.core.management.base import BaseCommand
from django.forms.models import model_to_dict
from kanga.models import Account, Origin
from kanga.encoder import KangaEncoder
from kanga.utils import clean_phone_number
class Command(BaseCommand):
help = 'Add origins'
def add_arguments(self, parser):
parser.add_argument(
'--account',
type=str,
default=None,
required=True,
            help='Account id')
parser.add_argument(
'--path',
type=str,
default=None,
required=True,
            help='Path to a file of phone numbers, one per line')
def handle(self, *args, **options):
        # read command-line options
account = options['account']
p = options['path']
        # look up the target account
a = Account.objects.get(id=account)
        # remove any existing origins before re-adding
Origin.objects.all().delete()
        # create one origin per phone number in the file
with open(p, 'r') as file:
text = clean_phone_number(file.read())
for line in text.splitlines():
Origin.objects.create(
account=a,
phone_number="+1{}".format(line),
voice=True,
sms=True,
mms=True,
fax=True,
active=True)
        # echo the created origins to stdout as JSON
origins = [o for o in Origin.objects.all()]
json.dump([model_to_dict(o) for o in origins], sys.stdout, cls=KangaEncoder)
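        # Illustrative invocation, assuming standard Django management-command
        # wiring (the file name maps to the command name):
        #
        #     python manage.py add-origins --account <account-id> --path phones.txt
        #
        # where phones.txt holds one phone number per line.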
| 28.864407 | 84 | 0.500881 | 174 | 1,703 | 4.833333 | 0.511494 | 0.032105 | 0.026159 | 0.042806 | 0.080856 | 0.080856 | 0.080856 | 0 | 0 | 0 | 0 | 0.000875 | 0.328831 | 1,703 | 58 | 85 | 29.362069 | 0.734908 | 0.153846 | 0 | 0.2 | 0 | 0 | 0.03499 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.175 | 0 | 0.275 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2cc813a4b5a65446f5e51d5a31e50f0a7569413f | 692 | py | Python | examples/joost/grammarator.py | wilkeraziz/grasp | 95f5135fd3711eed32cddce2049dd595314fb1f4 | [
"Apache-2.0"
] | 9 | 2015-07-22T18:07:44.000Z | 2021-11-08T11:21:11.000Z | examples/joost/grammarator.py | wilkeraziz/grasp | 95f5135fd3711eed32cddce2049dd595314fb1f4 | [
"Apache-2.0"
] | null | null | null | examples/joost/grammarator.py | wilkeraziz/grasp | 95f5135fd3711eed32cddce2049dd595314fb1f4 | [
"Apache-2.0"
] | 1 | 2021-01-12T10:00:22.000Z | 2021-01-12T10:00:22.000Z | #!/usr/bin/env python3
import argparse
parser = argparse.ArgumentParser(description='Generate a toy grammar with k classes of terminals.')
parser.add_argument('k', metavar='k', type=int,
                    help='number of classes of terminals (terminal-generating non-terminals)')
args = parser.parse_args()
# print("k = %d" % args.k)
print("""[A] ||| [S] [VT] [O] ||| 1.0
[S] ||| [S] [S] ||| 0.2
[S] ||| [S] 'rpi' [S] [VT] ||| 0.2
[S] ||| [ST] ||| 0.6
[O] ||| [O] [O] ||| 0.2
[O] ||| [S] 'rpi' [S] [VT] ||| 0.2
[O] ||| [OT] ||| 0.6""")
p = 1.0 / float(args.k)
for i in range(1, args.k + 1):
print("[ST] ||| 'si%d' ||| %f" % (i, p))
print("[OT] ||| 'oi%d' ||| %f" % (i, p))
print("[VT] ||| 'vi%d' ||| %f" % (i, p))
| 26.615385 | 76 | 0.492775 | 115 | 692 | 2.947826 | 0.452174 | 0.023599 | 0.026549 | 0.035398 | 0.106195 | 0.053097 | 0 | 0 | 0 | 0 | 0 | 0.033451 | 0.179191 | 692 | 25 | 77 | 27.68 | 0.56338 | 0.066474 | 0 | 0 | 0 | 0 | 0.535714 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.058824 | 0 | 0.058824 | 0.235294 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ccbbcd205dc73323b99918be9662a57635d2a3f | 5,924 | py | Python | app/sims/method/fields.py | DeepanshS/mrsimulator-ui | 42f1a85f1cb76896cde2a3c8d4e38fe2c989b710 | [
"BSD-3-Clause"
] | 2 | 2019-11-21T16:14:13.000Z | 2020-10-17T21:36:00.000Z | app/sims/method/fields.py | DeepanshS/mrsimulator-ui | 42f1a85f1cb76896cde2a3c8d4e38fe2c989b710 | [
"BSD-3-Clause"
] | 32 | 2021-07-07T20:16:29.000Z | 2022-03-29T14:09:23.000Z | app/sims/method/fields.py | DeepanshS/mrsimulator-ui | 42f1a85f1cb76896cde2a3c8d4e38fe2c989b710 | [
"BSD-3-Clause"
] | 2 | 2019-10-23T18:23:57.000Z | 2021-03-25T00:13:10.000Z | # -*- coding: utf-8 -*-
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input
from dash.dependencies import Output
from app import app
from app.custom_widgets import collapsable_card
from app.custom_widgets import container
from app.custom_widgets import custom_button
from app.custom_widgets import custom_input_group
# from dash.dependencies import State
__author__ = "Deepansh J. Srivastava"
__email__ = "srivastava.89@osu.edu"
def experiment_ui():
# upload experiment dataset
tooltip = (
"Click to attach a measurement file to the selected method. "
"Alternatively, drag and drop the file onto the Simulation area."
)
icon = html.I(className="fas fa-paperclip fa-lg", title=tooltip)
clip_btn = html.Button(icon, className="icon-button")
upload = dcc.Upload(clip_btn, id="import-measurement-for-method")
# label = dbc.InputGroupAddon("Measurement data", addon_type="prepend")
# upload_ui = dbc.InputGroup([label, upload], className="input-form")
# standard deviation
calc_tooltip = (
"Click to calculate the noise standard deviation from the selected region of ∂"
"the experiment spectrum."
)
calc_icon = html.I(className="fas fa-calculator", title=calc_tooltip)
calc_btn = html.Button(calc_icon, id="calc-sigma-button", className="icon-button")
sigma = custom_input_group(
prepend_label="Noise standard deviation (σ)", # Text overwraps the input field
append_label=calc_btn,
value=1.0,
min=1e-6,
id="measurement-sigma",
debounce=True,
)
return container(
text=["Experiment", upload],
featured=[sigma],
)
# app.clientside_callback(
# """
# function(index, data) {
# console.log(data);
# if (data == null) { throw window.dash_clientside.PreventUpdate; }
# if (data.methods[index] == null){throw window.dash_clientside.PreventUpdate;}
# if (data.methods[index].experiment == null) {
# return [false, false, false, false, false, false];
# }
# else { return [true, true, true, true, true, true]; }
# }
# """,
# [
# *[Output(f"count-{i}", "disabled") for i in range(2)],
# *[Output(f"spectral_width-{i}", "disabled") for i in range(2)],
# *[Output(f"reference_offset-{i}", "disabled") for i in range(2)],
# ],
# Input("select-method", "value"),
# Input("local-mrsim-data", "data"),
# prevent_initial_call=True,
# )
def spectral_dimension_ui(i):
"""Return a list of widgets whose entries are used in evaluating the dimension
coordinates along the i^th dimension. The widgets includes number of points (count),
spectral width, and reference offset.
Args:
i: An integer with the dimension index.
"""
# count
count = custom_input_group(
prepend_label="Number of points",
value=512,
min=2,
id=f"count-{i}",
debounce=True,
pattern="[0-9]*",
)
# spectral width
spectral_width = custom_input_group(
prepend_label="Spectral width",
append_label="kHz",
value=25.0,
min=1e-6,
id=f"spectral_width-{i}",
debounce=True,
)
# reference offset
reference_offset = custom_input_group(
prepend_label="Reference offset",
append_label="kHz",
value=0.0,
id=f"reference_offset-{i}",
debounce=True,
)
# origin offset
# origin_offset = custom_input_group(
# prepend_label="Origin offset",
# append_label="MHz",
# value=0.0,
# id=f"origin_offset-{i}",
# debounce=True,
# )
# origin offset
label = custom_input_group(
prepend_label="Label",
append_label="",
input_type="text",
value="frequency",
id=f"label-{i}",
debounce=True,
)
return collapsable_card(
text=f"Spectral Dimension - {i}",
id_=f"dim-{i}",
featured=[count, spectral_width, reference_offset],
hidden=[label],
message="Show/Hide",
outer=True,
)
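# Hypothetical sketch (not part of mrsimulator-ui) of how the three featured entries
# could map to a frequency axis; the convention mrsimulator actually uses may differ:
#
#     coordinates = (np.arange(count) - count // 2) * (spectral_width / count) \
#                   + reference_offset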
def global_environment():
"""Generate a list of widgets whose entries are the sample global environment
parameter. The widgets includes flux density, rotor frequency, and rotor angle."""
flux_density = custom_input_group(
prepend_label="Magnetic flux density (B₀)",
append_label="T",
value=9.4,
id="magnetic_flux_density",
min=0.0,
debounce=True,
)
# rotor frequency
rotor_frequency = custom_input_group(
prepend_label="Rotor frequency (𝜈ᵣ)",
append_label="kHz",
value=0.0,
id="rotor_frequency",
min=0.0,
debounce=True,
)
# rotor angle
magic_angle = custom_button(
icon_classname="fas fa-magic",
tooltip="Set value to the magic angle.",
id="set-to-magic-angle",
className="icon-button",
module="html",
)
# dbc.Button(
# html.I(className="fas fa-magic"),
# color="light",
# id="set-to-magic-angle",
# size="sm",
# )
# datalist = html.Datalist([0, 54.7356103172, 90], id="datalist-magic-angle")
rotor_angle = custom_input_group(
prepend_label=html.Div(["Rotor angle (θᵣ)", magic_angle]),
append_label="deg",
value=54.7356103172,
id="rotor_angle",
max=90,
min=0,
debounce=True,
# list="datalist-magic-angle",
)
app.clientside_callback(
"""function(n) { return 54.7356103172; }""",
Output("rotor_angle", "value"),
Input("set-to-magic-angle", "n_clicks"),
)
return container(
text="Global Environment Parameters",
featured=[flux_density, rotor_frequency, rotor_angle],
)
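# Sketch (not in the source): the clientside callback above is Dash's zero-round-trip
# pattern for trivial updates — clicking the magic-wand button writes the magic angle,
# arccos(1/√3) ≈ 54.7356°, straight into the input in the browser. A server-side
# equivalent would look like the following, though registering it alongside the
# clientside version would raise a duplicate-output error, so it is illustrative only:
# @app.callback(Output("rotor_angle", "value"), Input("set-to-magic-angle", "n_clicks"))
# def set_magic_angle(n_clicks):
#     return 54.7356103172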
| 29.182266 | 88 | 0.61293 | 706 | 5,924 | 4.997167 | 0.273371 | 0.031179 | 0.045351 | 0.058673 | 0.27466 | 0.165816 | 0.085317 | 0.049887 | 0.049887 | 0.034014 | 0 | 0.017348 | 0.260466 | 5,924 | 202 | 89 | 29.326733 | 0.78772 | 0.326469 | 0 | 0.159664 | 0 | 0 | 0.222941 | 0.018384 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02521 | false | 0 | 0.084034 | 0 | 0.134454 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ccee2fa8c8ba625dd902253db0db5b0a2526b08 | 563 | py | Python | class2/exercises/exercise2/exercise2c.py | EndlessDynamics/Fork_nornir_course | 04bf7e3819659f481a4e04059152877b795177b2 | [
"Apache-2.0"
] | 60 | 2019-05-11T05:23:10.000Z | 2022-03-30T08:03:43.000Z | class2/exercises/exercise2/exercise2c.py | EndlessDynamics/Fork_nornir_course | 04bf7e3819659f481a4e04059152877b795177b2 | [
"Apache-2.0"
] | 14 | 2021-03-31T19:08:08.000Z | 2021-09-15T17:29:40.000Z | class2/exercises/exercise2/exercise2c.py | EndlessDynamics/Fork_nornir_course | 04bf7e3819659f481a4e04059152877b795177b2 | [
"Apache-2.0"
] | 21 | 2019-08-08T21:30:46.000Z | 2022-03-28T06:22:25.000Z | from rich import print
from nornir import InitNornir
from nornir.core.filter import F
from nornir_netmiko import netmiko_send_command
def main():
nr = InitNornir(config_file="config.yaml")
filt = F(groups__contains="ios")
nr = nr.filter(filt)
my_results = nr.run(
task=netmiko_send_command, command_string="show run | i hostname"
)
host_results = my_results["cisco3"]
print()
print(type(host_results))
print(repr(host_results[0]))
print(host_results.__iter__)
print()
if __name__ == "__main__":
main()
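# Sketch (assumed, not part of the exercise): nr.run returns an AggregatedResult
# mapping hostname -> MultiResult, which is why my_results["cisco3"] and
# host_results[0] both work above, and why printing host_results.__iter__ shows
# that a MultiResult is iterable. Looping over every host would look like:
# for host, multi_result in my_results.items():
#     print(host, multi_result[0].result)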
| 22.52 | 73 | 0.694494 | 76 | 563 | 4.789474 | 0.5 | 0.120879 | 0.098901 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004425 | 0.197158 | 563 | 24 | 74 | 23.458333 | 0.800885 | 0 | 0 | 0.105263 | 0 | 0 | 0.087034 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.210526 | 0 | 0.263158 | 0.315789 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e2ba3189a471ae4c5fad168efa534a2e6749bec4 | 13,265 | py | Python | portality/api/current/data_objects/application.py | DOAJ/doaj | b11f163c48f51f9e3ada2b02c617b50b847dcb4c | [
"Apache-2.0"
] | 47 | 2015-04-24T13:13:39.000Z | 2022-03-06T03:22:42.000Z | portality/api/current/data_objects/application.py | DOAJ/doaj | b11f163c48f51f9e3ada2b02c617b50b847dcb4c | [
"Apache-2.0"
] | 1,215 | 2015-01-02T14:29:38.000Z | 2022-03-28T14:19:13.000Z | portality/api/current/data_objects/application.py | DOAJ/doaj | b11f163c48f51f9e3ada2b02c617b50b847dcb4c | [
"Apache-2.0"
] | 14 | 2015-11-27T13:01:23.000Z | 2021-05-21T07:57:23.000Z | import uuid
from datetime import datetime
from portality.api.current.data_objects.common import _check_for_script
from portality.lib import swagger, seamless, coerce, dates, dataobj
from portality import models
from copy import deepcopy
from portality.api.current.data_objects.common_journal_application import OutgoingCommonJournalApplication, _SHARED_STRUCT
# both incoming and outgoing applications share this struct
# "required" fields are only put on incoming applications
from portality.lib.coerce import COERCE_MAP
from portality.lib.seamless import SeamlessMixin
from portality.models import JournalLikeBibJSON
from portality.ui.messages import Messages
OUTGOING_APPLICATION_STRUCT = {
"fields": {
"id": {"coerce": "unicode"}, # Note that we'll leave these in for ease of use by the
"created_date": {"coerce": "utcdatetime"}, # caller, but we'll need to ignore them on the conversion
"last_updated": {"coerce": "utcdatetime"}, # to the real object
"last_manual_update": {"coerce": "utcdatetime"}
},
"objects": ["admin", "bibjson"],
"structs": {
"admin" : {
"fields" : {
"application_status" : {"coerce" : "unicode"},
"current_journal" : {"coerce" : "unicode"},
"date_applied" : {"coerce" : "unicode"},
"owner" : {"coerce" : "unicode"}
}
}
}
}
INTERNAL_APPLICATION_STRUCT = {
"fields": {
"id": {"coerce": "unicode"}, # Note that we'll leave these in for ease of use by the
"created_date": {"coerce": "utcdatetime"}, # caller, but we'll need to ignore them on the conversion
"last_updated": {"coerce": "utcdatetime"}, # to the real object
"last_manual_update": {"coerce": "utcdatetime"},
"es_type": {"coerce": "unicode"}
},
"objects": ["admin", "bibjson"],
"structs": {
"admin" : {
"fields" : {
"related_journal" : {"coerce" : "unicode"},
"editor_group" : {"coerce" : "unicode"},
"editor" : {"coerce" : "unicode"},
"owner" : {"coerce" : "unicode"},
"seal" : {"coerce" : "unicode"}
},
"lists": {
"notes" : {"contains" : "object"},
}
}
}
}
INCOMING_APPLICATION_REQUIREMENTS = {
"required" : ["admin", "bibjson"],
"structs": {
"bibjson": {
"required": [
"copyright",
"deposit_policy",
"editorial",
"eissn",
"keywords",
"language",
"license",
"ref",
"pid_scheme",
"pissn",
"plagiarism",
"preservation",
"publication_time_weeks",
"publisher",
"ref",
"oa_start",
"other_charges",
"waiver",
"title"
],
"structs": {
"copyright": {
"required" : ["url"]
},
"editorial": {
"required" : ["review_process", "review_url"]
},
"plagiarism": {
"required": ["detection","url"]
},
"publisher": {
"required": ["name"]
},
"ref": {
"required" : ["journal"]
}
}
}
}
}
class IncomingApplication(SeamlessMixin, swagger.SwaggerSupport):
"""
~~APIIncomingApplication:Model->Seamless:Library~~
"""
__type__ = "application"
__SEAMLESS_COERCE__ = COERCE_MAP
__SEAMLESS_STRUCT__ = [
OUTGOING_APPLICATION_STRUCT,
# FIXME: should this be here? It looks like it allows users to send administrative data to the system
# I have removed it as it was exposing incorrect data in the auto-generated documentation
# INTERNAL_APPLICATION_STRUCT,
_SHARED_STRUCT,
# FIXME: can we live without specifying required fields, since the form validation will handle this?
INCOMING_APPLICATION_REQUIREMENTS
]
def __init__(self, raw=None, **kwargs):
if raw is None:
super(IncomingApplication, self).__init__(silent_prune=False, check_required_on_init=False, **kwargs)
else:
super(IncomingApplication, self).__init__(raw=raw, silent_prune=False, **kwargs)
@property
def data(self):
return self.__seamless__.data
def custom_validate(self):
# only attempt to validate if this is not a blank object
if len(list(self.__seamless__.data.keys())) == 0:
return
if _check_for_script(self.data):
raise dataobj.ScriptTagFoundException(Messages.EXCEPTION_SCRIPT_TAG_FOUND)
# extract the p/e-issn identifier objects
pissn = self.data["bibjson"]["pissn"]
eissn = self.data["bibjson"]["eissn"]
# check that at least one of them appears and if they are different
if (pissn is None and eissn is None) or pissn == eissn:
raise seamless.SeamlessException("You must specify at least one of bibjson.pissn and bibjson.eissn, and they must be different")
# normalise the ids
if pissn is not None:
pissn = self._normalise_issn(pissn)
if eissn is not None:
eissn = self._normalise_issn(eissn)
# check they are not the same
if pissn is not None and eissn is not None:
if pissn == eissn:
raise seamless.SeamlessException("P-ISSN and E-ISSN should be different")
# A link to the journal homepage is required
#
if self.data["bibjson"]["ref"]["journal"] is None or self.data["bibjson"]["ref"]["journal"] == "":
raise seamless.SeamlessException("You must specify the journal homepage in bibjson.ref.journal")
# if plagiarism detection is done, then the url is a required field
if self.data["bibjson"]["plagiarism"]["detection"] is True:
url = self.data["bibjson"]["plagiarism"]["url"]
if url is None:
raise seamless.SeamlessException("In this context bibjson.plagiarism.url is required")
# if license_display contains "embed" or "display", then the license display example url is required
art = self.data["bibjson"]["article"]
if "embed" in art["license_display"] or "display" in art["license_display"]:
if art["license_display_example_url"] is None or art["license_display_example_url"] == "":
raise seamless.SeamlessException("In this context bibjson.article.license_display_example_url is required")
# if the author does not hold the copyright the url is optional, otherwise it is required
if self.data["bibjson"]["copyright"]["author_retains"] is not False:
if self.data["bibjson"]["copyright"]["url"] is None or self.data["bibjson"]["copyright"]["url"] == "":
raise seamless.SeamlessException("In this context bibjson.copyright.url is required")
# check the number of keywords is no more than 6
if len(self.data["bibjson"]["keywords"]) > 6:
raise seamless.SeamlessException("bibjson.keywords may only contain a maximum of 6 keywords")
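# Worked example (assumed payload, derived from the checks above): an application
# with identical identifiers, e.g.
#   {"bibjson": {"pissn": "1234-5678", "eissn": "1234-5678", ...}}
# fails the very first identifier check with "You must specify at least one of
# bibjson.pissn and bibjson.eissn, and they must be different", before any
# normalisation is attempted.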
def _normalise_issn(self, issn):
issn = issn.upper()
if len(issn) > 8: return issn
if len(issn) == 8:
if "-" in issn: return "0" + issn
else: return issn[:4] + "-" + issn[4:]
if len(issn) < 8:
if "-" in issn: return ("0" * (9 - len(issn))) + issn
else:
issn = ("0" * (8 - len(issn))) + issn
return issn[:4] + "-" + issn[4:]
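# Worked examples (derived from the branches above, not in the source):
#   _normalise_issn("12345678") -> "1234-5678"   (8 chars, no dash: insert the dash)
#   _normalise_issn("123-4567") -> "0123-4567"   (8 chars with a dash: left-pad one zero)
#   _normalise_issn("1234567")  -> "0123-4567"   (short, no dash: zero-pad to 8, then insert the dash)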
def to_application_model(self, existing=None):
nd = deepcopy(self.data)
if existing is None:
return models.Suggestion(**nd)
else:
nnd = seamless.SeamlessMixin.extend_struct(self._struct, nd)
return models.Suggestion(**nnd)
@property
def id(self):
return self.__seamless__.get_single("id")
def set_id(self, id=None):
if id is None:
id = self.makeid()
self.__seamless__.set_with_struct("id", id)
def set_created(self, date=None):
if date is None:
date = dates.now()
self.__seamless__.set_with_struct("created_date", date)
@property
def created_date(self):
return self.__seamless__.get_single("created_date")
@property
def created_timestamp(self):
return self.__seamless__.get_single("created_date", coerce=coerce.to_datestamp())
def set_last_updated(self, date=None):
if date is None:
date = dates.now()
self.__seamless__.set_with_struct("last_updated", date)
@property
def last_updated(self):
return self.__seamless__.get_single("last_updated")
@property
def last_updated_timestamp(self):
return self.__seamless__.get_single("last_updated", coerce=coerce.to_datestamp())
def set_last_manual_update(self, date=None):
if date is None:
date = dates.now()
self.__seamless__.set_with_struct("last_manual_update", date)
@property
def last_manual_update(self):
return self.__seamless__.get_single("last_manual_update")
@property
def last_manual_update_timestamp(self):
return self.__seamless__.get_single("last_manual_update", coerce=coerce.to_datestamp())
def has_been_manually_updated(self):
lmut = self.last_manual_update_timestamp
if lmut is None:
return False
return lmut > datetime.utcfromtimestamp(0)
def has_seal(self):
return self.__seamless__.get_single("admin.seal", default=False)
def set_seal(self, value):
self.__seamless__.set_with_struct("admin.seal", value)
@property
def owner(self):
return self.__seamless__.get_single("admin.owner")
def set_owner(self, owner):
self.__seamless__.set_with_struct("admin.owner", owner)
def remove_owner(self):
self.__seamless__.delete("admin.owner")
@property
def editor_group(self):
return self.__seamless__.get_single("admin.editor_group")
def set_editor_group(self, eg):
self.__seamless__.set_with_struct("admin.editor_group", eg)
def remove_editor_group(self):
self.__seamless__.delete("admin.editor_group")
@property
def editor(self):
return self.__seamless__.get_single("admin.editor")
def set_editor(self, ed):
self.__seamless__.set_with_struct("admin.editor", ed)
def remove_editor(self):
self.__seamless__.delete('admin.editor')
def add_note(self, note, date=None, id=None):
if date is None:
date = dates.now()
obj = {"date": date, "note": note, "id": id}
self.__seamless__.delete_from_list("admin.notes", matchsub=obj)
if id is None:
obj["id"] = uuid.uuid4()
self.__seamless__.add_to_list_with_struct("admin.notes", obj)
def remove_note(self, note):
self.__seamless__.delete_from_list("admin.notes", matchsub=note)
def set_notes(self, notes):
self.__seamless__.set_with_struct("admin.notes", notes)
def remove_notes(self):
self.__seamless__.delete("admin.notes")
@property
def notes(self):
return self.__seamless__.get_list("admin.notes")
@property
def ordered_notes(self):
notes = self.notes
clusters = {}
for note in notes:
if note["date"] not in clusters:
clusters[note["date"]] = [note]
else:
clusters[note["date"]].append(note)
ordered_keys = sorted(list(clusters.keys()), reverse=True)
ordered = []
for key in ordered_keys:
clusters[key].reverse()
ordered += clusters[key]
return ordered
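# Sketch of the ordering (assumed input, derived from the logic above): notes are
# clustered by date, date clusters are sorted newest-first, and notes sharing a
# date come back in reverse insertion order:
#   input:  [{"date": "2021-01-01", "note": "a"}, {"date": "2021-01-02", "note": "b"},
#            {"date": "2021-01-01", "note": "c"}]
#   output: [{"date": "2021-01-02", "note": "b"}, {"date": "2021-01-01", "note": "c"},
#            {"date": "2021-01-01", "note": "a"}]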
def bibjson(self):
bj = self.__seamless__.get_single("bibjson")
if bj is None:
self.__seamless__.set_single("bibjson", {})
bj = self.__seamless__.get_single("bibjson")
return JournalLikeBibJSON(bj)
def set_bibjson(self, bibjson):
bibjson = bibjson.data if isinstance(bibjson, JournalLikeBibJSON) else bibjson
self.__seamless__.set_with_struct("bibjson", bibjson)
class OutgoingApplication(OutgoingCommonJournalApplication):
"""
~~APIOutgoingApplication:Model->APIOutgoingCommonJournalApplication:Model~~
~~->Seamless:Library~~
"""
__SEAMLESS_COERCE__ = COERCE_MAP
__SEAMLESS_STRUCT__ = [
OUTGOING_APPLICATION_STRUCT,
_SHARED_STRUCT
]
def __init__(self, raw=None, **kwargs):
super(OutgoingApplication, self).__init__(raw, silent_prune=True, **kwargs)
@classmethod
def from_model(cls, application):
assert isinstance(application, models.Suggestion)
return super(OutgoingApplication, cls).from_model(application)
@property
def data(self):
return self.__seamless__.data
| 35.467914 | 140 | 0.603166 | 1,462 | 13,265 | 5.214774 | 0.192886 | 0.055089 | 0.025708 | 0.040399 | 0.390346 | 0.320698 | 0.247639 | 0.195042 | 0.119753 | 0.084601 | 0 | 0.001893 | 0.283302 | 13,265 | 373 | 141 | 35.563003 | 0.800042 | 0.104109 | 0 | 0.216028 | 0 | 0 | 0.170979 | 0.013699 | 0 | 0 | 0 | 0.002681 | 0.003484 | 1 | 0.135889 | false | 0 | 0.038328 | 0.04878 | 0.278746 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e2bd7c8a213c65827af43039aa4e15d0d27034d2 | 4,086 | py | Python | app.py | joumanarahime/sqlalchemy-challenge | e86c3a05443a6d2664b2f9ab8145565c7fbce24b | [
"ADSL"
] | null | null | null | app.py | joumanarahime/sqlalchemy-challenge | e86c3a05443a6d2664b2f9ab8145565c7fbce24b | [
"ADSL"
] | null | null | null | app.py | joumanarahime/sqlalchemy-challenge | e86c3a05443a6d2664b2f9ab8145565c7fbce24b | [
"ADSL"
] | null | null | null | import numpy as np
import pandas as pd
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
#engine = create_engine("sqlite:///Resources/hawaii.sqlite")
engine = create_engine("sqlite:////Users/joumanarahime/Documents/Vanderbilt BootCamp/sqlalchemy-challenge/Resources/hawaii.sqlite")
Base=automap_base()
Base.prepare(engine, reflect=True)
Base.classes.keys()
Measurement = Base.classes.measurement
Station = Base.classes.station
session=Session(engine)
max_date = session.execute('select MAX(date) from measurement').fetchall()
max_date = max_date[0][0]
# Calculate the date 1 year ago from the last data point in the database
date_stamp = dt.datetime.strptime(max_date,'%Y-%m-%d')
year = date_stamp.year
month = date_stamp.month
day = date_stamp.day
prior_year = f'{year-1}-{month:02d}-{day:02d}'
#Create the app
app = Flask(__name__)
# index route
@app.route("/")
def home():
"""List of all available api routes."""
return (
f"Available Routes:<br/>"
f"------------------------<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/2016-01-30<br/>"
f"/api/v1.0/2016-01-30/2017-01-30"
)
# 4. /api/v1.0/precipitation
@app.route("/api/v1.0/precipitation")
def prec():
session=Session(engine)
results = session.query(Measurement.prcp, Measurement.date).filter(Measurement.date > prior_year).all()
session.close()
prec_data=[]
for result in results:
prec_dict = {result.date: result.prcp }
prec_data.append(prec_dict)
return jsonify(prec_data)
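# Example response shape (assumed values, not in the source): the route returns a
# JSON list of single-pair dicts keyed by date, e.g.
#   [{"2016-08-24": 0.08}, {"2016-08-24": 2.15}, ...]
# A date can appear more than once because the query spans all stations without grouping.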
@app.route("/api/v1.0/stations")
def stations():
session=Session(engine)
results= session.execute('select station, count(*) as count from measurement group by station order by count(station) desc ').fetchall()
station_data=[]
for result in results:
station_dict = {result.station: result.count}
station_data.append(station_dict)
return jsonify(station_data)
@app.route("/api/v1.0/tobs")
def tobs():
session=Session(engine)
cal_temp = session.execute(f"select date, min(tobs), avg(tobs), max(tobs) from measurement where date> '{prior_year}'").fetchall()
temp_dict= {
"Date": cal_temp[0][0],
"Low Temp": cal_temp[0][1],
"Avg Temp": cal_temp[0][2],
"Highest Temp": cal_temp[0][3]
}
return jsonify(temp_dict)
@app.route("/api/v1.0/<start>")
def start_date(start):
session=Session(engine)
sel= [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]
results= (session.query(*sel)
.filter(func.strftime("%Y-%m-%d", Measurement.date)>=start)
.all())
dates=[]
for result in results:
start_dict={
"Date": start,
"Low Temp": result[0],
"Avg Temp": result[1],
"Highest Temp": result[2]
}
dates.append(start_dict)
return jsonify(dates)
@app.route("/api/v1.0/<start>/<end>")
def start_end_date(start, end):
session=Session(engine)
sel= [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]
results= (session.query(*sel)
.filter(func.strftime("%Y-%m-%d", Measurement.date)>=start)
.filter(func.strftime("%Y-%m-%d", Measurement.date)<=end)
.all())
dates=[]
for result in results:
startEnd_dict={
"Start Date": start,
"End Date": end,
"Low Temp": result[0],
"Avg Temp": result[1],
"Highest Temp": result[2]
}
dates.append(startEnd_dict)
return jsonify(dates)
if __name__ == "__main__":
app.run(debug=True) | 27.986301 | 140 | 0.610622 | 526 | 4,086 | 4.642586 | 0.226236 | 0.022523 | 0.027027 | 0.01638 | 0.314087 | 0.251433 | 0.199836 | 0.199836 | 0.171171 | 0.171171 | 0 | 0.022172 | 0.238375 | 4,086 | 146 | 141 | 27.986301 | 0.762532 | 0.053108 | 0 | 0.271845 | 0 | 0.009709 | 0.201036 | 0.081865 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058252 | false | 0 | 0.07767 | 0 | 0.194175 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
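# Usage sketch (assumed, not in the source): with the server running locally on
# Flask's default port, the dynamic routes take ISO dates, e.g.
#   curl http://127.0.0.1:5000/api/v1.0/2016-01-30
#   curl http://127.0.0.1:5000/api/v1.0/2016-01-30/2017-01-30
# Each returns the min/avg/max temperatures over the requested window.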